filename | text
---|---
the-stack_0_26020
|
import threading
import cv2
import numpy as np
class ImageThread:
def __init__(self, URL):
# threading.Thread.__init__(self)
self.URL = URL
self.cap = cv2.VideoCapture(URL)
        _, self.frame = self.cap.read()
self.stopped = False
def start(self):
threading.Thread(target=self.update, args=()).start()
print("START")
return self
def update(self):
while not self.stopped:
            _, self.frame = self.cap.read()
def read(self):
return self.frame
def stop(self):
self.stopped = True
def calibrate(frame):
# cv2.imshow("Calibration", frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2Luv)
r = cv2.selectROI("ROI", frame)
cv2.destroyWindow("ROI")
imCrop = frame[int(r[1]) : int(r[1] + r[3]), int(r[0]) : int(r[0] + r[2])]
# Use k-means clustering to create a palette
# with the most representative colors in the region
pixels = np.float32(imCrop.reshape(-1, 3))
n_colors = 5
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, 0.1)
flags = cv2.KMEANS_RANDOM_CENTERS
_, labels, palette = cv2.kmeans(pixels, n_colors, None, criteria, 10, flags)
_, counts = np.unique(labels, return_counts=True)
dominant = palette[np.argmax(counts)]
return dominant
def mask(img, color, thresh_delta=10):
kernel = np.ones((5, 5), np.uint8)
cnv_img = cv2.cvtColor(img, cv2.COLOR_BGR2Luv)
lower = color - thresh_delta
upper = color + thresh_delta
masked_img = cv2.inRange(cnv_img, lower, upper)
masked_img = cv2.dilate(masked_img, kernel, iterations=5)
edges = cv2.Canny(masked_img, 75, 200)
# find contours in the thresholded image
contours, hierarchy = cv2.findContours(
edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
)
if len(contours):
c = [cv2.convexHull(cnt) for cnt in contours]
c = max(c, key=cv2.contourArea)
cv2.drawContours(img, c, -1, (0, 255, 0), 2)
        M = cv2.moments(c)
        if M["m00"] != 0:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
        else:
            cX, cY = 0, 0
cv2.circle(img, (cX, cY), 7, (0, 0, 255), -1)
else:
cX = 0
cY = 0
return img, (cX, cY)
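# A minimal usage sketch, assuming a reachable video source (the RTSP URL below
# is a placeholder) and a desktop environment for the OpenCV windows: grab
# frames on a background thread, calibrate the dominant color from a
# user-selected ROI once, then track that color frame by frame.
if __name__ == "__main__":
    stream = ImageThread("rtsp://example.local/stream").start()
    target_color = calibrate(stream.read())  # select the ROI once
    while True:
        # center is the centroid of the largest matching contour, (0, 0) if none
        tracked, center = mask(stream.read().copy(), target_color)
        cv2.imshow("Tracking", tracked)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    stream.stop()
    cv2.destroyAllWindows()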
|
the-stack_0_26021
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Helper script for building JAX's libjax easily.
import argparse
import collections
import hashlib
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import urllib
# pylint: disable=g-import-not-at-top
if hasattr(urllib, "urlretrieve"):
urlretrieve = urllib.urlretrieve
else:
import urllib.request
urlretrieve = urllib.request.urlretrieve
if hasattr(shutil, "which"):
which = shutil.which
else:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
def is_windows():
return sys.platform.startswith("win32")
def shell(cmd):
output = subprocess.check_output(cmd)
return output.decode("UTF-8").strip()
# Python
def get_python_bin_path(python_bin_path_flag):
"""Returns the path to the Python interpreter to use."""
path = python_bin_path_flag or sys.executable
return path.replace(os.sep, "/")
def get_python_version(python_bin_path):
version_output = shell(
[python_bin_path, "-c",
"import sys; print(\"{}.{}\".format(sys.version_info[0], "
"sys.version_info[1]))"])
major, minor = map(int, version_output.split("."))
return major, minor
def check_python_version(python_version):
if python_version < (3, 6):
print("JAX requires Python 3.6 or newer.")
sys.exit(-1)
# Bazel
BAZEL_BASE_URI = "https://github.com/bazelbuild/bazel/releases/download/3.7.2/"
BazelPackage = collections.namedtuple("BazelPackage", ["file", "sha256"])
bazel_packages = {
"Linux":
BazelPackage(
file="bazel-3.7.2-linux-x86_64",
sha256=
"70dc0bee198a4c3d332925a32d464d9036a831977501f66d4996854ad4e4fc0d"),
"Darwin":
BazelPackage(
file="bazel-3.7.2-darwin-x86_64",
sha256=
"80c82e93a12ba30021692b11c78007807e82383a673be1602573b944beb359ab"),
"Windows":
BazelPackage(
file="bazel-3.7.2-windows-x86_64.exe",
sha256=
"ecb696b1b9c9da6728d92fbfe8410bafb4b3a65c358980e49742233f33f74d10"),
}
def download_and_verify_bazel():
"""Downloads a bazel binary from Github, verifying its SHA256 hash."""
package = bazel_packages.get(platform.system())
if package is None:
return None
if not os.access(package.file, os.X_OK):
uri = BAZEL_BASE_URI + package.file
sys.stdout.write("Downloading bazel from: {}\n".format(uri))
def progress(block_count, block_size, total_size):
if total_size <= 0:
total_size = 170**6
progress = (block_count * block_size) / total_size
num_chars = 40
progress_chars = int(num_chars * progress)
sys.stdout.write("{} [{}{}] {}%\r".format(
package.file, "#" * progress_chars,
"." * (num_chars - progress_chars), int(progress * 100.0)))
tmp_path, _ = urlretrieve(uri, None,
progress if sys.stdout.isatty() else None)
sys.stdout.write("\n")
# Verify that the downloaded Bazel binary has the expected SHA256.
with open(tmp_path, "rb") as downloaded_file:
contents = downloaded_file.read()
digest = hashlib.sha256(contents).hexdigest()
if digest != package.sha256:
print(
"Checksum mismatch for downloaded bazel binary (expected {}; got {})."
.format(package.sha256, digest))
sys.exit(-1)
# Write the file as the bazel file name.
with open(package.file, "wb") as out_file:
out_file.write(contents)
# Mark the file as executable.
st = os.stat(package.file)
os.chmod(package.file,
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
return os.path.join(".", package.file)
def get_bazel_paths(bazel_path_flag):
"""Yields a sequence of guesses about bazel path. Some of sequence elements
can be None. The resulting iterator is lazy and potentially has a side
effects."""
yield bazel_path_flag
yield which("bazel")
yield download_and_verify_bazel()
def get_bazel_path(bazel_path_flag):
"""Returns the path to a Bazel binary, downloading Bazel if not found. Also,
it checks Bazel's version at lease newer than 2.0.0.
NOTE Manual version check is reasonably only for bazel < 2.0.0. Newer bazel
releases performs version check against .bazelversion (see for details
https://blog.bazel.build/2019/12/19/bazel-2.0.html#other-important-changes).
"""
for path in filter(None, get_bazel_paths(bazel_path_flag)):
if check_bazel_version(path):
return path
print("Cannot find or download bazel. Please install bazel.")
sys.exit(-1)
def check_bazel_version(bazel_path):
try:
version_output = shell([bazel_path, "--bazelrc=/dev/null", "version"])
except subprocess.CalledProcessError:
return False
match = re.search("Build label: *([0-9\\.]+)[^0-9\\.]", version_output)
if match is None:
return False
actual_ints = [int(x) for x in match.group(1).split(".")]
return actual_ints >= [2, 0, 0]
BAZELRC_TEMPLATE = """
# Flag to enable remote config
common --experimental_repo_remote_exec
build --repo_env PYTHON_BIN_PATH="{python_bin_path}"
build --action_env=PYENV_ROOT
build --python_path="{python_bin_path}"
build --repo_env TF_NEED_CUDA="{tf_need_cuda}"
build --action_env TF_CUDA_COMPUTE_CAPABILITIES="{cuda_compute_capabilities}"
build --repo_env TF_NEED_ROCM="{tf_need_rocm}"
build --action_env TF_ROCM_AMDGPU_TARGETS="{rocm_amdgpu_targets}"
build --distinct_host_configuration=false
build:posix --copt=-Wno-sign-compare
build -c opt
build:avx_posix --copt=-mavx
build:avx_posix --host_copt=-mavx
build:avx_windows --copt=/arch=AVX
build:native_arch_posix --copt=-march=native
build:native_arch_posix --host_copt=-march=native
build:mkl_open_source_only --define=tensorflow_mkldnn_contraction_kernel=1
# Sets the default Apple platform to macOS.
build --apple_platform_type=macos
build --macos_minimum_os=10.9
# Make Bazel print out all options from rc files.
build --announce_rc
build --define open_source_build=true
# Disable enabled-by-default TensorFlow features that we don't care about.
build:posix --define=no_aws_support=true
build:posix --define=no_gcp_support=true
build:posix --define=no_hdfs_support=true
build --define=no_kafka_support=true
build --define=no_ignite_support=true
build --define=grpc_no_ares=true
build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain
build:cuda --@local_config_cuda//:enable_cuda
build:rocm --crosstool_top=@local_config_rocm//crosstool:toolchain
build:rocm --define=using_rocm=true --define=using_rocm_hipcc=true
build:nonccl --define=no_nccl_support=true
build --spawn_strategy=standalone
build --strategy=Genrule=standalone
build --enable_platform_specific_config
# Tensorflow uses M_* math constants that only get defined by MSVC headers if
# _USE_MATH_DEFINES is defined.
build:windows --copt=/D_USE_MATH_DEFINES
build:windows --host_copt=/D_USE_MATH_DEFINES
# Make sure to include as little of windows.h as possible
build:windows --copt=-DWIN32_LEAN_AND_MEAN
build:windows --host_copt=-DWIN32_LEAN_AND_MEAN
build:windows --copt=-DNOGDI
build:windows --host_copt=-DNOGDI
# https://devblogs.microsoft.com/cppblog/announcing-full-support-for-a-c-c-conformant-preprocessor-in-msvc/
# otherwise, there will be some compiling error due to preprocessing.
build:windows --copt=/Zc:preprocessor
build:posix --cxxopt=-std=c++14
build:posix --host_cxxopt=-std=c++14
build:windows --cxxopt=/std:c++14
build:windows --host_cxxopt=/std:c++14
build:linux --config=posix
build:macos --config=posix
# Generate PDB files, to generate useful PDBs, in opt compilation_mode
# --copt /Z7 is needed.
build:windows --linkopt=/DEBUG
build:windows --host_linkopt=/DEBUG
build:windows --linkopt=/OPT:REF
build:windows --host_linkopt=/OPT:REF
build:windows --linkopt=/OPT:ICF
build:windows --host_linkopt=/OPT:ICF
build:windows --experimental_strict_action_env=true
# Suppress all warning messages.
build:short_logs --output_filter=DONT_MATCH_ANYTHING
# Workaround for gcc 10+ warnings related to upb.
# See https://github.com/tensorflow/tensorflow/issues/39467
build:linux --copt=-Wno-stringop-truncation
"""
def write_bazelrc(cuda_toolkit_path=None, cudnn_install_path=None,
cuda_version=None, cudnn_version=None, rocm_toolkit_path=None, **kwargs):
with open("../.bazelrc", "w") as f:
f.write(BAZELRC_TEMPLATE.format(**kwargs))
if cuda_toolkit_path:
f.write("build --action_env CUDA_TOOLKIT_PATH=\"{cuda_toolkit_path}\"\n"
.format(cuda_toolkit_path=cuda_toolkit_path))
if cudnn_install_path:
f.write("build --action_env CUDNN_INSTALL_PATH=\"{cudnn_install_path}\"\n"
.format(cudnn_install_path=cudnn_install_path))
if cuda_version:
f.write("build --action_env TF_CUDA_VERSION=\"{cuda_version}\"\n"
.format(cuda_version=cuda_version))
if cudnn_version:
f.write("build --action_env TF_CUDNN_VERSION=\"{cudnn_version}\"\n"
.format(cudnn_version=cudnn_version))
if rocm_toolkit_path:
f.write("build --action_env ROCM_PATH=\"{rocm_toolkit_path}\"\n"
.format(rocm_toolkit_path=rocm_toolkit_path))
BANNER = r"""
_ _ __ __
| | / \ \ \/ /
_ | |/ _ \ \ /
| |_| / ___ \/ \
\___/_/ \/_/\_\
"""
EPILOG = """
From the 'build' directory in the JAX repository, run
python build.py
or
python3 build.py
to download and build JAX's XLA (jaxlib) dependency.
"""
def _parse_string_as_bool(s):
"""Parses a string as a boolean argument."""
lower = s.lower()
if lower == "true":
return True
elif lower == "false":
return False
else:
raise ValueError("Expected either 'true' or 'false'; got {}".format(s))
def add_boolean_argument(parser, name, default=False, help_str=None):
"""Creates a boolean flag."""
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--" + name,
nargs="?",
default=default,
const=True,
type=_parse_string_as_bool,
help=help_str)
group.add_argument("--no" + name, dest=name, action="store_false")
def main():
cwd = os.getcwd()
parser = argparse.ArgumentParser(
description="Builds jaxlib from source.", epilog=EPILOG)
parser.add_argument(
"--bazel_path",
help="Path to the Bazel binary to use. The default is to find bazel via "
"the PATH; if none is found, downloads a fresh copy of bazel from "
"GitHub.")
parser.add_argument(
"--python_bin_path",
help="Path to Python binary to use. The default is the Python "
"interpreter used to run the build script.")
parser.add_argument(
"--target_cpu_features",
choices=["release", "native", "default"],
default="release",
help="What CPU features should we target? 'release' enables CPU "
"features that should be enabled for a release build, which on "
"x86-64 architectures enables AVX. 'native' enables "
"-march=native, which generates code targeted to use all "
"features of the current machine. 'default' means don't opt-in "
"to any architectural features and use whatever the C compiler "
"generates by default.")
add_boolean_argument(
parser,
"enable_mkl_dnn",
default=True,
help_str="Should we build with MKL-DNN enabled?")
add_boolean_argument(
parser,
"enable_cuda",
help_str="Should we build with CUDA enabled? Requires CUDA and CuDNN.")
add_boolean_argument(
parser,
"enable_tpu",
help_str="Should we build with Cloud TPU support enabled?")
add_boolean_argument(
parser,
"enable_rocm",
help_str="Should we build with ROCm enabled?")
parser.add_argument(
"--cuda_path",
default=None,
help="Path to the CUDA toolkit.")
parser.add_argument(
"--cudnn_path",
default=None,
help="Path to CUDNN libraries.")
parser.add_argument(
"--cuda_version",
default=None,
help="CUDA toolkit version, e.g., 11.1")
parser.add_argument(
"--cudnn_version",
default=None,
help="CUDNN version, e.g., 8")
parser.add_argument(
"--cuda_compute_capabilities",
default="3.5,5.2,6.0,6.1,7.0",
help="A comma-separated list of CUDA compute capabilities to support.")
parser.add_argument(
"--rocm_path",
default=None,
help="Path to the ROCm toolkit.")
parser.add_argument(
"--rocm_amdgpu_targets",
default="gfx803,gfx900,gfx906,gfx1010",
help="A comma-separated list of ROCm amdgpu targets to support.")
parser.add_argument(
"--bazel_startup_options",
action="append", default=[],
help="Additional startup options to pass to bazel.")
parser.add_argument(
"--bazel_options",
action="append", default=[],
help="Additional options to pass to bazel.")
parser.add_argument(
"--output_path",
default=os.path.join(cwd, "dist"),
help="Directory to which the jaxlib wheel should be written")
args = parser.parse_args()
if is_windows() and args.enable_cuda:
if args.cuda_version is None:
parser.error("--cuda_version is needed for Windows CUDA build.")
if args.cudnn_version is None:
parser.error("--cudnn_version is needed for Windows CUDA build.")
if args.enable_cuda and args.enable_rocm:
parser.error("--enable_cuda and --enable_rocm cannot be enabled at the same time.")
print(BANNER)
output_path = os.path.abspath(args.output_path)
os.chdir(os.path.dirname(__file__ or args.prog) or '.')
# Find a working Bazel.
bazel_path = get_bazel_path(args.bazel_path)
print("Bazel binary path: {}".format(bazel_path))
python_bin_path = get_python_bin_path(args.python_bin_path)
print("Python binary path: {}".format(python_bin_path))
python_version = get_python_version(python_bin_path)
print("Python version: {}".format(".".join(map(str, python_version))))
check_python_version(python_version)
print("MKL-DNN enabled: {}".format("yes" if args.enable_mkl_dnn else "no"))
print("Target CPU features: {}".format(args.target_cpu_features))
cuda_toolkit_path = args.cuda_path
cudnn_install_path = args.cudnn_path
rocm_toolkit_path = args.rocm_path
print("CUDA enabled: {}".format("yes" if args.enable_cuda else "no"))
if args.enable_cuda:
if cuda_toolkit_path:
print("CUDA toolkit path: {}".format(cuda_toolkit_path))
if cudnn_install_path:
print("CUDNN library path: {}".format(cudnn_install_path))
print("CUDA compute capabilities: {}".format(args.cuda_compute_capabilities))
if args.cuda_version:
print("CUDA version: {}".format(args.cuda_version))
if args.cudnn_version:
print("CUDNN version: {}".format(args.cudnn_version))
print("TPU enabled: {}".format("yes" if args.enable_tpu else "no"))
print("ROCm enabled: {}".format("yes" if args.enable_rocm else "no"))
if args.enable_rocm:
if rocm_toolkit_path:
print("ROCm toolkit path: {}".format(rocm_toolkit_path))
print("ROCm amdgpu targets: {}".format(args.rocm_amdgpu_targets))
write_bazelrc(
python_bin_path=python_bin_path,
tf_need_cuda=1 if args.enable_cuda else 0,
tf_need_rocm=1 if args.enable_rocm else 0,
cuda_toolkit_path=cuda_toolkit_path,
cudnn_install_path=cudnn_install_path,
cuda_compute_capabilities=args.cuda_compute_capabilities,
cuda_version=args.cuda_version,
cudnn_version=args.cudnn_version,
rocm_toolkit_path=rocm_toolkit_path,
rocm_amdgpu_targets=args.rocm_amdgpu_targets,
)
print("\nBuilding XLA and installing it in the jaxlib source tree...")
config_args = args.bazel_options
config_args += ["--config=short_logs"]
if args.target_cpu_features == "release":
if platform.uname().machine == "x86_64":
config_args += ["--config=avx_windows" if is_windows()
else "--config=avx_posix"]
elif args.target_cpu_features == "native":
if is_windows():
print("--target_cpu_features=native is not supported on Windows; ignoring.")
else:
config_args += ["--config=native_arch_posix"]
if args.enable_mkl_dnn:
config_args += ["--config=mkl_open_source_only"]
if args.enable_cuda:
config_args += ["--config=cuda"]
config_args += ["--define=xla_python_enable_gpu=true"]
if args.enable_tpu:
config_args += ["--define=with_tpu_support=true"]
if args.enable_rocm:
config_args += ["--config=rocm"]
config_args += ["--config=nonccl"]
config_args += ["--define=xla_python_enable_gpu=true"]
command = ([bazel_path] + args.bazel_startup_options +
["run", "--verbose_failures=true"] + config_args +
[":build_wheel", "--",
f"--output_path={output_path}"])
print(" ".join(command))
shell(command)
shell([bazel_path, "shutdown"])
if __name__ == "__main__":
main()
|
the-stack_0_26024
|
import os
import tarfile
import time
from cli.mmt.fileformats import CompactFileFormat
from testcases import ModernMTTestCase, TEST_RESOURCES
from testcases.utils.connectors import BackupDaemonConnector
class __BackupTest(ModernMTTestCase):
mmt_engine_archive = os.path.join(TEST_RESOURCES, 'multilingual_echo_engine.tar.gz')
backup_daemon_archive = os.path.join(TEST_RESOURCES, 'backup_daemon.tar.gz')
CORPUS_DE = CompactFileFormat('en', 'de', os.path.join(TEST_RESOURCES, 'onlinelearning', 'Memory.en__de.cfc'))
CORPUS_ES = CompactFileFormat('en', 'es', os.path.join(TEST_RESOURCES, 'onlinelearning', 'Memory.en__es.cfc'))
CORPUS_FR = CompactFileFormat('en', 'fr', os.path.join(TEST_RESOURCES, 'onlinelearning', 'Memory.en__fr.cfc'))
CORPUS_IT = CompactFileFormat('en', 'it', os.path.join(TEST_RESOURCES, 'onlinelearning', 'Memory.en__it.cfc'))
CORPUS_ZH = CompactFileFormat('en', 'zh', os.path.join(TEST_RESOURCES, 'onlinelearning', 'Memory.en__zh.cfc'))
ALL_CORPORA = [CORPUS_DE, CORPUS_ES, CORPUS_FR, CORPUS_IT, CORPUS_ZH]
BACKUP_CORPORA = [CORPUS_DE, CORPUS_ES, CORPUS_IT, CORPUS_ZH]
MEMORY_CORPORA = [CORPUS_ES, CORPUS_IT, CORPUS_ZH]
@staticmethod
def map_lang(lang):
if lang.startswith('es'):
if lang == 'es' or lang == 'es-ES':
return 'es-ES'
else:
return 'es-MX'
elif lang.startswith('zh'):
if lang == 'zh-HK' or lang == 'zh-TW':
return 'zh-TW'
else:
return 'zh-CN'
else:
return lang.split('-')[0]
def setUp(self):
super().setUp()
self.backup_daemon = BackupDaemonConnector(self.__class__.__name__ + '_backup')
if self.backup_daemon_archive is not None:
self.backup_daemon.delete()
os.makedirs(self.backup_daemon.engine.path)
tar = tarfile.open(self.backup_daemon_archive, 'r:gz')
tar.extractall(self.backup_daemon.engine.path)
tar.close()
self.backup_daemon.start()
def tearDown(self):
super().tearDown()
self.backup_daemon.stop()
self.backup_daemon.delete()
# Assertion
def assertInContent(self, content, element):
element = ''.join(element.split())
content = [''.join(line.split()) for line in content]
self.assertIn(element, content)
def assertInParallelContent(self, content, sentence, translation):
sentence = ''.join(sentence.split())
translation = ''.join(translation.split())
content = [(''.join(s.split()), ''.join(t.split())) for s, t in content]
self.assertIn((sentence, translation), content)
# Utils
@staticmethod
def _update_of(tgt_line):
if int(tgt_line[-1]) % 2 == 0:
return tgt_line + ' - UPDATE'
else:
return None
class BackupTest(__BackupTest):
def _send_updates(self, memories):
for corpus in self.ALL_CORPORA:
memory = memories[corpus.name]
memory_id = int(memory['id'])
job = None
with corpus.reader_with_languages() as reader:
for src_lang, tgt_lang, src_line, tgt_line in reader:
updated_tgt_line = self._update_of(tgt_line)
if updated_tgt_line is not None:
job = self.mmt.api.replace_in_memory(src_lang, tgt_lang, memory_id,
src_line, updated_tgt_line, src_line, tgt_line)
if job is not None:
self.mmt.wait_import_job(job)
fr_memory = memories['Memory.en__fr']
self.mmt.api.delete_memory(fr_memory['id'])
# Tests
def test_backup_import_with_all_language_combinations(self):
memories = {}
for corpus in self.ALL_CORPORA:
memory = self.mmt.api.create_memory(corpus.name)
job = self.mmt.api.import_into_memory(memory['id'], compact=corpus.file_path)
self.mmt.wait_import_job(job)
memories[corpus.name] = memory
self._send_updates(memories)
self._verify_index_integrity(memories)
def test_backup_add_with_all_language_combinations(self):
memories = {}
for corpus in self.ALL_CORPORA:
memory = self.mmt.api.create_memory(corpus.name)
job = None
with corpus.reader_with_languages() as reader:
for src_lang, tgt_lang, src_line, tgt_line in reader:
job = self.mmt.api.append_to_memory(src_lang, tgt_lang, memory['id'], src_line, tgt_line)
if job is not None:
self.mmt.wait_import_job(job)
memories[corpus.name] = memory
self._send_updates(memories)
self._verify_index_integrity(memories)
def _verify_index_integrity(self, memories):
time.sleep(10) # wait to ensure backup engine has completed
# Dump engine content
self.mmt.stop()
self.backup_daemon.stop()
translation_memory = self.backup_daemon.dump_translation_memory()
backup_memory = self.backup_daemon.dump_backup_translation_memory()
# Verify translation memory
self.assertEqual(3, len(translation_memory))
for corpus in self.MEMORY_CORPORA:
memory = memories[corpus.name]
memory_id = int(memory['id'])
self.assertIn(memory_id, translation_memory)
content = translation_memory[memory_id]
with corpus.reader_with_languages() as reader:
for src_lang, tgt_lang, src_line, tgt_line in reader:
src_lang, tgt_lang = self.map_lang(src_lang), self.map_lang(tgt_lang)
updated_tgt_line = self._update_of(tgt_line)
self.assertIn((src_lang, tgt_lang, src_line, updated_tgt_line or tgt_line), content)
# Verify backup memory
self.assertEqual(4, len(backup_memory))
for corpus in self.BACKUP_CORPORA:
memory = memories[corpus.name]
memory_id = int(memory['id'])
self.assertIn(memory_id, backup_memory)
content = backup_memory[memory_id]
with corpus.reader_with_languages() as reader:
for src_lang, tgt_lang, src_line, tgt_line in reader:
updated_tgt_line = self._update_of(tgt_line)
self.assertIn((src_lang, tgt_lang, src_line, updated_tgt_line or tgt_line), content)
|
the-stack_0_26025
|
import re
str = "I study python 3.8 every_day"
# ---------- match ----------
m1 = re.match(r"I", str)
m2 = re.match(r"\w", str)
m3 = re.match(r".", str)
m4 = re.match(r"\S", str)
print(m1.group())
print(m2.group())
print(m3.group())
print(m4.group())
# ---------- search ----------
s1 = re.search(r"study", str)
s2 = re.search(r"s\w+", str)
s3 = re.search(r"I (\w+)", str)
s4 = re.search(r"y", str)
print(s1.group())
print(s2.group())
print(s3.group())
print(s4.group())
# ---------- findall ----------
f1 = re.findall(r"y", str)
f2 = re.findall(r"python 3.8", str)
f3 = re.findall(r"p\w+ 3.8",str)
f4 = re.findall(r"p.+\d", str)
print(f1)
print(f2)
print(f3)
print(f4)
# ---------- sub ----------
su1 = re.sub(r"every_day", r"EveryDay", str)
su2 = re.sub(r"e\w+", r"EveryDay", str)
print(su1)
print(su2)
# ---------- test ----------
str1 = "<div><a class='title' href='https://www.baidu.com'>百度baidu</a></div>"
t1 = re.findall(r"百度",str1)
t2 = re.findall(r"[\u4e00-\u9fa5]",str1)
t3 = re.findall(r"[\u4e00-\u9fa5]+\w+",str1)
t4 = re.findall(r"<a class='title' href='https://www.baidu.com'>(.*)</a>",str1)
t5 = re.sub(r"div","span",str1)
t6 = re.sub(r'<div>(<a .+a>)</div>', r'<span>\1</span>', str1)
print(t6)
|
the-stack_0_26026
|
from typing import List
from canvasapi.course import Course
from canvasapi.requester import Requester
from canvasapi.util import combine_kwargs, get_institution_url
def get_course_stream(course_id: int, base_url: str, access_token: str, **kwargs: dict) -> dict:
"""
Parameters
----------
course_id : `int`
Course id
base_url : `str`
Base URL of the Canvas instance's API
access_token : `str`
API key to authenticate requests with
Returns
-------
`dict`
JSON response for course activity stream
"""
access_token = access_token.strip()
base_url = get_institution_url(base_url)
requester = Requester(base_url, access_token)
response = requester.request(
"GET",
f"courses/{course_id}/activity_stream",
_kwargs=combine_kwargs(**kwargs)
)
return response.json()
def get_course_url(course_id: str, base_url) -> str:
"""
Parameters
----------
course_id : `str`
Course id
base_url : `str`
Base URL of the Canvas instance's API
Returns
-------
`str`
URL of course page
"""
base_url = get_institution_url(base_url)
return f"{base_url}/courses/{course_id}"
def get_staff_ids(course: Course) -> List[int]:
"""
Parameters
----------
course : `Course`
The course to get staff IDs for
Returns
-------
`List[int]`
A list of the IDs of all professors and TAs in the given course.
"""
staff = course.get_users(enrollment_type=["teacher", "ta"])
staff_ids = list(map(lambda user: user.id, staff))
return staff_ids
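# A hedged usage sketch; the base URL, token, and course id below are
# placeholders, and canvasapi.Canvas is assumed as the entry point for getting
# the Course object that get_staff_ids() expects.
#
#   from canvasapi import Canvas
#
#   base_url = "https://canvas.example.edu"
#   token = "YOUR_API_TOKEN"
#   print(get_course_url("12345", base_url))
#   stream = get_course_stream(12345, base_url, token)
#   staff = get_staff_ids(Canvas(base_url, token).get_course(12345))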
|
the-stack_0_26027
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from nova import exception
from nova.i18n import _
# Define the minimum and maximum version of the API across all of the
# REST API. The format of the version is:
# X.Y where:
#
# - X will only be changed if a significant backwards incompatible API
# change is made which affects the API as whole. That is, something
# that is only very very rarely incremented.
#
# - Y when you make any change to the API. Note that this includes
# semantic changes which may not affect the input or output formats or
# even originate in the API code layer. We are not distinguishing
# between backwards compatible and backwards incompatible changes in
# the versioning system. It must be made clear in the documentation as
# to what is a backwards compatible change and what is a backwards
# incompatible one.
#
# You must update the API version history string below with a one or
# two line description as well as update rest_api_version_history.rst
REST_API_VERSION_HISTORY = """REST API Version History:
* 2.1 - Initial version. Equivalent to v2.0 code
* 2.2 - Adds (keypair) type parameter for os-keypairs plugin
Fixes success status code for create/delete a keypair method
* 2.3 - Exposes additional os-extended-server-attributes
Exposes delete_on_termination for os-extended-volumes
* 2.4 - Exposes reserved field in os-fixed-ips.
* 2.5 - Allow server search option ip6 for non-admin
* 2.6 - Consolidate the APIs for getting remote consoles
* 2.7 - Check flavor type before add tenant access.
* 2.8 - Add new protocol for VM console (mks)
* 2.9 - Exposes lock information in server details.
* 2.10 - Allow admins to query, create and delete keypairs owned by any
user.
* 2.11 - Exposes forced_down attribute for os-services
* 2.12 - Exposes VIF net-id in os-virtual-interfaces
"""
# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = "2.1"
_MAX_API_VERSION = "2.12"
DEFAULT_API_VERSION = _MIN_API_VERSION
# NOTE(cyeoh): min and max versions declared as functions so we can
# mock them for unittests. Do not use the constants directly anywhere
# else.
def min_api_version():
return APIVersionRequest(_MIN_API_VERSION)
def max_api_version():
return APIVersionRequest(_MAX_API_VERSION)
def is_supported(req, min_version=_MIN_API_VERSION,
max_version=_MAX_API_VERSION):
"""Check if API request version satisfies version restrictions.
:param req: request object
:param min_version: minimal version of API needed for correct
request processing
:param max_version: maximum version of API needed for correct
request processing
    :returns: True if the request satisfies the minimal and maximum API
        version requirements, False otherwise.
"""
return (APIVersionRequest(max_version) >= req.api_version_request >=
APIVersionRequest(min_version))
class APIVersionRequest(object):
"""This class represents an API Version Request with convenience
methods for manipulation and comparison of version
numbers that we need to do to implement microversions.
"""
def __init__(self, version_string=None):
"""Create an API version request object.
:param version_string: String representation of APIVersionRequest.
Correct format is 'X.Y', where 'X' and 'Y' are int values.
None value should be used to create Null APIVersionRequest,
which is equal to 0.0
"""
self.ver_major = 0
self.ver_minor = 0
if version_string is not None:
match = re.match(r"^([1-9]\d*)\.([1-9]\d*|0)$",
version_string)
if match:
self.ver_major = int(match.group(1))
self.ver_minor = int(match.group(2))
else:
raise exception.InvalidAPIVersionString(version=version_string)
def __str__(self):
"""Debug/Logging representation of object."""
return ("API Version Request Major: %s, Minor: %s"
% (self.ver_major, self.ver_minor))
def is_null(self):
return self.ver_major == 0 and self.ver_minor == 0
def _format_type_error(self, other):
return TypeError(_("'%(other)s' should be an instance of '%(cls)s'") %
{"other": other, "cls": self.__class__})
def __lt__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) <
(other.ver_major, other.ver_minor))
def __eq__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) ==
(other.ver_major, other.ver_minor))
def __gt__(self, other):
if not isinstance(other, APIVersionRequest):
raise self._format_type_error(other)
return ((self.ver_major, self.ver_minor) >
(other.ver_major, other.ver_minor))
def __le__(self, other):
return self < other or self == other
def __ne__(self, other):
return not self.__eq__(other)
def __ge__(self, other):
return self > other or self == other
def matches(self, min_version, max_version):
"""Returns whether the version object represents a version
greater than or equal to the minimum version and less than
or equal to the maximum version.
@param min_version: Minimum acceptable version.
@param max_version: Maximum acceptable version.
@returns: boolean
If min_version is null then there is no minimum limit.
If max_version is null then there is no maximum limit.
If self is null then raise ValueError
"""
if self.is_null():
raise ValueError
if max_version.is_null() and min_version.is_null():
return True
elif max_version.is_null():
return min_version <= self
elif min_version.is_null():
return self <= max_version
else:
return min_version <= self <= max_version
def get_string(self):
"""Converts object to string representation which if used to create
an APIVersionRequest object results in the same version request.
"""
if self.is_null():
raise ValueError
return "%s.%s" % (self.ver_major, self.ver_minor)
|
the-stack_0_26028
|
# -*- coding: utf-8 -*-
VERSION = (4, 4, 2)
__version__ = '.'.join(map(str, VERSION))
def get_redis_connection(alias='default', write=True):
"""
    Helper used to obtain a raw redis client.
"""
try:
from django.core.cache import caches
except ImportError:
# Django <1.7
from django.core.cache import get_cache
else:
def get_cache(alias):
return caches[alias]
cache = get_cache(alias)
if not hasattr(cache, "client"):
        raise NotImplementedError("This backend does not support this feature")
    if not hasattr(cache.client, "get_client"):
        raise NotImplementedError("This backend does not support this feature")
return cache.client.get_client(write)
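# A hedged usage sketch, assuming a configured "default" django-redis cache
# backend; the key name and values are placeholders.
#
#   con = get_redis_connection("default")    # raw redis-py client
#   con.setex("example:token", 30, "value")  # SETEX: expire after 30 seconds
#   remaining = con.ttl("example:token")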
|
the-stack_0_26029
|
from bridges.color import *
from bridges.grid import *
import base64
##
# @brief This is a class in BRIDGES for representing an (n x n) grid.
#
# A ColorGrid is essentially an image. One can construct an image of
# a particular size using the ColorGrid() constructor to be either
# blank or filled with a particular Color depending on which
# constructor is called.
#
# \code{.py}
# grid = ColorGrid(rows, columns)
# grid.set(2, 3, Color("lightsalmon"))
# \endcode
#
# You can get a ColorGrid from an existing Bridges ColorGrid assignment using
# bridges.get_color_grid_from_assignment(bridges.get_username(), bridges.get_assignment_id(), 0)
#
# @author David Burlinson, Matthew McQuaigue
#
# @date 2018, 7/24/19
#
# Color grid tutorial at http://bridgesuncc.github.io/tutorials/Grid.html
#
class ColorGrid(Grid):
baseColor = Color(r=0, g=0, b=0, a=1.0)
def get_data_structure_type(self) -> str:
"""
Get the data structure type
Returns:
str : data structure type
"""
return "ColorGrid"
def __init__(self, rows: int = 10, cols: int = 10, color: Color = baseColor) -> None:
"""
Color Grid constructor
Args:
rows: number of rows in the grid
cols: number of columns in the grid
color: base color of the each grid pixel
Returns:
None
"""
super(ColorGrid, self).__init__(rows=rows, cols=cols)
self.base_color = color
self.grid_size = [rows, cols]
self.initialize_grid()
def initialize_grid(self) -> None:
"""
        Initialize the grid and populate it with the base color.
Returns:
None
"""
for i in range(self.grid_size[0]):
for j in range(self.grid_size[1]):
self.set(i, j, self.base_color)
def set(self, row: int, col: int, color: Color) -> None:
"""
Set the (row, col) element in the color grid
Args:
            row: which row to access
            col: which column to access
            color: background color for the cell at (row, col)
Returns:
None
"""
super(ColorGrid, self).set(row, col, color)
def get_rle(self) -> bytearray:
"""
Get the run length encoding of color grid
Returns:
bytearray
"""
img_bytes = bytearray()
count = 0
total_count = 0
pos = 0
last = self.grid[0][0]
while pos < self.grid_size[0] * self.grid_size[1]:
posY = pos / self.grid_size[1]
posX = pos % self.grid_size[1]
current = self.grid[int(posY)][int(posX)]
if count == 0:
count = 1
last = current
else:
if last == current:
count += 1
else:
total_count += count
img_bytes.append(count-1)
last = last.get_byte_representation()
for k in range(len(last)):
img_bytes.append(last[k])
count = 1
last = current
if count == 256:
total_count += count
img_bytes.append(count-1)
last = last.get_byte_representation()
for k in range(len(last)):
img_bytes.append(last[k])
count = 0
pos += 1
total_count += count
img_bytes.append(count-1)
last = last.get_byte_representation()
for k in range(len(last)):
img_bytes.append(last[k])
if total_count != self.grid_size[0] * self.grid_size[1]:
print("Something broke in getRLE construction")
return img_bytes
def get_raw(self) -> bytearray:
"""
Get raw encoding of color grid
Returns:
bytearray: representing the colors of grid cells
"""
img_bytes = bytearray()
for i in range(self.grid_size[0]):
if self.grid[i] is not None:
for j in range(self.grid_size[1]):
if self.grid[i][j] is not None:
color = self.grid[i][j]
color = color.get_byte_representation()
for k in range(len(color)):
img_bytes.append(color[k])
return img_bytes
def get_data_structure_representation(self) -> dict:
"""
Get the JSON representation of the color grid
Returns:
            dict: JSON-ready dict representing the color grid
"""
byte_buff = self.get_rle()
encoding = "RLE"
if len(byte_buff) > self.grid_size[0] * self.grid_size[1] * 4:
encoding = "RAW"
byte_buff = self.get_raw()
print("RAW ran")
else:
print("RLE ran")
json_dict = {
"encoding": encoding,
"nodes": [base64.b64encode(bytes(byte_buff)).decode()],
"dimensions": [self.grid_size[0], self.grid_size[1]]
}
return json_dict
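# A hedged usage sketch mirroring the constructor example in the class comment
# above; the color names are placeholders resolved by the bridges Color class.
#
#   grid = ColorGrid(10, 10, Color("white"))
#   grid.set(2, 3, Color("lightsalmon"))
#   rep = grid.get_data_structure_representation()
#   print(rep["encoding"], rep["dimensions"])  # "RLE" (or "RAW") and [10, 10]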
|
the-stack_0_26030
|
import csv
import os
import shutil
import subprocess
import tarfile
import urllib.request
import zipfile
def in_colab_shell():
"""Tests if the code is being executed within Google Colab."""
try:
import google.colab # pylint: disable=unused-variable
return True
except ImportError:
return False
def is_drive_mounted():
"""Checks whether Google Drive is mounted in Google Colab.
Returns:
bool: Returns True if Google Drive is mounted, False otherwise.
"""
drive_path = "/content/drive/My Drive"
if os.path.exists(drive_path):
return True
else:
return False
def download_from_url(url, out_file_name=None, out_dir=".", unzip=True, verbose=True):
"""Download a file from a URL (e.g., https://github.com/giswqs/whitebox/raw/master/examples/testdata.zip)
Args:
url (str): The HTTP URL to download.
out_file_name (str, optional): The output file name to use. Defaults to None.
out_dir (str, optional): The output directory to use. Defaults to '.'.
unzip (bool, optional): Whether to unzip the downloaded file if it is a zip file. Defaults to True.
verbose (bool, optional): Whether to display or not the output of the function
"""
in_file_name = os.path.basename(url)
if out_file_name is None:
out_file_name = in_file_name
out_file_path = os.path.join(os.path.abspath(out_dir), out_file_name)
if verbose:
print("Downloading {} ...".format(url))
try:
urllib.request.urlretrieve(url, out_file_path)
except Exception:
raise Exception("The URL is invalid. Please double check the URL.")
final_path = out_file_path
if unzip:
# if it is a zip file
if ".zip" in out_file_name:
if verbose:
print("Unzipping {} ...".format(out_file_name))
with zipfile.ZipFile(out_file_path, "r") as zip_ref:
zip_ref.extractall(out_dir)
final_path = os.path.join(
os.path.abspath(out_dir), out_file_name.replace(".zip", "")
)
# if it is a tar file
if ".tar" in out_file_name:
if verbose:
print("Unzipping {} ...".format(out_file_name))
with tarfile.open(out_file_path, "r") as tar_ref:
tar_ref.extractall(out_dir)
final_path = os.path.join(
                os.path.abspath(out_dir), out_file_name.replace(".tar", "")
)
if verbose:
print("Data downloaded to: {}".format(final_path))
return
def download_from_gdrive(gfile_url, file_name, out_dir=".", unzip=True, verbose=True):
"""Download a file shared via Google Drive
(e.g., https://drive.google.com/file/d/18SUo_HcDGltuWYZs1s7PpOmOq_FvFn04/view?usp=sharing)
Args:
gfile_url (str): The Google Drive shared file URL
file_name (str): The output file name to use.
out_dir (str, optional): The output directory. Defaults to '.'.
unzip (bool, optional): Whether to unzip the output file if it is a zip file. Defaults to True.
verbose (bool, optional): Whether to display or not the output of the function
"""
try:
from google_drive_downloader import GoogleDriveDownloader as gdd
except ImportError:
print("GoogleDriveDownloader package not installed. Installing ...")
subprocess.check_call(
["python", "-m", "pip", "install", "googledrivedownloader"]
)
from google_drive_downloader import GoogleDriveDownloader as gdd
file_id = gfile_url.split("/")[5]
if verbose:
print("Google Drive file id: {}".format(file_id))
dest_path = os.path.join(out_dir, file_name)
gdd.download_file_from_google_drive(file_id, dest_path, True, unzip)
return
def csv_points_to_shp(in_csv, out_shp, latitude="latitude", longitude="longitude"):
"""Converts a csv file containing points (latitude, longitude) into a shapefile.
Args:
in_csv (str): File path or HTTP URL to the input csv file. For example, https://raw.githubusercontent.com/giswqs/data/main/world/world_cities.csv
out_shp (str): File path to the output shapefile.
latitude (str, optional): Column name for the latitude column. Defaults to 'latitude'.
longitude (str, optional): Column name for the longitude column. Defaults to 'longitude'.
"""
import whitebox
if in_csv.startswith("http") and in_csv.endswith(".csv"):
out_dir = os.path.join(os.path.expanduser("~"), "Downloads")
out_name = os.path.basename(in_csv)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
download_from_url(in_csv, out_dir=out_dir)
in_csv = os.path.join(out_dir, out_name)
wbt = whitebox.WhiteboxTools()
in_csv = os.path.abspath(in_csv)
out_shp = os.path.abspath(out_shp)
if not os.path.exists(in_csv):
raise Exception("The provided csv file does not exist.")
with open(in_csv, encoding="utf-8") as csv_file:
reader = csv.DictReader(csv_file)
fields = reader.fieldnames
xfield = fields.index(longitude)
yfield = fields.index(latitude)
wbt.csv_points_to_vector(in_csv, out_shp, xfield=xfield, yfield=yfield, epsg=4326)
def csv_to_shp(in_csv, out_shp, latitude="latitude", longitude="longitude"):
"""Converts a csv file with latlon info to a point shapefile.
Args:
in_csv (str): The input csv file containing longitude and latitude columns.
out_shp (str): The file path to the output shapefile.
latitude (str, optional): The column name of the latitude column. Defaults to 'latitude'.
longitude (str, optional): The column name of the longitude column. Defaults to 'longitude'.
"""
import csv
import shapefile as shp
if in_csv.startswith("http") and in_csv.endswith(".csv"):
out_dir = os.path.join(os.path.expanduser("~"), "Downloads")
out_name = os.path.basename(in_csv)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
download_from_url(in_csv, out_dir=out_dir)
in_csv = os.path.join(out_dir, out_name)
out_dir = os.path.dirname(out_shp)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
try:
points = shp.Writer(out_shp, shapeType=shp.POINT)
with open(in_csv, encoding="utf-8") as csvfile:
csvreader = csv.DictReader(csvfile)
header = csvreader.fieldnames
[points.field(field) for field in header]
for row in csvreader:
points.point((float(row[longitude])), (float(row[latitude])))
points.record(*tuple([row[f] for f in header]))
out_prj = out_shp.replace(".shp", ".prj")
with open(out_prj, "w") as f:
prj_str = 'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.0174532925199433]] '
f.write(prj_str)
except Exception as e:
print(e)
def clone_repo(out_dir=".", unzip=True):
"""Clones the lidar GitHub repository.
Args:
out_dir (str, optional): Output folder for the repo. Defaults to '.'.
unzip (bool, optional): Whether to unzip the repository. Defaults to True.
"""
url = "https://github.com/giswqs/lidar/archive/master.zip"
filename = "lidar-master.zip"
download_from_url(url, out_file_name=filename, out_dir=out_dir, unzip=unzip)
def check_install(package):
"""Checks whether a package is installed. If not, it will install the package.
Args:
package (str): The name of the package to check.
"""
import subprocess
try:
__import__(package)
# print('{} is already installed.'.format(package))
except ImportError:
print("{} is not installed. Installing ...".format(package))
try:
subprocess.check_call(["python", "-m", "pip", "install", package])
except Exception as e:
print("Failed to install {}".format(package))
print(e)
print("{} has been installed successfully.".format(package))
def update_package():
"""Updates the lidar package from the lidar GitHub repository without the need to use pip or conda.
In this way, I don't have to keep updating pypi and conda-forge with every minor update of the package.
"""
import shutil
try:
download_dir = os.path.join(os.path.expanduser("~"), "Downloads")
if not os.path.exists(download_dir):
os.makedirs(download_dir)
clone_repo(out_dir=download_dir)
pkg_dir = os.path.join(download_dir, "lidar-master")
work_dir = os.getcwd()
os.chdir(pkg_dir)
if shutil.which("pip") is None:
cmd = "pip3 install ."
else:
cmd = "pip install ."
os.system(cmd)
os.chdir(work_dir)
print(
"\nPlease comment out 'lidar.update_package()' and restart the kernel to take effect:\nJupyter menu -> Kernel -> Restart & Clear Output"
)
except Exception as e:
raise Exception(e)
def check_package(name, URL=""):
try:
__import__(name.lower())
except Exception:
raise ImportError(
f"{name} is not installed. Please install it before proceeding. {URL}"
)
def is_tool(name):
"""Check whether `name` is on PATH and marked as executable."""
from shutil import which
return which(name) is not None
def random_string(string_length=3):
"""Generates a random string of fixed length.
Args:
string_length (int, optional): Fixed length. Defaults to 3.
Returns:
str: A random string
"""
import random
import string
# random.seed(1001)
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(string_length))
|
the-stack_0_26032
|
__description__ = \
"""
Drive a set of LED matrix panels.
"""
__author__ = "Michael J. Harms"
__date__ = "2017-01-01"
import numpy as np
class Panel:
"""
Hold the basic properties of an LED panel.
"""
def __init__(self,x_size=32,y_size=32):
"""
Initialize an instance of Panel.
"""
self._shape = (x_size,y_size)
self._offset = (0,0)
self._chain_offset = 0
self._transform_function = None
@property
def shape(self):
"""
The dimensions of the panel.
"""
return self._shape
@property
def offset(self):
"""
The offset of the panel pixels (from top-left of display).
"""
return self._offset
@offset.setter
def offset(self,offset):
if len(offset) != 2:
err = "Offset must have 2 dimensions.\n"
raise ValueError(err)
self._offset = tuple(offset)
@property
def chain_offset(self):
"""
The offset of the panel pixels in the chain (left to right).
"""
return self._chain_offset
@chain_offset.setter
def chain_offset(self,chain_offset):
if type(chain_offset) != int:
err = "Chain offset must be an integer.\n"
raise ValueError(err)
self._chain_offset = chain_offset
def _r90(self,m):
"""
Rotate by 90 degrees.
"""
return np.rot90(m,1)
def _r180(self,m):
"""
Rotate by 180 degrees.
"""
return np.rot90(m,2)
def _r270(self,m):
"""
Rotate by 270 degrees.
"""
return np.rot90(m,3)
def set_transform_function(self,transform_key):
"""
        Define the function used to transform this panel's sub-image. The
        transform key should be 0 (no rotation), 90, 180, or 270 degrees.
"""
options = { 0:None,
90:self._r90 ,
180:self._r180,
270:self._r270}
self._transform_function = options[transform_key]
def transform(self,some_matrix):
"""
Transform matrix appropriately.
"""
        if self._transform_function is None:
return some_matrix
return self._transform_function(some_matrix)
class Display:
"""
A collection of Panel instances arranged according to "layout" in space and
according to "chain" electronically. Each panel can also have a specified
rotation (0,90,180,270). After initializing the Panel, passing an RGB
(or RGBA) matrix to the "draw" method will then plot the image on the LED
panels.
"""
def __init__(self,layout,chain,rotation=(),backend="rgbmatrix"):
"""
layout: a 2D array (or 2D list) of Panel instaces indicating their
arrangement in space.
chain: a 1D array that has all Panel instances in layout indicating
how the panels are actually wired together as a chain.
rotation: a 1D array indicating the rotation to apply to each panel.
backend: how to plot. rgbmatrix will use the rgbmatrix library to
draw on LED panels. matplotlib will use matplotlib to plot
on a graph.
"""
self._layout = np.array(layout)
self._chain = np.array(chain)
# -------------- Do a bunch of sanity checks --------------------
if len(self._layout.shape) != 2:
err = "Layout must be a 2D array!\n"
raise ValueError(err)
if len(self._chain.shape) != 1:
err = "Chain must be a 1D array!\n"
raise ValueError(err)
chain_dict = dict([(c,0) for c in self._chain])
if len(chain_dict.keys()) != self._chain.shape[0]:
err = "Chain entries must be unique!\n"
raise ValueError(err)
for i in range(self._layout.shape[0]):
for j in range(self._layout.shape[1]):
try:
chain_dict.pop(self._layout[i,j])
except KeyError:
err = "Chain must contain all entries in layout!\n"
raise ValueError(err)
if len(chain_dict) != 0:
err = "Layout must have all entries in chain!\n"
raise ValueError(err)
if len(rotation) != 0 and len(rotation) != len(self._chain):
err = "Rotations must be specified for no panels or all panels.\n"
raise ValueError(err)
x_dims = []
y_dims = []
for s in self._chain:
x_dims.append(s.shape[0])
y_dims.append(s.shape[1])
if len(np.unique(x_dims)) != 1 or len(np.unique(y_dims)) != 1:
err = "Sub panels must all have the same dimensions.\n"
raise ValueError(err)
# -------------- End sanity checks --------------------
# Record size of each subpanel
self._subpanel_x_size = self._chain[0].shape[0]
self._subpanel_y_size = self._chain[0].shape[1]
# Figure out the total size of the panel
self._total_x_size = sum([p.shape[0] for p in self._layout[:,0]])
self._total_y_size = sum([p.shape[1] for p in self._layout[0,:]])
# Figure out offsets for the coordinates of each panel, mapping them to
# the global coordinates. This uses the top-left corner as the origin.
x_offset = 0
y_offset = 0
for i in range(self._layout.shape[0]):
for j in range(self._layout.shape[1]):
self._layout[i,j].offset = [x_offset,y_offset]
y_offset += self._subpanel_y_size
y_offset = 0
x_offset += self._subpanel_x_size
# Figure out where to place each panel in the chain
chain_offset = 0
for i, s in enumerate(self._chain):
s.chain_offset = chain_offset
chain_offset += s.shape[0]
if len(rotation) != 0:
s.set_transform_function(rotation[i])
        self._chain_matrix = np.zeros((self._subpanel_y_size,
                                       len(self._chain)*self._subpanel_x_size,
                                       3), dtype=int)
# Deal with graphical backend
if backend == "rgbmatrix":
self._backend = RgbmatrixBackend(self._subpanel_y_size,
len(self._chain),1)
elif backend == "matplotlib":
self._backend = MatplotlibBackend()
else:
err = "backend {} not recognized.\n".format(backend)
raise ValueError(err)
def draw(self,image):
"""
Take a matrix of RGB values and draw them using the chosen backend.
Use the specified layout, chain, and rotation to plot each chunk of
of the image on the appropriate panel with the appropriate orientation.
"""
# Make sure the image has the correct dimensions
if image.shape[0] != self._total_x_size or image.shape[1] != self._total_y_size:
local_shape = (self._total_x_size,self._total_y_size)
err = "Image dimensions ({}) do not match panel dimensions ({})\n".format(image.shape,
local_shape)
raise ValueError(err)
# Make sure the image has RGB channels
if image.shape[2] < 3:
err = "Image must have at least RGB channels\n"
raise ValueError(err)
# Map the matrix into the chain
for i, s in enumerate(self._chain):
chain_x_0 = 0
chain_x_1 = self._subpanel_y_size
chain_y_0 = s.chain_offset
chain_y_1 = s.chain_offset + self._subpanel_x_size
image_x_0 = s.offset[0]
image_x_1 = s.offset[0] + self._subpanel_x_size
image_y_0 = s.offset[1]
image_y_1 = s.offset[1] + self._subpanel_y_size
# Map and rotate
self._chain_matrix[chain_x_0:chain_x_1,
chain_y_0:chain_y_1,:3] = \
s.transform(image[image_x_0:image_x_1,
image_y_0:image_y_1,:3])
# Draw the image.
self._backend.draw(self._chain_matrix)
class Backend:
"""
Dummy Backend that, when subclassed allows plotting of matrices.
"""
def __init__(self):
pass
def draw(self,matrix):
pass
class MatplotlibBackend(Backend):
def __init__(self):
from matplotlib import pyplot as plt
self._plt = plt
def draw(self,matrix):
self._plt.imshow(matrix,interpolation="nearest")
self._plt.show()
class RgbmatrixBackend(Backend):
def __init__(self,rows,chain_length,num_parallel=1,pwmbits=11,brightness=40,corr_luminance=True):
"""
Initialize rgbmatrix.
"""
from PIL import Image
self._img = Image
from rgbmatrix import RGBMatrix
self._rows = int(rows)
self._chain_length = int(chain_length)
self._num_parallel = int(num_parallel)
self._pwmbits = int(round(pwmbits,0))
        if self._pwmbits < 0 or self._pwmbits > 11:
err = "pwmbits must be integer between 0 and 11.\n"
raise ValueError(err)
self._brightness = int(round(brightness,0))
if self._brightness < 0 or self._brightness > 100:
err = "Brightness must be integer between 0 and 100.\n"
raise ValueError(err)
self._corr_luminance = bool(corr_luminance)
self._matrix = RGBMatrix(self._rows,self._chain_length,self._num_parallel)
self._matrix.pwmBits = self._pwmbits
self._matrix.brightness = self._brightness
self._matrix.luminanceCorrect = self._corr_luminance
self._canvas = self._matrix.CreateFrameCanvas()
def draw(self,matrix):
"""
Draw the graphic on the panels.
"""
# Create a PIL image from the matrix
img = self._img.fromarray(np.uint8(matrix))
# Draw it.
self._canvas.SetImage(img,0,0)
self._canvas = self._matrix.SwapOnVSync(self._canvas)
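# A hedged sketch of describing a 2x2 arrangement of 32x32 panels whose chain
# (wiring) order differs from the physical layout, then drawing a random image
# with the matplotlib backend; the rgbmatrix backend would need real hardware.
#
#   a, b, c, d = Panel(), Panel(), Panel(), Panel()
#   display = Display(layout=[[a, b], [c, d]],
#                     chain=[b, a, d, c],        # electrical order, left to right
#                     rotation=(0, 0, 180, 180), # per panel, in chain order
#                     backend="matplotlib")
#   image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
#   display.draw(image)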
|
the-stack_0_26034
|
from os import getenv
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = getenv('DATABASE_URL', 'sqlite:///./test.db')
def get_engine() -> Engine:
if 'sqlite' in SQLALCHEMY_DATABASE_URL:
return create_engine(
SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
return create_engine(SQLALCHEMY_DATABASE_URL)
engine = get_engine()
SessionLocal = sessionmaker(
autocommit=False, autoflush=False, bind=engine
)
Base = declarative_base()
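# A hedged usage sketch of the session lifecycle built on the objects above;
# "Item" is a hypothetical model, not defined in this module.
#
#   from sqlalchemy import Column, Integer
#
#   class Item(Base):
#       __tablename__ = "items"
#       id = Column(Integer, primary_key=True)
#
#   Base.metadata.create_all(bind=engine)
#   db = SessionLocal()
#   try:
#       db.add(Item())
#       db.commit()
#   finally:
#       db.close()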
|
the-stack_0_26035
|
# -*- coding: utf-8 -*-
'''
Some of the utils used by salt
'''
# Import python libs
from __future__ import absolute_import, division, print_function
import contextlib
import copy
import collections
import datetime
import distutils.version # pylint: disable=import-error,no-name-in-module
import errno
import fnmatch
import hashlib
import imp
import json
import logging
import numbers
import os
import random
import re
import shlex
import shutil
import socket
import stat
import sys
import pstats
import tempfile
import time
import types
import warnings
import string
import subprocess
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
from salt.ext.six.moves.urllib.parse import urlparse # pylint: disable=no-name-in-module
# pylint: disable=redefined-builtin
from salt.ext.six.moves import range
from salt.ext.six.moves import zip
from salt.ext.six.moves import map
from stat import S_IMODE
# pylint: enable=import-error,redefined-builtin
try:
import cProfile
HAS_CPROFILE = True
except ImportError:
HAS_CPROFILE = False
# Import 3rd-party libs
try:
import Crypto.Random
HAS_CRYPTO = True
except ImportError:
HAS_CRYPTO = False
try:
import timelib
HAS_TIMELIB = True
except ImportError:
HAS_TIMELIB = False
try:
import parsedatetime
HAS_PARSEDATETIME = True
except ImportError:
HAS_PARSEDATETIME = False
try:
import fcntl
HAS_FCNTL = True
except ImportError:
# fcntl is not available on windows
HAS_FCNTL = False
try:
import win32api
HAS_WIN32API = True
except ImportError:
HAS_WIN32API = False
try:
import grp
HAS_GRP = True
except ImportError:
# grp is not available on windows
HAS_GRP = False
try:
import pwd
HAS_PWD = True
except ImportError:
# pwd is not available on windows
HAS_PWD = False
try:
import setproctitle
HAS_SETPROCTITLE = True
except ImportError:
HAS_SETPROCTITLE = False
try:
import ctypes
import ctypes.util
libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
res_init = libc.__res_init
HAS_RESINIT = True
except (ImportError, OSError, AttributeError, TypeError):
HAS_RESINIT = False
# Import salt libs
from salt.defaults import DEFAULT_TARGET_DELIM
import salt.defaults.exitcodes
import salt.log
import salt.version
from salt.utils.decorators import memoize as real_memoize
from salt.textformat import TextFormat
from salt.exceptions import (
CommandExecutionError, SaltClientError,
CommandNotFoundError, SaltSystemExit,
SaltInvocationError
)
log = logging.getLogger(__name__)
_empty = object()
def safe_rm(tgt):
'''
Safely remove a file
'''
try:
os.remove(tgt)
except (IOError, OSError):
pass
def is_empty(filename):
'''
Is a file empty?
'''
try:
return os.stat(filename).st_size == 0
except OSError:
# Non-existent file or permission denied to the parent dir
return False
def is_hex(value):
'''
    Returns True if value is a hexadecimal string, otherwise returns False
'''
try:
int(value, 16)
return True
except (TypeError, ValueError):
return False
def get_color_theme(theme):
'''
Return the color theme to use
'''
# Keep the heavy lifting out of the module space
import yaml
if not os.path.isfile(theme):
        log.warning('The named theme {0} is not available'.format(theme))
try:
with fopen(theme, 'rb') as fp_:
            colors = yaml.safe_load(fp_.read())
            if not isinstance(colors, dict):
                log.warning('The theme file {0} is not a dict'.format(theme))
                return {}
            ret = {}
            for color in colors:
                ret[color] = '\033[{0}m'.format(colors[color])
            return ret
except Exception:
log.warning('Failed to read the color theme {0}'.format(theme))
return {}
def get_colors(use=True, theme=None):
'''
Return the colors as an easy to use dict. Pass `False` to deactivate all
colors by setting them to empty strings. Pass a string containing only the
name of a single color to be used in place of all colors. Examples:
.. code-block:: python
colors = get_colors() # enable all colors
no_colors = get_colors(False) # disable all colors
red_colors = get_colors('RED') # set all colors to red
'''
colors = {
'BLACK': TextFormat('black'),
'DARK_GRAY': TextFormat('bold', 'black'),
'RED': TextFormat('red'),
'LIGHT_RED': TextFormat('bold', 'red'),
'GREEN': TextFormat('green'),
'LIGHT_GREEN': TextFormat('bold', 'green'),
'YELLOW': TextFormat('yellow'),
'LIGHT_YELLOW': TextFormat('bold', 'yellow'),
'BLUE': TextFormat('blue'),
'LIGHT_BLUE': TextFormat('bold', 'blue'),
'MAGENTA': TextFormat('magenta'),
'LIGHT_MAGENTA': TextFormat('bold', 'magenta'),
'CYAN': TextFormat('cyan'),
'LIGHT_CYAN': TextFormat('bold', 'cyan'),
'LIGHT_GRAY': TextFormat('white'),
'WHITE': TextFormat('bold', 'white'),
'DEFAULT_COLOR': TextFormat('default'),
'ENDC': TextFormat('reset'),
}
if theme:
colors.update(get_color_theme(theme))
if not use:
for color in colors:
colors[color] = ''
if isinstance(use, str):
# Try to set all of the colors to the passed color
if use in colors:
for color in colors:
# except for color reset
if color == 'ENDC':
continue
colors[color] = colors[use]
return colors
def get_context(template, line, num_lines=5, marker=None):
'''
Returns debugging context around a line in a given string
Returns:: string
'''
template_lines = template.splitlines()
num_template_lines = len(template_lines)
    # In a test, a single-line template can be reported with a bogus line
    # number like 357. Sanity check: if the given line is obviously out of
    # range, just return the entire template.
if line > num_template_lines:
return template
context_start = max(0, line - num_lines - 1) # subt 1 for 0-based indexing
context_end = min(num_template_lines, line + num_lines)
error_line_in_context = line - context_start - 1 # subtr 1 for 0-based idx
buf = []
if context_start > 0:
buf.append('[...]')
error_line_in_context += 1
buf.extend(template_lines[context_start:context_end])
if context_end < num_template_lines:
buf.append('[...]')
if marker:
buf[error_line_in_context] += marker
# warning: jinja content may contain unicode strings
# instead of utf-8.
buf = [to_str(i) if isinstance(i, six.text_type) else i for i in buf]
return '---\n{0}\n---'.format('\n'.join(buf))
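# Illustrative doctest-style example (not part of the original module) of the
# context window get_context() builds around line 4 of a seven-line template:
#
#     >>> print(get_context('a\nb\nc\nd\ne\nf\ng', 4, num_lines=1))
#     ---
#     [...]
#     c
#     d
#     e
#     [...]
#     ---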
def get_user():
'''
Get the current user
'''
if HAS_PWD:
return pwd.getpwuid(os.geteuid()).pw_name
else:
user_name = win32api.GetUserNameEx(win32api.NameSamCompatible)
if user_name[-1] == '$' and win32api.GetUserName() == 'SYSTEM':
# Make the system account easier to identify.
user_name = 'SYSTEM'
return user_name
def get_uid(user=None):
"""
Get the uid for a given user name. If no user given,
the current euid will be returned. If the user
does not exist, None will be returned. On
systems which do not support pwd or os.geteuid
it will return None.
"""
if not HAS_PWD:
result = None
elif user is None:
try:
result = os.geteuid()
except AttributeError:
result = None
else:
try:
u_struct = pwd.getpwnam(user)
except KeyError:
result = None
else:
result = u_struct.pw_uid
return result
def get_gid(group=None):
"""
Get the gid for a given group name. If no group given,
the current egid will be returned. If the group
does not exist, None will be returned. On
systems which do not support grp or os.getegid
it will return None.
"""
    if not HAS_GRP:
result = None
elif group is None:
try:
result = os.getegid()
except AttributeError:
result = None
else:
try:
g_struct = grp.getgrnam(group)
except KeyError:
result = None
else:
result = g_struct.gr_gid
return result
def _win_user_token_is_admin(user_token):
'''
Using the win32 api, determine if the user with token 'user_token' has
administrator rights.
See MSDN entry here:
http://msdn.microsoft.com/en-us/library/aa376389(VS.85).aspx
'''
class SID_IDENTIFIER_AUTHORITY(ctypes.Structure):
_fields_ = [
("byte0", ctypes.c_byte),
("byte1", ctypes.c_byte),
("byte2", ctypes.c_byte),
("byte3", ctypes.c_byte),
("byte4", ctypes.c_byte),
("byte5", ctypes.c_byte),
]
nt_authority = SID_IDENTIFIER_AUTHORITY()
nt_authority.byte5 = 5
SECURITY_BUILTIN_DOMAIN_RID = 0x20
DOMAIN_ALIAS_RID_ADMINS = 0x220
administrators_group = ctypes.c_void_p()
if ctypes.windll.advapi32.AllocateAndInitializeSid(
ctypes.byref(nt_authority),
2,
SECURITY_BUILTIN_DOMAIN_RID,
DOMAIN_ALIAS_RID_ADMINS,
0, 0, 0, 0, 0, 0,
ctypes.byref(administrators_group)) == 0:
raise Exception("AllocateAndInitializeSid failed")
try:
is_admin = ctypes.wintypes.BOOL()
if ctypes.windll.advapi32.CheckTokenMembership(
user_token,
administrators_group,
ctypes.byref(is_admin)) == 0:
raise Exception("CheckTokenMembership failed")
return is_admin.value != 0
finally:
ctypes.windll.advapi32.FreeSid(administrators_group)
def _win_current_user_is_admin():
'''
ctypes.windll.shell32.IsUserAnAdmin() is intentionally avoided due to this
function being deprecated.
'''
return _win_user_token_is_admin(0)
def get_specific_user():
'''
Get a user name for publishing. If you find the user is "root" attempt to be
more specific
'''
user = get_user()
if is_windows():
if _win_current_user_is_admin():
return 'sudo_{0}'.format(user)
else:
env_vars = ('SUDO_USER',)
if user == 'root':
for evar in env_vars:
if evar in os.environ:
return 'sudo_{0}'.format(os.environ[evar])
return user
def get_master_key(key_user, opts, skip_perm_errors=False):
if key_user == 'root':
if opts.get('user', 'root') != 'root':
key_user = opts.get('user', 'root')
if key_user.startswith('sudo_'):
key_user = opts.get('user', 'root')
if salt.utils.is_windows():
# The username may contain '\' if it is in Windows
# 'DOMAIN\username' format. Fix this for the keyfile path.
key_user = key_user.replace('\\', '_')
keyfile = os.path.join(opts['cachedir'],
'.{0}_key'.format(key_user))
# Make sure all key parent directories are accessible
salt.utils.verify.check_path_traversal(opts['cachedir'],
key_user,
skip_perm_errors)
try:
with salt.utils.fopen(keyfile, 'r') as key:
return key.read()
except (OSError, IOError):
# Fall back to eauth
return ''
def reinit_crypto():
'''
    When a fork arises, pycrypto needs to reinit
From its doc::
Caveat: For the random number generator to work correctly,
you must call Random.atfork() in both the parent and
child processes after using os.fork()
'''
if HAS_CRYPTO:
Crypto.Random.atfork()
def daemonize(redirect_out=True):
'''
Daemonize a process
'''
try:
pid = os.fork()
if pid > 0:
# exit first parent
reinit_crypto()
sys.exit(salt.defaults.exitcodes.EX_OK)
except OSError as exc:
log.error(
'fork #1 failed: {0} ({1})'.format(exc.errno, exc.strerror)
)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# decouple from parent environment
os.chdir('/')
# noinspection PyArgumentList
os.setsid()
os.umask(18)
# do second fork
try:
pid = os.fork()
if pid > 0:
reinit_crypto()
sys.exit(salt.defaults.exitcodes.EX_OK)
except OSError as exc:
log.error(
'fork #2 failed: {0} ({1})'.format(
exc.errno, exc.strerror
)
)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
reinit_crypto()
# A normal daemonization redirects the process output to /dev/null.
# Unfortunately when a python multiprocess is called the output is
# not cleanly redirected and the parent process dies when the
# multiprocessing process attempts to access stdout or err.
if redirect_out:
with fopen('/dev/null', 'r+') as dev_null:
os.dup2(dev_null.fileno(), sys.stdin.fileno())
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
def daemonize_if(opts):
'''
Daemonize a module function process if multiprocessing is True and the
process is not being called by salt-call
'''
if 'salt-call' in sys.argv[0]:
return
if not opts.get('multiprocessing', True):
return
if sys.platform.startswith('win'):
return
daemonize(False)
def profile_func(filename=None):
'''
Decorator for adding profiling to a nested function in Salt
'''
def proffunc(fun):
def profiled_func(*args, **kwargs):
logging.info('Profiling function {0}'.format(fun.__name__))
try:
profiler = cProfile.Profile()
retval = profiler.runcall(fun, *args, **kwargs)
profiler.dump_stats((filename or '{0}_func.profile'
.format(fun.__name__)))
except IOError:
logging.exception(
'Could not open profile file {0}'.format(filename)
)
return retval
return profiled_func
return proffunc
def rand_str(size=9999999999, hash_type=None):
'''
Return a random string
'''
if not hash_type:
hash_type = 'md5'
hasher = getattr(hashlib, hash_type)
return hasher(to_bytes(str(random.SystemRandom().randint(0, size)))).hexdigest()
def which(exe=None):
'''
Python clone of /usr/bin/which
'''
def _is_executable_file_or_link(exe):
        # os.X_OK alone doesn't suffice because a directory may be executable
return (os.access(exe, os.X_OK) and
(os.path.isfile(exe) or os.path.islink(exe)))
if exe:
if _is_executable_file_or_link(exe):
# executable in cwd or fullpath
return exe
ext_list = os.environ.get('PATHEXT', '.EXE').split(';')
@real_memoize
def _exe_has_ext():
'''
Do a case insensitive test if exe has a file extension match in
PATHEXT
'''
for ext in ext_list:
try:
pattern = r'.*\.' + ext.lstrip('.') + r'$'
re.match(pattern, exe, re.I).groups()
return True
except AttributeError:
continue
return False
    # Enhance the POSIX path for reliability in environments where $PATH may
    # be incomplete. This also keeps the order, so the first match wins when
    # probing for optional alternatives.
search_path = os.environ.get('PATH') and os.environ['PATH'].split(os.pathsep) or list()
for default_path in ['/bin', '/sbin', '/usr/bin', '/usr/sbin', '/usr/local/bin']:
if default_path not in search_path:
search_path.append(default_path)
os.environ['PATH'] = os.pathsep.join(search_path)
for path in search_path:
full_path = os.path.join(path, exe)
if _is_executable_file_or_link(full_path):
return full_path
elif is_windows() and not _exe_has_ext():
# On Windows, check for any extensions in PATHEXT.
# Allows both 'cmd' and 'cmd.exe' to be matched.
for ext in ext_list:
# Windows filesystem is case insensitive so we
# safely rely on that behavior
if _is_executable_file_or_link(full_path + ext):
return full_path + ext
log.trace('\'{0}\' could not be found in the following search path: \'{1}\''.format(exe, search_path))
else:
log.error('No executable was passed to be searched by salt.utils.which()')
return None
def which_bin(exes):
'''
Scan over some possible executables and return the first one that is found
'''
if not isinstance(exes, collections.Iterable):
return None
for exe in exes:
path = which(exe)
if not path:
continue
return path
return None
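# Illustrative usage sketch (not part of the original module); the resolved
# paths obviously depend on the host:
#
#     >>> which('ls')                        # e.g. '/bin/ls' on most Linux hosts
#     >>> which_bin(['doesnotexist', 'ls'])  # first executable found wins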
def activate_profile(test=True):
pr = None
if test:
if HAS_CPROFILE:
pr = cProfile.Profile()
pr.enable()
else:
log.error('cProfile is not available on your platform')
return pr
def output_profile(pr, stats_path='/tmp/stats', stop=False, id_=None):
if pr is not None and HAS_CPROFILE:
try:
pr.disable()
if not os.path.isdir(stats_path):
os.makedirs(stats_path)
date = datetime.datetime.now().isoformat()
if id_ is None:
id_ = rand_str(size=32)
ficp = os.path.join(stats_path, '{0}.{1}.pstats'.format(id_, date))
fico = os.path.join(stats_path, '{0}.{1}.dot'.format(id_, date))
ficn = os.path.join(stats_path, '{0}.{1}.stats'.format(id_, date))
if not os.path.exists(ficp):
pr.dump_stats(ficp)
with fopen(ficn, 'w') as fic:
pstats.Stats(pr, stream=fic).sort_stats('cumulative')
log.info('PROFILING: {0} generated'.format(ficp))
log.info('PROFILING (cumulative): {0} generated'.format(ficn))
pyprof = which('pyprof2calltree')
cmd = [pyprof, '-i', ficp, '-o', fico]
if pyprof:
                failed = False
                try:
                    pro = subprocess.Popen(
                        cmd, shell=False,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                except OSError:
                    failed = True
                else:
                    pro.wait()
                    if pro.returncode:
                        failed = True
                if failed:
                    log.error('PROFILING (dot) problem')
else:
log.info('PROFILING (dot): {0} generated'.format(fico))
log.trace('pyprof2calltree output:')
log.trace(to_str(pro.stdout.read()).strip() +
to_str(pro.stderr.read()).strip())
else:
log.info('You can run {0} for additional stats.'.format(cmd))
finally:
if not stop:
pr.enable()
return pr
def list_files(directory):
'''
Return a list of all files found under directory
'''
ret = set()
ret.add(directory)
for root, dirs, files in safe_walk(directory):
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return list(ret)
def gen_mac(prefix='AC:DE:48'):
'''
Generates a MAC address with the defined OUI prefix.
Common prefixes:
- ``00:16:3E`` -- Xen
- ``00:18:51`` -- OpenVZ
- ``00:50:56`` -- VMware (manually generated)
- ``52:54:00`` -- QEMU/KVM
- ``AC:DE:48`` -- PRIVATE
References:
- http://standards.ieee.org/develop/regauth/oui/oui.txt
- https://www.wireshark.org/tools/oui-lookup.html
- https://en.wikipedia.org/wiki/MAC_address
'''
return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix,
random.randint(0, 0xff),
random.randint(0, 0xff),
random.randint(0, 0xff))
def mac_str_to_bytes(mac_str):
'''
Convert a MAC address string into bytes. Works with or without separators:
b1 = mac_str_to_bytes('08:00:27:13:69:77')
b2 = mac_str_to_bytes('080027136977')
assert b1 == b2
assert isinstance(b1, bytes)
'''
if len(mac_str) == 12:
pass
elif len(mac_str) == 17:
sep = mac_str[2]
mac_str = mac_str.replace(sep, '')
else:
raise ValueError('Invalid MAC address')
if six.PY3:
mac_bytes = bytes(int(mac_str[s:s+2], 16) for s in range(0, 12, 2))
else:
mac_bytes = ''.join(chr(int(mac_str[s:s+2], 16)) for s in range(0, 12, 2))
return mac_bytes
def ip_bracket(addr):
'''
Convert IP address representation to ZMQ (URL) format. ZMQ expects
brackets around IPv6 literals, since they are used in URLs.
'''
if addr and ':' in addr and not addr.startswith('['):
return '[{0}]'.format(addr)
return addr
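# Illustrative doctest-style example (not part of the original module):
#
#     >>> ip_bracket('::1')
#     '[::1]'
#     >>> ip_bracket('127.0.0.1')
#     '127.0.0.1'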
def dns_check(addr, safe=False, ipv6=False):
'''
Return the ip resolved by dns, but do not exit on failure, only raise an
exception. Obeys system preference for IPv4/6 address resolution.
'''
error = False
lookup = addr
seen_ipv6 = False
try:
# issue #21397: force glibc to re-read resolv.conf
if HAS_RESINIT:
res_init()
hostnames = socket.getaddrinfo(
addr, None, socket.AF_UNSPEC, socket.SOCK_STREAM
)
if not hostnames:
error = True
else:
addr = False
for h in hostnames:
if h[0] == socket.AF_INET:
addr = ip_bracket(h[4][0])
break
elif h[0] == socket.AF_INET6:
if not ipv6:
seen_ipv6 = True
continue
addr = ip_bracket(h[4][0])
break
if not addr:
error = True
except TypeError:
        err = ('Attempt to resolve address \'{0}\' failed. Invalid or unresolvable address').format(lookup)
raise SaltSystemExit(code=42, msg=err)
except socket.error:
error = True
if error:
if seen_ipv6 and not addr:
err = ('DNS lookup of \'{0}\' failed, but ipv6 address ignored. Enable ipv6 in config to use it.').format(lookup)
else:
err = ('DNS lookup of \'{0}\' failed.').format(lookup)
if safe:
if salt.log.is_console_configured():
# If logging is not configured it also means that either
# the master or minion instance calling this hasn't even
# started running
log.error(err)
raise SaltClientError()
raise SaltSystemExit(code=42, msg=err)
return addr
def required_module_list(docstring=None):
'''
Return a list of python modules required by a salt module that aren't
in stdlib and don't exist on the current pythonpath.
'''
if not docstring:
return []
ret = []
modules = parse_docstring(docstring).get('deps', [])
for mod in modules:
try:
imp.find_module(mod)
except ImportError:
ret.append(mod)
return ret
def required_modules_error(name, docstring):
'''
Pretty print error messages in critical salt modules which are
missing deps not always in stdlib such as win32api on windows.
'''
modules = required_module_list(docstring)
if not modules:
return ''
filename = os.path.basename(name).split('.')[0]
msg = '\'{0}\' requires these python modules: {1}'
return msg.format(filename, ', '.join(modules))
def get_accumulator_dir(cachedir):
'''
Return the directory that accumulator data is stored in, creating it if it
doesn't exist.
'''
fn_ = os.path.join(cachedir, 'accumulator')
if not os.path.isdir(fn_):
# accumulator_dir is not present, create it
os.makedirs(fn_)
return fn_
def check_or_die(command):
'''
Simple convenience function for modules to use for gracefully blowing up
if a required tool is not available in the system path.
Lazily import `salt.modules.cmdmod` to avoid any sort of circular
dependencies.
'''
if command is None:
raise CommandNotFoundError('\'None\' is not a valid command.')
if not which(command):
raise CommandNotFoundError('\'{0}\' is not in the path'.format(command))
def backup_minion(path, bkroot):
'''
Backup a file on the minion
'''
dname, bname = os.path.split(path)
if salt.utils.is_windows():
src_dir = dname.replace(':', '_')
else:
src_dir = dname[1:]
if not salt.utils.is_windows():
fstat = os.stat(path)
msecs = str(int(time.time() * 1000000))[-6:]
if salt.utils.is_windows():
# ':' is an illegal filesystem path character on Windows
stamp = time.strftime('%a_%b_%d_%H-%M-%S_%Y')
else:
stamp = time.strftime('%a_%b_%d_%H:%M:%S_%Y')
stamp = '{0}{1}_{2}'.format(stamp[:-4], msecs, stamp[-4:])
bkpath = os.path.join(bkroot,
src_dir,
'{0}_{1}'.format(bname, stamp))
if not os.path.isdir(os.path.dirname(bkpath)):
os.makedirs(os.path.dirname(bkpath))
shutil.copyfile(path, bkpath)
if not salt.utils.is_windows():
os.chown(bkpath, fstat.st_uid, fstat.st_gid)
os.chmod(bkpath, fstat.st_mode)
def path_join(*parts):
'''
    This function tries to solve some issues when joining multiple absolute
paths on both *nix and windows platforms.
See tests/unit/utils/path_join_test.py for some examples on what's being
talked about here.
'''
if six.PY3:
new_parts = []
for part in parts:
new_parts.append(to_str(part))
parts = new_parts
# Normalize path converting any os.sep as needed
parts = [os.path.normpath(p) for p in parts]
try:
root = parts.pop(0)
except IndexError:
# No args passed to func
return ''
if not parts:
ret = root
else:
if is_windows():
if len(root) == 1:
root += ':'
root = root.rstrip(os.sep) + os.sep
stripped = [p.lstrip(os.sep) for p in parts]
try:
ret = os.path.join(root, *stripped)
except UnicodeDecodeError:
# This is probably Python 2 and one of the parts contains unicode
# characters in a bytestring. First try to decode to the system
# encoding.
try:
enc = __salt_system_encoding__
except NameError:
enc = sys.stdin.encoding or sys.getdefaultencoding()
try:
ret = os.path.join(root.decode(enc),
*[x.decode(enc) for x in stripped])
except UnicodeDecodeError:
# Last resort, try UTF-8
ret = os.path.join(root.decode('UTF-8'),
*[x.decode('UTF-8') for x in stripped])
return os.path.normpath(ret)
def pem_finger(path=None, key=None, sum_type='sha256'):
'''
Pass in either a raw pem string, or the path on disk to the location of a
pem file, and the type of cryptographic hash to use. The default is SHA256.
The fingerprint of the pem will be returned.
If neither a key nor a path are passed in, a blank string will be returned.
'''
if not key:
if not os.path.isfile(path):
return ''
with fopen(path, 'rb') as fp_:
key = b''.join([x for x in fp_.readlines() if x.strip()][1:-1])
pre = getattr(hashlib, sum_type)(key).hexdigest()
finger = ''
for ind in range(len(pre)):
if ind % 2:
# Is odd
finger += '{0}:'.format(pre[ind])
else:
finger += pre[ind]
return finger.rstrip(':')
def build_whitespace_split_regex(text):
'''
Create a regular expression at runtime which should match ignoring the
addition or deletion of white space or line breaks, unless between commas
Example:
    .. code-block:: python
>>> import re
>>> from salt.utils import *
>>> regex = build_whitespace_split_regex(
... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then"""
... )
>>> regex
'(?:[\\s]+)?if(?:[\\s]+)?\\[(?:[\\s]+)?\\-z(?:[\\s]+)?\\"\\$debian'
'\\_chroot\\"(?:[\\s]+)?\\](?:[\\s]+)?\\&\\&(?:[\\s]+)?\\[(?:[\\s]+)?'
'\\-r(?:[\\s]+)?\\/etc\\/debian\\_chroot(?:[\\s]+)?\\]\\;(?:[\\s]+)?'
'then(?:[\\s]+)?'
>>> re.search(
... regex,
... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then"""
... )
<_sre.SRE_Match object at 0xb70639c0>
>>>
'''
def __build_parts(text):
lexer = shlex.shlex(text)
lexer.whitespace_split = True
lexer.commenters = ''
if '\'' in text:
lexer.quotes = '"'
elif '"' in text:
lexer.quotes = '\''
return list(lexer)
regex = r''
for line in text.splitlines():
parts = [re.escape(s) for s in __build_parts(line)]
regex += r'(?:[\s]+)?{0}(?:[\s]+)?'.format(r'(?:[\s]+)?'.join(parts))
return r'(?m)^{0}$'.format(regex)
def format_call(fun,
data,
initial_ret=None,
expected_extra_kws=()):
'''
Build the required arguments and keyword arguments required for the passed
function.
:param fun: The function to get the argspec from
:param data: A dictionary containing the required data to build the
arguments and keyword arguments.
:param initial_ret: The initial return data pre-populated as dictionary or
None
:param expected_extra_kws: Any expected extra keyword argument names which
should not trigger a :ref:`SaltInvocationError`
:returns: A dictionary with the function required arguments and keyword
arguments.
'''
ret = initial_ret is not None and initial_ret or {}
ret['args'] = []
ret['kwargs'] = {}
aspec = salt.utils.args.get_function_argspec(fun)
arg_data = arg_lookup(fun, aspec)
args = arg_data['args']
kwargs = arg_data['kwargs']
# Since we WILL be changing the data dictionary, let's change a copy of it
data = data.copy()
missing_args = []
for key in kwargs:
try:
kwargs[key] = data.pop(key)
except KeyError:
# Let's leave the default value in place
pass
while args:
arg = args.pop(0)
try:
ret['args'].append(data.pop(arg))
except KeyError:
missing_args.append(arg)
if missing_args:
used_args_count = len(ret['args']) + len(args)
args_count = used_args_count + len(missing_args)
raise SaltInvocationError(
'{0} takes at least {1} argument{2} ({3} given)'.format(
fun.__name__,
args_count,
args_count > 1 and 's' or '',
used_args_count
)
)
ret['kwargs'].update(kwargs)
if aspec.keywords:
        # The function accepts **kwargs, so any unexpected extra keyword
        # arguments will be made available.
for key, value in six.iteritems(data):
if key in expected_extra_kws:
continue
ret['kwargs'][key] = value
# No need to check for extra keyword arguments since they are all
# **kwargs now. Return
return ret
# Did not return yet? Lets gather any remaining and unexpected keyword
# arguments
extra = {}
for key, value in six.iteritems(data):
if key in expected_extra_kws:
continue
extra[key] = copy.deepcopy(value)
    # We'll be showing warnings to the users until Salt Nitrogen comes out,
    # after which errors will be raised instead.
warn_until(
'Nitrogen',
'It\'s time to start raising `SaltInvocationError` instead of '
'returning warnings',
# Let's not show the deprecation warning on the console, there's no
# need.
_dont_call_warnings=True
)
if extra:
# Found unexpected keyword arguments, raise an error to the user
if len(extra) == 1:
msg = '\'{0[0]}\' is an invalid keyword argument for \'{1}\''.format(
list(extra.keys()),
ret.get(
# In case this is being called for a state module
'full',
# Not a state module, build the name
'{0}.{1}'.format(fun.__module__, fun.__name__)
)
)
else:
msg = '{0} and \'{1}\' are invalid keyword arguments for \'{2}\''.format(
', '.join(['\'{0}\''.format(e) for e in extra][:-1]),
list(extra.keys())[-1],
ret.get(
# In case this is being called for a state module
'full',
# Not a state module, build the name
'{0}.{1}'.format(fun.__module__, fun.__name__)
)
)
# Return a warning to the user explaining what's going on
ret.setdefault('warnings', []).append(
'{0}. If you were trying to pass additional data to be used '
'in a template context, please populate \'context\' with '
'\'key: value\' pairs. Your approach will work until Salt '
'Nitrogen is out.{1}'.format(
msg,
'' if 'full' not in ret else ' Please update your state files.'
)
)
# Lets pack the current extra kwargs as template context
ret.setdefault('context', {}).update(extra)
return ret
def arg_lookup(fun, aspec=None):
'''
Return a dict containing the arguments and default arguments to the
function.
'''
ret = {'kwargs': {}}
if aspec is None:
aspec = salt.utils.args.get_function_argspec(fun)
if aspec.defaults:
ret['kwargs'] = dict(zip(aspec.args[::-1], aspec.defaults[::-1]))
ret['args'] = [arg for arg in aspec.args if arg not in ret['kwargs']]
return ret
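# Illustrative doctest-style example (not part of the original module), using a
# hypothetical function with one default argument:
#
#     >>> def _sample(a, b, c=3):
#     ...     pass
#     >>> spec = arg_lookup(_sample)
#     >>> spec['args'], spec['kwargs']
#     (['a', 'b'], {'c': 3})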
def istextfile(fp_, blocksize=512):
'''
Uses heuristics to guess whether the given file is text or binary,
by reading a single block of bytes from the file.
If more than 30% of the chars in the block are non-text, or there
are NUL ('\x00') bytes in the block, assume this is a binary file.
'''
int2byte = (lambda x: bytes((x,))) if six.PY3 else chr
text_characters = (
b''.join(int2byte(i) for i in range(32, 127)) +
b'\n\r\t\f\b')
try:
block = fp_.read(blocksize)
except AttributeError:
# This wasn't an open filehandle, so treat it as a file path and try to
# open the file
try:
with fopen(fp_, 'rb') as fp2_:
block = fp2_.read(blocksize)
except IOError:
# Unable to open file, bail out and return false
return False
if b'\x00' in block:
# Files with null bytes are binary
return False
elif not block:
# An empty file is considered a valid text file
return True
try:
block.decode('utf-8')
return True
except UnicodeDecodeError:
pass
nontext = block.translate(None, text_characters)
return float(len(nontext)) / len(block) <= 0.30
def isorted(to_sort):
'''
Sort a list of strings ignoring case.
>>> L = ['foo', 'Foo', 'bar', 'Bar']
>>> sorted(L)
['Bar', 'Foo', 'bar', 'foo']
>>> sorted(L, key=lambda x: x.lower())
['bar', 'Bar', 'foo', 'Foo']
>>>
'''
return sorted(to_sort, key=lambda x: x.lower())
def mysql_to_dict(data, key):
'''
Convert MySQL-style output to a python dictionary
'''
ret = {}
headers = ['']
for line in data:
if not line:
continue
if line.startswith('+'):
continue
comps = line.split('|')
for comp in range(len(comps)):
comps[comp] = comps[comp].strip()
if len(headers) > 1:
index = len(headers) - 1
row = {}
for field in range(index):
if field < 1:
continue
else:
row[headers[field]] = str_to_num(comps[field])
ret[row[key]] = row
else:
headers = comps
return ret
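# Illustrative doctest-style example (not part of the original module), feeding
# in the usual '|'-delimited MySQL table output and keying the result on 'name':
#
#     >>> lines = ['+----+------+',
#     ...          '| id | name |',
#     ...          '+----+------+',
#     ...          '| 1  | foo  |',
#     ...          '+----+------+']
#     >>> mysql_to_dict(lines, 'name')['foo']['id']
#     1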
def contains_whitespace(text):
'''
Returns True if there are any whitespace characters in the string
'''
return any(x.isspace() for x in text)
def str_to_num(text):
'''
Convert a string to a number.
Returns an integer if the string represents an integer, a floating
point number if the string is a real number, or the string unchanged
otherwise.
'''
try:
return int(text)
except ValueError:
try:
return float(text)
except ValueError:
return text
def fopen(*args, **kwargs):
'''
Wrapper around open() built-in to set CLOEXEC on the fd.
This flag specifies that the file descriptor should be closed when an exec
function is invoked;
When a file descriptor is allocated (as with open or dup), this bit is
initially cleared on the new file descriptor, meaning that descriptor will
survive into the new program after exec.
NB! We still have small race condition between open and fcntl.
'''
# ensure 'binary' mode is always used on Windows in Python 2
if ((six.PY2 and is_windows() and 'binary' not in kwargs) or
kwargs.pop('binary', False)):
if len(args) > 1:
args = list(args)
if 'b' not in args[1]:
args[1] += 'b'
elif kwargs.get('mode', None):
if 'b' not in kwargs['mode']:
kwargs['mode'] += 'b'
else:
# the default is to read
kwargs['mode'] = 'rb'
elif six.PY3 and 'encoding' not in kwargs:
# In Python 3, if text mode is used and the encoding
# is not specified, set the encoding to 'utf-8'.
binary = False
if len(args) > 1:
args = list(args)
if 'b' in args[1]:
binary = True
if kwargs.get('mode', None):
if 'b' in kwargs['mode']:
binary = True
if not binary:
kwargs['encoding'] = 'utf-8'
fhandle = open(*args, **kwargs)
if is_fcntl_available():
# modify the file descriptor on systems with fcntl
# unix and unix-like systems only
try:
FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103
except AttributeError:
FD_CLOEXEC = 1 # pylint: disable=C0103
old_flags = fcntl.fcntl(fhandle.fileno(), fcntl.F_GETFD)
fcntl.fcntl(fhandle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)
return fhandle
@contextlib.contextmanager
def flopen(*args, **kwargs):
'''
Shortcut for fopen with lock and context manager
'''
with fopen(*args, **kwargs) as fhandle:
try:
if is_fcntl_available(check_sunos=True):
fcntl.flock(fhandle.fileno(), fcntl.LOCK_SH)
yield fhandle
finally:
if is_fcntl_available(check_sunos=True):
fcntl.flock(fhandle.fileno(), fcntl.LOCK_UN)
@contextlib.contextmanager
def fpopen(*args, **kwargs):
'''
Shortcut for fopen with extra uid, gid and mode options.
Supported optional Keyword Arguments:
mode: explicit mode to set. Mode is anything os.chmod
would accept as input for mode. Works only on unix/unix
like systems.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the path is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the path is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
# Remove uid, gid and mode from kwargs if present
uid = kwargs.pop('uid', -1) # -1 means no change to current uid
gid = kwargs.pop('gid', -1) # -1 means no change to current gid
mode = kwargs.pop('mode', None)
with fopen(*args, **kwargs) as fhandle:
path = args[0]
d_stat = os.stat(path)
if hasattr(os, 'chown'):
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(path, uid, gid)
if mode is not None:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode:
os.chmod(path, (d_stat.st_mode ^ mode_part) | mode)
yield fhandle
def expr_match(line, expr):
'''
Evaluate a line of text against an expression. First try a full-string
match, next try globbing, and then try to match assuming expr is a regular
expression. Originally designed to match minion IDs for
whitelists/blacklists.
'''
if line == expr:
return True
if fnmatch.fnmatch(line, expr):
return True
try:
if re.match(r'\A{0}\Z'.format(expr), line):
return True
except re.error:
pass
return False
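# Illustrative doctest-style example (not part of the original module): exact,
# glob and regex matching are tried in that order:
#
#     >>> expr_match('web01', 'web*')
#     True
#     >>> expr_match('web01', r'web\d+')
#     True
#     >>> expr_match('db01', 'web*')
#     False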
def check_whitelist_blacklist(value, whitelist=None, blacklist=None):
'''
Check a whitelist and/or blacklist to see if the value matches it.
'''
if blacklist is not None:
if not hasattr(blacklist, '__iter__'):
blacklist = [blacklist]
try:
for expr in blacklist:
if expr_match(value, expr):
return False
except TypeError:
log.error('Non-iterable blacklist {0}'.format(blacklist))
if whitelist:
if not hasattr(whitelist, '__iter__'):
whitelist = [whitelist]
try:
for expr in whitelist:
if expr_match(value, expr):
return True
except TypeError:
log.error('Non-iterable whitelist {0}'.format(whitelist))
else:
return True
return False
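# Illustrative doctest-style example (not part of the original module); the
# blacklist is consulted first:
#
#     >>> check_whitelist_blacklist('web01', whitelist=['web*'])
#     True
#     >>> check_whitelist_blacklist('web01', blacklist=['web*'])
#     False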
def get_values_of_matching_keys(pattern_dict, user_name):
'''
Check a whitelist and/or blacklist to see if the value matches it.
'''
ret = []
for expr in pattern_dict:
if expr_match(user_name, expr):
ret.extend(pattern_dict[expr])
return ret
def subdict_match(data,
expr,
delimiter=DEFAULT_TARGET_DELIM,
regex_match=False,
exact_match=False):
'''
Check for a match in a dictionary using a delimiter character to denote
levels of subdicts, and also allowing the delimiter character to be
matched. Thus, 'foo:bar:baz' will match data['foo'] == 'bar:baz' and
data['foo']['bar'] == 'baz'. The former would take priority over the
latter.
'''
def _match(target, pattern, regex_match=False, exact_match=False):
if regex_match:
try:
return re.match(pattern.lower(), str(target).lower())
except Exception:
log.error('Invalid regex \'{0}\' in match'.format(pattern))
return False
elif exact_match:
return str(target).lower() == pattern.lower()
else:
return fnmatch.fnmatch(str(target).lower(), pattern.lower())
def _dict_match(target, pattern, regex_match=False, exact_match=False):
wildcard = pattern.startswith('*:')
if wildcard:
pattern = pattern[2:]
if pattern == '*':
# We are just checking that the key exists
return True
elif pattern in target:
# We might want to search for a key
return True
elif subdict_match(target,
pattern,
regex_match=regex_match,
exact_match=exact_match):
return True
if wildcard:
for key in target.keys():
if _match(key,
pattern,
regex_match=regex_match,
exact_match=exact_match):
return True
if isinstance(target[key], dict):
if _dict_match(target[key],
pattern,
regex_match=regex_match,
exact_match=exact_match):
return True
elif isinstance(target[key], list):
for item in target[key]:
if _match(item,
pattern,
regex_match=regex_match,
exact_match=exact_match):
return True
return False
for idx in range(1, expr.count(delimiter) + 1):
splits = expr.split(delimiter)
key = delimiter.join(splits[:idx])
matchstr = delimiter.join(splits[idx:])
log.debug('Attempting to match \'{0}\' in \'{1}\' using delimiter '
'\'{2}\''.format(matchstr, key, delimiter))
match = traverse_dict_and_list(data, key, {}, delimiter=delimiter)
if match == {}:
continue
if isinstance(match, dict):
if _dict_match(match,
matchstr,
regex_match=regex_match,
exact_match=exact_match):
return True
continue
if isinstance(match, list):
# We are matching a single component to a single list member
for member in match:
if isinstance(member, dict):
if _dict_match(member,
matchstr,
regex_match=regex_match,
exact_match=exact_match):
return True
if _match(member,
matchstr,
regex_match=regex_match,
exact_match=exact_match):
return True
continue
if _match(match,
matchstr,
regex_match=regex_match,
exact_match=exact_match):
return True
return False
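# Illustrative doctest-style example (not part of the original module): both the
# nested form and the 'delimiter inside the value' form match:
#
#     >>> subdict_match({'foo': {'bar': 'baz'}}, 'foo:bar:baz')
#     True
#     >>> subdict_match({'foo': 'bar:baz'}, 'foo:bar:baz')
#     True
#     >>> subdict_match({'foo': {'bar': 'baz'}}, 'foo:bar:qux')
#     False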
def traverse_dict(data, key, default, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict using a colon-delimited (or otherwise delimited, using the
'delimiter' param) target string. The target 'foo:bar:baz' will return
data['foo']['bar']['baz'] if this value exists, and will otherwise return
the dict in the default argument.
'''
try:
for each in key.split(delimiter):
data = data[each]
except (KeyError, IndexError, TypeError):
# Encountered a non-indexable value in the middle of traversing
return default
return data
def traverse_dict_and_list(data, key, default, delimiter=DEFAULT_TARGET_DELIM):
'''
Traverse a dict or list using a colon-delimited (or otherwise delimited,
using the 'delimiter' param) target string. The target 'foo:bar:0' will
return data['foo']['bar'][0] if this value exists, and will otherwise
return the dict in the default argument.
Function will automatically determine the target type.
The target 'foo:bar:0' will return data['foo']['bar'][0] if data like
{'foo':{'bar':['baz']}} , if data like {'foo':{'bar':{'0':'baz'}}}
then return data['foo']['bar']['0']
'''
for each in key.split(delimiter):
if isinstance(data, list):
try:
idx = int(each)
except ValueError:
embed_match = False
# Index was not numeric, lets look at any embedded dicts
for embedded in (x for x in data if isinstance(x, dict)):
try:
data = embedded[each]
embed_match = True
break
except KeyError:
pass
if not embed_match:
# No embedded dicts matched, return the default
return default
else:
try:
data = data[idx]
except IndexError:
return default
else:
try:
data = data[each]
except (KeyError, TypeError):
return default
return data
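# Illustrative doctest-style example (not part of the original module): numeric
# path components index into lists:
#
#     >>> data = {'foo': {'bar': ['baz', 'qux']}}
#     >>> traverse_dict_and_list(data, 'foo:bar:1', 'default')
#     'qux'
#     >>> traverse_dict_and_list(data, 'foo:missing', 'default')
#     'default'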
def mkstemp(*args, **kwargs):
'''
Helper function which does exactly what `tempfile.mkstemp()` does but
accepts another argument, `close_fd`, which, by default, is true and closes
the fd before returning the file path. Something commonly done throughout
Salt's code.
'''
close_fd = kwargs.pop('close_fd', True)
fd_, fpath = tempfile.mkstemp(*args, **kwargs)
if close_fd is False:
return (fd_, fpath)
os.close(fd_)
del fd_
return fpath
def clean_kwargs(**kwargs):
'''
Return a dict without any of the __pub* keys (or any other keys starting
with a dunder) from the kwargs dict passed into the execution module
functions. These keys are useful for tracking what was used to invoke
    the function call, but they may not be desirable to have if passing the
kwargs forward wholesale.
'''
ret = {}
for key, val in six.iteritems(kwargs):
if not key.startswith('__'):
ret[key] = val
return ret
@real_memoize
def is_windows():
'''
Simple function to return if a host is Windows or not
'''
return sys.platform.startswith('win')
def sanitize_win_path_string(winpath):
'''
Remove illegal path characters for windows
'''
intab = '<>:|?*'
outtab = '_' * len(intab)
trantab = ''.maketrans(intab, outtab) if six.PY3 else string.maketrans(intab, outtab)
if isinstance(winpath, str):
winpath = winpath.translate(trantab)
elif isinstance(winpath, six.text_type):
winpath = winpath.translate(dict((ord(c), u'_') for c in intab))
return winpath
@real_memoize
def is_proxy():
'''
Return True if this minion is a proxy minion.
    Leverages the fact that is_linux() and is_windows()
    both return False for proxies.
TODO: Need to extend this for proxies that might run on
other Unices
'''
import __main__ as main
# This is a hack. If a proxy minion is started by other
# means, e.g. a custom script that creates the minion objects
# then this will fail.
is_proxy = False
try:
if 'salt-proxy' in main.__file__:
is_proxy = True
except AttributeError:
pass
return is_proxy
@real_memoize
def is_linux():
'''
Simple function to return if a host is Linux or not.
Note for a proxy minion, we need to return something else
'''
return sys.platform.startswith('linux')
@real_memoize
def is_darwin():
'''
Simple function to return if a host is Darwin (OS X) or not
'''
return sys.platform.startswith('darwin')
@real_memoize
def is_sunos():
'''
Simple function to return if host is SunOS or not
'''
return sys.platform.startswith('sunos')
@real_memoize
def is_smartos():
'''
Simple function to return if host is SmartOS (Illumos) or not
'''
if not is_sunos():
return False
else:
return os.uname()[3].startswith('joyent_')
@real_memoize
def is_smartos_globalzone():
'''
Function to return if host is SmartOS (Illumos) global zone or not
'''
if not is_smartos():
return False
else:
cmd = ['zonename']
try:
zonename = subprocess.Popen(
cmd, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
return False
if zonename.returncode:
return False
if zonename.stdout.read().strip() == 'global':
return True
return False
@real_memoize
def is_smartos_zone():
'''
Function to return if host is SmartOS (Illumos) and not the gz
'''
if not is_smartos():
return False
else:
cmd = ['zonename']
try:
zonename = subprocess.Popen(
cmd, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
return False
if zonename.returncode:
return False
if zonename.stdout.read().strip() == 'global':
return False
return True
@real_memoize
def is_freebsd():
'''
Simple function to return if host is FreeBSD or not
'''
return sys.platform.startswith('freebsd')
@real_memoize
def is_netbsd():
'''
Simple function to return if host is NetBSD or not
'''
return sys.platform.startswith('netbsd')
@real_memoize
def is_openbsd():
'''
Simple function to return if host is OpenBSD or not
'''
return sys.platform.startswith('openbsd')
def is_fcntl_available(check_sunos=False):
'''
Simple function to check if the `fcntl` module is available or not.
If `check_sunos` is passed as `True` an additional check to see if host is
SunOS is also made. For additional information see: http://goo.gl/159FF8
'''
if check_sunos and is_sunos():
return False
return HAS_FCNTL
def check_include_exclude(path_str, include_pat=None, exclude_pat=None):
'''
Check for glob or regexp patterns for include_pat and exclude_pat in the
'path_str' string and return True/False conditions as follows.
- Default: return 'True' if no include_pat or exclude_pat patterns are
supplied
- If only include_pat or exclude_pat is supplied: return 'True' if string
passes the include_pat test or fails exclude_pat test respectively
- If both include_pat and exclude_pat are supplied: return 'True' if
include_pat matches AND exclude_pat does not match
'''
ret = True # -- default true
# Before pattern match, check if it is regexp (E@'') or glob(default)
if include_pat:
if re.match('E@', include_pat):
retchk_include = True if re.search(
include_pat[2:],
path_str
) else False
else:
retchk_include = True if fnmatch.fnmatch(
path_str,
include_pat
) else False
if exclude_pat:
if re.match('E@', exclude_pat):
retchk_exclude = False if re.search(
exclude_pat[2:],
path_str
) else True
else:
retchk_exclude = False if fnmatch.fnmatch(
path_str,
exclude_pat
) else True
# Now apply include/exclude conditions
if include_pat and not exclude_pat:
ret = retchk_include
elif exclude_pat and not include_pat:
ret = retchk_exclude
elif include_pat and exclude_pat:
ret = retchk_include and retchk_exclude
else:
ret = True
return ret
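# Illustrative doctest-style example (not part of the original module): the
# 'E@' prefix switches a pattern from glob to regex matching:
#
#     >>> check_include_exclude('/etc/salt/minion', include_pat='*/salt/*')
#     True
#     >>> check_include_exclude('/etc/salt/minion', exclude_pat='E@.*minion$')
#     False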
def gen_state_tag(low):
'''
Generate the running dict tag string from the low data structure
'''
return '{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(low)
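# Illustrative doctest-style example (not part of the original module), using a
# minimal low-data chunk:
#
#     >>> low = {'state': 'file', '__id__': 'motd', 'name': '/etc/motd', 'fun': 'managed'}
#     >>> gen_state_tag(low)
#     'file_|-motd_|-/etc/motd_|-managed'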
def check_state_result(running, recurse=False):
'''
Check the total return value of the run and determine if the running
dict has any issues
'''
if not isinstance(running, dict):
return False
if not running:
return False
ret = True
for state_result in six.itervalues(running):
if not recurse and not isinstance(state_result, dict):
ret = False
if ret and isinstance(state_result, dict):
result = state_result.get('result', _empty)
if result is False:
ret = False
# only override return value if we are not already failed
elif result is _empty and isinstance(state_result, dict) and ret:
ret = check_state_result(state_result, recurse=True)
# return as soon as we got a failure
if not ret:
break
return ret
def st_mode_to_octal(mode):
'''
Convert the st_mode value from a stat(2) call (as returned from os.stat())
to an octal mode.
'''
try:
return oct(mode)[-4:]
except (TypeError, IndexError):
return ''
def normalize_mode(mode):
'''
Return a mode value, normalized to a string and containing a leading zero
if it does not have one.
Allow "keep" as a valid mode (used by file state/module to preserve mode
from the Salt fileserver in file states).
'''
if mode is None:
return None
if not isinstance(mode, six.string_types):
mode = str(mode)
    # Strip any quotes and any initial zeroes, then zero-pad it up to 4.
    # This ensures that something like '00644' is normalized to '0644'
return mode.strip('"').strip('\'').lstrip('0').zfill(4)
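# Illustrative doctest-style example (not part of the original module):
#
#     >>> normalize_mode('00644')
#     '0644'
#     >>> normalize_mode(644)
#     '0644'
#     >>> normalize_mode('keep')
#     'keep'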
def test_mode(**kwargs):
'''
Examines the kwargs passed and returns True if any kwarg which matching
"Test" in any variation on capitalization (i.e. "TEST", "Test", "TeSt",
etc) contains a True value (as determined by salt.utils.is_true).
'''
for arg, value in six.iteritems(kwargs):
try:
if arg.lower() == 'test' and is_true(value):
return True
except AttributeError:
continue
return False
def is_true(value=None):
'''
Returns a boolean value representing the "truth" of the value passed. The
rules for what is a "True" value are:
1. Integer/float values greater than 0
2. The string values "True" and "true"
3. Any object for which bool(obj) returns True
'''
# First, try int/float conversion
try:
value = int(value)
except (ValueError, TypeError):
pass
try:
value = float(value)
except (ValueError, TypeError):
pass
# Now check for truthiness
if isinstance(value, (int, float)):
return value > 0
elif isinstance(value, six.string_types):
return str(value).lower() == 'true'
else:
return bool(value)
def exactly_n(l, n=1):
'''
Tests that exactly N items in an iterable are "truthy" (neither None,
False, nor 0).
'''
i = iter(l)
return all(any(i) for j in range(n)) and not any(i)
def exactly_one(l):
'''
Check if only one item is not None, False, or 0 in an iterable.
'''
return exactly_n(l)
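# Illustrative doctest-style example (not part of the original module): the
# shared iterator in exactly_n() stops as soon as n truthy items are seen and
# then checks that nothing truthy remains:
#
#     >>> exactly_n([0, 1, 0, 0])
#     True
#     >>> exactly_n([1, 1, 0], n=2)
#     True
#     >>> exactly_one([1, 1])
#     False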
def rm_rf(path):
'''
Platform-independent recursive delete. Includes code from
http://stackoverflow.com/a/2656405
'''
def _onerror(func, path, exc_info):
'''
Error handler for `shutil.rmtree`.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : `shutil.rmtree(path, onerror=onerror)`
'''
if is_windows() and not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise # pylint: disable=E0704
if os.path.isdir(path):
shutil.rmtree(path, onerror=_onerror)
else:
os.remove(path)
def option(value, default='', opts=None, pillar=None):
'''
Pass in a generic option and receive the value that will be assigned
'''
if opts is None:
opts = {}
if pillar is None:
pillar = {}
sources = (
(opts, value),
(pillar, 'master:{0}'.format(value)),
(pillar, value),
)
for source, val in sources:
out = traverse_dict_and_list(source, val, default)
if out is not default:
return out
return default
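# Illustrative doctest-style example (not part of the original module): opts win
# over pillar, which wins over the default:
#
#     >>> option('log_level', default='warning', opts={'log_level': 'debug'})
#     'debug'
#     >>> option('log_level', default='warning', opts={}, pillar={'log_level': 'info'})
#     'info'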
def parse_docstring(docstring):
'''
Parse a docstring into its parts.
Currently only parses dependencies, can be extended to parse whatever is
needed.
Parses into a dictionary:
{
'full': full docstring,
'deps': list of dependencies (empty list if none)
}
'''
# First try with regex search for :depends:
ret = {}
ret['full'] = docstring
regex = r'([ \t]*):depends:[ \t]+- (\w+)[^\n]*\n(\1[ \t]+- (\w+)[^\n]*\n)*'
match = re.search(regex, docstring, re.M)
if match:
deps = []
regex = r'- (\w+)'
for line in match.group(0).strip().splitlines():
deps.append(re.search(regex, line).group(1))
ret['deps'] = deps
return ret
# Try searching for a one-liner instead
else:
txt = 'Required python modules: '
data = docstring.splitlines()
dep_list = list(x for x in data if x.strip().startswith(txt))
if not dep_list:
ret['deps'] = []
return ret
deps = dep_list[0].replace(txt, '').strip().split(', ')
ret['deps'] = deps
return ret
def print_cli(msg):
'''
Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely).
'''
try:
try:
print(msg)
except UnicodeEncodeError:
print(msg.encode('utf-8'))
except IOError as exc:
if exc.errno != errno.EPIPE:
raise
def safe_walk(top, topdown=True, onerror=None, followlinks=True, _seen=None):
'''
A clone of the python os.walk function with some checks for recursive
symlinks. Unlike os.walk this follows symlinks by default.
'''
islink, join, isdir = os.path.islink, os.path.join, os.path.isdir
if _seen is None:
_seen = set()
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
        # os.listdir may fail (e.g. permission denied); hand the error to onerror
names = os.listdir(top)
except os.error as err:
if onerror is not None:
onerror(err)
return
if followlinks:
status = os.stat(top)
# st_ino is always 0 on some filesystems (FAT, NTFS); ignore them
if status.st_ino != 0:
node = (status.st_dev, status.st_ino)
if node in _seen:
return
_seen.add(node)
dirs, nondirs = [], []
for name in names:
full_path = join(top, name)
if isdir(full_path):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in safe_walk(new_path, topdown, onerror, followlinks, _seen):
yield x
if not topdown:
yield top, dirs, nondirs
def get_hash(path, form='sha256', chunk_size=65536):
'''
Get the hash sum of a file
This is better than ``get_sum`` for the following reasons:
- It does not read the entire file into memory.
- It does not return a string on error. The returned value of
``get_sum`` cannot really be trusted since it is vulnerable to
collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``
'''
hash_type = hasattr(hashlib, form) and getattr(hashlib, form) or None
if hash_type is None:
raise ValueError('Invalid hash type: {0}'.format(form))
with salt.utils.fopen(path, 'rb') as ifile:
hash_obj = hash_type()
# read the file in in chunks, not the entire file
for chunk in iter(lambda: ifile.read(chunk_size), b''):
hash_obj.update(chunk)
return hash_obj.hexdigest()
def namespaced_function(function, global_dict, defaults=None, preserve_context=False):
'''
Redefine (clone) a function under a different globals() namespace scope
preserve_context:
        Allow keeping the context taken from the original namespace,
        and extending it with globals() taken from the
        new targeted namespace.
'''
if defaults is None:
defaults = function.__defaults__
if preserve_context:
_global_dict = function.__globals__.copy()
_global_dict.update(global_dict)
global_dict = _global_dict
new_namespaced_function = types.FunctionType(
function.__code__,
global_dict,
name=function.__name__,
argdefs=defaults
)
new_namespaced_function.__dict__.update(function.__dict__)
return new_namespaced_function
def alias_function(fun, name, doc=None):
'''
Copy a function
'''
alias_fun = types.FunctionType(fun.__code__,
fun.__globals__,
name,
fun.__defaults__,
fun.__closure__)
alias_fun.__dict__.update(fun.__dict__)
if doc and isinstance(doc, six.string_types):
alias_fun.__doc__ = doc
else:
orig_name = fun.__name__
alias_msg = ('\nThis function is an alias of '
'``{0}``.\n'.format(orig_name))
alias_fun.__doc__ = alias_msg + fun.__doc__
return alias_fun
def _win_console_event_handler(event):
if event == 5:
# Do nothing on CTRL_LOGOFF_EVENT
return True
return False
def enable_ctrl_logoff_handler():
if HAS_WIN32API:
win32api.SetConsoleCtrlHandler(_win_console_event_handler, 1)
def date_cast(date):
'''
Casts any object into a datetime.datetime object
date
any datetime, time string representation...
'''
if date is None:
return datetime.datetime.now()
elif isinstance(date, datetime.datetime):
return date
# fuzzy date
try:
if isinstance(date, six.string_types):
try:
if HAS_TIMELIB:
# py3: yes, timelib.strtodatetime wants bytes, not str :/
return timelib.strtodatetime(to_bytes(date))
except ValueError:
pass
# not parsed yet, obviously a timestamp?
if date.isdigit():
date = int(date)
else:
date = float(date)
return datetime.datetime.fromtimestamp(date)
except Exception:
if HAS_TIMELIB:
raise ValueError('Unable to parse {0}'.format(date))
raise RuntimeError('Unable to parse {0}.'
' Consider installing timelib'.format(date))
def date_format(date=None, format="%Y-%m-%d"):
'''
Converts date into a time-based string
date
any datetime, time string representation...
format
        `strftime <http://docs.python.org/2/library/datetime.html#datetime.datetime.strftime>`_ format
>>> import datetime
>>> src = datetime.datetime(2002, 12, 25, 12, 00, 00, 00)
>>> date_format(src)
'2002-12-25'
>>> src = '2002/12/25'
>>> date_format(src)
'2002-12-25'
>>> src = 1040814000
>>> date_format(src)
'2002-12-25'
>>> src = '1040814000'
>>> date_format(src)
'2002-12-25'
'''
return date_cast(date).strftime(format)
def warn_until(version,
message,
category=DeprecationWarning,
stacklevel=None,
_version_info_=None,
_dont_call_warnings=False):
'''
Helper function to raise a warning, by default, a ``DeprecationWarning``,
until the provided ``version``, after which, a ``RuntimeError`` will
be raised to remind the developers to remove the warning because the
target version has been reached.
:param version: The version info or name after which the warning becomes a
``RuntimeError``. For example ``(0, 17)`` or ``Hydrogen``
or an instance of :class:`salt.version.SaltStackVersion`.
:param message: The warning message to be displayed.
:param category: The warning class to be thrown, by default
``DeprecationWarning``
:param stacklevel: There should be no need to set the value of
``stacklevel``. Salt should be able to do the right thing.
:param _version_info_: In order to reuse this function for other SaltStack
projects, they need to be able to provide the
version info to compare to.
    :param _dont_call_warnings: This parameter is used just to get the
                                version-check behaviour without actually
                                emitting the warning, i.e. when we are only
                                after the salt version check that raises a
                                ``RuntimeError``.
'''
if not isinstance(version, (tuple,
six.string_types,
salt.version.SaltStackVersion)):
raise RuntimeError(
'The \'version\' argument should be passed as a tuple, string or '
'an instance of \'salt.version.SaltStackVersion\'.'
)
elif isinstance(version, tuple):
version = salt.version.SaltStackVersion(*version)
elif isinstance(version, six.string_types):
version = salt.version.SaltStackVersion.from_name(version)
if stacklevel is None:
# Attribute the warning to the calling function, not to warn_until()
stacklevel = 2
if _version_info_ is None:
_version_info_ = salt.version.__version_info__
_version_ = salt.version.SaltStackVersion(*_version_info_)
if _version_ >= version:
import inspect
caller = inspect.getframeinfo(sys._getframe(stacklevel - 1))
raise RuntimeError(
'The warning triggered on filename \'{filename}\', line number '
'{lineno}, is supposed to be shown until version '
'{until_version} is released. Current version is now '
'{salt_version}. Please remove the warning.'.format(
filename=caller.filename,
lineno=caller.lineno,
until_version=version.formatted_version,
salt_version=_version_.formatted_version
),
)
if _dont_call_warnings is False:
def _formatwarning(message,
category,
filename,
lineno,
line=None): # pylint: disable=W0613
'''
Replacement for warnings.formatwarning that disables the echoing of
the 'line' parameter.
'''
return '{0}:{1}: {2}: {3}\n'.format(
filename, lineno, category.__name__, message
)
saved = warnings.formatwarning
warnings.formatwarning = _formatwarning
warnings.warn(
message.format(version=version.formatted_version),
category,
stacklevel=stacklevel
)
warnings.formatwarning = saved
def kwargs_warn_until(kwargs,
version,
category=DeprecationWarning,
stacklevel=None,
_version_info_=None,
_dont_call_warnings=False):
'''
Helper function to raise a warning (by default, a ``DeprecationWarning``)
when unhandled keyword arguments are passed to function, until the
provided ``version_info``, after which, a ``RuntimeError`` will be raised
to remind the developers to remove the ``**kwargs`` because the target
version has been reached.
This function is used to help deprecate unused legacy ``**kwargs`` that
were added to function parameters lists to preserve backwards compatibility
when removing a parameter. See
:doc:`the deprecation development docs </topics/development/deprecations>`
for the modern strategy for deprecating a function parameter.
:param kwargs: The caller's ``**kwargs`` argument value (a ``dict``).
:param version: The version info or name after which the warning becomes a
``RuntimeError``. For example ``(0, 17)`` or ``Hydrogen``
or an instance of :class:`salt.version.SaltStackVersion`.
:param category: The warning class to be thrown, by default
``DeprecationWarning``
:param stacklevel: There should be no need to set the value of
``stacklevel``. Salt should be able to do the right thing.
:param _version_info_: In order to reuse this function for other SaltStack
projects, they need to be able to provide the
version info to compare to.
    :param _dont_call_warnings: This parameter is used just to get the
                                version-check behaviour without actually
                                emitting the warning, i.e. when we are only
                                after the salt version check that raises a
                                ``RuntimeError``.
'''
if not isinstance(version, (tuple,
six.string_types,
salt.version.SaltStackVersion)):
raise RuntimeError(
'The \'version\' argument should be passed as a tuple, string or '
'an instance of \'salt.version.SaltStackVersion\'.'
)
elif isinstance(version, tuple):
version = salt.version.SaltStackVersion(*version)
elif isinstance(version, six.string_types):
version = salt.version.SaltStackVersion.from_name(version)
if stacklevel is None:
# Attribute the warning to the calling function,
# not to kwargs_warn_until() or warn_until()
stacklevel = 3
if _version_info_ is None:
_version_info_ = salt.version.__version_info__
_version_ = salt.version.SaltStackVersion(*_version_info_)
if kwargs or _version_.info >= version.info:
arg_names = ', '.join('\'{0}\''.format(key) for key in kwargs)
warn_until(
version,
message='The following parameter(s) have been deprecated and '
'will be removed in \'{0}\': {1}.'.format(version.string,
arg_names),
category=category,
stacklevel=stacklevel,
_version_info_=_version_.info,
_dont_call_warnings=_dont_call_warnings
)
def version_cmp(pkg1, pkg2, ignore_epoch=False):
'''
Compares two version strings using distutils.version.LooseVersion. This is
a fallback for providers which don't have a version comparison utility
built into them. Return -1 if version1 < version2, 0 if version1 ==
version2, and 1 if version1 > version2. Return None if there was a problem
making the comparison.
'''
normalize = lambda x: str(x).split(':', 1)[-1] if ignore_epoch else str(x)
pkg1 = normalize(pkg1)
pkg2 = normalize(pkg2)
try:
# pylint: disable=no-member
if distutils.version.LooseVersion(pkg1) < \
distutils.version.LooseVersion(pkg2):
return -1
elif distutils.version.LooseVersion(pkg1) == \
distutils.version.LooseVersion(pkg2):
return 0
elif distutils.version.LooseVersion(pkg1) > \
distutils.version.LooseVersion(pkg2):
return 1
except Exception as exc:
log.exception(exc)
return None
def compare_versions(ver1='',
oper='==',
ver2='',
cmp_func=None,
ignore_epoch=False):
'''
Compares two version numbers. Accepts a custom function to perform the
cmp-style version comparison, otherwise uses version_cmp().
'''
cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,),
'>=': (0, 1), '>': (1,)}
if oper not in ('!=',) and oper not in cmp_map:
log.error('Invalid operator \'%s\' for version comparison', oper)
return False
if cmp_func is None:
cmp_func = version_cmp
cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch)
if cmp_result is None:
return False
# Check if integer/long
if not isinstance(cmp_result, numbers.Integral):
log.error('The version comparison function did not return an '
'integer/long.')
return False
if oper == '!=':
return cmp_result not in cmp_map['==']
else:
# Gracefully handle cmp_result not in (-1, 0, 1).
if cmp_result < -1:
cmp_result = -1
elif cmp_result > 1:
cmp_result = 1
return cmp_result in cmp_map[oper]
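# Minimal sketch of the two comparison helpers above (the version strings are
# made up for illustration):
def _example_version_comparisons():  # pragma: no cover
    assert version_cmp('1.2', '1.10') == -1          # LooseVersion ordering
    assert compare_versions(ver1='1.2.3', oper='<', ver2='1.10.0')
    assert not compare_versions(ver1='2.0', oper='!=', ver2='2.0')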
def compare_dicts(old=None, new=None):
'''
Compare before and after results from various salt functions, returning a
dict describing the changes that were made.
'''
    old = old or {}
    new = new or {}
    ret = {}
    for key in set(new).union(old):
if key not in old:
# New key
ret[key] = {'old': '',
'new': new[key]}
elif key not in new:
# Key removed
ret[key] = {'new': '',
'old': old[key]}
elif new[key] != old[key]:
# Key modified
ret[key] = {'old': old[key],
'new': new[key]}
return ret
def compare_lists(old=None, new=None):
'''
Compare before and after results from various salt functions, returning a
dict describing the changes that were made
'''
    old = old or []
    new = new or []
    ret = dict()
    for item in new:
if item not in old:
ret['new'] = item
for item in old:
if item not in new:
ret['old'] = item
return ret
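# Illustrative before/after diffs for the two compare helpers above (data is
# made up). Note that compare_lists() only keeps a single 'new'/'old' entry
# (the last differing item found).
def _example_compare_helpers():  # pragma: no cover
    assert compare_dicts(old={'a': 1, 'b': 2}, new={'a': 1, 'b': 3, 'c': 4}) == {
        'b': {'old': 2, 'new': 3},
        'c': {'old': '', 'new': 4},
    }
    assert compare_lists(old=['x', 'y'], new=['y', 'z']) == {'new': 'z', 'old': 'x'}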
def argspec_report(functions, module=''):
'''
Pass in a functions dict as it is returned from the loader and return the
argspec function signatures
'''
ret = {}
if '*' in module or '.' in module:
for fun in fnmatch.filter(functions, module):
try:
aspec = salt.utils.args.get_function_argspec(functions[fun])
except TypeError:
# this happens if not callable
continue
args, varargs, kwargs, defaults = aspec
ret[fun] = {}
ret[fun]['args'] = args if args else None
ret[fun]['defaults'] = defaults if defaults else None
ret[fun]['varargs'] = True if varargs else None
ret[fun]['kwargs'] = True if kwargs else None
else:
# "sys" should just match sys without also matching sysctl
moduledot = module + '.'
for fun in functions:
if fun.startswith(moduledot):
try:
aspec = salt.utils.args.get_function_argspec(functions[fun])
except TypeError:
# this happens if not callable
continue
args, varargs, kwargs, defaults = aspec
ret[fun] = {}
ret[fun]['args'] = args if args else None
ret[fun]['defaults'] = defaults if defaults else None
ret[fun]['varargs'] = True if varargs else None
ret[fun]['kwargs'] = True if kwargs else None
return ret
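# Hedged sketch: 'funcs' stands in for the loader dict normally passed in
# (for example __salt__); the module and function names are illustrative.
def _example_argspec_report():  # pragma: no cover
    def ping(timeout=5):
        return True
    funcs = {'test.ping': ping}
    return argspec_report(funcs, 'test')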
def decode_list(data):
'''
JSON decodes as unicode, Jinja needs bytes...
'''
rv = []
for item in data:
if isinstance(item, six.text_type) and six.PY2:
item = item.encode('utf-8')
elif isinstance(item, list):
item = decode_list(item)
elif isinstance(item, dict):
item = decode_dict(item)
rv.append(item)
return rv
def decode_dict(data):
'''
JSON decodes as unicode, Jinja needs bytes...
'''
rv = {}
for key, value in six.iteritems(data):
if isinstance(key, six.text_type) and six.PY2:
key = key.encode('utf-8')
if isinstance(value, six.text_type) and six.PY2:
value = value.encode('utf-8')
elif isinstance(value, list):
value = decode_list(value)
elif isinstance(value, dict):
value = decode_dict(value)
rv[key] = value
return rv
def find_json(raw):
'''
    Pass in a raw string and load the json when it starts. This allows for a
    string to start with garbage and end with json but still be cleanly loaded
'''
ret = {}
for ind in range(len(raw)):
working = '\n'.join(raw.splitlines()[ind:])
try:
ret = json.loads(working, object_hook=decode_dict)
except ValueError:
continue
if ret:
return ret
if not ret:
# Not json, raise an error
raise ValueError
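# Small illustration with a made-up payload: leading non-JSON lines are
# skipped one at a time until the remainder parses cleanly.
def _example_find_json():  # pragma: no cover
    raw = 'some log noise\nmore noise\n{"result": true, "count": 2}'
    assert find_json(raw) == {'result': True, 'count': 2}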
def is_bin_file(path):
'''
Detects if the file is a binary, returns bool. Returns True if the file is
a bin, False if the file is not and None if the file is not available.
'''
if not os.path.isfile(path):
return None
try:
with fopen(path, 'r') as fp_:
return is_bin_str(fp_.read(2048))
except os.error:
return None
def is_bin_str(data):
'''
Detects if the passed string of data is bin or text
'''
if '\0' in data:
return True
if not data:
return False
text_characters = ''.join([chr(x) for x in range(32, 127)] + list('\n\r\t\b'))
# Get the non-text characters (map each character to itself then use the
# 'remove' option to get rid of the text characters.)
if six.PY3:
trans = ''.maketrans('', '', text_characters)
nontext = data.translate(trans)
else:
trans = string.maketrans('', '')
nontext = data.translate(trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
    if float(len(nontext)) / len(data) > 0.30:
return True
return False
def is_dictlist(data):
'''
Returns True if data is a list of one-element dicts (as found in many SLS
schemas), otherwise returns False
'''
if isinstance(data, list):
for element in data:
if isinstance(element, dict):
if len(element) != 1:
return False
else:
return False
return True
return False
def repack_dictlist(data,
strict=False,
recurse=False,
key_cb=None,
val_cb=None):
'''
Takes a list of one-element dicts (as found in many SLS schemas) and
repacks into a single dictionary.
'''
if isinstance(data, six.string_types):
try:
import yaml
data = yaml.safe_load(data)
except yaml.parser.ParserError as err:
log.error(err)
return {}
if key_cb is None:
key_cb = lambda x: x
if val_cb is None:
val_cb = lambda x, y: y
valid_non_dict = (six.string_types, int, float)
if isinstance(data, list):
for element in data:
if isinstance(element, valid_non_dict):
continue
elif isinstance(element, dict):
if len(element) != 1:
log.error(
'Invalid input for repack_dictlist: key/value pairs '
'must contain only one element (data passed: %s).',
element
)
return {}
else:
log.error(
'Invalid input for repack_dictlist: element %s is '
'not a string/dict/numeric value', element
)
return {}
else:
log.error(
'Invalid input for repack_dictlist, data passed is not a list '
'(%s)', data
)
return {}
ret = {}
for element in data:
if isinstance(element, valid_non_dict):
ret[key_cb(element)] = None
else:
key = next(iter(element))
val = element[key]
if is_dictlist(val):
if recurse:
ret[key_cb(key)] = repack_dictlist(val, recurse=recurse)
elif strict:
log.error(
'Invalid input for repack_dictlist: nested dictlist '
'found, but recurse is set to False'
)
return {}
else:
ret[key_cb(key)] = val_cb(key, val)
else:
ret[key_cb(key)] = val_cb(key, val)
return ret
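# Illustration with a made-up SLS-style structure: a list of one-element
# dicts is detected by is_dictlist() and flattened by repack_dictlist();
# bare strings become keys with a None value.
def _example_repack_dictlist():  # pragma: no cover
    assert is_dictlist([{'name': 'nginx'}, {'enable': True}])
    data = [{'name': 'nginx'}, {'enable': True}, 'reload']
    assert repack_dictlist(data) == {'name': 'nginx', 'enable': True, 'reload': None}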
def get_group_list(user=None, include_default=True):
'''
Returns a list of all of the system group names of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
# We don't work on platforms that don't have grp and pwd
# Just return an empty list
return []
group_names = None
ugroups = set()
if not isinstance(user, six.string_types):
raise Exception
if hasattr(os, 'getgrouplist'):
# Try os.getgrouplist, available in python >= 3.3
log.trace('Trying os.getgrouplist for \'{0}\''.format(user))
try:
group_names = [
grp.getgrgid(grpid).gr_name for grpid in
os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
]
except Exception:
pass
else:
# Try pysss.getgrouplist
log.trace('Trying pysss.getgrouplist for \'{0}\''.format(user))
try:
import pysss # pylint: disable=import-error
group_names = list(pysss.getgrouplist(user))
except Exception:
pass
if group_names is None:
# Fall back to generic code
# Include the user's default group to behave like
# os.getgrouplist() and pysss.getgrouplist() do
log.trace('Trying generic group list for \'{0}\''.format(user))
group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
if default_group not in group_names:
group_names.append(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
ugroups.update(group_names)
if include_default is False:
# Historically, saltstack code for getting group lists did not
# include the default group. Some things may only want
# supplemental groups, so include_default=False omits the users
# default group.
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
ugroups.remove(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
log.trace('Group list for user \'{0}\': \'{1}\''.format(user, sorted(ugroups)))
return sorted(ugroups)
def get_group_dict(user=None, include_default=True):
'''
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
'''
if HAS_GRP is False or HAS_PWD is False:
# We don't work on platforms that don't have grp and pwd
# Just return an empty dict
return {}
group_dict = {}
group_names = get_group_list(user, include_default=include_default)
for group in group_names:
group_dict.update({group: grp.getgrnam(group).gr_gid})
return group_dict
def get_gid_list(user=None, include_default=True):
'''
Returns a list of all of the system group IDs of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
# We don't work on platforms that don't have grp and pwd
# Just return an empty list
return []
gid_list = [
gid for (group, gid) in
six.iteritems(salt.utils.get_group_dict(user, include_default=include_default))
]
return sorted(set(gid_list))
def total_seconds(td):
'''
Takes a timedelta and returns the total number of seconds
represented by the object. Wrapper for the total_seconds()
method which does not exist in versions of Python < 2.7.
'''
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
def import_json():
'''
    Import a json module, starting with the quick ones and going down the list
'''
for fast_json in ('ujson', 'yajl', 'json'):
try:
mod = __import__(fast_json)
log.trace('loaded {0} json lib'.format(fast_json))
return mod
except ImportError:
continue
def appendproctitle(name):
'''
Append "name" to the current process title
'''
if HAS_SETPROCTITLE:
setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)
def chugid(runas):
'''
Change the current process to belong to
    the specified user (and the groups that user belongs to)
'''
uinfo = pwd.getpwnam(runas)
supgroups = []
supgroups_seen = set()
# The line below used to exclude the current user's primary gid.
# However, when root belongs to more than one group
# this causes root's primary group of '0' to be dropped from
# his grouplist. On FreeBSD, at least, this makes some
# command executions fail with 'access denied'.
#
# The Python documentation says that os.setgroups sets only
# the supplemental groups for a running process. On FreeBSD
# this does not appear to be strictly true.
group_list = get_group_dict(runas, include_default=True)
if sys.platform == 'darwin':
group_list = dict((k, v) for k, v in six.iteritems(group_list)
if not k.startswith('_'))
for group_name in group_list:
gid = group_list[group_name]
if (gid not in supgroups_seen
and not supgroups_seen.add(gid)):
supgroups.append(gid)
if os.getgid() != uinfo.pw_gid:
try:
os.setgid(uinfo.pw_gid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from gid {0} to {1}. Error: {2}'.format(
os.getgid(), uinfo.pw_gid, err
)
)
# Set supplemental groups
if sorted(os.getgroups()) != sorted(supgroups):
try:
os.setgroups(supgroups)
except OSError as err:
raise CommandExecutionError(
'Failed to set supplemental groups to {0}. Error: {1}'.format(
supgroups, err
)
)
if os.getuid() != uinfo.pw_uid:
try:
os.setuid(uinfo.pw_uid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from uid {0} to {1}. Error: {2}'.format(
os.getuid(), uinfo.pw_uid, err
)
)
def chugid_and_umask(runas, umask):
'''
    Helper method for subprocess.Popen to initialise uid/gid and umask
for the new process.
'''
if runas is not None:
chugid(runas)
if umask is not None:
os.umask(umask)
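# Hedged sketch of the intended call pattern: chugid_and_umask() is meant to
# run in the child process via Popen's preexec_fn. The user name and umask
# below are placeholders.
def _example_chugid_and_umask():  # pragma: no cover
    import functools
    import subprocess
    preexec = functools.partial(chugid_and_umask, 'someuser', 0o077)
    return subprocess.Popen(['id'], preexec_fn=preexec)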
def rand_string(size=32):
    key = os.urandom(size)
    if six.PY2:
        return key.encode('base64').replace('\n', '')
    # str.encode('base64') is a Python 2-only codec; use the stdlib base64
    # module on Python 3 and return text to match the Python 2 behaviour.
    import base64
    return base64.b64encode(key).decode('ascii')
def relpath(path, start='.'):
'''
Work around Python bug #5117, which is not (and will not be) patched in
Python 2.6 (http://bugs.python.org/issue5117)
'''
if sys.version_info < (2, 7) and 'posix' in sys.builtin_module_names:
# The below code block is based on posixpath.relpath from Python 2.7,
# which has the fix for this bug.
if not path:
raise ValueError('no path specified')
start_list = [
x for x in os.path.abspath(start).split(os.path.sep) if x
]
path_list = [
x for x in os.path.abspath(path).split(os.path.sep) if x
]
# work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
return os.path.relpath(path, start=start)
def human_size_to_bytes(human_size):
'''
Convert human-readable units to bytes
'''
size_exp_map = {'K': 1, 'M': 2, 'G': 3, 'T': 4, 'P': 5}
human_size_str = str(human_size)
match = re.match(r'^(\d+)([KMGTP])?$', human_size_str)
if not match:
raise ValueError(
'Size must be all digits, with an optional unit type '
'(K, M, G, T, or P)'
)
size_num = int(match.group(1))
unit_multiplier = 1024 ** size_exp_map.get(match.group(2), 0)
return size_num * unit_multiplier
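# Quick illustration of the accepted formats (values are arbitrary):
def _example_human_size_to_bytes():  # pragma: no cover
    assert human_size_to_bytes('2K') == 2048
    assert human_size_to_bytes('1G') == 1073741824
    assert human_size_to_bytes(512) == 512  # bare numbers pass through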
def to_str(s, encoding=None):
'''
Given str, bytes, bytearray, or unicode (py2), return str
'''
if isinstance(s, str):
return s
if six.PY3:
if isinstance(s, (bytes, bytearray)):
return s.decode(encoding or __salt_system_encoding__)
raise TypeError('expected str, bytes, or bytearray')
else:
if isinstance(s, bytearray):
return str(s)
if isinstance(s, unicode): # pylint: disable=incompatible-py3-code
return s.encode(encoding or __salt_system_encoding__)
raise TypeError('expected str, bytearray, or unicode')
def to_bytes(s, encoding=None):
'''
Given bytes, bytearray, str, or unicode (python 2), return bytes (str for
python 2)
'''
if six.PY3:
if isinstance(s, bytes):
return s
if isinstance(s, bytearray):
return bytes(s)
if isinstance(s, str):
return s.encode(encoding or __salt_system_encoding__)
raise TypeError('expected bytes, bytearray, or str')
else:
return to_str(s, encoding)
def to_unicode(s, encoding=None):
'''
Given str or unicode, return unicode (str for python 3)
'''
if six.PY3:
return to_str(s, encoding)
else:
if isinstance(s, str):
return s.decode(encoding or __salt_system_encoding__)
return unicode(s) # pylint: disable=incompatible-py3-code
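# Round-trip illustration for the coercion helpers above; the explicit
# encoding avoids relying on the __salt_system_encoding__ global.
def _example_string_coercion():  # pragma: no cover
    assert to_bytes('salt', encoding='utf-8') == b'salt'
    assert to_str(b'salt', encoding='utf-8') == 'salt'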
def is_list(value):
'''
Check if a variable is a list.
'''
return isinstance(value, list)
def is_iter(y, ignore=six.string_types):
'''
Test if an object is iterable, but not a string type.
Test if an object is an iterator or is iterable itself. By default this
does not return True for string objects.
The `ignore` argument defaults to a list of string types that are not
considered iterable. This can be used to also exclude things like
dictionaries or named tuples.
Based on https://bitbucket.org/petershinners/yter
'''
if ignore and isinstance(y, ignore):
return False
try:
iter(y)
return True
except TypeError:
return False
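# Small illustration: strings are excluded by default, and 'ignore' can be
# widened to exclude other iterables such as dicts.
def _example_is_iter():  # pragma: no cover
    assert is_iter([1, 2, 3])
    assert not is_iter('a string')
    assert not is_iter({'a': 1}, ignore=dict)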
def invalid_kwargs(invalid_kwargs, raise_exc=True):
'''
Raise a SaltInvocationError if invalid_kwargs is non-empty
'''
if invalid_kwargs:
if isinstance(invalid_kwargs, dict):
new_invalid = [
'{0}={1}'.format(x, y)
for x, y in six.iteritems(invalid_kwargs)
]
invalid_kwargs = new_invalid
msg = (
'The following keyword arguments are not valid: {0}'
.format(', '.join(invalid_kwargs))
)
if raise_exc:
raise SaltInvocationError(msg)
else:
return msg
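# Illustration with a made-up keyword: raise_exc=False returns the message
# instead of raising SaltInvocationError.
def _example_invalid_kwargs():  # pragma: no cover
    msg = invalid_kwargs({'bogus': 1}, raise_exc=False)
    assert msg == 'The following keyword arguments are not valid: bogus=1'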
def shlex_split(s, **kwargs):
'''
Only split if variable is a string
'''
if isinstance(s, six.string_types):
return shlex.split(s, **kwargs)
else:
return s
def split_input(val):
'''
Take an input value and split it into a list, returning the resulting list
'''
if isinstance(val, list):
return val
try:
return [x.strip() for x in val.split(',')]
except AttributeError:
return [x.strip() for x in str(val).split(',')]
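# Quick illustration of both splitters (inputs are arbitrary):
def _example_splitters():  # pragma: no cover
    assert shlex_split('echo "hello world"') == ['echo', 'hello world']
    assert split_input('a, b ,c') == ['a', 'b', 'c']
    assert split_input(['already', 'a', 'list']) == ['already', 'a', 'list']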
def str_version_to_evr(verstring):
'''
Split the package version string into epoch, version and release.
Return this as tuple.
The epoch is always not empty. The version and the release can be an empty
string if such a component could not be found in the version string.
"2:1.0-1.2" => ('2', '1.0', '1.2)
"1.0" => ('0', '1.0', '')
"" => ('0', '', '')
'''
if verstring in [None, '']:
return '0', '', ''
idx_e = verstring.find(':')
if idx_e != -1:
try:
epoch = str(int(verstring[:idx_e]))
except ValueError:
# look, garbage in the epoch field, how fun, kill it
epoch = '0' # this is our fallback, deal
else:
epoch = '0'
idx_r = verstring.find('-')
if idx_r != -1:
version = verstring[idx_e + 1:idx_r]
release = verstring[idx_r + 1:]
else:
version = verstring[idx_e + 1:]
release = ''
return epoch, version, release
def simple_types_filter(data):
'''
Convert the data list, dictionary into simple types, i.e., int, float, string,
bool, etc.
'''
if data is None:
return data
simpletypes_keys = (six.string_types, six.text_type, six.integer_types, float, bool)
simpletypes_values = tuple(list(simpletypes_keys) + [list, tuple])
if isinstance(data, (list, tuple)):
simplearray = []
for value in data:
if value is not None:
if isinstance(value, (dict, list)):
value = simple_types_filter(value)
elif not isinstance(value, simpletypes_values):
value = repr(value)
simplearray.append(value)
return simplearray
if isinstance(data, dict):
simpledict = {}
for key, value in six.iteritems(data):
if key is not None and not isinstance(key, simpletypes_keys):
key = repr(key)
if value is not None and isinstance(value, (dict, list, tuple)):
value = simple_types_filter(value)
elif value is not None and not isinstance(value, simpletypes_values):
value = repr(value)
simpledict[key] = value
return simpledict
return data
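# Quick illustration (the object below is arbitrary): values that are not
# simple types are replaced by their repr() so the result stays serialisable.
def _example_simple_types_filter():  # pragma: no cover
    out = simple_types_filter({'name': 'minion', 'when': object()})
    assert out['name'] == 'minion'
    assert isinstance(out['when'], str)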
|
the-stack_0_26038
|
from __future__ import print_function, division
from sympy.core import Add, S, sympify, oo, pi, Symbol, Dummy, expand_func
from sympy.core.compatibility import range, as_int
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.logic import fuzzy_and, fuzzy_not
from sympy.functions.special.zeta_functions import zeta
from sympy.functions.special.error_functions import erf, erfc, Ei
from sympy.functions.elementary.complexes import re
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import sin, cos, cot
from sympy.functions.combinatorial.numbers import bernoulli, harmonic
from sympy.functions.combinatorial.factorials import factorial, rf, RisingFactorial
def intlike(n):
try:
as_int(n, strict=False)
return True
except ValueError:
return False
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
r"""
The gamma function
.. math::
\Gamma(x) := \int^{\infty}_{0} t^{x-1} e^{-t} \mathrm{d}t.
The ``gamma`` function implements the function which passes through the
values of the factorial function, i.e. `\Gamma(n) = (n - 1)!` when n is
    an integer. More generally, `\Gamma(z)` is defined in the whole complex
plane except at the negative integers where there are simple poles.
Examples
========
>>> from sympy import S, I, pi, oo, gamma
>>> from sympy.abc import x
Several special values are known:
>>> gamma(1)
1
>>> gamma(4)
6
>>> gamma(S(3)/2)
sqrt(pi)/2
The Gamma function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(gamma(x))
gamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(gamma(x), x)
gamma(x)*polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(gamma(x), x, 0, 3)
1/x - EulerGamma + x*(EulerGamma**2/2 + pi**2/12) + x**2*(-EulerGamma*pi**2/12 + polygamma(2, 1)/6 - EulerGamma**3/6) + O(x**3)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> gamma(pi).evalf(40)
2.288037795340032417959588909060233922890
>>> gamma(1+I).evalf(20)
0.49801566811835604271 - 0.15494982830181068512*I
See Also
========
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/GammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma/
"""
unbranched = True
def fdiff(self, argindex=1):
if argindex == 1:
return self.func(self.args[0])*polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif intlike(arg):
if arg.is_positive:
return factorial(arg - 1)
else:
return S.ComplexInfinity
elif arg.is_Rational:
if arg.q == 2:
n = abs(arg.p) // arg.q
if arg.is_positive:
k, coeff = n, S.One
else:
n = k = n + 1
if n & 1 == 0:
coeff = S.One
else:
coeff = S.NegativeOne
for i in range(3, 2*k, 2):
coeff *= i
if arg.is_positive:
return coeff*sqrt(S.Pi) / 2**n
else:
return 2**n*sqrt(S.Pi) / coeff
def _eval_expand_func(self, **hints):
arg = self.args[0]
if arg.is_Rational:
if abs(arg.p) > arg.q:
x = Dummy('x')
n = arg.p // arg.q
p = arg.p - n*arg.q
return self.func(x + n)._eval_expand_func().subs(x, Rational(p, arg.q))
if arg.is_Add:
coeff, tail = arg.as_coeff_add()
if coeff and coeff.q != 1:
intpart = floor(coeff)
tail = (coeff - intpart,) + tail
coeff = intpart
tail = arg._new_rawargs(*tail, reeval=False)
return self.func(tail)*RisingFactorial(tail, coeff)
return self.func(*self.args)
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_real(self):
x = self.args[0]
if x.is_nonpositive and x.is_integer:
return False
if intlike(x) and x <= 0:
return False
if x.is_positive or x.is_noninteger:
return True
def _eval_is_positive(self):
x = self.args[0]
if x.is_positive:
return True
elif x.is_noninteger:
return floor(x).is_even
def _eval_rewrite_as_tractable(self, z, **kwargs):
return exp(loggamma(z))
def _eval_rewrite_as_factorial(self, z, **kwargs):
return factorial(z - 1)
def _eval_nseries(self, x, n, logx):
x0 = self.args[0].limit(x, 0)
if not (x0.is_Integer and x0 <= 0):
return super(gamma, self)._eval_nseries(x, n, logx)
t = self.args[0] - x0
return (self.func(t + 1)/rf(self.args[0], -x0 + 1))._eval_nseries(x, n, logx)
def _sage_(self):
import sage.all as sage
return sage.gamma(self.args[0]._sage_())
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
r"""
The lower incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\gamma(s, x) := \int_0^x t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \Gamma(s, x).
This can be shown to be the same as
.. math::
\gamma(s, x) = \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
Examples
========
>>> from sympy import lowergamma, S
>>> from sympy.abc import s, x
>>> lowergamma(s, x)
lowergamma(s, x)
>>> lowergamma(3, x)
-2*(x**2/2 + x + 1)*exp(-x) + 2
>>> lowergamma(-S(1)/2, x)
-2*sqrt(pi)*erf(sqrt(x)) - 2*exp(-x)/sqrt(x)
See Also
========
gamma: Gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Incomplete_gamma_function#Lower_incomplete_Gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return gamma(a)*digamma(a) - log(z)*uppergamma(a, z) \
- meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, a, x):
# For lack of a better place, we use this one to extract branching
# information. The following can be
# found in the literature (c/f references given above), albeit scattered:
# 1) For fixed x != 0, lowergamma(s, x) is an entire function of s
# 2) For fixed positive integers s, lowergamma(s, x) is an entire
# function of x.
# 3) For fixed non-positive integers s,
# lowergamma(s, exp(I*2*pi*n)*x) =
# 2*pi*I*n*(-1)**(-s)/factorial(-s) + lowergamma(s, x)
# (this follows from lowergamma(s, x).diff(x) = x**(s-1)*exp(-x)).
# 4) For fixed non-integral s,
# lowergamma(s, x) = x**s*gamma(s)*lowergamma_unbranched(s, x),
# where lowergamma_unbranched(s, x) is an entire function (in fact
# of both s and x), i.e.
# lowergamma(s, exp(2*I*pi*n)*x) = exp(2*pi*I*n*a)*lowergamma(a, x)
from sympy import unpolarify, I
if x == 0:
return S.Zero
nx, n = x.extract_branch_factor()
if a.is_integer and a.is_positive:
nx = unpolarify(x)
if nx != x:
return lowergamma(a, nx)
elif a.is_integer and a.is_nonpositive:
if n != 0:
return 2*pi*I*n*(-1)**(-a)/factorial(-a) + lowergamma(a, nx)
elif n != 0:
return exp(2*pi*I*n*a)*lowergamma(a, nx)
# Special values.
if a.is_Number:
if a is S.One:
return S.One - exp(-x)
elif a is S.Half:
return sqrt(pi)*erf(sqrt(x))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
if a.is_integer:
return factorial(b) - exp(-x) * factorial(b) * Add(*[x ** k / factorial(k) for k in range(a)])
else:
return gamma(a) * (lowergamma(S.Half, x)/sqrt(pi) - exp(-x) * Add(*[x**(k-S.Half) / gamma(S.Half+k) for k in range(1, a+S.Half)]))
if not a.is_Integer:
return (-1)**(S.Half - a) * pi*erf(sqrt(x)) / gamma(1-a) + exp(-x) * Add(*[x**(k+a-1)*gamma(a) / gamma(a+k) for k in range(1, S(3)/2-a)])
def _eval_evalf(self, prec):
from mpmath import mp, workprec
from sympy import Expr
if all(x.is_number for x in self.args):
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, 0, z)
return Expr._from_mpmath(res, prec)
else:
return self
def _eval_conjugate(self):
z = self.args[1]
if not z in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_uppergamma(self, s, x, **kwargs):
return gamma(s) - uppergamma(s, x)
def _eval_rewrite_as_expint(self, s, x, **kwargs):
from sympy import expint
if s.is_integer and s.is_nonpositive:
return self
return self.rewrite(uppergamma).rewrite(expint)
class uppergamma(Function):
r"""
The upper incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\Gamma(s, x) := \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \gamma(s, x).
where `\gamma(s, x)` is the lower incomplete gamma function,
:class:`lowergamma`. This can be shown to be the same as
.. math::
\Gamma(s, x) = \Gamma(s) - \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
The upper incomplete gamma function is also essentially equivalent to the
generalized exponential integral:
.. math::
\operatorname{E}_{n}(x) = \int_{1}^{\infty}{\frac{e^{-xt}}{t^n} \, dt} = x^{n-1}\Gamma(1-n,x).
Examples
========
>>> from sympy import uppergamma, S
>>> from sympy.abc import s, x
>>> uppergamma(s, x)
uppergamma(s, x)
>>> uppergamma(3, x)
2*(x**2/2 + x + 1)*exp(-x)
>>> uppergamma(-S(1)/2, x)
-2*sqrt(pi)*erfc(sqrt(x)) + 2*exp(-x)/sqrt(x)
>>> uppergamma(-2, x)
expint(3, x)/x**2
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Incomplete_gamma_function#Upper_incomplete_Gamma_function
.. [2] Abramowitz, Milton; Stegun, Irene A., eds. (1965), Chapter 6, Section 5,
Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
.. [3] http://dlmf.nist.gov/8
.. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
.. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
.. [6] https://en.wikipedia.org/wiki/Exponential_integral#Relation_with_other_functions
"""
def fdiff(self, argindex=2):
from sympy import meijerg, unpolarify
if argindex == 2:
a, z = self.args
return -exp(-unpolarify(z))*z**(a - 1)
elif argindex == 1:
a, z = self.args
return uppergamma(a, z)*log(z) + meijerg([], [1, 1], [0, 0, a], [], z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
from mpmath import mp, workprec
from sympy import Expr
if all(x.is_number for x in self.args):
a = self.args[0]._to_mpmath(prec)
z = self.args[1]._to_mpmath(prec)
with workprec(prec):
res = mp.gammainc(a, z, mp.inf)
return Expr._from_mpmath(res, prec)
return self
@classmethod
def eval(cls, a, z):
from sympy import unpolarify, I, expint
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
return S.Zero
elif z is S.Zero:
if re(a).is_positive:
return gamma(a)
# We extract branching information here. C/f lowergamma.
nx, n = z.extract_branch_factor()
if a.is_integer and a.is_positive:
nx = unpolarify(z)
if z != nx:
return uppergamma(a, nx)
elif a.is_integer and a.is_nonpositive:
if n != 0:
return -2*pi*I*n*(-1)**(-a)/factorial(-a) + uppergamma(a, nx)
elif n != 0:
return gamma(a)*(1 - exp(2*pi*I*n*a)) + exp(2*pi*I*n*a)*uppergamma(a, nx)
# Special values.
if a.is_Number:
if a is S.Zero and z.is_positive:
return -Ei(-z)
elif a is S.One:
return exp(-z)
elif a is S.Half:
return sqrt(pi)*erfc(sqrt(z))
elif a.is_Integer or (2*a).is_Integer:
b = a - 1
if b.is_positive:
if a.is_integer:
return exp(-z) * factorial(b) * Add(*[z**k / factorial(k) for k in range(a)])
else:
return gamma(a) * erfc(sqrt(z)) + (-1)**(a - S(3)/2) * exp(-z) * sqrt(z) * Add(*[gamma(-S.Half - k) * (-z)**k / gamma(1-a) for k in range(a - S.Half)])
elif b.is_Integer:
return expint(-b, z)*unpolarify(z)**(b + 1)
if not a.is_Integer:
return (-1)**(S.Half - a) * pi*erfc(sqrt(z))/gamma(1-a) - z**a * exp(-z) * Add(*[z**k * gamma(a) / gamma(a+k+1) for k in range(S.Half - a)])
def _eval_conjugate(self):
z = self.args[1]
if not z in (S.Zero, S.NegativeInfinity):
return self.func(self.args[0].conjugate(), z.conjugate())
def _eval_rewrite_as_lowergamma(self, s, x, **kwargs):
return gamma(s) - lowergamma(s, x)
def _eval_rewrite_as_expint(self, s, x, **kwargs):
from sympy import expint
return expint(1 - s, x)*x**s
def _sage_(self):
import sage.all as sage
return sage.gamma(self.args[0]._sage_(), self.args[1]._sage_())
###############################################################################
###################### POLYGAMMA and LOGGAMMA FUNCTIONS #######################
###############################################################################
class polygamma(Function):
r"""
The function ``polygamma(n, z)`` returns ``log(gamma(z)).diff(n + 1)``.
It is a meromorphic function on `\mathbb{C}` and defined as the (n+1)-th
derivative of the logarithm of the gamma function:
.. math::
\psi^{(n)} (z) := \frac{\mathrm{d}^{n+1}}{\mathrm{d} z^{n+1}} \log\Gamma(z).
Examples
========
Several special values are known:
>>> from sympy import S, polygamma
>>> polygamma(0, 1)
-EulerGamma
>>> polygamma(0, 1/S(2))
-2*log(2) - EulerGamma
>>> polygamma(0, 1/S(3))
-log(3) - sqrt(3)*pi/6 - EulerGamma - log(sqrt(3))
>>> polygamma(0, 1/S(4))
-pi/2 - log(4) - log(2) - EulerGamma
>>> polygamma(0, 2)
1 - EulerGamma
>>> polygamma(0, 23)
19093197/5173168 - EulerGamma
>>> from sympy import oo, I
>>> polygamma(0, oo)
oo
>>> polygamma(0, -oo)
oo
>>> polygamma(0, I*oo)
oo
>>> polygamma(0, -I*oo)
oo
Differentiation with respect to x is supported:
>>> from sympy import Symbol, diff
>>> x = Symbol("x")
>>> diff(polygamma(0, x), x)
polygamma(1, x)
>>> diff(polygamma(0, x), x, 2)
polygamma(2, x)
>>> diff(polygamma(0, x), x, 3)
polygamma(3, x)
>>> diff(polygamma(1, x), x)
polygamma(2, x)
>>> diff(polygamma(1, x), x, 2)
polygamma(3, x)
>>> diff(polygamma(2, x), x)
polygamma(3, x)
>>> diff(polygamma(2, x), x, 2)
polygamma(4, x)
>>> n = Symbol("n")
>>> diff(polygamma(n, x), x)
polygamma(n + 1, x)
>>> diff(polygamma(n, x), x, 2)
polygamma(n + 2, x)
We can rewrite polygamma functions in terms of harmonic numbers:
>>> from sympy import harmonic
>>> polygamma(0, x).rewrite(harmonic)
harmonic(x - 1) - EulerGamma
>>> polygamma(2, x).rewrite(harmonic)
2*harmonic(x - 1, 3) - 2*zeta(3)
>>> ni = Symbol("n", integer=True)
>>> polygamma(ni, x).rewrite(harmonic)
(-1)**(n + 1)*(-harmonic(x - 1, n + 1) + zeta(n + 1))*factorial(n)
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Polygamma_function
.. [2] http://mathworld.wolfram.com/PolygammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma/
.. [4] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
def _eval_evalf(self, prec):
n = self.args[0]
# the mpmath polygamma implementation valid only for nonnegative integers
if n.is_number and n.is_real:
if (n.is_integer or n == int(n)) and n.is_nonnegative:
return super(polygamma, self)._eval_evalf(prec)
def fdiff(self, argindex=2):
if argindex == 2:
n, z = self.args[:2]
return polygamma(n + 1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_real(self):
if self.args[0].is_positive and self.args[1].is_positive:
return True
def _eval_is_positive(self):
if self.args[0].is_positive and self.args[1].is_positive:
return self.args[0].is_odd
def _eval_is_negative(self):
if self.args[0].is_positive and self.args[1].is_positive:
return self.args[0].is_even
def _eval_aseries(self, n, args0, x, logx):
from sympy import Order
if args0[1] != oo or not \
(self.args[0].is_Integer and self.args[0].is_nonnegative):
return super(polygamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[1]
N = self.args[0]
if N == 0:
# digamma function series
# Abramowitz & Stegun, p. 259, 6.3.18
r = log(z) - 1/(2*z)
o = None
if n < 2:
o = Order(1/z, x)
else:
m = ceiling((n + 1)//2)
l = [bernoulli(2*k) / (2*k*z**(2*k)) for k in range(1, m)]
r -= Add(*l)
o = Order(1/z**(2*m), x)
return r._eval_nseries(x, n, logx) + o
else:
# proper polygamma function
# Abramowitz & Stegun, p. 260, 6.4.10
# We return terms to order higher than O(x**n) on purpose
# -- otherwise we would not be able to return any terms for
# quite a long time!
fac = gamma(N)
e0 = fac + N*fac/(2*z)
m = ceiling((n + 1)//2)
for k in range(1, m):
fac = fac*(2*k + N - 1)*(2*k + N - 2) / ((2*k)*(2*k - 1))
e0 += bernoulli(2*k)*fac/z**(2*k)
o = Order(1/z**(2*m), x)
if n == 0:
o = Order(1/z, x)
elif n == 1:
o = Order(1/z**2, x)
r = e0._eval_nseries(z, n, logx) + o
return (-1 * (-1/z)**N * r)._eval_nseries(x, n, logx)
@classmethod
def eval(cls, n, z):
n, z = map(sympify, (n, z))
from sympy import unpolarify
if n.is_integer:
if n.is_nonnegative:
nz = unpolarify(z)
if z != nz:
return polygamma(n, nz)
if n == -1:
return loggamma(z)
else:
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z is S.Infinity:
if n.is_Number:
if n is S.Zero:
return S.Infinity
else:
return S.Zero
elif z.is_Integer:
if z.is_nonpositive:
return S.ComplexInfinity
else:
if n is S.Zero:
return -S.EulerGamma + harmonic(z - 1, 1)
elif n.is_odd:
return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
if n == 0:
if z is S.NaN:
return S.NaN
elif z.is_Rational:
p, q = z.as_numer_denom()
# only expand for small denominators to avoid creating long expressions
if q <= 5:
return expand_func(polygamma(n, z, evaluate=False))
elif z in (S.Infinity, S.NegativeInfinity):
return S.Infinity
else:
t = z.extract_multiplicatively(S.ImaginaryUnit)
if t in (S.Infinity, S.NegativeInfinity):
return S.Infinity
# TODO n == 1 also can do some rational z
def _eval_expand_func(self, **hints):
n, z = self.args
if n.is_Integer and n.is_nonnegative:
if z.is_Add:
coeff = z.args[0]
if coeff.is_Integer:
e = -(n + 1)
if coeff > 0:
tail = Add(*[Pow(
z - i, e) for i in range(1, int(coeff) + 1)])
else:
tail = -Add(*[Pow(
z + i, e) for i in range(0, int(-coeff))])
return polygamma(n, z - coeff) + (-1)**n*factorial(n)*tail
elif z.is_Mul:
coeff, z = z.as_two_terms()
if coeff.is_Integer and coeff.is_positive:
tail = [ polygamma(n, z + Rational(
i, coeff)) for i in range(0, int(coeff)) ]
if n == 0:
return Add(*tail)/coeff + log(coeff)
else:
return Add(*tail)/coeff**(n + 1)
z *= coeff
if n == 0 and z.is_Rational:
p, q = z.as_numer_denom()
# Reference:
# Values of the polygamma functions at rational arguments, J. Choi, 2007
part_1 = -S.EulerGamma - pi * cot(p * pi / q) / 2 - log(q) + Add(
*[cos(2 * k * pi * p / q) * log(2 * sin(k * pi / q)) for k in range(1, q)])
if z > 0:
n = floor(z)
z0 = z - n
return part_1 + Add(*[1 / (z0 + k) for k in range(n)])
elif z < 0:
n = floor(1 - z)
z0 = z + n
return part_1 - Add(*[1 / (z0 - 1 - k) for k in range(n)])
return polygamma(n, z)
def _eval_rewrite_as_zeta(self, n, z, **kwargs):
if n.is_integer:
if (n - S.One).is_nonnegative:
return (-1)**(n + 1)*factorial(n)*zeta(n + 1, z)
def _eval_rewrite_as_harmonic(self, n, z, **kwargs):
if n.is_integer:
if n == S.Zero:
return harmonic(z - 1) - S.EulerGamma
else:
return S.NegativeOne**(n+1) * factorial(n) * (zeta(n+1) - harmonic(z-1, n+1))
def _eval_as_leading_term(self, x):
from sympy import Order
n, z = [a.as_leading_term(x) for a in self.args]
o = Order(z, x)
if n == 0 and o.contains(1/x):
return o.getn() * log(x)
else:
return self.func(n, z)
class loggamma(Function):
r"""
The ``loggamma`` function implements the logarithm of the
gamma function i.e, `\log\Gamma(x)`.
Examples
========
Several special values are known. For numerical integral
arguments we have:
>>> from sympy import loggamma
>>> loggamma(-2)
oo
>>> loggamma(0)
oo
>>> loggamma(1)
0
>>> loggamma(2)
0
>>> loggamma(3)
log(2)
and for symbolic values:
>>> from sympy import Symbol
>>> n = Symbol("n", integer=True, positive=True)
>>> loggamma(n)
log(gamma(n))
>>> loggamma(-n)
oo
for half-integral values:
>>> from sympy import S, pi
>>> loggamma(S(5)/2)
log(3*sqrt(pi)/4)
>>> loggamma(n/2)
log(2**(1 - n)*sqrt(pi)*gamma(n)/gamma(n/2 + 1/2))
and general rational arguments:
>>> from sympy import expand_func
>>> L = loggamma(S(16)/3)
>>> expand_func(L).doit()
-5*log(3) + loggamma(1/3) + log(4) + log(7) + log(10) + log(13)
>>> L = loggamma(S(19)/4)
>>> expand_func(L).doit()
-4*log(4) + loggamma(3/4) + log(3) + log(7) + log(11) + log(15)
>>> L = loggamma(S(23)/7)
>>> expand_func(L).doit()
-3*log(7) + log(2) + loggamma(2/7) + log(9) + log(16)
The loggamma function has the following limits towards infinity:
>>> from sympy import oo
>>> loggamma(oo)
oo
>>> loggamma(-oo)
zoo
The loggamma function obeys the mirror symmetry
if `x \in \mathbb{C} \setminus \{-\infty, 0\}`:
>>> from sympy.abc import x
>>> from sympy import conjugate
>>> conjugate(loggamma(x))
loggamma(conjugate(x))
Differentiation with respect to x is supported:
>>> from sympy import diff
>>> diff(loggamma(x), x)
polygamma(0, x)
Series expansion is also supported:
>>> from sympy import series
>>> series(loggamma(x), x, 0, 4)
-log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + O(x**4)
We can numerically evaluate the gamma function to arbitrary precision
on the whole complex plane:
>>> from sympy import I
>>> loggamma(5).evalf(30)
3.17805383034794561964694160130
>>> loggamma(I).evalf(20)
-0.65092319930185633889 - 1.8724366472624298171*I
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Gamma_function
.. [2] http://dlmf.nist.gov/5
.. [3] http://mathworld.wolfram.com/LogGammaFunction.html
.. [4] http://functions.wolfram.com/GammaBetaErf/LogGamma/
"""
@classmethod
def eval(cls, z):
z = sympify(z)
if z.is_integer:
if z.is_nonpositive:
return S.Infinity
elif z.is_positive:
return log(gamma(z))
elif z.is_rational:
p, q = z.as_numer_denom()
# Half-integral values:
if p.is_positive and q == 2:
return log(sqrt(S.Pi) * 2**(1 - p) * gamma(p) / gamma((p + 1)*S.Half))
if z is S.Infinity:
return S.Infinity
elif abs(z) is S.Infinity:
return S.ComplexInfinity
if z is S.NaN:
return S.NaN
def _eval_expand_func(self, **hints):
from sympy import Sum
z = self.args[0]
if z.is_Rational:
p, q = z.as_numer_denom()
# General rational arguments (u + p/q)
# Split z as n + p/q with p < q
n = p // q
p = p - n*q
if p.is_positive and q.is_positive and p < q:
k = Dummy("k")
if n.is_positive:
return loggamma(p / q) - n*log(q) + Sum(log((k - 1)*q + p), (k, 1, n))
elif n.is_negative:
return loggamma(p / q) - n*log(q) + S.Pi*S.ImaginaryUnit*n - Sum(log(k*q - p), (k, 1, -n))
elif n.is_zero:
return loggamma(p / q)
return self
def _eval_nseries(self, x, n, logx=None):
x0 = self.args[0].limit(x, 0)
if x0 is S.Zero:
f = self._eval_rewrite_as_intractable(*self.args)
return f._eval_nseries(x, n, logx)
return super(loggamma, self)._eval_nseries(x, n, logx)
def _eval_aseries(self, n, args0, x, logx):
from sympy import Order
if args0[0] != oo:
return super(loggamma, self)._eval_aseries(n, args0, x, logx)
z = self.args[0]
m = min(n, ceiling((n + S(1))/2))
r = log(z)*(z - S(1)/2) - z + log(2*pi)/2
l = [bernoulli(2*k) / (2*k*(2*k - 1)*z**(2*k - 1)) for k in range(1, m)]
o = None
if m == 0:
o = Order(1, x)
else:
o = Order(1/z**(2*m - 1), x)
# It is very inefficient to first add the order and then do the nseries
return (r + Add(*l))._eval_nseries(x, n, logx) + o
def _eval_rewrite_as_intractable(self, z, **kwargs):
return log(gamma(z))
def _eval_is_real(self):
z = self.args[0]
if z.is_positive:
return True
elif z.is_nonpositive:
return False
def _eval_conjugate(self):
z = self.args[0]
if not z in (S.Zero, S.NegativeInfinity):
return self.func(z.conjugate())
def fdiff(self, argindex=1):
if argindex == 1:
return polygamma(0, self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def _sage_(self):
import sage.all as sage
return sage.log_gamma(self.args[0]._sage_())
def digamma(x):
r"""
The digamma function is the first derivative of the loggamma function i.e,
.. math::
\psi(x) := \frac{\mathrm{d}}{\mathrm{d} z} \log\Gamma(z)
= \frac{\Gamma'(z)}{\Gamma(z) }
In this case, ``digamma(z) = polygamma(0, z)``.
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Digamma_function
.. [2] http://mathworld.wolfram.com/DigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(0, x)
def trigamma(x):
r"""
The trigamma function is the second derivative of the loggamma function i.e,
.. math::
\psi^{(1)}(z) := \frac{\mathrm{d}^{2}}{\mathrm{d} z^{2}} \log\Gamma(z).
In this case, ``trigamma(z) = polygamma(1, z)``.
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
uppergamma: Upper incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Trigamma_function
.. [2] http://mathworld.wolfram.com/TrigammaFunction.html
.. [3] http://functions.wolfram.com/GammaBetaErf/PolyGamma2/
"""
return polygamma(1, x)
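# Small usage sketch for the two convenience wrappers above; the expected
# values follow from the identities psi(1) = -EulerGamma and psi'(1) = pi**2/6.
def _example_digamma_trigamma():  # pragma: no cover
    from sympy import EulerGamma
    assert digamma(S.One) == -EulerGamma
    assert trigamma(S.One) == pi**2/6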
###############################################################################
##################### COMPLETE MULTIVARIATE GAMMA FUNCTION ####################
###############################################################################
class multigamma(Function):
r"""
The multivariate gamma function is a generalization of the gamma function i.e,
.. math::
\Gamma_p(z) = \pi^{p(p-1)/4}\prod_{k=1}^p \Gamma[z + (1 - k)/2].
Special case, multigamma(x, 1) = gamma(x)
Parameters
==========
p: order or dimension of the multivariate gamma function
Examples
========
>>> from sympy import S, I, pi, oo, gamma, multigamma
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> p = Symbol('p', positive=True, integer=True)
>>> multigamma(x, p)
pi**(p*(p - 1)/4)*Product(gamma(-_k/2 + x + 1/2), (_k, 1, p))
Several special values are known:
>>> multigamma(1, 1)
1
>>> multigamma(4, 1)
6
>>> multigamma(S(3)/2, 1)
sqrt(pi)/2
Writing multigamma in terms of gamma function
>>> multigamma(x, 1)
gamma(x)
>>> multigamma(x, 2)
sqrt(pi)*gamma(x)*gamma(x - 1/2)
>>> multigamma(x, 3)
pi**(3/2)*gamma(x)*gamma(x - 1)*gamma(x - 1/2)
See Also
========
gamma, lowergamma, uppergamma, polygamma, loggamma, digamma, trigamma
sympy.functions.special.beta_functions.beta
References
==========
.. [1] https://en.wikipedia.org/wiki/Multivariate_gamma_function
"""
unbranched = True
def fdiff(self, argindex=2):
from sympy import Sum
if argindex == 2:
x, p = self.args
k = Dummy("k")
return self.func(x, p)*Sum(polygamma(0, x + (1 - k)/2), (k, 1, p))
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, x, p):
from sympy import Product
x, p = map(sympify, (x, p))
if p.is_positive is False or p.is_integer is False:
raise ValueError('Order parameter p must be positive integer.')
k = Dummy("k")
return (pi**(p*(p - 1)/4)*Product(gamma(x + (1 - k)/2),
(k, 1, p))).doit()
def _eval_conjugate(self):
x, p = self.args
return self.func(x.conjugate(), p)
def _eval_is_real(self):
x, p = self.args
y = 2*x
if y.is_integer and (y <= (p - 1)) is True:
return False
if intlike(y) and (y <= (p - 1)):
return False
if y > (p - 1) or y.is_noninteger:
return True
|
the-stack_0_26039
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.http import Http404
from django.urls import reverse
from django.views.generic import CreateView, FormView, DetailView, View, UpdateView
from django.views.generic.edit import FormMixin
from django.shortcuts import render, redirect
from django.utils.safestring import mark_safe
from talisa.mixins import NextUrlMixin, RequestFormAttachMixin
from .forms import LoginForm, RegisterForm, GuestForm, ReactivateEmailForm, UserDetailChangeForm
from .models import EmailActivation
from .signals import user_logged_in
class AccountHomeView(LoginRequiredMixin, DetailView):
template_name = 'accounts/home.html'
def get_object(self):
return self.request.user
class AccountEmailActivateView(FormMixin, View):
success_url = '/login/'
form_class = ReactivateEmailForm
key = None
def get(self, request, key=None, *args, **kwargs):
self.key = key
if key is not None:
qs = EmailActivation.objects.filter(key__iexact=key)
confirm_qs = qs.confirmable()
if confirm_qs.count() == 1:
obj = confirm_qs.first()
obj.activate()
messages.success(request, "Your email has been confirmed. Please login.")
return redirect("login")
else:
activated_qs = qs.filter(activated=True)
if activated_qs.exists():
reset_link = reverse("password_reset")
msg = """Your email has already been confirmed
Do you need to <a href="{link}">reset your password</a>?
""".format(link=reset_link)
messages.success(request, mark_safe(msg))
return redirect("login")
context = {'form': self.get_form(), 'key': key}
return render(request, 'registration/activation-error.html', context)
def post(self, request, *args, **kwargs):
# create form to receive an email
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
msg = """Activation link sent, please check your email."""
request = self.request
messages.success(request, msg)
email = form.cleaned_data.get("email")
obj = EmailActivation.objects.email_exists(email).first()
user = obj.user
new_activation = EmailActivation.objects.create(user=user, email=email)
new_activation.send_activation()
return super(AccountEmailActivateView, self).form_valid(form)
def form_invalid(self, form):
context = {'form': form, "key": self.key}
return render(self.request, 'registration/activation-error.html', context)
class GuestRegisterView(NextUrlMixin, RequestFormAttachMixin, CreateView):
form_class = GuestForm
default_next = '/register/'
def get_success_url(self):
return self.get_next_url()
def form_invalid(self, form):
return redirect(self.default_next)
class LoginView(NextUrlMixin, RequestFormAttachMixin, FormView):
form_class = LoginForm
success_url = '/'
template_name = 'accounts/login.html'
default_next = '/'
def form_valid(self, form):
next_path = self.get_next_url()
return redirect(next_path)
def render_to_response(self, context, **response_kwargs):
if context:
if self.request.user.is_authenticated:
raise Http404
return super(LoginView, self).render_to_response(context, **response_kwargs)
class RegisterView(CreateView):
form_class = RegisterForm
template_name = 'accounts/register.html'
success_url = '/login/'
def render_to_response(self, context, **response_kwargs):
if context:
if self.request.user.is_authenticated:
raise Http404
return super(RegisterView, self).render_to_response(context, **response_kwargs)
class UserDetailUpdateView(LoginRequiredMixin, UpdateView):
form_class = UserDetailChangeForm
template_name = 'accounts/detail-update-view.html'
def get_object(self):
return self.request.user
def get_context_data(self, *args, **kwargs):
context = super(UserDetailUpdateView, self).get_context_data(*args, **kwargs)
context['title'] = 'Change Your Account Details'
return context
def get_success_url(self):
return reverse("account:home")
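# Hedged sketch (not part of this app) of how these views are typically wired
# into a urls.py; the route strings and names are illustrative and assume
# Django 2.0+ (use django.conf.urls.url on older versions).
def _example_urlpatterns():  # pragma: no cover
    from django.urls import path, re_path
    return [
        path('', AccountHomeView.as_view(), name='home'),
        path('details/', UserDetailUpdateView.as_view(), name='user-update'),
        re_path(r'^email/confirm/(?P<key>[0-9A-Za-z]+)/$',
                AccountEmailActivateView.as_view(), name='email-activate'),
    ]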
|
the-stack_0_26041
|
import click
from globus_sdk import LocalGlobusConnectPersonal
from globus_cli.parsing import command, one_use_option
@command(
"local-id",
short_help="Display UUID of locally installed endpoint",
disable_options=["format", "map_http_status"],
adoc_examples="""Do a Globus ls command on the current local endpoint.
[source,bash]
----
$ globus ls "$(globus endpoint local-id)"':/~/'
----
On the assumption that the default directory for Globus Connect Personal is the
user's homedir, list files in the current working directory via Globus:
[source,bash]
----
#!/bin/bash
# NOTE: this script only works in subdirs of $HOME
if [[ $PWD/ != $HOME/* ]]; then
echo "Only works in homedir" >&2
exit 1
fi
# get the CWD as a path relative to the homedir
dir_to_ls=${PWD/#$HOME/'~'}
ep_id="$(globus endpoint local-id)"
globus ls "${ep_id}:/${dir_to_ls}"
----
""",
)
@one_use_option(
"--personal",
is_flag=True,
default=True,
help="Use local Globus Connect Personal endpoint (default)",
)
def local_id(personal: bool) -> None:
"""
Look for data referring to a local installation of Globus Connect Personal software
and display the associated endpoint ID.
This operates by looking for Globus Connect Personal data in the current user's
home directory.
"""
if personal:
try:
ep_id = LocalGlobusConnectPersonal().endpoint_id
except OSError as e:
click.echo(e, err=True)
click.get_current_context().exit(1)
if ep_id is not None:
click.echo(ep_id)
else:
click.echo("No Globus Connect Personal installation found.")
click.get_current_context().exit(1)
|
the-stack_0_26043
|
import pulsectl
from typing import Union
from .app_logger import create_logger
logger = create_logger(__name__)
def _log_and_raise(msg: str):
logger.error(msg)
raise Exception(msg)
class VirtSink:
def __init__(self, name: str, description: str=None):
self.name = name
self.description = description if description else name
class VirtSource:
def __init__(self, name: str, description: str=None):
self.name = name
self.description = description if description else name
class Rerouting:
"""
    This class handles all reroutings via the specified virtual sinks and sources (which are created by this class too).
    Every exception thrown by this class is logged, so explicit logging by callers is not necessary.
"""
def __init__(self, virt_sink1: VirtSink, virt_sink2: VirtSink, virt_mic1: VirtSource, virt_mic2: VirtSource):
self._virt_sink1 = virt_sink1
self._virt_sink2 = virt_sink2
self._virt_mic1 = virt_mic1
self._virt_mic2 = virt_mic2
self._routes = {}
def _get_sink_by_appname(self, pulse: pulsectl.Pulse, app_name: str):
return next(filter(lambda sink: sink.proplist.get('application.name', None) == app_name, pulse.sink_input_list()), None)
def _unload(self, pulse: pulsectl.Pulse, entities: Union[pulsectl.PulseSinkInfo, pulsectl.PulseSourceInfo], name: str):
entity = next(filter(lambda sink: sink.name == name, entities), None)
if entity is None:
            logger.warning(f'Object with name {name} not found. Already unloaded?')
return
try:
pulse.module_unload(entity.owner_module)
logger.info(f'Module {name} unloaded as {entity.owner_module}')
except Exception as exc:
logger.error(f'Failed to unload module {name} as {entity.owner_module}: {exc}')
def init(self):
try:
with pulsectl.Pulse() as pulse:
pulse.module_load('module-null-sink', f'sink_name={self._virt_sink1.name} sink_properties=device.description={self._virt_sink1.description}')
logger.info(f'Sink {self._virt_sink1.name} created')
pulse.module_load('module-remap-source', f'source_name={self._virt_mic1.name} master={self._virt_sink1.name}.monitor source_properties=device.description={self._virt_mic1.description}')
logger.info(f'Source {self._virt_mic1.name} created from {self._virt_sink1.name}.monitor')
pulse.module_load('module-null-sink', f'sink_name={self._virt_sink2.name} sink_properties=device.description={self._virt_sink2.description}')
logger.info(f'Sink {self._virt_sink2.name} created')
pulse.module_load('module-loopback', f'source_dont_move=true sink_dont_move=true sink={self._virt_sink2.name} source={self._virt_sink1.name}.monitor')
logger.info(f'Source {self._virt_sink1.name}.monitor routed to {self._virt_sink2.name}')
pulse.module_load('module-loopback', f'sink_dont_move=true sink={self._virt_sink2.name}')
logger.info(f'Sink {self._virt_sink2.name} loopbacked')
pulse.module_load('module-remap-source', f'source_name={self._virt_mic2.name} master={self._virt_sink2.name}.monitor source_properties=device.description={self._virt_mic2.description}')
logger.info(f'Source {self._virt_mic2.name} created from {self._virt_sink2.name}.monitor')
pulse.module_load('module-loopback', f'source_dont_move=true source={self._virt_sink1.name}.monitor')
logger.info(f'Source {self._virt_sink1.name}.monitor loopbacked')
except Exception as exc:
logger.error(f'Failed to initalize rerouter with virt_sink1: {self._virt_sink1} virt_sink2: {self._virt_sink2} virt_mic1: {self._virt_mic1} virt_mic2: {self._virt_mic2}')
raise
def route(self, app_name: str):
with pulsectl.Pulse() as pulse:
app_input_sink = self._get_sink_by_appname(pulse, app_name)
if app_input_sink is None:
_log_and_raise(f'No input sink with app name {app_name} found')
virt1_sink = pulse.get_sink_by_name(self._virt_sink1.name)
if virt1_sink is None:
_log_and_raise(f'No app sink with name {self._virt_sink1.name} found')
pulse.sink_input_move(app_input_sink.index, virt1_sink.index)
logger.info(f'App {app_name} sink input {app_input_sink.index} moved to {virt1_sink.index}')
self._routes[app_name] = app_input_sink.sink
def cleanup(self):
with pulsectl.Pulse() as pulse:
for app_name in self._routes:
app_input_sink = self._get_sink_by_appname(pulse, app_name)
if app_input_sink is None:
continue
try:
pulse.sink_input_move(app_input_sink.index, self._routes[app_name])
logger.info(f'App {app_name} sink input {app_input_sink.index} moved back to {self._routes[app_name]}')
except Exception as exc:
logger.error(f'Failed to move app {app_name} sink input {app_input_sink.index} back to {self._routes[app_name]}: {exc}')
self._unload(pulse, pulse.sink_list(), self._virt_sink1.name)
self._unload(pulse, pulse.sink_list(), self._virt_sink2.name)
self._unload(pulse, pulse.source_list(), self._virt_mic1.name)
self._unload(pulse, pulse.source_list(), self._virt_mic2.name)
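# Minimal usage sketch (illustrative only): the class name and the VirtSink /
# VirtSource constructor signatures below are assumptions, since they are
# defined elsewhere in this module. The intended lifecycle is
# init() -> route() -> cleanup().
#
#   rerouter = Rerouter(                              # hypothetical class name
#       VirtSink('virt_spk', 'Virtual-Speaker'),      # assumed (name, description)
#       VirtSink('virt_mix', 'Virtual-Mix'),
#       VirtSource('virt_mic', 'Virtual-Mic'),
#       VirtSource('virt_mix_mic', 'Virtual-Mix-Mic'))
#   rerouter.init()             # load the null-sink / remap-source / loopback modules
#   rerouter.route('Firefox')   # move that app's sink input onto the first virtual sink
#   ...                         # record from the virtual microphones in another app
#   rerouter.cleanup()          # move apps back to their original sinks and unload the modules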
|
the-stack_0_26048
|
import argparse
import json
import os
import pickle
import numpy as np
import torch
from .config import FLAGS
from .torch_model import Generator
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def convert_to_haiku(a, h, device):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
generator.eval()
generator.remove_weight_norm()
hk_map = {}
for a, b in generator.state_dict().items():
print(a, b.shape)
if a.startswith('conv_pre'):
a = 'generator/~/conv1_d'
elif a.startswith('conv_post'):
a = 'generator/~/conv1_d_1'
elif a.startswith('ups.'):
ii = a.split('.')[1]
a = f'generator/~/ups_{ii}'
elif a.startswith('resblocks.'):
_, x, y, z, _ = a.split('.')
a = f'generator/~/res_block1_{x}/~/{y}_{z}'
print(a, b.shape)
if a not in hk_map:
hk_map[a] = {}
if len(b.shape) == 1:
hk_map[a]['b'] = b.numpy()
else:
if 'ups' in a:
hk_map[a]['w'] = np.rot90(b.numpy(), k=1, axes=(0, 2))
elif 'conv' in a:
hk_map[a]['w'] = np.swapaxes(b.numpy(), 0, 2)
else:
hk_map[a]['w'] = b.numpy()
FLAGS.ckpt_dir.mkdir(parents=True, exist_ok=True)
with open(FLAGS.ckpt_dir / 'hk_hifi.pickle', 'wb') as f:
pickle.dump(hk_map, f)
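# Layout sketch for the axis juggling above (the Haiku-side expectation is
# inferred from this converter, not asserted from Haiku documentation). A torch
# Conv1d weight is stored as (out_ch, in_ch, kernel), so swapaxes(0, 2) yields
# the (kernel, in_ch, out_ch) layout used by hk.Conv1D. The upsampling layers
# are torch ConvTranspose1d weights stored as (in_ch, out_ch, kernel); rot90
# over axes (0, 2) swaps the first and last axes and additionally reverses the
# kernel axis, which a plain swapaxes would not do. Shape check with made-up
# HiFi-GAN-like sizes:
#
#   >>> import numpy as np
#   >>> np.swapaxes(np.zeros((512, 80, 7)), 0, 2).shape
#   (7, 80, 512)
#   >>> np.rot90(np.zeros((512, 256, 16)), k=1, axes=(0, 2)).shape
#   (16, 256, 512)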
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint-file', required=True)
parser.add_argument('--config-file', required=True)
a = parser.parse_args()
config_file = a.config_file
with open(config_file) as f:
data = f.read()
json_config = json.loads(data)
h = AttrDict(json_config)
device = torch.device('cpu')
convert_to_haiku(a, h, device)
if __name__ == '__main__':
main()
|
the-stack_0_26049
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Theoscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listreceivedbyaddress API
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def get_sub_array_from_array(object_array, to_match):
'''
Finds and returns a sub array from an array of arrays.
to_match should be a unique identifier of a sub array
'''
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
return item
return []
def check_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found in object_array
"""
if should_not_find == True:
expected = { }
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were matched %s"%(str(to_match)))
class ReceivedByTest(BitcoinTestFramework):
def run_test(self):
'''
listreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check not listed in listreceivedbyaddress because it has 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{ },
True)
#Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
self.nodes[1].setgenerate(True, 10)
self.sync_all()
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confirmations < 10
check_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
#With min confirmations > 10, should not find Tx
check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
#Empty Tx
addr = self.nodes[1].getnewaddress()
check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
{"address":addr},
{"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
'''
getreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr,0)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
self.nodes[1].setgenerate(True, 10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
'''
listreceivedbyaccount + getreceivedbyaccount Test
'''
#set pre-state
addrArr = self.nodes[1].getnewaddress()
account = self.nodes[1].getaccount(addrArr)
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
received_by_account_json)
# getreceivedbyaccount should return the same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account:
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
self.nodes[1].setgenerate(True, 10)
self.sync_all()
# listreceivedbyaccount should return updated account balance
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
{"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
# getreceivedbyaccount should return the updated balance
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account + Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
#Create a new account named "mynewaccount" that has a 0 balance
self.nodes[1].getaccountaddress("mynewaccount")
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
# Test includeempty of listreceivedbyaccount
if received_by_account_json["amount"] != Decimal("0.0"):
raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
# Test getreceivedbyaccount for 0 amount accounts
balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
if __name__ == '__main__':
ReceivedByTest().main()
|
the-stack_0_26051
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper for creating the ant environment in gym_mujoco."""
import math
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class PointEnv(mujoco_env.MujocoEnv, utils.EzPickle):
FILE = "point.xml"
ORI_IND = 2
def __init__(self, file_path=None, expose_all_qpos=True):
self._expose_all_qpos = expose_all_qpos
mujoco_env.MujocoEnv.__init__(self, file_path, 1)
utils.EzPickle.__init__(self)
@property
def physics(self):
return self.model
def _step(self, a):
return self.step(a)
def step(self, action):
action[0] = 0.2 * action[0]
qpos = np.copy(self.physics.data.qpos)
qpos[2] += action[1]
ori = qpos[2]
# compute increment in each direction
dx = math.cos(ori) * action[0]
dy = math.sin(ori) * action[0]
# ensure that the robot is within reasonable range
qpos[0] = np.clip(qpos[0] + dx, -100, 100)
qpos[1] = np.clip(qpos[1] + dy, -100, 100)
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
for _ in range(0, self.frame_skip):
self.physics.step()
next_obs = self._get_obs()
reward = 0
done = False
info = {}
return next_obs, reward, done, info
def _get_obs(self):
if self._expose_all_qpos:
return np.concatenate([
self.physics.data.qpos.flat[:3], # Only point-relevant coords.
self.physics.data.qvel.flat[:3]])
return np.concatenate([
self.physics.data.qpos.flat[2:3],
self.physics.data.qvel.flat[:3]])
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(
size=self.physics.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.physics.model.nv) * .1
# Set everything other than point to original position and 0 velocity.
qpos[3:] = self.init_qpos[3:]
qvel[3:] = 0.
self.set_state(qpos, qvel)
return self._get_obs()
def get_ori(self):
return self.model.data.qpos[self.__class__.ORI_IND]
def set_xy(self, xy):
qpos = np.copy(self.physics.data.qpos)
qpos[0] = xy[0]
qpos[1] = xy[1]
qvel = self.physics.data.qvel
self.set_state(qpos, qvel)
|
the-stack_0_26056
|
import tensorflow as tf
import numpy as np
import matplotlib.pylab as plt
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, EarlyStopping, TerminateOnNaN, LambdaCallback
import os
from common import MaterialLoader
from models.AE import AE
from sklearn import preprocessing
materials = MaterialLoader()
train_dataset_n = []
train_dataset_k = []
wavelength_start = 300
wavelength_end = 1200
WLstep = 2
log_dir = 'logs/'
for sample in materials.load_total_material_generator():
wl = sample[1][0]
n = sample[1][1]
k = sample[1][2]
if wl[0] <= wavelength_start and wl[-1] >= wavelength_end:
print(sample[0],len(n), len(k))
n_tmp = []
k_tmp = []
for idx, w in enumerate(wl):
if w >= wavelength_start and w < wavelength_end:
if idx % WLstep == 0:
n_tmp.append(n[idx])
k_tmp.append(k[idx])
train_dataset_n.append(n_tmp)
train_dataset_k.append(k_tmp)
min_max_scaler = preprocessing.MinMaxScaler((0, 1))
train_dataset_n = min_max_scaler.fit_transform(train_dataset_n)
train_dataset_k = min_max_scaler.fit_transform(train_dataset_k)
nan_pos = np.isnan(train_dataset_n)
train_dataset_n = train_dataset_n[~nan_pos.any(axis=1)]
train_dataset_k = train_dataset_k[~nan_pos.any(axis=1)]
train_dataset_input = tf.data.Dataset.from_tensor_slices({'n_in': train_dataset_n,'k_in': train_dataset_k})
train_dataset_output = tf.data.Dataset.from_tensor_slices({'n_out': train_dataset_n,'k_out': train_dataset_k})
train_dataset = tf.data.Dataset.zip((train_dataset_input, train_dataset_output))
# train_dataset = tf.data.Dataset.from_tensor_slices((train_dataset, train_dataset))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(32)
# print(list(train_dataset.as_numpy_iterator()))
logging = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=False, write_grads=False, write_images=False, update_freq='batch')
checkpoint = ModelCheckpoint(os.path.join(log_dir, 'ep{epoch:03d}-loss{loss:.3f}.h5'),
monitor='loss',
verbose=1,
save_weights_only=False,
save_best_only=True,
period=1)
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.5, patience=10, verbose=1, cooldown=0, min_lr=1e-10)
early_stopping = EarlyStopping(monitor='loss', min_delta=0, patience=10, verbose=1)
terminate_on_nan = TerminateOnNaN()
callbacks=[logging, checkpoint, reduce_lr, early_stopping, terminate_on_nan]
ae = AE(encoding_dim=10,
input_shapes=((wavelength_end-wavelength_start)/WLstep, 2),
input_length=int((wavelength_end-wavelength_start)/WLstep))
ae.encoder.compile(loss=['mae', 'mae'], optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))
hist = ae.encoder.fit(train_dataset, epochs=100, callbacks=callbacks)
# print(hist.history().values())
|
the-stack_0_26060
|
#!/usr/bin/env python3
import argparse
from jackal import Service, ServiceSearch
from jackal.utils import print_json, print_line
def main():
services = ServiceSearch()
arg = argparse.ArgumentParser(parents=[services.argparser], conflict_handler='resolve')
arg.add_argument('-c', '--count', help="Only show the number of results", action="store_true")
arguments = arg.parse_args()
if arguments.count:
print_line("Number of services: {}".format(services.argument_count()))
else:
response = services.get_services()
for hit in response:
print_json(hit.to_dict(include_meta=True))
def overview():
"""
Function to create an overview of the services.
Will print a list of open ports found and, for each port, the number of unique addresses it was seen on.
"""
search = Service.search()
search = search.filter("term", state='open')
search.aggs.bucket('port_count', 'terms', field='port', order={'_count': 'desc'}, size=100) \
.metric('unique_count', 'cardinality', field='address')
response = search.execute()
print_line("Port Count")
print_line("---------------")
for entry in response.aggregations.port_count.buckets:
print_line("{0:<7} {1}".format(entry.key, entry.unique_count.value))
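# Illustrative output of overview() (numbers invented): the header row is
# followed by one line per open port with the count of distinct addresses on
# which that port was seen.
#
#   Port Count
#   ---------------
#   443     132
#   80      97
#   22      41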
if __name__ == '__main__':
main()
|
the-stack_0_26062
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Recode by @xgothboi
# FROM KEN-UBOT
# t.me/KennedyProject
#
""" Userbot module containing commands for keeping costum global notes. """
from userbot import BOTLOG_CHATID, CMD_HELP
from userbot.events import register
@register(outgoing=True,
pattern=r"\.\w*",
ignore_unsafe=True,
disable_errors=True)
async def on_snip(event):
"""costums logic."""
try:
from userbot.modules.sql_helper.snips_sql import get_snip
except AttributeError:
return
name = event.text[1:]
snip = get_snip(name)
message_id_to_reply = event.message.reply_to_msg_id
if not message_id_to_reply:
message_id_to_reply = None
if snip:
if snip.f_mesg_id:
msg_o = await event.client.get_messages(
entity=BOTLOG_CHATID, ids=int(snip.f_mesg_id)
)
await event.client.send_message(
event.chat_id,
msg_o.message,
reply_to=message_id_to_reply,
file=msg_o.media,
)
await event.delete()
elif snip.reply:
await event.client.send_message(
event.chat_id, snip.reply, reply_to=message_id_to_reply
)
await event.delete()
@register(outgoing=True, pattern=r"^\.costum (\w*)")
async def on_snip_save(event):
"""For .costum command, saves costums for future use."""
try:
from userbot.modules.sql_helper.snips_sql import add_snip
except AttributeError:
await event.edit("**Berjalan pada mode Non-SQL!**")
return
keyword = event.pattern_match.group(1)
string = event.text.partition(keyword)[2]
msg = await event.get_reply_message()
msg_id = None
if msg and msg.media and not string:
if BOTLOG_CHATID:
await event.client.send_message(
BOTLOG_CHATID,
f"📝 **#COSTUM**\
\n • **KEYWORD:** `{keyword}`\
\n • ☄️ Pesan ini disimpan sebagai catatan data untuk costum, Tolong JANGAN Dihapus!!",
)
msg_o = await event.client.forward_messages(
entity=BOTLOG_CHATID, messages=msg, from_peer=event.chat_id, silent=True
)
msg_id = msg_o.id
else:
await event.edit(
"**Menyimpan kostum dengan media membutuhkan `BOTLOG_CHATID` untuk disetel.**"
)
return
elif event.reply_to_msg_id and not string:
rep_msg = await event.get_reply_message()
string = rep_msg.text
success = (
"**Costum {} disimpan. Gunakan** `.{}` **di mana saja untuk menggunakannya**"
)
if add_snip(keyword, string, msg_id) is False:
await event.edit(success.format("Berhasil", keyword))
else:
await event.edit(success.format("Berhasil", keyword))
@register(outgoing=True, pattern=r"^\.costums$")
async def on_snip_list(event):
"""For .costums command, lists costums saved by you."""
try:
from userbot.modules.sql_helper.snips_sql import get_snips
except AttributeError:
await event.edit("**Berjalan pada mode Non-SQL!**")
return
message = "**Tidak ada kostum yang tersedia saat ini.**"
all_snips = get_snips()
for a_snip in all_snips:
if message == "**Tidak ada kostum yang tersedia saat ini.**":
message = "**List Costum yang tersedia:**\n"
message += f"✣ `.{a_snip.snip}`\n"
await event.edit(message)
@register(outgoing=True, pattern=r"^\.delcostum (\w*)")
async def on_snip_delete(event):
"""For .delcostum command, deletes a costum."""
try:
from userbot.modules.sql_helper.snips_sql import remove_snip
except AttributeError:
await event.edit("**Berjalan pada mode Non-SQL!**")
return
name = event.pattern_match.group(1)
if remove_snip(name) is True:
await event.edit(f"**Berhasil menghapus costum:** `{name}`")
else:
await event.edit(f"**Tidak dapat menemukan costum:** `{name}`")
CMD_HELP.update(
{
"costum": "**Plugin : **`costum`\
\n\n Cmd : `.costum` <nama> <data> atau membalas pesan dengan .costum <nama>\
\nUsage : **Menyimpan pesan costum (catatan global) dengan nama. (bisa dengan gambar, docs, dan stickers!)\
\n\n Cmd : `.costums`\
\nUsage : Mendapat semua costums yang disimpan\
\n\n Cmd : `.delcostum` <nama_costum>\
\nUsage : Menghapus costum yang ditentukan\
"
}
)
|
the-stack_0_26063
|
#!/usr/bin/env python3
import re
import os
import random
from video_datasets_api.cater.definitions import NUM_CLASSES_TASK1, NUM_CLASSES_TASK2
import argparse
def get_parser():
parser = argparse.ArgumentParser(description="Generate CATER task 1 and 2 splits.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("cater_dir", help="max2action dir")
parser.add_argument("--partial", action='store_true', help="Saves only part of the videos")
parser.add_argument("--prob", type=float, default=0.25, help="Probability to include an example. Applied when --partial is specified.")
parser.add_argument("--seed", type=int, default=12, help="Random seed for partial sampling.")
return parser
parser = get_parser()
args = parser.parse_args()
if __name__ == '__main__':
random.seed(args.seed)
lists_dir = os.path.join(args.cater_dir, "lists")
if args.partial:
output_dir = os.path.join(args.cater_dir, "splits_frames_partial")
else:
output_dir = os.path.join(args.cater_dir, "splits_frames")
frames_dir = os.path.join(args.cater_dir, "frames")
action_types = ['actions_order_uniq', 'actions_present']
num_classes_of_action_types = [NUM_CLASSES_TASK2, NUM_CLASSES_TASK1]
split_files = ['train.txt', 'val.txt', 'train_subsetT.txt', 'train_subsetV.txt']
def gen_split(input_path, output_path, num_classes):
with open(input_path, 'r') as input_file:
with open(output_path, 'w') as output_file:
output_file.write(f'{num_classes}\n')
while True:
line = input_file.readline()
if not line:
break
if args.partial and random.random() < 1-args.prob:
# skip the video
continue
video_name, labels = line.split(' ')
video_name_wo_ext = os.path.splitext(video_name)[0]
video_id_str = re.search(r'CATER_new_(\d+)\.avi', video_name).group(1)
video_id = int(video_id_str)
output_line_list = [f'{video_name_wo_ext}/{{:05d}}.jpg', str(video_id), labels.strip(), "0", "300"]
# check corrupted video
if os.path.isfile(os.path.join(frames_dir, video_name_wo_ext, '00300.jpg')):
output_file.write(' '.join(output_line_list) + '\n')
else:
print(f"Skipping corrupted video: {video_name_wo_ext}")
for action_type, num_classes in zip(action_types, num_classes_of_action_types):
os.makedirs(os.path.join(output_dir, action_type), exist_ok=True)
for split_file in split_files:
input_path = os.path.join(lists_dir, action_type, split_file)
output_path = os.path.join(output_dir, action_type, split_file)
gen_split(input_path, output_path, num_classes)
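# Illustrative line from a generated split file (video id and label indices are
# invented; the real labels come from the CATER list files). After the
# class-count header line, each kept video contributes one line of the form
# "<frame-path template> <video id> <comma-separated labels> <start> <end>":
#
#   CATER_new_005200/{:05d}.jpg 5200 3,17 0 300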
|
the-stack_0_26064
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import netutil
from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
from tornado import gen
from tornado.http1connection import HTTP1Connection
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado.netutil import ssl_options_to_context
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
from contextlib import closing
import datetime
import gzip
import os
import shutil
import socket
import ssl
import sys
import tempfile
from io import BytesIO
def read_stream_body(stream, callback):
"""Reads an HTTP response from `stream` and runs callback with its
headers and body."""
chunks = []
class Delegate(HTTPMessageDelegate):
def headers_received(self, start_line, headers):
self.headers = headers
def data_received(self, chunk):
chunks.append(chunk)
def finish(self):
callback((self.headers, b''.join(chunks)))
conn = HTTP1Connection(stream, True)
conn.read_response(Delegate())
class HandlerBaseTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', self.__class__.Handler)])
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
class HelloWorldRequestHandler(RequestHandler):
def initialize(self, protocol="http"):
self.expected_protocol = protocol
def get(self):
if self.request.protocol != self.expected_protocol:
raise Exception("unexpected protocol")
self.finish("Hello world")
def post(self):
self.finish("Got %d bytes in POST" % len(self.request.body))
# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
# ClientHello messages, which are rejected by SSLv3 and TLSv1
# servers. Note that while the OPENSSL_VERSION_INFO was formally
# introduced in python3.2, it was present but undocumented in
# python 2.7
skipIfOldSSL = unittest.skipIf(
getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
"old version of ssl module and/or openssl")
class BaseSSLTest(AsyncHTTPSTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler,
dict(protocol="https"))])
class SSLTestMixin(object):
def get_ssl_options(self):
return dict(ssl_version=self.get_ssl_version(),
**AsyncHTTPSTestCase.get_ssl_options())
def get_ssl_version(self):
raise NotImplementedError()
def test_ssl(self):
response = self.fetch('/')
self.assertEqual(response.body, b"Hello world")
def test_large_post(self):
response = self.fetch('/',
method='POST',
body='A' * 5000)
self.assertEqual(response.body, b"Got 5000 bytes in POST")
def test_non_ssl_request(self):
# Make sure the server closes the connection when it gets a non-ssl
# connection, rather than waiting for a timeout or otherwise
# misbehaving.
with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
with ExpectLog(gen_log, 'Uncaught exception', required=False):
self.http_client.fetch(
self.get_url("/").replace('https:', 'http:'),
self.stop,
request_timeout=3600,
connect_timeout=3600)
response = self.wait()
self.assertEqual(response.code, 599)
# Python's SSL implementation differs significantly between versions.
# For example, SSLv3 and TLSv1 throw an exception if you try to read
# from the socket before the handshake is complete, but the default
# of SSLv23 allows it.
class SSLv23Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv23
@skipIfOldSSL
class SSLv3Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv3
@skipIfOldSSL
class TLSv1Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_TLSv1
@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
class SSLContextTest(BaseSSLTest, SSLTestMixin):
def get_ssl_options(self):
context = ssl_options_to_context(
AsyncHTTPSTestCase.get_ssl_options(self))
assert isinstance(context, ssl.SSLContext)
return context
class BadSSLOptionsTest(unittest.TestCase):
def test_missing_arguments(self):
application = Application()
self.assertRaises(KeyError, HTTPServer, application, ssl_options={
"keyfile": "/__missing__.crt",
})
def test_missing_key(self):
"""A missing SSL key should cause an immediate exception."""
application = Application()
module_dir = os.path.dirname(__file__)
existing_certificate = os.path.join(module_dir, 'test.crt')
self.assertRaises(ValueError, HTTPServer, application, ssl_options={
"certfile": "/__mising__.crt",
})
self.assertRaises(ValueError, HTTPServer, application, ssl_options={
"certfile": existing_certificate,
"keyfile": "/__missing__.key"
})
# This actually works because both files exist
HTTPServer(application, ssl_options={
"certfile": existing_certificate,
"keyfile": existing_certificate
})
class MultipartTestHandler(RequestHandler):
def post(self):
self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
"argument": self.get_argument("argument"),
"filename": self.request.files["files"][0].filename,
"filebody": _unicode(self.request.files["files"][0]["body"]),
})
# This test is also called from wsgi_test
class HTTPConnectionTest(AsyncHTTPTestCase):
def get_handlers(self):
return [("/multipart", MultipartTestHandler),
("/hello", HelloWorldRequestHandler)]
def get_app(self):
return Application(self.get_handlers())
def raw_fetch(self, headers, body, newline=b"\r\n"):
with closing(IOStream(socket.socket())) as stream:
stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
stream.write(
newline.join(headers +
[utf8("Content-Length: %d" % len(body))]) +
newline + newline + body)
read_stream_body(stream, self.stop)
headers, body = self.wait()
return body
def test_multipart_form(self):
# Encodings here are tricky: Headers are latin1, bodies can be
# anything (we use utf8 by default).
response = self.raw_fetch([
b"POST /multipart HTTP/1.0",
b"Content-Type: multipart/form-data; boundary=1234567890",
b"X-Header-encoding-test: \xe9",
],
b"\r\n".join([
b"Content-Disposition: form-data; name=argument",
b"",
u("\u00e1").encode("utf-8"),
b"--1234567890",
u('Content-Disposition: form-data; name="files"; filename="\u00f3"').encode("utf8"),
b"",
u("\u00fa").encode("utf-8"),
b"--1234567890--",
b"",
]))
data = json_decode(response)
self.assertEqual(u("\u00e9"), data["header"])
self.assertEqual(u("\u00e1"), data["argument"])
self.assertEqual(u("\u00f3"), data["filename"])
self.assertEqual(u("\u00fa"), data["filebody"])
def test_newlines(self):
# We support both CRLF and bare LF as line separators.
for newline in (b"\r\n", b"\n"):
response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"",
newline=newline)
self.assertEqual(response, b'Hello world')
def test_100_continue(self):
# Run through a 100-continue interaction by hand:
# When given Expect: 100-continue, we get a 100 response after the
# headers, and then the real response after the body.
stream = IOStream(socket.socket(), io_loop=self.io_loop)
stream.connect(("localhost", self.get_http_port()), callback=self.stop)
self.wait()
stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
b"Content-Length: 1024",
b"Expect: 100-continue",
b"Connection: close",
b"\r\n"]), callback=self.stop)
self.wait()
stream.read_until(b"\r\n\r\n", self.stop)
data = self.wait()
self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
stream.write(b"a" * 1024)
stream.read_until(b"\r\n", self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
stream.read_until(b"\r\n\r\n", self.stop)
header_data = self.wait()
headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b"Got 1024 bytes in POST")
stream.close()
class EchoHandler(RequestHandler):
def get(self):
self.write(recursive_unicode(self.request.arguments))
def post(self):
self.write(recursive_unicode(self.request.arguments))
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
fields = [
('method', str),
('uri', str),
('version', str),
('remote_ip', str),
('protocol', str),
('host', str),
('path', str),
('query', str),
]
for field, expected_type in fields:
self.check_type(field, getattr(self.request, field), expected_type)
self.check_type('header_key', list(self.request.headers.keys())[0], str)
self.check_type('header_value', list(self.request.headers.values())[0], str)
self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
# secure cookies
self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes)
def post(self):
self.check_type('body', self.request.body, bytes)
self.write(self.errors)
def get(self):
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class HTTPServerTest(AsyncHTTPTestCase):
def get_app(self):
return Application([("/echo", EchoHandler),
("/typecheck", TypeCheckHandler),
("//doubleslash", EchoHandler),
])
def test_query_string_encoding(self):
response = self.fetch("/echo?foo=%C3%A9")
data = json_decode(response.body)
self.assertEqual(data, {u("foo"): [u("\u00e9")]})
def test_empty_query_string(self):
response = self.fetch("/echo?foo=&foo=")
data = json_decode(response.body)
self.assertEqual(data, {u("foo"): [u(""), u("")]})
def test_empty_post_parameters(self):
response = self.fetch("/echo", method="POST", body="foo=&bar=")
data = json_decode(response.body)
self.assertEqual(data, {u("foo"): [u("")], u("bar"): [u("")]})
def test_types(self):
headers = {"Cookie": "foo=bar"}
response = self.fetch("/typecheck?foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
def test_double_slash(self):
# urlparse.urlsplit (which tornado.httpserver used to use
# incorrectly) would parse paths beginning with "//" as
# protocol-relative urls.
response = self.fetch("//doubleslash")
self.assertEqual(200, response.code)
self.assertEqual(json_decode(response.body), {})
def test_malformed_body(self):
# parse_qs is pretty forgiving, but it will fail on python 3
# if the data is not utf8. On python 2 parse_qs will work,
# but then the recursive_unicode call in EchoHandler will
# fail.
if str is bytes:
return
with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
response = self.fetch(
'/echo', method="POST",
headers={'Content-Type': 'application/x-www-form-urlencoded'},
body=b'\xe9')
self.assertEqual(200, response.code)
self.assertEqual(b'{}', response.body)
class HTTPServerRawTest(AsyncHTTPTestCase):
def get_app(self):
return Application([
('/echo', EchoHandler),
])
def setUp(self):
super(HTTPServerRawTest, self).setUp()
self.stream = IOStream(socket.socket())
self.stream.connect(('localhost', self.get_http_port()), self.stop)
self.wait()
def tearDown(self):
self.stream.close()
super(HTTPServerRawTest, self).tearDown()
def test_empty_request(self):
self.stream.close()
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
def test_malformed_first_line(self):
with ExpectLog(gen_log, '.*Malformed HTTP request line'):
self.stream.write(b'asdf\r\n\r\n')
# TODO: need an async version of ExpectLog so we don't need
# hard-coded timeouts here.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
self.stop)
self.wait()
def test_malformed_headers(self):
with ExpectLog(gen_log, '.*Malformed HTTP headers'):
self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
self.stop)
self.wait()
def test_chunked_request_body(self):
# Chunked requests are not widely supported and we don't have a way
# to generate them in AsyncHTTPClient, but HTTPServer will read them.
self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: chunked
Content-Type: application/x-www-form-urlencoded
4
foo=
3
bar
0
""".replace(b"\n", b"\r\n"))
read_stream_body(self.stream, self.stop)
headers, response = self.wait()
self.assertEqual(json_decode(response), {u('foo'): [u('bar')]})
class XHeaderTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(remote_ip=self.request.remote_ip,
remote_protocol=self.request.protocol))
def get_httpserver_options(self):
return dict(xheaders=True)
def test_ip_headers(self):
self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")
valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
"4.4.4.4")
valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
"4.4.4.4")
valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
invalid_chars = {"X-Real-IP": "4.4.4.4<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars)["remote_ip"],
"127.0.0.1")
invalid_chars_list = {"X-Forwarded-For": "4.4.4.4, 5.5.5.5<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars_list)["remote_ip"],
"127.0.0.1")
invalid_host = {"X-Real-IP": "www.google.com"}
self.assertEqual(
self.fetch_json("/", headers=invalid_host)["remote_ip"],
"127.0.0.1")
def test_scheme_headers(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "http")
https_scheme = {"X-Scheme": "https"}
self.assertEqual(
self.fetch_json("/", headers=https_scheme)["remote_protocol"],
"https")
https_forwarded = {"X-Forwarded-Proto": "https"}
self.assertEqual(
self.fetch_json("/", headers=https_forwarded)["remote_protocol"],
"https")
bad_forwarded = {"X-Forwarded-Proto": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_forwarded)["remote_protocol"],
"http")
class SSLXHeaderTest(AsyncHTTPSTestCase, HandlerBaseTestCase):
def get_app(self):
return Application([('/', XHeaderTest.Handler)])
def get_httpserver_options(self):
output = super(SSLXHeaderTest, self).get_httpserver_options()
output['xheaders'] = True
return output
def test_request_without_xprotocol(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "https")
http_scheme = {"X-Scheme": "http"}
self.assertEqual(
self.fetch_json("/", headers=http_scheme)["remote_protocol"], "http")
bad_scheme = {"X-Scheme": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_scheme)["remote_protocol"], "https")
class ManualProtocolTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(protocol=self.request.protocol))
def get_httpserver_options(self):
return dict(protocol='https')
def test_manual_protocol(self):
self.assertEqual(self.fetch_json('/')['protocol'], 'https')
@unittest.skipIf(not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin',
"unix sockets not supported on this platform")
class UnixSocketTest(AsyncTestCase):
"""HTTPServers can listen on Unix sockets too.
Why would you want to do this? Nginx can proxy to backends listening
on unix sockets, for one thing (and managing a namespace for unix
sockets can be easier than managing a bunch of TCP port numbers).
Unfortunately, there's no way to specify a unix socket in a url for
an HTTP client, so we have to test this by hand.
"""
def setUp(self):
super(UnixSocketTest, self).setUp()
self.tmpdir = tempfile.mkdtemp()
self.sockfile = os.path.join(self.tmpdir, "test.sock")
sock = netutil.bind_unix_socket(self.sockfile)
app = Application([("/hello", HelloWorldRequestHandler)])
self.server = HTTPServer(app, io_loop=self.io_loop)
self.server.add_socket(sock)
self.stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
self.stream.connect(self.sockfile, self.stop)
self.wait()
def tearDown(self):
self.stream.close()
self.server.stop()
shutil.rmtree(self.tmpdir)
super(UnixSocketTest, self).tearDown()
def test_unix_socket(self):
self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
self.stream.read_until(b"\r\n", self.stop)
response = self.wait()
self.assertEqual(response, b"HTTP/1.1 200 OK\r\n")
self.stream.read_until(b"\r\n\r\n", self.stop)
headers = HTTPHeaders.parse(self.wait().decode('latin1'))
self.stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b"Hello world")
def test_unix_socket_bad_request(self):
# Unix sockets don't have remote addresses so they just return an
# empty string.
with ExpectLog(gen_log, "Malformed HTTP message from"):
self.stream.write(b"garbage\r\n\r\n")
self.stream.read_until_close(self.stop)
response = self.wait()
self.assertEqual(response, b"")
class KeepAliveTest(AsyncHTTPTestCase):
"""Tests various scenarios for HTTP 1.1 keep-alive support.
These tests don't use AsyncHTTPClient because we want to control
connection reuse and closing.
"""
def get_app(self):
class HelloHandler(RequestHandler):
def get(self):
self.finish('Hello world')
def post(self):
self.finish('Hello world')
class LargeHandler(RequestHandler):
def get(self):
# 512KB should be bigger than the socket buffers so it will
# be written out in chunks.
self.write(''.join(chr(i % 256) * 1024 for i in range(512)))
class FinishOnCloseHandler(RequestHandler):
@asynchronous
def get(self):
self.flush()
def on_connection_close(self):
# This is not very realistic, but finishing the request
# from the close callback has the right timing to mimic
# some errors seen in the wild.
self.finish('closed')
return Application([('/', HelloHandler),
('/large', LargeHandler),
('/finish_on_close', FinishOnCloseHandler)])
def setUp(self):
super(KeepAliveTest, self).setUp()
self.http_version = b'HTTP/1.1'
def tearDown(self):
# We just closed the client side of the socket; let the IOLoop run
# once to make sure the server side got the message.
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
self.wait()
if hasattr(self, 'stream'):
self.stream.close()
super(KeepAliveTest, self).tearDown()
# The next few methods are a crude manual http client
def connect(self):
self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
self.stream.connect(('localhost', self.get_http_port()), self.stop)
self.wait()
def read_headers(self):
self.stream.read_until(b'\r\n', self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b'HTTP/1.1 200'), first_line)
self.stream.read_until(b'\r\n\r\n', self.stop)
header_bytes = self.wait()
headers = HTTPHeaders.parse(header_bytes.decode('latin1'))
return headers
def read_response(self):
self.headers = self.read_headers()
self.stream.read_bytes(int(self.headers['Content-Length']), self.stop)
body = self.wait()
self.assertEqual(b'Hello world', body)
def close(self):
self.stream.close()
del self.stream
def test_two_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.stream.write(b'GET / HTTP/1.1\r\n\r\n')
self.read_response()
self.close()
def test_request_close(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\nConnection: close\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.close()
# keepalive is supported for http 1.0 too, but it's opt-in
def test_http10(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\n\r\n')
self.read_response()
self.stream.read_until_close(callback=self.stop)
data = self.wait()
self.assertTrue(not data)
self.assertTrue('Connection' not in self.headers)
self.close()
def test_http10_keepalive(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_http10_keepalive_extra_crlf(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
def test_pipelined_requests(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
self.read_response()
self.read_response()
self.close()
def test_pipelined_cancel(self):
self.connect()
self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n')
# only read once
self.read_response()
self.close()
def test_cancel_during_download(self):
self.connect()
self.stream.write(b'GET /large HTTP/1.1\r\n\r\n')
self.read_headers()
self.stream.read_bytes(1024, self.stop)
self.wait()
self.close()
def test_finish_while_closed(self):
self.connect()
self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n')
self.read_headers()
self.close()
def test_keepalive_chunked(self):
self.http_version = b'HTTP/1.0'
self.connect()
self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n0\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n')
self.read_response()
self.assertEqual(self.headers['Connection'], 'Keep-Alive')
self.close()
class GzipBaseTest(object):
def get_app(self):
return Application([('/', EchoHandler)])
def post_gzip(self, body):
bytesio = BytesIO()
gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio)
gzip_file.write(utf8(body))
gzip_file.close()
compressed_body = bytesio.getvalue()
return self.fetch('/', method='POST', body=compressed_body,
headers={'Content-Encoding': 'gzip'})
def test_uncompressed(self):
response = self.fetch('/', method='POST', body='foo=bar')
self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
class GzipTest(GzipBaseTest, AsyncHTTPTestCase):
def get_httpserver_options(self):
return dict(decompress_request=True)
def test_gzip(self):
response = self.post_gzip('foo=bar')
self.assertEquals(json_decode(response.body), {u('foo'): [u('bar')]})
class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase):
def test_gzip_unsupported(self):
# Gzip support is opt-in; without it the server fails to parse
# the body (but parsing form bodies is currently just a log message,
# not a fatal error).
with ExpectLog(gen_log, "Unsupported Content-Encoding"):
response = self.post_gzip('foo=bar')
self.assertEquals(json_decode(response.body), {})
class StreamingChunkSizeTest(AsyncHTTPTestCase):
# 50 characters long, and repetitive so it can be compressed.
BODY = b'01234567890123456789012345678901234567890123456789'
CHUNK_SIZE = 16
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def get_httpserver_options(self):
return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True)
class MessageDelegate(HTTPMessageDelegate):
def __init__(self, connection):
self.connection = connection
def headers_received(self, start_line, headers):
self.chunk_lengths = []
def data_received(self, chunk):
self.chunk_lengths.append(len(chunk))
def finish(self):
response_body = utf8(json_encode(self.chunk_lengths))
self.connection.write_headers(
ResponseStartLine('HTTP/1.1', 200, 'OK'),
HTTPHeaders({'Content-Length': str(len(response_body))}))
self.connection.write(response_body)
self.connection.finish()
def get_app(self):
class App(HTTPServerConnectionDelegate):
def start_request(self, server_conn, request_conn):
return StreamingChunkSizeTest.MessageDelegate(request_conn)
return App()
def fetch_chunk_sizes(self, **kwargs):
response = self.fetch('/', method='POST', **kwargs)
response.rethrow()
chunks = json_decode(response.body)
self.assertEqual(len(self.BODY), sum(chunks))
for chunk_size in chunks:
self.assertLessEqual(chunk_size, self.CHUNK_SIZE,
'oversized chunk: ' + str(chunks))
self.assertGreater(chunk_size, 0,
'empty chunk: ' + str(chunks))
return chunks
def compress(self, body):
bytesio = BytesIO()
gzfile = gzip.GzipFile(mode='w', fileobj=bytesio)
gzfile.write(body)
gzfile.close()
compressed = bytesio.getvalue()
if len(compressed) >= len(body):
raise Exception("body did not shrink when compressed")
return compressed
def test_regular_body(self):
chunks = self.fetch_chunk_sizes(body=self.BODY)
# Without compression we know exactly what to expect.
self.assertEqual([16, 16, 16, 2], chunks)
def test_compressed_body(self):
self.fetch_chunk_sizes(body=self.compress(self.BODY),
headers={'Content-Encoding': 'gzip'})
# Compression creates irregular boundaries so the assertions
# in fetch_chunk_sizes are as specific as we can get.
def test_chunked_body(self):
def body_producer(write):
write(self.BODY[:20])
write(self.BODY[20:])
chunks = self.fetch_chunk_sizes(body_producer=body_producer)
# HTTP chunk boundaries translate to application-visible breaks
self.assertEqual([16, 4, 16, 14], chunks)
def test_chunked_compressed(self):
compressed = self.compress(self.BODY)
self.assertGreater(len(compressed), 20)
def body_producer(write):
write(compressed[:20])
write(compressed[20:])
self.fetch_chunk_sizes(body_producer=body_producer,
headers={'Content-Encoding': 'gzip'})
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(max_header_size=1024)
def test_small_headers(self):
response = self.fetch("/", headers={'X-Filler': 'a' * 100})
response.rethrow()
self.assertEqual(response.body, b"Hello world")
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch("/", headers={'X-Filler': 'a' * 1000})
self.assertEqual(response.code, 599)
@skipOnTravis
class IdleTimeoutTest(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(idle_connection_timeout=0.1)
def setUp(self):
super(IdleTimeoutTest, self).setUp()
self.streams = []
def tearDown(self):
super(IdleTimeoutTest, self).tearDown()
for stream in self.streams:
stream.close()
def connect(self):
stream = IOStream(socket.socket())
stream.connect(('localhost', self.get_http_port()), self.stop)
self.wait()
self.streams.append(stream)
return stream
def test_unused_connection(self):
stream = self.connect()
stream.set_close_callback(self.stop)
self.wait()
def test_idle_after_use(self):
stream = self.connect()
stream.set_close_callback(lambda: self.stop("closed"))
# Use the connection twice to make sure keep-alives are working
for i in range(2):
stream.write(b"GET / HTTP/1.1\r\n\r\n")
stream.read_until(b"\r\n\r\n", self.stop)
self.wait()
stream.read_bytes(11, self.stop)
data = self.wait()
self.assertEqual(data, b"Hello world")
# Now let the timeout trigger and close the connection.
data = self.wait()
self.assertEqual(data, "closed")
class BodyLimitsTest(AsyncHTTPTestCase):
def get_app(self):
class BufferedHandler(RequestHandler):
def put(self):
self.write(str(len(self.request.body)))
@stream_request_body
class StreamingHandler(RequestHandler):
def initialize(self):
self.bytes_read = 0
def prepare(self):
if 'expected_size' in self.request.arguments:
self.request.connection.set_max_body_size(
int(self.get_argument('expected_size')))
if 'body_timeout' in self.request.arguments:
self.request.connection.set_body_timeout(
float(self.get_argument('body_timeout')))
def data_received(self, data):
self.bytes_read += len(data)
def put(self):
self.write(str(self.bytes_read))
return Application([('/buffered', BufferedHandler),
('/streaming', StreamingHandler)])
def get_httpserver_options(self):
return dict(body_timeout=3600, max_body_size=4096)
def get_http_client(self):
# body_producer doesn't work on curl_httpclient, so override the
# configured AsyncHTTPClient implementation.
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def test_small_body(self):
response = self.fetch('/buffered', method='PUT', body=b'a' * 4096)
self.assertEqual(response.body, b'4096')
response = self.fetch('/streaming', method='PUT', body=b'a' * 4096)
self.assertEqual(response.body, b'4096')
def test_large_body_buffered(self):
with ExpectLog(gen_log, '.*Content-Length too long'):
response = self.fetch('/buffered', method='PUT', body=b'a' * 10240)
self.assertEqual(response.code, 599)
def test_large_body_buffered_chunked(self):
with ExpectLog(gen_log, '.*chunked body too large'):
response = self.fetch('/buffered', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.code, 599)
def test_large_body_streaming(self):
with ExpectLog(gen_log, '.*Content-Length too long'):
response = self.fetch('/streaming', method='PUT', body=b'a' * 10240)
self.assertEqual(response.code, 599)
def test_large_body_streaming_chunked(self):
with ExpectLog(gen_log, '.*chunked body too large'):
response = self.fetch('/streaming', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.code, 599)
def test_large_body_streaming_override(self):
response = self.fetch('/streaming?expected_size=10240', method='PUT',
body=b'a' * 10240)
self.assertEqual(response.body, b'10240')
def test_large_body_streaming_chunked_override(self):
response = self.fetch('/streaming?expected_size=10240', method='PUT',
body_producer=lambda write: write(b'a' * 10240))
self.assertEqual(response.body, b'10240')
@gen_test
def test_timeout(self):
stream = IOStream(socket.socket())
try:
yield stream.connect(('127.0.0.1', self.get_http_port()))
# Use a raw stream because AsyncHTTPClient won't let us read a
# response without finishing a body.
stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n'
b'Content-Length: 42\r\n\r\n')
with ExpectLog(gen_log, 'Timeout reading body'):
response = yield stream.read_until_close()
self.assertEqual(response, b'')
finally:
stream.close()
@gen_test
def test_body_size_override_reset(self):
# The max_body_size override is reset between requests.
stream = IOStream(socket.socket())
try:
yield stream.connect(('127.0.0.1', self.get_http_port()))
# Use a raw stream so we can make sure it's all on one connection.
stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n'
b'Content-Length: 10240\r\n\r\n')
stream.write(b'a' * 10240)
headers, response = yield gen.Task(read_stream_body, stream)
self.assertEqual(response, b'10240')
# Without the ?expected_size parameter, we get the old default value
stream.write(b'PUT /streaming HTTP/1.1\r\n'
b'Content-Length: 10240\r\n\r\n')
with ExpectLog(gen_log, '.*Content-Length too long'):
data = yield stream.read_until_close()
self.assertEqual(data, b'')
finally:
stream.close()
class LegacyInterfaceTest(AsyncHTTPTestCase):
def get_app(self):
# The old request_callback interface does not implement the
# delegate interface, and writes its response via request.write
# instead of request.connection.write_headers.
def handle_request(request):
message = b"Hello world"
request.write(utf8("HTTP/1.1 200 OK\r\n"
"Content-Length: %d\r\n\r\n" % len(message)))
request.write(message)
request.finish()
return handle_request
def test_legacy_interface(self):
response = self.fetch('/')
self.assertEqual(response.body, b"Hello world")
|
the-stack_0_26065
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('challenges', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='challenge',
name='challenge_data',
field=models.TextField(blank=True),
),
]
|
the-stack_0_26066
|
# -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list Cloud Storage objects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.storage import expansion
from six.moves import queue
@base.Hidden
class List(base.ListCommand):
"""List the objects in Cloud Storage buckets."""
detailed_help = {
'DESCRIPTION': """\
*{command}* lets you list the objects in your Cloud Storage buckets.
Forward slashes in object names are logically treated as directories for
the purposes of listing contents. See below for examples of how to use
wildcards to get the listing behavior you want.
""",
'EXAMPLES': """\
To list the contents of a bucket:
$ {command} gs://my-bucket
This will list the direct contents of the bucket. To recursively list the
contents of all directories in the bucket:
$ {command} gs://my-bucket --recursive
You can use wildcards to match multiple paths (including multiple
buckets). Bucket wildcards are expanded only to the buckets contained in
your current project:
$ {command} gs://my-b*/log*.txt
The following wildcards are valid and match only within the current
directory:
*: Matches zero or more characters
?: Matches zero or one characters
[]: Matches a character range (ex. [a-z] or [0-9])
You can use double-star wildcards to match zero or more directory levels
in a path:
$ {command} gs://my-bucket/**/log*.txt
You can also use double-star to match all files after a root in a path:
$ {command} gs://my-bucket/**
Double-star expansion can not be combined with other expressions in a
given path segment and will operate as a single star in that context. For
example:
gs://my-bucket/dir**/log.txt is treated as:
gs://my-bucket/dir*/log.txt and instead should be written as:
gs://my-bucket/dir*/**/log.txt to get the recursive behavior.
""",
}
OBJECT_FORMAT_STRING = """\
table(
path:label=PATH,
data.size.size(zero=""):label=SIZE,
data.timeCreated.date():label=CREATED,
data.updated.date():label=UPDATED
)"""
@staticmethod
def Args(parser):
parser.add_argument(
'path',
nargs='*',
help='The path of objects and directories to list. The path must begin '
'with gs:// and may or may not contain wildcard characters.')
parser.add_argument(
'--recursive',
action='store_true',
help='Recursively list the contents of any directories that match the '
'path expression.')
parser.add_argument(
'--flatten-results',
action='store_true',
help='Show all matching objects in one list as opposed to grouping by '
'directory.')
parser.display_info.AddFormat("""\
table[no-heading](
format('{0}:', dir),
objects:format='%s'
)""" % List.OBJECT_FORMAT_STRING)
def Run(self, args):
paths = args.path or ['gs://']
expander = expansion.GCSPathExpander()
objects, dirs = expander.ExpandPaths(paths)
if args.IsSpecified('flatten_results'):
# Respect the user's choice if given explicitly.
flatten = args.flatten_results
else:
# Get a default for this mode if not specifically provided.
# Simplest case where we are listing only files or a single directory,
# don't nest output in tables by directory.
flatten = bool(not args.recursive and
not (objects and dirs) and
len(dirs) < 2)
# First collect all the directly matching objects.
results = []
if objects:
results.append(
{'dir': '',
'objects': expander.GetSortedObjectDetails(objects)})
# For each matching directory, get the objects directly under it.
dirs_to_process = queue.Queue()
for d in sorted(dirs):
dirs_to_process.put(d)
while not dirs_to_process.empty():
d = dirs_to_process.get()
children = [d + o for o in sorted(expander.ListDir(d))]
details = expander.GetSortedObjectDetails(children)
results.append({'dir': d, 'objects': details})
if args.recursive:
# Recurse on any directories that are found under the current parent.
for c in children:
if expander.IsDir(c):
dirs_to_process.put(c + '/')
if not flatten:
return results
# Flatten results.
args.GetDisplayInfo().AddFormat(List.OBJECT_FORMAT_STRING)
return itertools.chain.from_iterable([x['objects'] for x in results])
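# --- Hedged sketch (not part of the original gcloud source) ---
# Restates the default --flatten-results decision used in List.Run above as a
# standalone helper, purely for illustration: output is flattened only for
# simple listings (non-recursive, at most one matched directory, and not a mix
# of matched objects and directories). The helper name is an assumption.
def _default_flatten_results(recursive, objects, dirs):
  """Return True when results should be shown as a single flat list."""
  return bool(not recursive and not (objects and dirs) and len(dirs) < 2)
# Examples:
#   _default_flatten_results(False, ['gs://b/o.txt'], [])            -> True
#   _default_flatten_results(False, ['gs://b/o.txt'], ['gs://b/d/']) -> False
#   _default_flatten_results(True,  [], ['gs://b/d/'])               -> False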
|
the-stack_0_26068
|
# -*- coding: utf-8 -*-
from pandas_ta.utils import get_drift, get_offset, non_zero_range, verify_series
def pdist(open_, high, low, close, drift=None, offset=None, **kwargs):
"""Indicator: Price Distance (PDIST)"""
# Validate Arguments
open_ = verify_series(open_)
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
drift = get_drift(drift)
offset = get_offset(offset)
# Calculate Result
pdist = 2 * non_zero_range(high, low)
pdist += non_zero_range(open_, close.shift(drift)).abs()
pdist -= non_zero_range(close, open_).abs()
# Offset
if offset != 0:
pdist = pdist.shift(offset)
# Handle fills (the docstring below documents the fillna/fill_method kwargs)
if "fillna" in kwargs:
    pdist.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
    pdist.fillna(method=kwargs["fill_method"], inplace=True)
# Name & Category
pdist.name = "PDIST"
pdist.category = "volatility"
return pdist
pdist.__doc__ = \
"""Price Distance (PDIST)
Measures the "distance" covered by price movements.
Sources:
https://www.prorealcode.com/prorealtime-indicators/pricedistance/
Calculation:
Default Inputs:
drift=1
PDIST = 2(high - low) - ABS(close - open) + ABS(open - close[drift])
Args:
open_ (pd.Series): Series of 'open's
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
drift (int): The difference period. Default: 1
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
|
the-stack_0_26070
|
import os
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Layer
from model import create_model, TripletLossLayer
import numpy as np
import os.path
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from align import AlignDlib
from train import train_model
from data import triplet_generator
from sklearn.metrics import f1_score, accuracy_score
from utils import load_metadata, load_image, download_landmarks
from sklearn.preprocessing import LabelEncoder
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
import warnings
dst_dir = 'models'
dst_file = os.path.join(dst_dir, 'landmarks.dat')
if not os.path.exists(dst_file):
os.makedirs(dst_dir)
download_landmarks(dst_file)
nn4_small2_train = create_model()
nn4_small2_train.load_weights('weights/nn4.small2.v1.h5')
# try:
# open('weights/nn4.small2.myTrain.h5', 'r')
# nn4_small2_train.load_weights('weights/nn4.small2.myTrain.h5')
# except FileNotFoundError:
#
# nn4_small2_train = train_model()
metadata = load_metadata('images')
# Initialize the OpenFace face alignment utility
alignment = AlignDlib('models/landmarks.dat')
# Load an image of Schwarzenegger
jc_orig = load_image(metadata[92].image_path())
# Detect face and return bounding box
bb = alignment.getLargestFaceBoundingBox(jc_orig)
# Transform image using specified face landmark indices and crop image to 96x96
jc_aligned = alignment.align(96, jc_orig, bb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
# Show original image
plt.subplot(131)
plt.imshow(jc_orig)
# Show original image with bounding box
plt.subplot(132)
plt.imshow(jc_orig)
plt.gca().add_patch(patches.Rectangle((bb.left(), bb.top()), bb.width(), bb.height(), fill=False, color='red'))
# Show aligned image
plt.subplot(133)
plt.imshow(jc_aligned)
plt.show()
def align_image(img):
return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
embedded = np.zeros((metadata.shape[0], 128))
for i, m in enumerate(metadata):
img = load_image(m.image_path())
img = align_image(img)
# scale RGB values to interval [0,1]
img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
embedded[i] = nn4_small2_train.predict(np.expand_dims(img, axis=0))[0]
# Verify
def distance(emb1, emb2):
return np.sum(np.square(emb1 - emb2))
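# Note: distance() above is the *squared* L2 distance between embeddings, e.g.
# distance(np.array([0.0, 0.0]), np.array([3.0, 4.0])) == 25.0 (not 5.0), so
# the candidate thresholds evaluated below (0.3 .. 1.0) are in squared units.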
def show_pair(idx1, idx2):
plt.figure(figsize=(8,3))
plt.suptitle(f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}')
plt.subplot(121)
plt.imshow(load_image(metadata[idx1].image_path()))
plt.subplot(122)
plt.imshow(load_image(metadata[idx2].image_path()));
show_pair(94, 95)
show_pair(94, 89)
plt.show()
distances = [] # squared L2 distance between pairs
identical = [] # 1 if same identity, 0 otherwise
num = len(metadata)
for i in range(num - 1):
for j in range(i + 1, num):
distances.append(distance(embedded[i], embedded[j]))
identical.append(1 if metadata[i].name == metadata[j].name else 0)
distances = np.array(distances)
identical = np.array(identical)
thresholds = np.arange(0.3, 1.0, 0.01)
f1_scores = [f1_score(identical, distances < t) for t in thresholds]
acc_scores = [accuracy_score(identical, distances < t) for t in thresholds]
opt_idx = np.argmax(f1_scores)
# Threshold at maximal F1 score
opt_tau = thresholds[opt_idx]
# Accuracy at maximal F1 score
opt_acc = accuracy_score(identical, distances < opt_tau)
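# Note: 'distances < t' yields boolean predictions (a pair is called "same
# identity" when its squared distance falls below t), so the F1/accuracy
# scores above measure how well a single global threshold separates matching
# from non-matching pairs.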
# Plot F1 score and accuracy as function of distance threshold
plt.plot(thresholds, f1_scores, label='F1 score')
plt.plot(thresholds, acc_scores, label='Accuracy')
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title(f'Accuracy at threshold {opt_tau:.2f} = {opt_acc:.3f}');
plt.xlabel('Distance threshold')
plt.legend()
dist_pos = distances[identical == 1]
dist_neg = distances[identical == 0]
plt.figure(figsize=(12,4))
plt.subplot(121)
plt.hist(dist_pos)
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title('Distances (pos. pairs)')
plt.legend()
plt.subplot(122)
plt.hist(dist_neg)
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title('Distances (neg. pairs)')
plt.legend()
plt.show()
targets = np.array([m.name for m in metadata])
encoder = LabelEncoder()
encoder.fit(targets)
# Numerical encoding of identities
y = encoder.transform(targets)
train_idx = np.arange(metadata.shape[0]) % 2 != 0
test_idx = np.arange(metadata.shape[0]) % 2 == 0
# 50 train examples of 10 identities (5 examples each)
X_train = embedded[train_idx]
# 50 test examples of 10 identities (5 examples each)
X_test = embedded[test_idx]
y_train = y[train_idx]
y_test = y[test_idx]
knn = KNeighborsClassifier(n_neighbors=1, metric='euclidean')
svc = LinearSVC()
knn.fit(X_train, y_train)
svc.fit(X_train, y_train)
acc_knn = accuracy_score(y_test, knn.predict(X_test))
acc_svc = accuracy_score(y_test, svc.predict(X_test))
print(f'KNN accuracy = {acc_knn}, SVM accuracy = {acc_svc}')
# Suppress LabelEncoder warning
warnings.filterwarnings('ignore')
example_idx = 23
# example_image = load_image('test/220px-Arnold_Schwarzenegger_September_2017.jpg')
example_image = load_image(metadata[test_idx][example_idx].image_path())
bb = alignment.getLargestFaceBoundingBox(example_image)
example_prediction = svc.predict([embedded[test_idx][example_idx]])
example_identity = encoder.inverse_transform(example_prediction)[0]
print(example_identity)
plt.imshow(example_image)
plt.title(f'Recognized as {example_identity} using SVM')
plt.gca().add_patch(patches.Rectangle((bb.left(), bb.top()), bb.width(), bb.height(), fill=False, color='red'))
plt.show()
from sklearn.manifold import TSNE
X_embedded = TSNE(n_components=2).fit_transform(embedded)
for i, t in enumerate(set(targets)):
idx = targets == t
plt.scatter(X_embedded[idx, 0], X_embedded[idx, 1], label=t)
plt.legend(bbox_to_anchor=(1, 1))
plt.show()
|
the-stack_0_26071
|
'''
Created on March 1, 2019
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
from devices.motion.motion import MotionDevice
from devices.entry.entry import EntryDevice
import utilities
# You would have your own machine learning implementation here.
import intelligence.ml_example.ml_engine_example as ml
# List of minutes that we should evaluate for away mode activity
EVALUATION_INTERVALS_IN_MINUTES = [15, 20, 25, 30, 35, 40, 45, 50, 55, 60]
# Minimum confidence threshold that must be met to go into internal AWAY mode
# This should ideally be different for different users, based on feedback.
# If we predict that the user is away and then it turns out they're not, turn this up.
# If we constantly hit the max amount of time before predicting the user is away, turn this down.
AWAY_CONFIDENCE_THRESHOLD = 0.7
# Maximum confidence threshold
MAX_AWAY_CONFIDENCE_THRESHOLD = 0.85
# Minimum confidence threshold
MIN_AWAY_CONFIDENCE_THRESHOLD = 0.4
# Minimum confidence threshold that must be met to go into internal H2A mode
H2A_CONFIDENCE_THRESHOLD = 0.1
# Amount to adjust the away mode confidence threshold by when we make a mistake or reach the peak
AWAY_CONFIDENCE_THRESHOLD_INCREMENT = 0.05
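# Worked example of the "knob": starting from 0.7, each confirmed mistake
# raises the threshold by 0.05 (0.70 -> 0.75 -> 0.80, capped at 0.85), while
# repeatedly having to force ABSENT without a confident prediction lowers it
# by 0.05 per occurrence, down to the 0.4 floor.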
# Minimum number of motion sensors required for away mode machine learning algorithms to work and be applied. Otherwise we have to wait for the final length of time defined in EVALUATION_INTERVALS_IN_MINUTES.
MINIMUM_NUMBER_OF_MOTION_SENSORS_FOR_AWAY_ML_ALGORITHMS = 2
# Non-volatile variable name used to store the ML model
VARIABLE_ABSENT_MODELS = "absent_models"
# Timer reference - evaluate whether the family is absent.
HOME_AWAY_EVALUATION_REFERENCE = "ha"
# Timer reference - force the location into an absent state.
FORCE_AWAY_REFERENCE = "fa"
# Data stream message reason why the mode changed - ML algorithm made the change
ML_ALGORITHM_MODE_CHANGE = "ML"
# The user changed the mode
USER_ALGORITHM_MODE_CHANGE = "USER"
# Data stream message reason why the mode changed - The last mode change was a mistake and the ML algorithm is trying to fix that mistake
ML_ALGORITHM_FIXING_MISTAKE = "MISTAKE"
# We rely upon Space identification to classify and ignore 'naughty' sensors, defined at installation of the sensor.
# But we also don't always trust end users to set up the service correctly.
# So as a fallback, we use the descriptive names of the sensors as context to
# ignore blatantly obvious entry sensors that are not on the perimeter of the home.
NAUGHTY_ENTRY_SENSOR_NAMES = ['fridge', 'cabinet', 'bath', 'refri', 'freez']
# Location Tags for customer support activities
AWAY_ML_SUCCESS = "away_ml_success"
AWAY_ML_FAILURE = "away_ml_fail"
NOT_ENOUGH_MOTION_SENSOR_TAG = "not_enough_motion_sensors"
class LocationMlExampleMicroservice(Intelligence):
"""
Example machine learning algorithm infrastructure.
This will show some basic infrastructure around an ABSENT detection occupancy sensing machine learning service.
The basic concept behind ABSENT classification is this: assume the primary entrances to the home are instrumented
with entry sensors, and assume there are a couple motion sensors in the home to detect activity. When a door
closes, either there's someone in the house or there's not. Spend about 15 minutes seeing if there's motion
activity. Then, if there's no motion to indicate the house is occupied, start asking the machine learning
algorithm what it thinks on the schedule defined by the EVALUATION_INTERVALS_IN_MINUTES variable above.
There could be someone asleep in bed where there are no sensors, so the machine learning algorithms fill in the
gaps in the data.
IMPORTANT: This microservice is not meant to be completely functional, but serves as a reference of what we should
think about and how we should structure code as we develop machine learning services. We're simply showing the base
infrastructure and best practices to implement machine learning algorithms that safely regenerate models each week,
custom-tailored for the specific family that lives at this Location.
Your bot should include the 'data_request' microservice package in its structure.json. This will wake up about
once per week and download all data from a Location. The data is then passed around to all microservices through the
data_request_ready() event. The 'data_request' microservice package uses a reference 'all' for its data requests,
so when the data_request_ready() event fires, you can check the reference to see if this data request includes
all data from the location as requested by the 'data_request' microservice package.
Some machine learning developers choose to preprocess the raw CSV data before building models. For example,
you may take the data you're interested in, and merge it together into a single chronological narrative. While
we're not showing this preprocess step here, you could assume it would take place in the ml_engine_example.py.
Note the structure.json file in this microservice package includes scipy, numpy, and sklearn packages, which
would normally be an important set of tools for most machine learning services.
"""
def __init__(self, botengine, parent):
"""
Instantiate this object
:param parent: Parent object, either a location or a device object.
"""
Intelligence.__init__(self, botengine, parent)
# List of durations, in minutes, that were used in the generation of our decision models. For example, [15, 30, 45]
# These are the amounts of time after a door closes that we wait to evaluate whether the house is occupied.
self.durations = None
# Specific duration index we're focused on right now
self.focused_duration_index = 0
# Away confidence threshold which we can dial up and down like a knob to make the ML algorithms more discerning
self.away_confidence_threshold = AWAY_CONFIDENCE_THRESHOLD
# Reference to the last door closed object
self.last_door_closed_object = None
# First boot - request all existing data from this location to generate initial models.
# This data stream message is picked up by the 'data_request' microservice package, and will later
# result in the data_request_ready() event triggering below.
self.parent.distribute_datastream_message(botengine, 'download_data')
def initialize(self, botengine):
"""
Initialize
:param botengine: BotEngine environment
"""
return
def destroy(self, botengine):
"""
This device or object is getting permanently deleted - it is no longer in the user's account.
:param botengine: BotEngine environment
"""
# This microservice is getting destroyed, so remove its non-volatile memory so we don't end up
# with zombie wasted space.
botengine.delete_variable(VARIABLE_ABSENT_MODELS)
def mode_updated(self, botengine, current_mode):
"""
Mode was updated
:param botengine: BotEngine environment
:param current_mode: Current mode
"""
return
def device_measurements_updated(self, botengine, device_object):
"""
Device was updated
:param botengine: BotEngine environment
:param device_object: Device object that was updated
"""
if isinstance(device_object, EntryDevice):
# Door opened or closed
if not self._is_entry_sensor_naughty(botengine, device_object):
if device_object.did_close(botengine):
# Door closed
self.focused_duration_index = 0
self.door_closed_timestamps_ms = botengine.get_timestamp()
self.last_door_closed_object = device_object
self.start_timer_s(botengine, self._calculate_next_duration(botengine), argument=[HOME_AWAY_EVALUATION_REFERENCE, device_object], reference=HOME_AWAY_EVALUATION_REFERENCE)
elif isinstance(device_object, MotionDevice):
# Motion detected or not
if device_object.did_start_detecting_motion(botengine):
# Motion detected
if self.is_timer_running(botengine, reference=HOME_AWAY_EVALUATION_REFERENCE) or self.is_timer_running(botengine, reference=FORCE_AWAY_REFERENCE):
botengine.get_logger().info("location_mlexample_microservice: '{}' detected motion. Canceling the away detection timer.".format(device_object.description))
self.cancel_timers(botengine, reference=HOME_AWAY_EVALUATION_REFERENCE)
self.cancel_timers(botengine, reference=FORCE_AWAY_REFERENCE)
def device_metadata_updated(self, botengine, device_object):
"""
Evaluate a device that is new or whose goal/scenario was recently updated
:param botengine: BotEngine environment
:param device_object: Device object that was updated
"""
return
def device_alert(self, botengine, device_object, alert_type, alert_params):
"""
Device sent an alert.
When a device disconnects, it will send an alert like this: [{u'alertType': u'status', u'params': [{u'name': u'deviceStatus', u'value': u'2'}], u'deviceId': u'eb10e80a006f0d00'}]
When a device reconnects, it will send an alert like this: [{u'alertType': u'on', u'deviceId': u'eb10e80a006f0d00'}]
:param botengine: BotEngine environment
:param device_object: Device object that sent the alert
:param alert_type: Type of alert
"""
return
def device_deleted(self, botengine, device_object):
"""
Device is getting deleted
:param botengine: BotEngine environment
:param device_object: Device object that is getting deleted
"""
return
def question_answered(self, botengine, question):
"""
The user answered a question
:param botengine: BotEngine environment
:param question: Question object
"""
return
def datastream_updated(self, botengine, address, content):
"""
Data Stream Message Received
:param botengine: BotEngine environment
:param address: Data Stream address
:param content: Content of the message
"""
if hasattr(self, address):
getattr(self, address)(botengine, content)
def schedule_fired(self, botengine, schedule_id):
"""
The bot executed on a hard coded schedule specified by our runtime.json file
:param botengine: BotEngine environment
:param schedule_id: Schedule ID that is executing from our list of runtime schedules
"""
return
def timer_fired(self, botengine, argument):
"""
The bot's intelligence timer fired
:param botengine: Current botengine environment
:param argument: Argument applied when setting the timer
"""
# First we're going to tag accounts that don't have enough sensors for this machine learning service to operate.
motion_devices = 0
for device_id in self.parent.devices:
motion_devices += isinstance(self.parent.devices[device_id], MotionDevice)
if motion_devices < MINIMUM_NUMBER_OF_MOTION_SENSORS_FOR_AWAY_ML_ALGORITHMS:
if not NOT_ENOUGH_MOTION_SENSOR_TAG in self.tags:
# Not enough motion sensors for away mode detection - Tag it and end
botengine.tag_location(NOT_ENOUGH_MOTION_SENSOR_TAG)
self.tags.append(NOT_ENOUGH_MOTION_SENSOR_TAG)
return
elif NOT_ENOUGH_MOTION_SENSOR_TAG in self.tags:
# There are enough motion sensors and it was previously tagged - remove the tag and continue
botengine.delete_location_tag(NOT_ENOUGH_MOTION_SENSOR_TAG)
self.tags.remove(NOT_ENOUGH_MOTION_SENSOR_TAG)
# Next let's evaluate whether the occupant is home or away.
# Note that there are 2 arguments passed into the timer in this case,
# so we're looking to argument[0] for the action we want to perform, and argument[1] is extra context.
if argument[0] == HOME_AWAY_EVALUATION_REFERENCE:
# Evaluate whether the user is absent
import numpy as np
# Extract a relative hour of the day feature for our ML prediction
relative_hour_of_day = self.parent.get_local_hour_of_day(botengine)
# Extract a day-of-the-week feature for our ML prediction
day_of_week = self.parent.get_local_day_of_week(botengine)
# Download our absent models from non-volatile memory
absent_models = botengine.load_variable(VARIABLE_ABSENT_MODELS)
# The argument[1] contains the last door closed object
last_door_closed_object = argument[1]
# Grab a specific model for the amount of time that has elapsed
focused_model = absent_models[self.focused_duration_index]
# Initial probability that this user is absent - set low but greater than 0.0.
probability_absent = H2A_CONFIDENCE_THRESHOLD
try:
# Call your machine learning algorithm with the latest features and the model we're comparing against.
probability_absent = ml.get_prediction_value(
relative_hour_of_day,
day_of_week,
last_door_closed_object,
focused_model)
# Track the results in your favorite analytics tool
self.parent.track(botengine, 'probability_absent', properties={"probability": probability_absent})
# See the results on your command line terminal
botengine.get_logger().info("location_mlexample_microservice: {} probability that the user is absent".format(probability_absent))
except Exception as e:
botengine.get_logger().warn("location_mlexample_microservice : Cannot get absent prediction. " + str(e))
import traceback
botengine.get_logger().error(traceback.format_exc())
# Is the probability that the user is away greater than our current confidence threshold?
# The confidence threshold acts like a knob that can go up and down to tune itself to this
# user's account as we learn whether our predictions were right or wrong.
if probability_absent >= self.away_confidence_threshold:
# You appear to be away now
if "ABSENT" not in self.parent.occupancy_status:
self._announce_occupancy(botengine, "ABSENT", ML_ALGORITHM_MODE_CHANGE)
return
elif probability_absent >= H2A_CONFIDENCE_THRESHOLD:
# You might be transitioning into absent mode
# Check again soon.
# We use the term "H2A" as a transitionary state to represent home-to-absent.
# It's to say 'We're not really sure if the person is away yet, but maybe.."
# There are some useful things that can be done here, like slowly dialing down the thermostat to
# save energy.
if "H2A" not in self.parent.occupancy_status:
self._announce_occupancy(botengine, "H2A", ML_ALGORITHM_MODE_CHANGE)
# If we got this far, we're still evaluating whether the person is home or away.
# We increase the index of which self.durations interval we're going to evaluate next, so
# we can go from 15 minutes to 20 minutes to 25 minutes, etc. and test the model each step of the way.
self.focused_duration_index += 1
if self.focused_duration_index <= len(self.durations) - 1:
# Set a timer to keep evaluating at the next prescribed duration of time.
self.start_timer_s(botengine, self._calculate_next_duration(botengine),
argument=[HOME_AWAY_EVALUATION_REFERENCE, last_door_closed_object],
reference=HOME_AWAY_EVALUATION_REFERENCE)
else:
# Okay, we ran out of machine learning models (or never had any to begin with).
#
# It's been a really long time. We haven't seen any activity in the home to indicate someone is there.
# Let's make an application-layer decision to force this location into an 'absent' occupancy status
# after another 15 minutes passes with no activity.
self.start_timer_s(botengine, 60 * 15, argument=[FORCE_AWAY_REFERENCE], reference=FORCE_AWAY_REFERENCE)
elif argument[0] == FORCE_AWAY_REFERENCE:
# We've gone over our limit to evaluate if the user is home or absent.
# We gave up. The machine learning algorithms weren't able to make a decision.
# Let's make an application-layer decision to just force this user into an absent occupancy status because
# it has been so long...
if "ABSENT" not in self.parent.occupancy_status:
# Dial down the threshold for next time so we can try to be more aggressive.
self.away_confidence_threshold -= AWAY_CONFIDENCE_THRESHOLD_INCREMENT
if self.away_confidence_threshold < MIN_AWAY_CONFIDENCE_THRESHOLD:
self.away_confidence_threshold = MIN_AWAY_CONFIDENCE_THRESHOLD
self._announce_occupancy(botengine, "ABSENT", ML_ALGORITHM_MODE_CHANGE)
def file_uploaded(self, botengine, device_object, file_id, filesize_bytes, content_type, file_extension):
"""
A device file has been uploaded
:param botengine: BotEngine environment
:param device_object: Device object that uploaded the file
:param file_id: File ID to reference this file at the server
:param filesize_bytes: The file size in bytes
:param content_type: The content type, for example 'video/mp4'
:param file_extension: The file extension, for example 'mp4'
"""
return
def coordinates_updated(self, botengine, latitude, longitude):
"""
Approximate coordinates of the parent proxy device object have been updated
:param latitude: Latitude
:param longitude: Longitude
"""
return
def user_role_updated(self, botengine, user_id, alert_category, location_access, previous_alert_category,
previous_location_access):
"""
A user changed roles
:param botengine: BotEngine environment
:param user_id: User ID that changed roles
:param alert_category: User's current alert/communications category (1=resident; 2=supporter)
:param location_access: User's access to the location and devices. (0=None; 10=read location/device data; 20=control devices and modes; 30=update location info and manage devices)
:param previous_alert_category: User's previous category, if any
:param previous_location_access: User's previous access to the location, if any
"""
return
def data_request_ready(self, botengine, reference, csv_dict):
"""
A botengine.request_data() asynchronous request for CSV data is ready.
This is part of a very scalable method to extract large amounts of data from the server for the purpose of
machine learning services. If a service needs to extract a large amount of data for one or multiple devices,
the developer should call botengine.request_data(..) and also allow the bot to trigger off of trigger type 2048.
The bot can exit its current execution. The server will independently gather all the necessary data and
capture it into a LZ4-compressed CSV file on the server which is available for one day and accessible only by
the bot through a public HTTPS URL identified by a cryptographic token. The bot then gets triggered and
downloads the CSV data, passing the data throughout the environment with this data_request_ready()
event-driven method.
Developers are encouraged to use the 'reference' argument inside calls to botengine.request_data(..). The
reference is passed back out at the completion of the request, allowing the developer to ensure the
data request that is now available was truly destined for their microservice.
Your bots will need to include the following configuration for data requests to operate:
* runtime.json should include trigger 2048
* structure.json should include inside 'pip_install_remotely' a reference to the "lz4" Python package
:param botengine: BotEngine environment
:param reference: Optional reference passed into botengine.request_data(..)
:param csv_dict: { device_object: 'csv data string' }
"""
if reference == "all":
# This is a data request response that was driven by the 'data_request' microservice package.
# This typically fires once per week, but could fire more often if requested through
# a data stream message with the 'download_data'.
# We can update the definition of EVALUATION_INTERVALS_IN_MINUTES any time, without
# affecting the operation of ML models that have already been generated. Here, we take
# a snapshot of the current evaluation intervals we're going to be building machine learning
# models around.
self.durations = EVALUATION_INTERVALS_IN_MINUTES
try:
# Here we calculate the models based on all the data passed in.
# There is effectively one model calculated per duration of time.
# Underneath, you would implement algorithms to take this raw data and convert it into
# models, using whatever machine learning tools you believe are appropriate for this task.
absent_models = ml.generate_prediction_models(csv_dict, self.durations)
# Now save the model. The model can potentially get quite large, and we do not need it for every
# execution of the bot. Therefore it's a perfect candidate to store in its own variable direct
# through the botengine environment, instead of as a class variable locally here which would
# have to get downloaded with every trigger and execution of the bot.
botengine.save_variable(VARIABLE_ABSENT_MODELS, absent_models)
# Let's see how big the model got.
size = utilities.getsize(absent_models)
botengine.get_logger().info("sizeof(home_away_model) = {}".format(size))
# Let's track the results to our analytics tools of choice.
self.parent.track(botengine, 'away_model_calculated', properties={"sizeof": size, "durations": self.durations})
# Let's tag this Location so we can see as administrators through the Maestro Command Center
# whether this Location was successful at generating models or not.
botengine.delete_location_tag(AWAY_ML_FAILURE)
botengine.tag_location(AWAY_ML_SUCCESS)
except utilities.MachineLearningError as e:
# Your ML tools down below should raise a MachineLearningError when they can't converge on a solution,
# possibly because there's not enough data.
# Tag the Location as having a problem generating models.
botengine.delete_location_tag(AWAY_ML_SUCCESS)
botengine.tag_location(AWAY_ML_FAILURE)
return
except Exception as e:
# Some strange exception
# Look for errors like this so you can fix them: ./botengine --errors com.you.Bot
botengine.get_logger().warn("location_mlexample_microservice : Cannot create absent model. " + str(e))
import traceback
botengine.get_logger().error(traceback.format_exc())
# Tag the Location as having a problem generating models.
botengine.delete_location_tag(AWAY_ML_SUCCESS)
botengine.tag_location(AWAY_ML_FAILURE)
return
botengine.get_logger().info("location_mlexample_microservice: Done generating absent models")
return
def occupancy_status_updated(self, botengine, status, reason, last_status, last_reason):
"""
AI Occupancy Status updated
:param botengine: BotEngine
:param status: Current occupancy status
:param reason: Current occupancy reason
:param last_status: Last occupancy status
:param last_reason: Last occupancy reason
"""
# See how well Away mode detection did...
# We implement a basic reinforcement algorithm to identify success or failure of the machine learning
# decisions, and then use this to dial the knob of our confidence thresholds up to require our
# machine learning algorithms to be more discerning next time.
if (( "ABSENT" in last_status or "A2H" in last_status) and ML_ALGORITHM_MODE_CHANGE in last_reason) and "PRESENT" in status:
# This intelligence changed to away mode last time. Were we correct?
if ML_ALGORITHM_MODE_CHANGE in last_reason:
# Some other intelligence changed the mode this time.
if "MOTION" in reason:
# We're now in HOME mode because motion was detected.
# This means we made a mistake - dial up the confidence threshold.
self.away_confidence_threshold += AWAY_CONFIDENCE_THRESHOLD_INCREMENT
if self.away_confidence_threshold > MAX_AWAY_CONFIDENCE_THRESHOLD:
self.away_confidence_threshold = MAX_AWAY_CONFIDENCE_THRESHOLD
# Track it in your favorite analytics service
self.parent.track(botengine,
'occupancy_absent_failure',
properties={
"away_confidence_threshold": self.away_confidence_threshold
})
elif "ENTRY" in reason:
# Success (really, dependent upon enough doors being instrumented and motion sensors in place)
self.parent.track(botengine,
'occupancy_absent_success',
properties={
"away_confidence_threshold": self.away_confidence_threshold
})
elif ("ABSENT" in last_status or "A2H" in last_status) and ("AI" not in last_status and "AWAY" in last_status):
# The user changed their mode to AWAY which confirms we made the right choice.
# Log this in your favorite analytics service
self.parent.track(botengine, 'occupancy_absent_success',
properties={
"away_confidence_threshold": self.away_confidence_threshold
})
def _is_entry_sensor_naughty(self, botengine, device_object):
"""
Return True if the entry sensor device is naughty and should be ignored
:param device_object:
:return:
"""
if not device_object.is_goal_id(EntryDevice.GOAL_PERIMETER_NORMAL) and not device_object.is_goal_id(EntryDevice.GOAL_PERIMETER_ALERT_ALWAYS):
botengine.get_logger().info("location_mlexample_microservice: Entry sensor {} is not on the perimeter and will be ignored.".format(device_object.description))
return True
for name in NAUGHTY_ENTRY_SENSOR_NAMES:
if name in device_object.description.lower():
botengine.get_logger().info("location_mlexample_microservice: Entry sensor {} has a naughty name and will be ignored.".format(device_object.description))
return True
return False
def _calculate_next_duration(self, botengine):
"""
Calculate the next duration of time, in seconds, that we wait before re-evaluating whether the family is away.
:param botengine:
:return: relative time in seconds to wait
"""
if self.focused_duration_index == 0:
return self.durations[0] * 60
return (self.durations[self.focused_duration_index] - self.durations[self.focused_duration_index - 1]) * 60
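# Worked example with EVALUATION_INTERVALS_IN_MINUTES = [15, 20, 25, ...]:
#   index 0 -> 15 * 60 = 900 seconds after the door closes,
#   index 1 -> (20 - 15) * 60 = 300 seconds after the previous evaluation,
# and so on, so the checks land on the absolute 15/20/25-minute marks.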
def _announce_occupancy(self, botengine, status, reason):
"""
Announce the mode change internally to other microservices
:param botengine: BotEngine environment
:param status: New occupancy status
:param reason: "ML" if the machine learning algorithm changed the mode, or "MISTAKE" if the machine learning algorithm is correcting its mistake in the previous state change
"""
# This would go to another service that arbitrates between multiple individual microservice's recommendations
# about what the occupancy status should be.
self.parent.distribute_datastream_message(botengine, "occupancy_recommendation", { "status": status, "reason": reason, "source": "AWAY" }, internal=True, external=False)
|
the-stack_0_26072
|
# Copyright (C) 2018 Riedel Communications GmbH & Co. KG
#
# Modifications Copyright 2018 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import socket
import os
from requests.compat import json
from jsonschema import ValidationError
from urllib.parse import urlparse
from dnslib import QTYPE
from copy import deepcopy
from pathlib import Path
from zeroconf_monkey import ServiceBrowser, ServiceInfo, Zeroconf
from .. import Config as CONFIG
from ..MdnsListener import MdnsListener
from ..GenericTest import GenericTest, NMOSTestException, NMOS_WIKI_URL
from ..IS04Utils import IS04Utils
from ..TestHelper import get_default_ip, load_resolved_schema
NODE_API_KEY = "node"
RECEIVER_CAPS_KEY = "receiver-caps"
CAPS_REGISTER_KEY = "caps-register"
class IS0401Test(GenericTest):
"""
Runs IS-04-01-Test
"""
def __init__(self, apis, registries, node, dns_server):
GenericTest.__init__(self, apis)
self.invalid_registry = registries[0]
self.primary_registry = registries[1]
self.registries = registries[1:]
self.node = node
self.dns_server = dns_server
self.node_url = self.apis[NODE_API_KEY]["url"]
self.registry_basics_done = False
self.registry_basics_data = []
self.registry_primary_data = None
self.registry_invalid_data = None
self.node_basics_data = {
"self": None, "devices": None, "sources": None,
"flows": None, "senders": None, "receivers": None
}
self.is04_utils = IS04Utils(self.node_url)
self.zc = None
self.zc_listener = None
def set_up_tests(self):
self.zc = Zeroconf()
self.zc_listener = MdnsListener(self.zc)
if self.dns_server:
self.dns_server.load_zone(self.apis[NODE_API_KEY]["version"], self.protocol, self.authorization,
"test_data/IS0401/dns_records.zone", CONFIG.PORT_BASE+100)
print(" * Waiting for up to {} seconds for a DNS query before executing tests"
.format(CONFIG.DNS_SD_ADVERT_TIMEOUT))
self.dns_server.wait_for_query(
QTYPE.PTR,
[
"_nmos-register._tcp.{}.".format(CONFIG.DNS_DOMAIN),
"_nmos-registration._tcp.{}.".format(CONFIG.DNS_DOMAIN)
],
CONFIG.DNS_SD_ADVERT_TIMEOUT
)
# Wait for a short time to allow the device to react after performing the query
time.sleep(CONFIG.API_PROCESSING_TIMEOUT)
def tear_down_tests(self):
if self.zc:
self.zc.close()
self.zc = None
if self.dns_server:
self.dns_server.reset()
def _registry_mdns_info(self, port, priority=0, api_ver=None, api_proto=None, api_auth=None, ip=None):
"""Get an mDNS ServiceInfo object in order to create an advertisement"""
if api_ver is None:
api_ver = self.apis[NODE_API_KEY]["version"]
if api_proto is None:
api_proto = self.protocol
if api_auth is None:
api_auth = self.authorization
if ip is None:
ip = get_default_ip()
hostname = "nmos-mocks.local."
else:
hostname = ip.replace(".", "-") + ".local."
# TODO: Add another test which checks support for parsing CSV string in api_ver
txt = {'api_ver': api_ver, 'api_proto': api_proto, 'pri': str(priority), 'api_auth': str(api_auth).lower()}
service_type = "_nmos-registration._tcp.local."
if self.is04_utils.compare_api_version(self.apis[NODE_API_KEY]["version"], "v1.3") >= 0:
service_type = "_nmos-register._tcp.local."
info = ServiceInfo(service_type,
"NMOSTestSuite{}{}.{}".format(port, api_proto, service_type),
addresses=[socket.inet_aton(ip)], port=port,
properties=txt, server=hostname)
return info
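# For example, with default settings on a v1.3 Node under test this advertises
# something like "_nmos-register._tcp.local." on the given port with TXT
# records api_ver=v1.3, api_proto=http, pri=0, api_auth=false (the concrete
# values depend on the configured API version, protocol and authorization).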
def do_node_basics_prereqs(self):
"""Collect a copy of each of the Node's resources"""
for resource in self.node_basics_data:
url = "{}{}".format(self.node_url, resource)
valid, r = self.do_request("GET", url)
if valid and r.status_code == 200:
try:
self.node_basics_data[resource] = r.json()
except Exception:
pass
def do_registry_basics_prereqs(self):
"""Advertise a registry and collect data from any Nodes which discover it"""
if self.registry_basics_done:
return
if not CONFIG.ENABLE_DNS_SD:
self.do_node_basics_prereqs()
return
if CONFIG.DNS_SD_MODE == "multicast":
registry_mdns = []
priority = 0
# Add advertisement with invalid version
info = self._registry_mdns_info(self.invalid_registry.get_data().port, priority, "v9.0")
registry_mdns.append(info)
# Add advertisement with invalid protocol
info = self._registry_mdns_info(self.invalid_registry.get_data().port, priority, None, "invalid")
registry_mdns.append(info)
# Add advertisement for primary and failover registries
for registry in self.registries[0:-1]:
info = self._registry_mdns_info(registry.get_data().port, priority)
registry_mdns.append(info)
priority += 10
# Add a fake advertisement for a timeout simulating registry
info = self._registry_mdns_info(444, priority, ip="192.0.2.1")
registry_mdns.append(info)
priority += 10
# Add the final real registry advertisement
info = self._registry_mdns_info(self.registries[-1].get_data().port, priority)
registry_mdns.append(info)
# Reset all registries to clear previous heartbeats, etc.
self.invalid_registry.reset()
for registry in self.registries:
registry.reset()
self.invalid_registry.enable()
self.primary_registry.enable()
if CONFIG.DNS_SD_MODE == "multicast":
# Advertise the primary registry and invalid ones at pri 0, and allow the Node to do a basic registration
if self.is04_utils.compare_api_version(self.apis[NODE_API_KEY]["version"], "v1.0") != 0:
self.zc.register_service(registry_mdns[0])
self.zc.register_service(registry_mdns[1])
self.zc.register_service(registry_mdns[2])
# Wait for n seconds after advertising the service for the first POST from a Node
start_time = time.time()
while time.time() < start_time + CONFIG.DNS_SD_ADVERT_TIMEOUT:
if self.primary_registry.has_registrations():
break
if self.invalid_registry.has_registrations():
break
time.sleep(0.2)
# Wait until we're sure the Node has registered everything it intends to, and we've had at least one heartbeat
while (time.time() - self.primary_registry.last_time) < CONFIG.HEARTBEAT_INTERVAL + 1 or \
(time.time() - self.invalid_registry.last_time) < CONFIG.HEARTBEAT_INTERVAL + 1:
time.sleep(0.2)
# Collect matching resources from the Node
self.do_node_basics_prereqs()
# Ensure we have two heartbeats from the Node, assuming any are arriving (for test_05)
if len(self.primary_registry.get_data().heartbeats) > 0 or len(self.invalid_registry.get_data().heartbeats) > 0:
# It is heartbeating, but we don't have enough of them yet
while len(self.primary_registry.get_data().heartbeats) < 2 and \
len(self.invalid_registry.get_data().heartbeats) < 2:
time.sleep(0.2)
# Once registered, advertise all other registries at different (ascending) priorities
for index, registry in enumerate(self.registries[1:]):
registry.enable()
if CONFIG.DNS_SD_MODE == "multicast":
for info in registry_mdns[3:]:
self.zc.register_service(info)
# Kill registries one by one to collect data around failover
self.invalid_registry.disable()
for index, registry in enumerate(self.registries):
registry.disable()
# Prevent access to an out of bounds index below
if (index + 1) >= len(self.registries):
break
# in event of testing HTTPS support, the TLS handshake seems to take nearly 2 seconds, so
# when the first registry is disabled, an additional few seconds is needed to ensure the node
# has a chance to make a connection to it, receive the 5xx error, and make a connection to
# the next one
if CONFIG.ENABLE_HTTPS:
heartbeat_countdown = CONFIG.HEARTBEAT_INTERVAL + 1 + 5
else:
heartbeat_countdown = CONFIG.HEARTBEAT_INTERVAL + 1
# Wait an extra heartbeat interval when dealing with the timeout test
# This allows a Node's connection to time out and then register with the next mock registry
if (index + 2) == len(self.registries):
heartbeat_countdown += CONFIG.HEARTBEAT_INTERVAL
while len(self.registries[index + 1].get_data().heartbeats) < 1 and heartbeat_countdown > 0:
# Wait until the heartbeat interval has elapsed or a heartbeat has been received
time.sleep(0.2)
heartbeat_countdown -= 0.2
if len(self.registries[index + 1].get_data().heartbeats) < 1:
# Testing has failed at this point, so we might as well abort
break
# Clean up mDNS advertisements and disable registries
if CONFIG.DNS_SD_MODE == "multicast":
for info in registry_mdns:
self.zc.unregister_service(info)
self.invalid_registry.disable()
for index, registry in enumerate(self.registries):
registry.disable()
self.registry_basics_done = True
for registry in self.registries:
self.registry_basics_data.append(registry.get_data())
self.registry_invalid_data = self.invalid_registry.get_data()
# If the Node preferred the invalid registry, don't penalise it for other tests which check the general
# interactions are correct
if len(self.registry_invalid_data.posts) > 0:
self.registry_primary_data = self.registry_invalid_data
else:
self.registry_primary_data = self.registry_basics_data[0]
def test_01(self, test):
"""Node can discover network registration service via multicast DNS"""
if not CONFIG.ENABLE_DNS_SD or CONFIG.DNS_SD_MODE != "multicast":
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False or DNS_SD_MODE is not "
"'multicast'")
self.do_registry_basics_prereqs()
registry_data = self.registry_primary_data
if len(registry_data.posts) > 0:
return test.PASS()
return test.FAIL("Node did not attempt to register with the advertised registry.")
def test_01_01(self, test):
"""Node does not attempt to register with an unsuitable registry"""
if self.is04_utils.compare_api_version(self.apis[NODE_API_KEY]["version"], "v1.0") == 0:
return test.NA("Nodes running v1.0 do not check DNS-SD api_ver and api_proto TXT records")
if not CONFIG.ENABLE_DNS_SD or CONFIG.DNS_SD_MODE != "multicast":
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False or DNS_SD_MODE is not "
"'multicast'")
self.do_registry_basics_prereqs()
if len(self.registry_invalid_data.posts) > 0:
return test.FAIL("Node incorrectly registered with a registry advertising an invalid 'api_ver' or "
"'api_proto'")
return test.PASS()
def test_02(self, test):
"""Node can discover network registration service via unicast DNS"""
if not CONFIG.ENABLE_DNS_SD or CONFIG.DNS_SD_MODE != "unicast":
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False or DNS_SD_MODE is not "
"'unicast'")
self.do_registry_basics_prereqs()
registry_data = self.registry_primary_data
if len(registry_data.posts) > 0:
return test.PASS()
return test.FAIL("Node did not attempt to register with the advertised registry.")
def test_02_01(self, test):
"""Node does not attempt to register with an unsuitable registry"""
if self.is04_utils.compare_api_version(self.apis[NODE_API_KEY]["version"], "v1.0") == 0:
return test.NA("Nodes running v1.0 do not check DNS-SD api_ver and api_proto TXT records")
if not CONFIG.ENABLE_DNS_SD or CONFIG.DNS_SD_MODE != "unicast":
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False or DNS_SD_MODE is not "
"'unicast'")
self.do_registry_basics_prereqs()
if len(self.registry_invalid_data.posts) > 0:
return test.FAIL("Node incorrectly registered with a registry advertising an invalid 'api_ver' or "
"'api_proto'")
return test.PASS()
def test_03(self, test):
"""Registration API interactions use the correct headers"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
registry_data = self.registry_primary_data
if len(registry_data.posts) == 0:
return test.UNCLEAR("No registrations found")
ctype_warn = ""
for resource in registry_data.posts:
ctype_valid, ctype_message = self.check_content_type(resource[1]["headers"])
if not ctype_valid:
return test.FAIL(ctype_message)
elif ctype_message and not ctype_warn:
ctype_warn = ctype_message
accept_valid, accept_message = self.check_accept(resource[1]["headers"])
if not accept_valid:
return test.FAIL(accept_message)
if "Transfer-Encoding" not in resource[1]["headers"]:
if "Content-Length" not in resource[1]["headers"]:
return test.FAIL("One or more Node POSTs did not include Content-Length")
else:
if "Content-Length" in resource[1]["headers"]:
return test.FAIL("API signalled both Transfer-Encoding and Content-Length")
if ctype_warn:
return test.WARNING(ctype_warn)
else:
return test.PASS()
def test_03_01(self, test):
"""Registration API interactions use the correct versioned path"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
api = self.apis[NODE_API_KEY]
self.do_registry_basics_prereqs()
registry_data = self.registry_primary_data
if len(registry_data.posts) == 0:
return test.UNCLEAR("No registrations found")
for resource in registry_data.posts:
if resource[1]["version"] != api["version"]:
return test.FAIL("One or more Node POSTs used version '{}' instead of '{}'"
.format(resource[1]["version"], api["version"]))
for resource in registry_data.deletes:
if resource[1]["version"] != api["version"]:
return test.FAIL("One or more Node DELETEs used version '{}' instead of '{}'"
.format(resource[1]["version"], api["version"]))
for resource in registry_data.heartbeats:
if resource[1]["version"] != api["version"]:
return test.FAIL("One or more Node heartbeats used version '{}' instead of '{}'"
.format(resource[1]["version"], api["version"]))
return test.PASS()
def get_registry_resource(self, res_type, res_id):
"""Get a specific resource ID from the mock registry, or a real registry if DNS-SD is disabled"""
found_resource = None
if CONFIG.ENABLE_DNS_SD:
# Look up data in local mock registry
registry_data = self.registry_primary_data
for resource in registry_data.posts:
if resource[1]["payload"]["type"] == res_type and resource[1]["payload"]["data"]["id"] == res_id:
found_resource = resource[1]["payload"]["data"]
else:
# Look up data from a configured Query API
url = "{}://{}:{}/x-nmos/query/{}/{}s/{}".format(
self.protocol,
CONFIG.QUERY_API_HOST,
str(CONFIG.QUERY_API_PORT),
self.apis[NODE_API_KEY]["version"],
res_type,
res_id
)
try:
valid, r = self.do_request("GET", url)
if valid and r.status_code == 200:
found_resource = r.json()
else:
raise Exception
except Exception:
print(" * ERROR: Unable to load resource from the configured Query API ({}:{})".format(
CONFIG.QUERY_API_HOST,
CONFIG.QUERY_API_PORT
))
return found_resource
def get_node_resources(self, res_type):
"""Get resources matching a specific type from the Node API"""
if res_type == "node":
res_type = "self"
else:
res_type = res_type + "s"
resp_json = self.node_basics_data[res_type]
resources = {}
if resp_json is None:
raise ValueError
elif isinstance(resp_json, dict):
resources[resp_json["id"]] = resp_json
else:
for resource in resp_json:
resources[resource["id"]] = resource
return resources
def do_test_matching_resource(self, test, res_type):
"""Check that a resource held in the registry matches the resource held by the Node API"""
try:
node_resources = self.get_node_resources(res_type)
if len(node_resources) == 0:
return test.UNCLEAR("No {} resources were found on the Node.".format(res_type.title()))
for res_id in node_resources:
reg_resource = self.get_registry_resource(res_type, res_id)
if not reg_resource:
return test.FAIL("{} {} was not found in the registry.".format(res_type.title(), res_id))
elif reg_resource != node_resources[res_id]:
return test.FAIL("Node API JSON does not match data in registry for "
"{} {}.".format(res_type.title(), res_id))
return test.PASS()
except ValueError:
return test.FAIL("Failed to reach Node API or invalid JSON received!")
def parent_resource_type(self, res_type):
"""Find the parent resource type required for a given resource type"""
if res_type == "device":
return "node"
elif res_type == "flow" and \
self.is04_utils.compare_api_version(self.apis[NODE_API_KEY]["version"], "v1.0") <= 0:
return "source"
elif res_type in ["sender", "receiver", "source", "flow"]:
return "device"
else:
return None
def preceding_resource_type(self, res_type):
"""Find the preceding resource type recommended for a given resource type,
if different than the parent resource type"""
# The recommendation ensures e.g. that a Query API client would find the Source and Flow
# associated with a particular Sender
if res_type == "flow" and \
self.is04_utils.compare_api_version(self.apis[NODE_API_KEY]["version"], "v1.0") > 0:
return "source"
elif res_type == "sender":
return "flow"
else:
return None
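# Putting the two helpers together: for a Sender (v1.1+) the parent type is
# 'device' and the recommended preceding type is 'flow', so the expected
# registration order checked below is node -> device -> source -> flow -> sender.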
def do_test_referential_integrity(self, test, res_type):
"""Check that the parents for a specific resource type are held in the mock registry,
and the recommended order for referential integrity has been adhered to"""
api = self.apis[NODE_API_KEY]
# Look up data in local mock registry
registry_data = self.registry_primary_data
parent_type = self.parent_resource_type(res_type)
registered_parents = []
preceding_type = self.preceding_resource_type(res_type)
registered_preceding = []
preceding_warn = ""
found_resource = False
try:
# Cycle over registrations in order
for resource in registry_data.posts:
rtype = resource[1]["payload"]["type"]
rdata = resource[1]["payload"]["data"]
if rtype == parent_type:
registered_parents.append(rdata["id"])
elif preceding_type and rtype == preceding_type:
registered_preceding.append(rdata["id"])
elif rtype == res_type:
found_resource = True
if rdata[parent_type + "_id"] not in registered_parents:
return test.FAIL("{} '{}' was registered before its referenced '{}' '{}'"
.format(res_type.title(), rdata["id"],
parent_type + "_id", rdata[parent_type + "_id"]))
if preceding_type and rdata[preceding_type + "_id"] and \
rdata[preceding_type + "_id"] not in registered_preceding and not preceding_warn:
preceding_warn = "{} '{}' was registered before its referenced '{}' '{}'" \
.format(res_type.title(), rdata["id"],
preceding_type + "_id", rdata[preceding_type + "_id"])
if preceding_warn:
return test.WARNING(preceding_warn,
"https://specs.amwa.tv/is-04/branches/{}"
"/docs/4.1._Behaviour_-_Registration.html#referential-integrity"
.format(api["spec_branch"]))
elif found_resource:
return test.PASS()
else:
return test.UNCLEAR("No {} resources were registered with the mock registry.".format(res_type.title()))
except KeyError as e:
return test.FAIL("Unable to find expected key in the registered {}: {}".format(res_type.title(), e))
def test_04(self, test):
"""Node can register a valid Node resource with the network registration service,
matching its Node API self resource"""
self.do_registry_basics_prereqs()
return self.do_test_matching_resource(test, "node")
def test_05(self, test):
"""Node maintains itself in the registry via periodic calls to the health resource"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
api = self.apis[NODE_API_KEY]
self.do_registry_basics_prereqs()
registry_data = self.registry_primary_data
if len(registry_data.heartbeats) < 2:
return test.FAIL("Not enough heartbeats were made in the time period.")
initial_node = registry_data.posts[0]
last_hb = None
for heartbeat in registry_data.heartbeats:
# Ensure the Node ID for heartbeats matches the registrations
if heartbeat[1]["node_id"] != initial_node[1]["payload"]["data"]["id"]:
return test.FAIL("Heartbeats matched a different Node ID to the initial registration.")
if last_hb:
# Check frequency of heartbeats matches the defaults
time_diff = heartbeat[0] - last_hb[0]
if time_diff > CONFIG.HEARTBEAT_INTERVAL + 0.5:
return test.FAIL("Heartbeats are not frequent enough.")
elif time_diff < CONFIG.HEARTBEAT_INTERVAL - 0.5:
return test.FAIL("Heartbeats are too frequent.")
else:
# For first heartbeat, check against Node registration
if (heartbeat[0] - initial_node[0]) > CONFIG.HEARTBEAT_INTERVAL + 0.5:
return test.FAIL("First heartbeat occurred too long after initial Node registration.")
# Ensure the heartbeat request body is empty
if heartbeat[1]["payload"] is not bytes():
return test.WARNING("Heartbeat POST contained a payload body.",
"https://specs.amwa.tv/is-04/branches/{}"
"/docs/2.2._APIs_-_Client_Side_Implementation_Notes.html#empty-request-bodies"
.format(api["spec_branch"]))
if "Content-Type" in heartbeat[1]["headers"]:
return test.WARNING("Heartbeat POST contained a Content-Type header.",
"https://specs.amwa.tv/is-04/branches/{}"
"/docs/2.2._APIs_-_Client_Side_Implementation_Notes.html#empty-request-bodies"
.format(api["spec_branch"]))
if "Transfer-Encoding" not in heartbeat[1]["headers"]:
if "Content-Length" not in heartbeat[1]["headers"] or \
int(heartbeat[1]["headers"]["Content-Length"]) != 0:
# The NMOS spec currently says Content-Length: 0 is OPTIONAL, but it is RECOMMENDED in RFC 7230
# and omitting it causes problems for commonly deployed HTTP servers
return test.WARNING("Heartbeat POST did not contain a valid Content-Length header.",
"https://specs.amwa.tv/is-04/branches/{}"
"/docs/2.2._APIs_-_Client_Side_Implementation_Notes.html#empty-request-bodies"
.format(api["spec_branch"]))
else:
if "Content-Length" in heartbeat[1]["headers"]:
return test.FAIL("API signalled both Transfer-Encoding and Content-Length")
accept_valid, accept_message = self.check_accept(heartbeat[1]["headers"])
if not accept_valid:
return test.FAIL(accept_message)
last_hb = heartbeat
return test.PASS()
def test_07(self, test):
"""Node can register a valid Device resource with the network registration service, matching its
Node API Device resource"""
self.do_registry_basics_prereqs()
return self.do_test_matching_resource(test, "device")
def test_07_01(self, test):
"""Registered Device was POSTed after a matching referenced Node"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
return self.do_test_referential_integrity(test, "device")
def test_08(self, test):
"""Node can register a valid Source resource with the network
registration service, matching its Node API Source resource"""
self.do_registry_basics_prereqs()
return self.do_test_matching_resource(test, "source")
def test_08_01(self, test):
"""Registered Source was POSTed after a matching referenced Device"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
return self.do_test_referential_integrity(test, "source")
def test_09(self, test):
"""Node can register a valid Flow resource with the network
registration service, matching its Node API Flow resource"""
self.do_registry_basics_prereqs()
return self.do_test_matching_resource(test, "flow")
def test_09_01(self, test):
"""Registered Flow was POSTed after a matching referenced Device or Source"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
return self.do_test_referential_integrity(test, "flow")
def test_10(self, test):
"""Node can register a valid Sender resource with the network
registration service, matching its Node API Sender resource"""
self.do_registry_basics_prereqs()
return self.do_test_matching_resource(test, "sender")
def test_10_01(self, test):
"""Registered Sender was POSTed after a matching referenced Device"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
return self.do_test_referential_integrity(test, "sender")
def test_11(self, test):
"""Node can register a valid Receiver resource with the network
registration service, matching its Node API Receiver resource"""
self.do_registry_basics_prereqs()
return self.do_test_matching_resource(test, "receiver")
def test_11_01(self, test):
"""Registered Receiver was POSTed after a matching referenced Device"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
return self.do_test_referential_integrity(test, "receiver")
def test_12(self, test):
"""Node advertises a Node type mDNS announcement with no ver_* TXT records
in the presence of a Registration API (v1.0, v1.1 and v1.2)"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.3") >= 0:
return test.DISABLED("This test is disabled for Nodes >= v1.3")
node_list = self.collect_mdns_announcements()
for node in node_list:
port = node.port
if port != api["port"]:
continue
for address in node.addresses:
address = socket.inet_ntoa(address)
if address != api["ip"]:
continue
properties = self.convert_bytes(node.properties)
for prop in properties:
if "ver_" in prop:
return test.FAIL("Found 'ver_' TXT record while Node is registered.")
if self.is04_utils.compare_api_version(api["version"], "v1.1") >= 0:
if "api_ver" not in properties:
return test.FAIL("No 'api_ver' TXT record found in Node API advertisement.")
elif api["version"] not in properties["api_ver"].split(","):
return test.FAIL("Node does not claim to support version under test.")
if "api_proto" not in properties:
return test.FAIL("No 'api_proto' TXT record found in Node API advertisement.")
elif properties["api_proto"] != self.protocol:
return test.FAIL("API protocol ('api_proto') TXT record is not '{}'.".format(self.protocol))
if self.is04_utils.compare_api_version(api["version"], "v1.3") >= 0:
if "api_auth" not in properties:
return test.FAIL("No 'api_auth' TXT record found in Node API advertisement.")
elif properties["api_auth"] != str(self.authorization).lower():
return test.FAIL("API authorization ('api_auth') TXT record is not '{}'."
.format(str(self.authorization).lower()))
return test.PASS()
return test.WARNING("No matching mDNS announcement found for Node with IP/Port {}:{}. This will not affect "
"operation in registered mode but may indicate a lack of support for peer to peer "
"operation.".format(api["ip"], api["port"]),
NMOS_WIKI_URL + "/IS-04#nodes-peer-to-peer-mode")
def test_12_01(self, test):
"""Node does not advertise a Node type mDNS announcement in the presence of a Registration API (v1.3+)"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.3") < 0:
return test.DISABLED("This test is disabled for Nodes < v1.3")
node_list = self.collect_mdns_announcements()
for node in node_list:
port = node.port
if port != api["port"]:
continue
for address in node.addresses:
address = socket.inet_ntoa(address)
if address != api["ip"]:
continue
properties = self.convert_bytes(node.properties)
if "api_ver" not in properties:
return test.FAIL("No 'api_ver' TXT record found in Node API advertisement.")
min_version_lt_v1_3 = False
for api_version in properties["api_ver"].split(","):
if self.is04_utils.compare_api_version(api_version, "v1.3") < 0:
min_version_lt_v1_3 = True
if not min_version_lt_v1_3:
return test.WARNING("Nodes which support v1.3+ only should not advertise via mDNS when in "
"registered mode.")
return test.PASS()
def test_13(self, test):
"""PUTing to a Receiver target resource with a Sender resource payload is accepted
and connects the Receiver to a stream"""
valid, receivers = self.do_request("GET", self.node_url + "receivers")
if not valid or receivers.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(receivers))
try:
formats_tested = []
for receiver in receivers.json():
if not receiver["transport"].startswith("urn:x-nmos:transport:rtp"):
continue
try:
stream_type = receiver["format"].split(":")[-1]
except TypeError:
return test.FAIL("Unexpected Receiver format: {}".format(receiver))
# Test each available receiver format once
if stream_type in formats_tested:
continue
if stream_type not in ["video", "audio", "data", "mux"]:
return test.FAIL("Unexpected Receiver format: {}".format(receiver["format"]))
request_data = self.node.get_sender(stream_type)
self.do_receiver_put(test, receiver["id"], request_data)
time.sleep(CONFIG.API_PROCESSING_TIMEOUT)
valid, response = self.do_request("GET", self.node_url + "receivers/" + receiver["id"])
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(receiver))
receiver = response.json()
if receiver["subscription"]["sender_id"] != request_data["id"]:
return test.FAIL("Node API Receiver {} subscription does not reflect the subscribed "
"Sender ID".format(receiver["id"]))
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.2") >= 0:
if not receiver["subscription"]["active"]:
return test.FAIL("Node API Receiver {} subscription does not indicate an active "
"subscription".format(receiver["id"]))
formats_tested.append(stream_type)
if len(formats_tested) > 0:
return test.PASS()
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
return test.UNCLEAR("Node API does not expose any RTP Receivers")
def test_14(self, test):
"""PUTing to a Receiver target resource with an empty JSON object payload is accepted and
disconnects the Receiver from a stream"""
valid, receivers = self.do_request("GET", self.node_url + "receivers")
if not valid or receivers.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(receivers))
try:
test_receiver = None
for receiver in receivers.json():
if not receiver["transport"].startswith("urn:x-nmos:transport:rtp"):
continue
test_receiver = receiver
break
if test_receiver is not None:
self.do_receiver_put(test, test_receiver["id"], {})
time.sleep(CONFIG.API_PROCESSING_TIMEOUT)
valid, response = self.do_request("GET", self.node_url + "receivers/" + test_receiver["id"])
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(test_receiver))
receiver = response.json()
if receiver["subscription"]["sender_id"] is not None:
return test.FAIL("Node API Receiver {} subscription does not reflect the subscribed "
"Sender ID".format(receiver["id"]))
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.2") >= 0:
if receiver["subscription"]["active"]:
return test.FAIL("Node API Receiver {} subscription does not indicate an inactive "
"subscription".format(receiver["id"]))
return test.PASS()
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
return test.UNCLEAR("Node API does not expose any RTP Receivers")
def test_15(self, test):
"""Node correctly selects a Registration API based on advertised priorities"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
last_hb = None
last_registry = None
# All but the first and last registry can be used for priority tests. The last one is reserved for timeout tests
for index, registry_data in enumerate(self.registry_basics_data[1:-1]):
if len(registry_data.heartbeats) < 1:
return test.FAIL("Node never made contact with registry {} advertised on port {}"
.format(index + 1, registry_data.port))
first_hb_to_registry = registry_data.heartbeats[0]
if last_hb:
if first_hb_to_registry < last_hb:
return test.FAIL("Node sent a heartbeat to the registry on port {} before the registry on port {}, "
"despite their priorities requiring the opposite behaviour"
.format(registry_data.port, last_registry.port))
last_hb = first_hb_to_registry
last_registry = registry_data
return test.PASS()
def test_16(self, test):
"""Node correctly fails over between advertised Registration APIs when one fails"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
# All but the first and last registry can be used for failover tests. The last one is reserved for timeout tests
for index, registry_data in enumerate(self.registry_basics_data[1:-1]):
if len(registry_data.heartbeats) < 1:
return test.FAIL("Node never made contact with registry {} advertised on port {}"
.format(index + 1, registry_data.port))
if index > 0:
for resource in registry_data.posts:
if resource[1]["payload"]["type"] == "node":
return test.FAIL("Node re-registered its resources when it failed over to a new registry, when "
"it should only have issued a heartbeat")
return test.PASS()
def test_16_01(self, test):
"""Node correctly handles Registration APIs whose connections time out"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
self.do_registry_basics_prereqs()
# The second to last registry will intentionally cause a timeout. Check here that the Node successfully times
# out its attempted connection within a heartbeat period and then registers with the next available one.
registry_data = self.registry_basics_data[-1]
if len(registry_data.heartbeats) < 1:
return test.WARNING("Node never made contact with registry {} advertised on port {}"
.format(len(self.registry_basics_data), registry_data.port))
for resource in registry_data.posts:
if resource[1]["payload"]["type"] == "node":
return test.WARNING("Node re-registered its resources when it failed over to a new registry, when it "
"should only have issued a heartbeat")
return test.PASS()
def test_17(self, test):
"""All Node resources use different UUIDs"""
uuids = set()
valid, response = self.do_request("GET", self.node_url + "self")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
uuids.add(response.json()["id"])
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
for resource_type in ["devices", "sources", "flows", "senders", "receivers"]:
valid, response = self.do_request("GET", self.node_url + resource_type)
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for resource in response.json():
if resource["id"] in uuids:
return test.FAIL("Duplicate ID '{}' found in Node API '{}' resource".format(resource["id"],
resource_type))
uuids.add(resource["id"])
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
return test.PASS()
def test_17_01(self, test):
"""All Devices refer to their attached Senders and Receivers"""
# store references from Devices to Senders and Receivers
from_devices = {}
# store references to Devices from Senders and Receivers
to_devices = {}
# get all the Node's Devices
valid, response = self.do_request("GET", self.node_url + "devices")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for resource in response.json():
from_devices[resource["id"]] = {
"senders": set(resource["senders"]),
"receivers": set(resource["receivers"])
}
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
if len(from_devices) == 0:
return test.UNCLEAR("Node API does not expose any Devices")
# get all the Node's Senders and Receivers
empty_refs = {"senders": set(), "receivers": set()}
for resource_type in ["senders", "receivers"]:
valid, response = self.do_request("GET", self.node_url + resource_type)
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for resource in response.json():
id = resource["device_id"]
if id not in to_devices:
to_devices[id] = deepcopy(empty_refs)
to_devices[id][resource_type].add(resource["id"])
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
found_empty_refs = False
for id, from_device in from_devices.items():
if id not in to_devices:
if from_device == empty_refs:
# no Senders or Receivers are attached to this Device
continue
else:
return test.FAIL("Device '{}' references one or more unknown Senders or Receivers."
.format(id))
to_device = to_devices[id]
if from_device == empty_refs:
# Device appears not to be populating the deprecated attributes
found_empty_refs = True
else:
for refs in ["senders", "receivers"]:
if len(from_device[refs] - to_device[refs]) > 0:
return test.FAIL("Device '{}' references one or more unknown {}."
.format(id, refs.title()))
elif len(to_device[refs] - from_device[refs]) > 0:
return test.FAIL("Device '{}' does not have a reference to one or more of its {}."
.format(id, refs.title()))
# else: references from Device to its Senders and Receivers
# match references from Senders and Receivers to that Device
if found_empty_refs:
return test.WARNING("One or more Devices do not have references to any of their Senders or Receivers. "
"(The 'senders' and 'receivers' attributes are deprecated since IS-04 v1.2.)")
return test.PASS()
def test_18(self, test):
"""All Node clocks are unique, and relate to any visible Sources' clocks"""
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.1") < 0:
return test.NA("Clocks are not available until IS-04 v1.1")
clocks = set()
valid, response = self.do_request("GET", self.node_url + "self")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for clock in response.json()["clocks"]:
clock_name = clock["name"]
if clock_name in clocks:
return test.FAIL("Duplicate clock name '{}' found in Node API self resource".format(clock_name))
clocks.add(clock_name)
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
valid, response = self.do_request("GET", self.node_url + "sources")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for source in response.json():
clock_name = source["clock_name"]
if clock_name not in clocks and clock_name is not None:
return test.FAIL("Source '{}' uses a non-existent clock name '{}'".format(source["id"], clock_name))
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
return test.PASS()
def test_19(self, test):
"""All Node interfaces are unique, and relate to any visible Senders and Receivers' interface_bindings"""
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.2") < 0:
return test.NA("Interfaces are not available until IS-04 v1.2")
interfaces = set()
valid, response = self.do_request("GET", self.node_url + "self")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for interface in response.json()["interfaces"]:
interface_name = interface["name"]
if interface_name not in interfaces:
interfaces.add(interface_name)
else:
return test.FAIL("Duplicate interface name '{}' found in Node API self resource"
.format(interface_name))
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
for binder_type in ["senders", "receivers"]:
valid, response = self.do_request("GET", self.node_url + binder_type)
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for binder in response.json():
interface_bindings = binder["interface_bindings"]
if len(interface_bindings) == 0:
return test.FAIL("{} '{}' does not list any interface_bindings"
.format(binder_type.capitalize().rstrip("s"), binder["id"]))
for interface_name in interface_bindings:
if interface_name not in interfaces:
return test.FAIL("{} '{}' uses a non-existent interface name '{}'"
.format(binder_type.capitalize().rstrip("s"),
binder["id"],
interface_name))
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
if len(interfaces) == 0:
return test.UNCLEAR("Node 'interfaces' is empty")
return test.PASS()
def test_19_01(self, test):
"""All bound Node interfaces have attached_network_device info"""
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.3") < 0:
return test.NA("Attached network device info is not available until IS-04 v1.3")
interfaces = {}
valid, response = self.do_request("GET", self.node_url + "self")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for interface in response.json()["interfaces"]:
interface_name = interface["name"]
if interface_name not in interfaces:
interfaces[interface_name] = interface
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
attached_network_device_warn = False
for binder_type in ["senders", "receivers"]:
valid, response = self.do_request("GET", self.node_url + binder_type)
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
for binder in response.json():
interface_bindings = binder["interface_bindings"]
for interface_name in interface_bindings:
if interface_name not in interfaces:
pass
elif "attached_network_device" not in interfaces[interface_name]:
attached_network_device_warn = True
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
if len(interfaces) == 0:
return test.UNCLEAR("Node 'interfaces' is empty")
elif attached_network_device_warn:
return test.OPTIONAL("One or more Node 'interfaces' used by a Sender or Receiver is missing "
"'attached_network_device' info",
NMOS_WIKI_URL + "/IS-04#nodes-interface-neighbour-information")
return test.PASS()
def test_20(self, test):
"""Node's resources correctly signal the current protocol and IP/hostname"""
found_api_endpoint = False
found_href = False
href_hostname_warn = False
api_endpoint_host_warn = False
service_href_scheme_warn = False
service_href_hostname_warn = False
service_href_auth_warn = False
control_href_scheme_warn = False
control_href_hostname_warn = False
control_href_auth_warn = False
manifest_href_scheme_warn = False
manifest_href_hostname_warn = False
api = self.apis[NODE_API_KEY]
valid, response = self.do_request("GET", self.node_url + "self")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
node_self = response.json()
if not node_self["href"].startswith(self.protocol + "://"):
return test.FAIL("Node 'href' does not match the current protocol")
if node_self["href"].startswith("https://") and urlparse(node_self["href"]).hostname[-1].isdigit():
href_hostname_warn = True
if self.is04_utils.compare_api_version(api["version"], "v1.1") >= 0:
for endpoint in node_self["api"]["endpoints"]:
if endpoint["protocol"] != self.protocol:
return test.FAIL("One or more Node 'api.endpoints' do not match the current protocol")
if self.is04_utils.compare_api_version(api["version"], "v1.3") >= 0:
if self.authorization is not endpoint.get("authorization", False):
return test.FAIL("One or more Node 'api.endpoints' do not match the current authorization "
"mode")
if endpoint["host"].lower() == api["hostname"].lower() and endpoint["port"] == api["port"]:
found_api_endpoint = True
if self.is04_utils.compare_urls(node_self["href"], "{}://{}:{}"
.format(endpoint["protocol"], endpoint["host"], endpoint["port"])):
found_href = True
if endpoint["protocol"] == "https" and endpoint["host"][-1].isdigit():
api_endpoint_host_warn = True
for service in node_self["services"]:
href = service["href"]
if href.startswith("http") and not href.startswith(self.protocol + "://"):
# Only warn about these at the end so that more major failures are flagged first
# Protocols other than HTTP may be used, so don't incorrectly flag those too
service_href_scheme_warn = True
if href.startswith("https://") and urlparse(href).hostname[-1].isdigit():
service_href_hostname_warn = True
if self.is04_utils.compare_api_version(api["version"], "v1.3") >= 0 and \
service["type"].startswith("urn:x-nmos:"):
if self.authorization is not service.get("authorization", False):
service_href_auth_warn = True
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
if self.is04_utils.compare_api_version(api["version"], "v1.1") >= 0:
if not found_api_endpoint:
return test.FAIL("None of the Node 'api.endpoints' match the current protocol, IP/hostname and port")
if not found_href:
return test.FAIL("None of the Node 'api.endpoints' match the Node 'href'")
valid, response = self.do_request("GET", self.node_url + "devices")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
node_devices = response.json()
for device in node_devices:
for control in device["controls"]:
href = control["href"]
if href.startswith("http") and not href.startswith(self.protocol + "://"):
# Only warn about these at the end so that more major failures are flagged first
# Protocols other than HTTP may be used, so don't incorrectly flag those too
control_href_scheme_warn = True
if href.startswith("https://") and urlparse(href).hostname[-1].isdigit():
control_href_hostname_warn = True
if self.is04_utils.compare_api_version(api["version"], "v1.3") >= 0 and \
control["type"].startswith("urn:x-nmos:"):
if self.authorization is not control.get("authorization", False):
control_href_auth_warn = True
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
valid, response = self.do_request("GET", self.node_url + "senders")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
node_senders = response.json()
for sender in node_senders:
href = sender["manifest_href"]
if href is not None and href.startswith("http") and not href.startswith(self.protocol + "://"):
manifest_href_scheme_warn = True
if href is not None and href.startswith("https://") and urlparse(href).hostname[-1].isdigit():
manifest_href_hostname_warn = True
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
if href_hostname_warn:
return test.WARNING("Node 'href' value has an IP address not a hostname")
elif api_endpoint_host_warn:
return test.WARNING("One or more Node 'api.endpoints.host' values are an IP address not a hostname")
elif service_href_hostname_warn:
return test.WARNING("One or more Node service 'href' values have an IP address not a hostname")
elif control_href_hostname_warn:
return test.WARNING("One or more Device control 'href' values have an IP address not a hostname")
elif manifest_href_hostname_warn:
return test.WARNING("One or more Sender 'manifest_href' values have an IP address not a hostname")
elif service_href_scheme_warn:
return test.WARNING("One or more Node service 'href' values do not match the current protocol")
elif control_href_scheme_warn:
return test.WARNING("One or more Device control 'href' values do not match the current protocol")
elif manifest_href_scheme_warn:
return test.WARNING("One or more Sender 'manifest_href' values do not match the current protocol")
elif service_href_auth_warn:
return test.WARNING("One or more Node 'x-nmos' services do not match the current authorization mode")
elif control_href_auth_warn:
return test.WARNING("One or more Device 'x-nmos' controls do not match the current authorization mode")
return test.PASS()
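    # For reference (assumed example): when testing against https, a matching Node self resource entry
    # would look like
    #   "api": {"endpoints": [{"host": "node-1.example.com", "port": 443, "protocol": "https",
    #                          "authorization": false}]}
    # and 'href', service/control 'href's and Sender 'manifest_href's would use the same scheme and a
    # hostname rather than a literal IP address.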
def test_20_01(self, test):
"""Sender manifests use the expected Content-Type"""
valid, response = self.do_request("GET", self.node_url + "senders")
if not valid or response.status_code != 200:
return test.FAIL("Unexpected response from the Node API: {}".format(response))
try:
access_error = False
content_type_warn = None
node_senders = response.json()
for sender in node_senders:
if not sender["transport"].startswith("urn:x-nmos:transport:rtp"):
continue
href = sender["manifest_href"]
if not href:
access_error = True
continue
valid, response = self.do_request("GET", href)
if valid and response.status_code == 200:
valid, message = self.check_content_type(response.headers, "application/sdp")
if not content_type_warn and (not valid or message != ""):
content_type_warn = message
elif valid and response.status_code == 404:
access_error = True
else:
return test.FAIL("Unexpected response from manifest_href '{}': {}"
.format(href, response))
if len(node_senders) == 0:
return test.UNCLEAR("Not tested. No resources found.")
if access_error:
return test.UNCLEAR("One or more of the tested Senders had null or empty 'manifest_href' or "
"returned a 404 HTTP code. Please ensure all Senders are enabled and re-test.")
if content_type_warn:
return test.WARNING(content_type_warn)
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
return test.PASS()
def test_21(self, test):
"""Node correctly interprets a 200 code from a registry upon initial registration"""
if not CONFIG.ENABLE_DNS_SD:
return test.DISABLED("This test cannot be performed when ENABLE_DNS_SD is False")
registry_info = self._registry_mdns_info(self.primary_registry.get_data().port, 0)
# Reset the registry to clear previous heartbeats, and enable in 200 test mode
self.primary_registry.reset()
self.primary_registry.enable(first_reg=True)
if CONFIG.DNS_SD_MODE == "multicast":
# Advertise a registry at pri 0 and allow the Node to do a basic registration
self.zc.register_service(registry_info)
# Wait for n seconds after advertising the service for the first POST and then DELETE from a Node
self.primary_registry.wait_for_registration(CONFIG.DNS_SD_ADVERT_TIMEOUT)
self.primary_registry.wait_for_delete(CONFIG.HEARTBEAT_INTERVAL + 1)
# Wait for the Node to finish its interactions
while (time.time() - self.primary_registry.last_time) < CONFIG.HEARTBEAT_INTERVAL + 1:
time.sleep(0.2)
# By this point we should have had at least one Node POST and a corresponding DELETE
if CONFIG.DNS_SD_MODE == "multicast":
self.zc.unregister_service(registry_info)
self.primary_registry.disable()
# Get the relevant Node ID
url = "{}self".format(self.node_url)
valid, r = self.do_request("GET", url)
if valid and r.status_code == 200:
try:
# Check that a POST and DELETE match the Node's ID
node_id = r.json()["id"]
found_post = False
for resource in self.primary_registry.get_data().posts:
if resource[1]["payload"]["type"] == "node" and resource[1]["payload"]["data"]["id"] == node_id:
found_post = True
if not found_post:
return test.FAIL("Node did not attempt to make contact with the registry")
found_delete = False
found_extra_deletes = False
for resource in self.primary_registry.get_data().deletes:
if resource[1]["type"] == "node" and resource[1]["id"] == node_id:
found_delete = True
elif resource[1]["type"] != "node":
found_extra_deletes = True
if not found_delete:
return test.FAIL("Node did not attempt to DELETE itself having encountered a 200 code on initial "
"registration")
elif found_extra_deletes:
return test.WARNING("Node DELETEd more than just its 'node' resource. This is unnecessary when "
"encountering a 200 code on initial registration")
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
else:
return test.FAIL("Unexpected responses from Node API self resource")
return test.PASS()
def test_22(self, test):
"""Node resource IDs persist over a reboot"""
return test.MANUAL("This check must be performed manually, or via use of the following tool",
"https://github.com/AMWA-TV/nmos-testing/blob/master/utilities/uuid-checker/README.md")
def test_23(self, test):
"""Senders and Receivers correctly use BCP-002-01 grouping syntax"""
found_groups = False
found_senders_receivers = False
groups = {"node": {}, "device": {}}
for resource_name in ["senders", "receivers"]:
valid, response = self.do_request("GET", self.node_url + resource_name)
if valid and response.status_code == 200:
try:
for resource in response.json():
found_senders_receivers = True
if resource["device_id"] not in groups["device"]:
groups["device"][resource["device_id"]] = {}
for tag_name, tag_value in resource["tags"].items():
if tag_name != "urn:x-nmos:tag:grouphint/v1.0":
continue
if not isinstance(tag_value, list) or len(tag_value) == 0:
return test.FAIL("Group tag for {} {} is not an array or has too few items"
.format(resource_name.capitalize().rstrip("s"), resource["id"]))
found_groups = True
for group_def in tag_value:
group_params = group_def.split(":")
group_scope = "device"
# Perform basic validation on the group syntax
if len(group_params) < 2:
return test.FAIL("Group syntax for {} {} has too few parameters"
.format(resource_name.capitalize().rstrip("s"), resource["id"]))
elif len(group_params) > 3:
return test.FAIL("Group syntax for {} {} has too many parameters"
.format(resource_name.capitalize().rstrip("s"), resource["id"]))
elif len(group_params) == 3:
if group_params[2] not in ["device", "node"]:
return test.FAIL("Group syntax for {} {} uses an invalid group scope: {}"
.format(resource_name.capitalize().rstrip("s"), resource["id"],
group_params[2]))
group_scope = group_params[2]
# Ensure we have a reference to the group name stored
if group_scope == "node":
if group_params[0] not in groups["node"]:
groups["node"][group_params[0]] = {}
group_ref = groups["node"][group_params[0]]
elif group_scope == "device":
if group_params[0] not in groups["device"][resource["device_id"]]:
groups["device"][resource["device_id"]][group_params[0]] = {}
group_ref = groups["device"][resource["device_id"]][group_params[0]]
# Check for duplicate roles within groups
if group_params[1] in group_ref:
return test.FAIL("Duplicate role found in group {} for resources {} and {}"
.format(group_params[0], resource["id"],
group_ref[group_params[1]]))
else:
group_ref[group_params[1]] = resource["id"]
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
if not found_senders_receivers:
return test.UNCLEAR("No Sender or Receiver resources were found on the Node")
elif found_groups:
return test.PASS()
else:
return test.OPTIONAL("No BCP-002-01 groups were identified in Sender or Receiver tags",
"https://specs.amwa.tv/bcp-002-01/branches/v1.0.x"
"/docs/1.0._Natural_Grouping.html")
def test_24(self, test):
"""Periodic Sources specify a 'grain_rate'"""
valid, response = self.do_request("GET", self.node_url + "sources")
if valid and response.status_code == 200:
try:
for resource in response.json():
# Currently testing where it would be particularly unusual to find a non-periodic Source
if resource["format"] in ["urn:x-nmos:format:video",
"urn:x-nmos:format:mux"]:
if "grain_rate" not in resource:
return test.WARNING("Source {} MUST specify a 'grain_rate' if it is periodic"
.format(resource["id"]))
if len(response.json()) > 0:
return test.PASS()
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key: {}".format(e))
return test.UNCLEAR("No Source resources were found on the Node")
def test_24_01(self, test):
"""Periodic Flows' 'grain_rate' is divisible by their parent Source 'grain_rate'"""
source_valid, source_response = self.do_request("GET", self.node_url + "sources")
flow_valid, flow_response = self.do_request("GET", self.node_url + "flows")
if source_valid and flow_valid and source_response.status_code == 200 and flow_response.status_code == 200:
try:
sources = {source["id"]: source for source in source_response.json()}
flows = flow_response.json()
for flow in flows:
if "grain_rate" in flow:
source = sources[flow["source_id"]]
if "grain_rate" not in source:
return test.FAIL("Source {} MUST specify a 'grain_rate' because one or more of its "
"child Flows specify a 'grain_rate'".format(source["id"]))
flow_rate = flow["grain_rate"]
if "denominator" not in flow_rate:
flow_rate["denominator"] = 1
source_rate = source["grain_rate"]
if "denominator" not in source_rate:
source_rate["denominator"] = 1
if ((source_rate["numerator"] * flow_rate["denominator"]) %
(flow_rate["numerator"] * source_rate["denominator"])):
return test.FAIL("Flow {} 'grain_rate' MUST be integer divisible by the Source "
"'grain_rate'".format(flow["id"]))
elif flow["format"] in ["urn:x-nmos:format:video",
"urn:x-nmos:format:mux"]:
return test.WARNING("Flow {} SHOULD specify a 'grain_rate' if it is periodic"
.format(flow["id"]))
if len(flow_response.json()) > 0:
return test.PASS()
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError:
return test.FAIL("No Source found for one or more advertised Flows")
return test.UNCLEAR("No Source or Flow resources were found on the Node")
def test_25(self, test):
"""Receivers expose expected 'caps' for their API version"""
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.1") < 0:
return test.NA("Capabilities are not used before API v1.1")
receivers_valid, receivers_response = self.do_request("GET", self.node_url + "receivers")
no_receivers = True
if receivers_valid and receivers_response.status_code == 200:
try:
for receiver in receivers_response.json():
no_receivers = False
if "media_types" not in receiver["caps"]:
return test.WARNING("Receiver 'caps' should include a list of accepted 'media_types', unless "
"this Receiver can handle any 'media_type'",
"https://specs.amwa.tv/is-04/branches/{}"
"/docs/4.3._Behaviour_-_Nodes.html#all-resources"
.format(api["spec_branch"]))
if self.is04_utils.compare_api_version(api["version"], "v1.3") >= 0:
if receiver["format"] == "urn:x-nmos:format:data" and \
receiver["transport"] in ["urn:x-nmos:transport:websocket", "urn:x-nmos:transport:mqtt"]:
# Technically this is a bit IS-07 specific, but it may still be best placed here for now
if "event_types" not in receiver["caps"]:
return test.WARNING("Receiver 'caps' should include a list of accepted 'event_types' "
"if the Receiver accepts IS-07 events, unless this Receiver can "
"handle any 'event_type'",
"https://specs.amwa.tv/is-04/branches/{}"
"/docs/4.3._Behaviour_-_Nodes.html#all-resources"
.format(api["spec_branch"]))
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key in the Receiver: {}".format(e))
if no_receivers:
return test.UNCLEAR("No Receivers were found on the Node")
else:
return test.PASS()
def test_26(self, test):
"""Source 'format' matches Flow 'format'"""
source_valid, source_response = self.do_request("GET", self.node_url + "sources")
flow_valid, flow_response = self.do_request("GET", self.node_url + "flows")
if source_valid and flow_valid and source_response.status_code == 200 and flow_response.status_code == 200:
try:
sources = {source["id"]: source for source in source_response.json()}
flows = flow_response.json()
for flow in flows:
source = sources[flow["source_id"]]
if flow["format"] != source["format"]:
return test.FAIL("Source {} and Flow {} 'format' does not match"
.format(source["id"], flow["id"]))
if len(flow_response.json()) > 0:
return test.PASS()
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError:
return test.FAIL("No Source found for one or more advertised Flows")
return test.UNCLEAR("No Source or Flow resources were found on the Node")
def test_27_1(self, test):
"""Node API implements BCP-004-01 Receiver Capabilities"""
api = self.apis[RECEIVER_CAPS_KEY]
reg_api = self.apis[CAPS_REGISTER_KEY]
receivers_valid, receivers_response = self.do_request("GET", self.node_url + "receivers")
schema = load_resolved_schema(api["spec_path"], "receiver_constraint_sets.json")
# workaround to load the Capabilities register schema as if with load_resolved_schema directly
# but with the base_uri of the Receiver Capabilities schemas
reg_schema_file = str(Path(os.path.abspath(reg_api["spec_path"])) / "capabilities/constraint_set.json")
with open(reg_schema_file, "r") as f:
reg_schema_obj = json.load(f)
reg_schema = load_resolved_schema(api["spec_path"], schema_obj=reg_schema_obj)
no_receivers = True
no_constraint_sets = True
if receivers_valid and receivers_response.status_code == 200:
try:
for receiver in receivers_response.json():
no_receivers = False
if "constraint_sets" in receiver["caps"]:
no_constraint_sets = False
try:
self.validate_schema(receiver, schema)
except ValidationError as e:
return test.FAIL("Receiver {} does not comply with the BCP-004-01 schema: "
"{}".format(receiver["id"], str(e)),
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html"
"#validating-parameter-constraints-and-constraint-sets"
.format(api["spec_branch"]))
for constraint_set in receiver["caps"]["constraint_sets"]:
try:
self.validate_schema(constraint_set, reg_schema)
except ValidationError as e:
return test.FAIL("Receiver {} does not comply with the Capabilities register schema: "
"{}".format(receiver["id"], str(e)),
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html"
"#behaviour-receivers"
.format(api["spec_branch"]))
found_param_constraint = False
for param_constraint in constraint_set:
if not param_constraint.startswith("urn:x-nmos:cap:meta:"):
found_param_constraint = True
break
if not found_param_constraint:
return test.FAIL("Receiver {} caps includes a constraint set without any "
"parameter constraints".format(receiver["id"]),
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html"
"#constraint-sets"
.format(api["spec_branch"]))
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key in the Receiver: {}".format(e))
if no_receivers:
return test.UNCLEAR("No Receivers were found on the Node")
elif no_constraint_sets:
return test.OPTIONAL("No BCP-004-01 'constraint_sets' were identified in Receiver caps",
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#listing-constraint-sets"
.format(api["spec_branch"]))
else:
return test.PASS()
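    # For reference (assumed example): a Receiver caps object satisfying both schemas might look like
    #   "caps": {"media_types": ["video/raw"],
    #            "constraint_sets": [{"urn:x-nmos:cap:meta:label": "1080p50",
    #                                 "urn:x-nmos:cap:format:frame_width": {"enum": [1920]},
    #                                 "urn:x-nmos:cap:format:frame_height": {"enum": [1080]}}]}
    # i.e. each constraint set combines 'urn:x-nmos:cap:meta:*' metadata with at least one parameter constraint.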
def test_27_2(self, test):
"""Receiver 'caps' version is valid"""
api = self.apis[RECEIVER_CAPS_KEY]
receivers_valid, receivers_response = self.do_request("GET", self.node_url + "receivers")
no_receivers = True
no_caps_version = True
if receivers_valid and receivers_response.status_code == 200:
try:
for receiver in receivers_response.json():
no_receivers = False
if "version" in receiver["caps"]:
no_caps_version = False
caps_version = receiver["caps"]["version"]
core_version = receiver["version"]
if self.is04_utils.compare_resource_version(caps_version, core_version) > 0:
return test.FAIL("Receiver {} caps version is later than resource version"
.format(receiver["id"]),
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#behaviour-receivers"
.format(api["spec_branch"]))
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key in the Receiver: {}".format(e))
if no_receivers:
return test.UNCLEAR("No Receivers were found on the Node")
elif no_caps_version:
return test.OPTIONAL("No Receiver caps versions were found",
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#capabilities-version"
.format(api["spec_branch"]))
else:
return test.PASS()
def test_27_3(self, test):
"""Receiver 'caps' parameter constraints should be listed in the Capabilities register"""
api = self.apis[RECEIVER_CAPS_KEY]
reg_api = self.apis[CAPS_REGISTER_KEY]
# load the Capabilities register schema as JSON as we're only interested in the list of properties
reg_schema_file = str(Path(os.path.abspath(reg_api["spec_path"])) / "capabilities/constraint_set.json")
with open(reg_schema_file, "r") as f:
reg_schema_obj = json.load(f)
receivers_valid, receivers_response = self.do_request("GET", self.node_url + "receivers")
no_receivers = True
no_constraint_sets = True
warn_unregistered = ""
if receivers_valid and receivers_response.status_code == 200:
try:
for receiver in receivers_response.json():
no_receivers = False
if "constraint_sets" in receiver["caps"]:
no_constraint_sets = False
for constraint_set in receiver["caps"]["constraint_sets"]:
# keys in each constraint set must be either parameter constraints
# or constraint set metadata, both of which are listed in the schema
for param_constraint in constraint_set:
if param_constraint not in reg_schema_obj["properties"] and not warn_unregistered:
warn_unregistered = "Receiver {} caps includes an unregistered " \
"parameter constraint '{}'".format(receiver["id"], param_constraint)
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key in the Receiver: {}".format(e))
if no_receivers:
return test.UNCLEAR("No Receivers were found on the Node")
elif no_constraint_sets:
return test.OPTIONAL("No BCP-004-01 'constraint_sets' were identified in Receiver caps",
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#listing-constraint-sets"
.format(api["spec_branch"]))
elif warn_unregistered:
return test.WARNING(warn_unregistered,
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#defining-parameter-constraints"
.format(api["spec_branch"]))
else:
return test.PASS()
def test_27_4(self, test):
"""Node API implements BCP-004-01 Receiver Capabilities constraint set labels"""
return self.do_test_constraint_set_meta(test, "label", "human-readable labels", warn_not_all=True)
def test_27_5(self, test):
"""Node API implements BCP-004-01 Receiver Capabilities constraint set preferences"""
return self.do_test_constraint_set_meta(test, "preference", "preferences")
def test_27_6(self, test):
"""Node API implements BCP-004-01 Receiver Capabilities enabled/disabled constraint sets"""
return self.do_test_constraint_set_meta(test, "enabled", "enabled/disabled flags")
def do_test_constraint_set_meta(self, test, meta, description, warn_not_all=False):
api = self.apis[RECEIVER_CAPS_KEY]
receivers_valid, receivers_response = self.do_request("GET", self.node_url + "receivers")
no_receivers = True
no_constraint_sets = True
no_meta = True
all_meta = True
if receivers_valid and receivers_response.status_code == 200:
try:
for receiver in receivers_response.json():
no_receivers = False
if "constraint_sets" in receiver["caps"]:
no_constraint_sets = False
for constraint_set in receiver["caps"]["constraint_sets"]:
if "urn:x-nmos:cap:meta:" + meta in constraint_set:
no_meta = False
else:
all_meta = False
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key in the Receiver: {}".format(e))
if no_receivers:
return test.UNCLEAR("No Receivers were found on the Node")
elif no_constraint_sets:
return test.OPTIONAL("No BCP-004-01 'constraint_sets' were identified in Receiver caps",
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#listing-constraint-sets"
.format(api["spec_branch"]))
elif no_meta:
return test.OPTIONAL("No BCP-004-01 'constraint_sets' have {}".format(description),
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#constraint-set-{}"
.format(api["spec_branch"], meta))
elif warn_not_all and not all_meta:
return test.WARNING("Only some BCP-004-01 'constraint_sets' have {}".format(description),
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#constraint-set-{}"
.format(api["spec_branch"], meta))
else:
return test.PASS()
def test_27_7(self, test):
"""Receiver 'caps' parameter constraints should be used with the correct format"""
# general_constraints = [
# "urn:x-nmos:cap:format:media_type",
# "urn:x-nmos:cap:format:grain_rate"
# ]
video_specific_constraints = [
"urn:x-nmos:cap:format:frame_width",
"urn:x-nmos:cap:format:frame_height",
"urn:x-nmos:cap:format:interlace_mode",
"urn:x-nmos:cap:format:colorspace",
"urn:x-nmos:cap:format:transfer_characteristic",
"urn:x-nmos:cap:format:color_sampling",
"urn:x-nmos:cap:format:component_depth",
"urn:x-nmos:cap:transport:st2110_21_sender_type"
]
audio_specific_constraints = [
"urn:x-nmos:cap:format:channel_count",
"urn:x-nmos:cap:format:sample_rate",
"urn:x-nmos:cap:format:sample_depth",
"urn:x-nmos:cap:transport:packet_time",
"urn:x-nmos:cap:transport:max_packet_time"
]
data_specific_constraints = [
"urn:x-nmos:cap:format:event_type"
]
format_specific_constraints = {
"urn:x-nmos:format:video": video_specific_constraints,
"urn:x-nmos:format:audio": audio_specific_constraints,
"urn:x-nmos:format:data": data_specific_constraints,
"urn:x-nmos:format:mux": []
}
api = self.apis[RECEIVER_CAPS_KEY]
receivers_valid, receivers_response = self.do_request("GET", self.node_url + "receivers")
no_receivers = True
no_constraint_sets = True
warn_format = ""
if receivers_valid and receivers_response.status_code == 200:
try:
for receiver in receivers_response.json():
no_receivers = False
if "constraint_sets" in receiver["caps"]:
no_constraint_sets = False
format = receiver["format"]
wrong_constraints = [c for f in format_specific_constraints if f != format
for c in format_specific_constraints[f]]
for constraint_set in receiver["caps"]["constraint_sets"]:
for param_constraint in constraint_set:
if param_constraint in wrong_constraints and not warn_format:
warn_format = "Receiver {} caps includes a parameter constraint '{}' " \
"that is not relevant for {}".format(receiver["id"], param_constraint, format)
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key in the Receiver: {}".format(e))
if no_receivers:
return test.UNCLEAR("No Receivers were found on the Node")
elif no_constraint_sets:
return test.OPTIONAL("No BCP-004-01 'constraint_sets' were identified in Receiver caps",
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#listing-constraint-sets"
.format(api["spec_branch"]))
elif warn_format:
return test.WARNING(warn_format)
else:
return test.PASS()
def test_27_8(self, test):
"""Receiver 'caps' media type constraints should be used consistently"""
media_type = "urn:x-nmos:cap:format:media_type"
api = self.apis[RECEIVER_CAPS_KEY]
receivers_valid, receivers_response = self.do_request("GET", self.node_url + "receivers")
no_receivers = True
no_constraint_sets = True
if receivers_valid and receivers_response.status_code == 200:
try:
for receiver in receivers_response.json():
no_receivers = False
caps = receiver["caps"]
if "media_types" in caps and "constraint_sets" in caps:
no_constraint_sets = False
media_types = caps["media_types"]
for constraint_set in caps["constraint_sets"]:
if media_type in constraint_set:
if "enum" in constraint_set[media_type]:
if not set(constraint_set[media_type]["enum"]).issubset(set(media_types)):
return test.FAIL("Receiver {} caps includes a value for the parameter "
"constraint '{}' that is excluded by 'media_types'"
.format(receiver["id"], media_type))
except json.JSONDecodeError:
return test.FAIL("Non-JSON response returned from Node API")
except KeyError as e:
return test.FAIL("Unable to find expected key in the Receiver: {}".format(e))
if no_receivers:
return test.UNCLEAR("No Receivers were found on the Node")
elif no_constraint_sets:
return test.OPTIONAL("No BCP-004-01 'constraint_sets' were identified in Receiver caps",
"https://specs.amwa.tv/bcp-004-01/branches/{}"
"/docs/1.0._Receiver_Capabilities.html#listing-constraint-sets"
.format(api["spec_branch"]))
else:
return test.PASS()
def do_receiver_put(self, test, receiver_id, data):
"""Perform a PUT to the Receiver 'target' resource with the specified data"""
valid, put_response = self.do_request("PUT", self.node_url + "receivers/" + receiver_id + "/target", json=data)
if not valid:
raise NMOSTestException(test.FAIL("Unexpected response from the Node API: {}".format(put_response)))
if put_response.status_code == 501:
api = self.apis[NODE_API_KEY]
if self.is04_utils.compare_api_version(api["version"], "v1.3") >= 0:
raise NMOSTestException(test.OPTIONAL("Node indicated that basic connection management is not "
"supported",
NMOS_WIKI_URL + "/IS-04#nodes-basic-connection-management"))
else:
raise NMOSTestException(test.WARNING("501 'Not Implemented' status code is not supported below API "
"version v1.3",
NMOS_WIKI_URL + "/IS-04#nodes-basic-connection-management"))
elif put_response.status_code != 202:
raise NMOSTestException(test.FAIL("Receiver target PUT did not produce a 202 response code: "
"{}".format(put_response.status_code)))
schema = self.get_schema(NODE_API_KEY, "PUT", "/receivers/{receiverId}/target", put_response.status_code)
valid, message = self.check_response(schema, "PUT", put_response)
        if not valid:
            raise NMOSTestException(test.FAIL(message))
        # A valid response whose schema check produced a message could be surfaced as a WARNING in future
def collect_mdns_announcements(self):
"""Helper function to collect Node mDNS announcements in the presence of a Registration API"""
registry_info = self._registry_mdns_info(self.primary_registry.get_data().port, 0)
# Reset the registry to clear previous data, although we won't be checking it
self.primary_registry.reset()
self.primary_registry.enable()
if CONFIG.DNS_SD_MODE == "multicast":
# Advertise a registry at pri 0 and allow the Node to do a basic registration
self.zc.register_service(registry_info)
# Wait for n seconds after advertising the service for the first POST from a Node
self.primary_registry.wait_for_registration(CONFIG.DNS_SD_ADVERT_TIMEOUT)
ServiceBrowser(self.zc, "_nmos-node._tcp.local.", self.zc_listener)
time.sleep(CONFIG.DNS_SD_BROWSE_TIMEOUT)
node_list = self.zc_listener.get_service_list()
# Withdraw the registry advertisement now we've performed a browse for Node advertisements
if CONFIG.DNS_SD_MODE == "multicast":
self.zc.unregister_service(registry_info)
self.primary_registry.disable()
return node_list
|
the-stack_0_26074
|
#!/usr/bin/python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_moisture as upmMoisture
def main():
# Instantiate a Grove Moisture sensor on analog pin A0
myMoisture = upmMoisture.Moisture(0)
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit, including functions from myMoisture
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# Values (approximate):
# 0-300, sensor in air or dry soil
# 300-600, sensor in humid soil
# 600+, sensor in wet soil or submerged in water
# Read the value every second and print the corresponding moisture level
while(1):
moisture_val = myMoisture.value()
if (moisture_val >= 0 and moisture_val < 300):
result = "Dry"
elif (moisture_val >= 300 and moisture_val < 600):
result = "Moist"
else:
result = "Wet"
print("Moisture value: {0}, {1}".format(moisture_val, result))
time.sleep(1)
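# Minimal sketch (not part of the original example): the same threshold mapping as a reusable helper,
# using the approximate ranges documented in main() above.
def moisture_level(value):
    """Map a raw Grove Moisture reading to a coarse label."""
    if value < 300:
        return "Dry"
    elif value < 600:
        return "Moist"
    return "Wet"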
if __name__ == '__main__':
main()
|
the-stack_0_26075
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from collections import namedtuple
import torch
EpisodeStats = namedtuple("Stats", ["episode_lengths", "episode_rewards", "episode_epsilon", "episode_alpha"])
def plot_episode_stats(stats, n_episodes, smoothing_window=10, noshow=False, goal_value=None, fig_size=(15, 8), ada_divisor=25, show_params=False):
# Plot the episode length over time
fig1 = plt.figure(figsize=fig_size)
plt.plot(stats.episode_lengths)
plt.xlabel("Episode")
plt.ylabel("Episode Length")
plt.title("Episode Length over Time")
if noshow:
plt.close(fig1)
else:
        plt.show()
# Plot the episode reward over time
fig2 = plt.figure(figsize=fig_size)
rewards_smoothed = pd.Series(stats.episode_rewards).rolling(smoothing_window, min_periods=smoothing_window).mean()
plt.plot(rewards_smoothed)
plt.xlabel("Episode")
plt.ylabel("Episode Reward (Smoothed)")
title = "Episode Reward over Time (Smoothed over window size {})".format(smoothing_window)
if goal_value is not None:
plt.axhline(goal_value, color='g', linestyle='dashed')
title = "Episode Reward over Time (Smoothed over window size" \
" " + str(smoothing_window) + ", goal value " + str(goal_value) + ")"
plt.title(title)
if noshow:
plt.close(fig2)
else:
plt.show(fig2)
# Plot time steps and episode number
fig3 = plt.figure(figsize=fig_size)
plt.plot(np.cumsum(stats.episode_lengths), np.arange(len(stats.episode_lengths)))
plt.xlabel("Time Steps")
plt.ylabel("Episode")
plt.title("Episode per time step")
if noshow:
plt.close(fig3)
else:
plt.show(fig3)
if show_params:
# Plot Epsilon over episode
        fig4 = plt.figure(figsize=fig_size)
plt.plot(np.arange(n_episodes), stats.episode_epsilon)
plt.xlabel("Episode t")
plt.ylabel("Epsilon")
plt.title("Epsilon over episode using ada_divisor of {}".format(ada_divisor))
if noshow:
plt.close(fig4)
else:
plt.show(fig4)
        # Plot Alpha over episode
        fig5 = plt.figure(figsize=fig_size)
plt.plot(np.arange(n_episodes), stats.episode_alpha)
plt.xlabel("Episode t")
plt.ylabel("Alpha")
plt.title("Alpha over episode using ada_divisor of {}".format(ada_divisor))
if noshow:
plt.close(fig5)
else:
plt.show(fig5)
    if show_params:
        return fig1, fig2, fig3, fig4, fig5
    return fig1, fig2, fig3
def plot_durations(episode_durations, is_ipython):
plt.figure(2)
plt.clf()
durations_t = torch.tensor(episode_durations, dtype=torch.float)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
# Take 100 episode averages and plot them too
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.pause(0.001) # pause a bit so that plots are updated
if is_ipython:
from IPython import display
display.clear_output(wait=True)
display.display(plt.gcf())
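# --- Usage sketch (not part of the original module) ---
# Minimal example of driving the helpers above with dummy training
# statistics; the random rewards/lengths below are placeholders.
if __name__ == "__main__":
    n_episodes = 50
    dummy_stats = EpisodeStats(
        episode_lengths=np.random.randint(10, 200, size=n_episodes),
        episode_rewards=np.random.randn(n_episodes).cumsum(),
        episode_epsilon=np.linspace(1.0, 0.05, n_episodes),
        episode_alpha=np.linspace(0.5, 0.1, n_episodes),
    )
    # noshow=True closes the figures instead of displaying them
    plot_episode_stats(dummy_stats, n_episodes, smoothing_window=10,
                       noshow=True, show_params=True)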
|
the-stack_0_26076
|
# -*- coding:utf-8 -*-
"""
app/form.py
~~~~~~~~~~~~~~
wtf form helper
"""
from flask import jsonify
from wtforms import SelectMultipleField, widgets
def orm2form(obj, form, include=(), out=()):
    """
    Fill a form from an ORM object. Included and excluded fields can be
    given as tuples of field names ('field_name', ...).
    obj/form depend on domi.orm.base / wtf_form
    @param obj: ORM object
    @param form: target form
    @param include: fields to include
    @param out: fields to exclude
    @return:
    """
if not include:
include = obj.all_data_field
if out:
include = set(include) - set(out)
for attr in include:
if hasattr(obj, attr) and hasattr(form, attr):
            field = getattr(form, attr)
            field.data = getattr(obj, attr)
def form2orm(form, obj, include=(), out=()):
    """
    Read field values from the given form and fill them into an ORM object.
    Included and excluded fields can be given as tuples of field names
    ('field_name', ...).
    obj/form depend on domi.orm.base / wtf_form
    @param obj: ORM object
    @param form: source form
    @param include: fields to include
    @param out: fields to exclude
    @return:
    """
if not include:
include = obj.all_data_field
if out:
include = set(include) - set(out)
for attr in include:
if hasattr(obj, attr) and hasattr(form, attr):
setattr(obj, attr, getattr(form, attr).data)
def get_form_errors(form, spliter=' '):
if form.errors:
ary = []
for field_errors in form.errors.values():
ary.append(spliter.join(field_errors))
return spliter.join(ary)
class MultiCheckboxField(SelectMultipleField):
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
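# --- Usage sketch (not part of the original module) ---
# Hypothetical stand-in classes showing how form2orm copies field data onto
# an object; real code would use a WTForms form and an ORM model instead.
if __name__ == '__main__':
    class _Field(object):
        def __init__(self, data=None):
            self.data = data
    class FakeForm(object):
        name = _Field('alice')
        age = _Field(30)
    class FakeUser(object):
        name = None
        age = None
    user = FakeUser()
    form2orm(FakeForm, user, include=('name', 'age'))
    print(user.name, user.age)  # -> alice 30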
|
the-stack_0_26077
|
# coding=utf-8
# Copyright 2020 The SimCLR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contrastive loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.compiler.tf2xla.python import xla # pylint: disable=g-direct-tensorflow-import
LARGE_NUM = 1e9
def add_supervised_loss(labels, logits, weights, **kwargs):
"""Compute loss for model and add it to loss collection."""
return tf.losses.softmax_cross_entropy(labels, logits, weights, **kwargs)
def add_contrastive_loss(hidden,
hidden_norm=True,
temperature=1.0,
tpu_context=None,
weights=1.0):
"""Compute loss for model.
Args:
hidden: hidden vector (`Tensor`) of shape (2 * bsz, dim).
hidden_norm: whether or not to use normalization on the hidden vector.
temperature: a `floating` number for temperature scaling.
tpu_context: context information for tpu.
weights: a weighting number or vector.
Returns:
A loss scalar.
The logits for contrastive prediction task.
The labels for contrastive prediction task.
"""
# Get (normalized) hidden1 and hidden2.
if hidden_norm:
hidden = tf.math.l2_normalize(hidden, -1)
hidden1, hidden2 = tf.split(hidden, 2, 0)
batch_size = tf.shape(hidden1)[0]
# Gather hidden1/hidden2 across replicas and create local labels.
if tpu_context is not None:
hidden1_large = tpu_cross_replica_concat(hidden1, tpu_context)
hidden2_large = tpu_cross_replica_concat(hidden2, tpu_context)
enlarged_batch_size = tf.shape(hidden1_large)[0]
# TODO(iamtingchen): more elegant way to convert u32 to s32 for replica_id.
replica_id = tf.cast(tf.cast(xla.replica_id(), tf.uint32), tf.int32)
labels_idx = tf.range(batch_size) + replica_id * batch_size
labels = tf.one_hot(labels_idx, enlarged_batch_size * 2)
masks = tf.one_hot(labels_idx, enlarged_batch_size)
else:
hidden1_large = hidden1
hidden2_large = hidden2
labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
masks = tf.one_hot(tf.range(batch_size), batch_size)
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
loss_a = tf.losses.softmax_cross_entropy(
labels, tf.concat([logits_ab, logits_aa], 1), weights=weights)
loss_b = tf.losses.softmax_cross_entropy(
labels, tf.concat([logits_ba, logits_bb], 1), weights=weights)
loss = loss_a + loss_b
return loss, logits_ab, labels
def tpu_cross_replica_concat(tensor, tpu_context=None):
"""Reduce a concatenation of the `tensor` across TPU cores.
Args:
tensor: tensor to concatenate.
tpu_context: A `TPUContext`. If not set, CPU execution is assumed.
Returns:
Tensor of the same rank as `tensor` with first dimension `num_replicas`
times larger.
"""
if tpu_context is None or tpu_context.num_replicas <= 1:
return tensor
num_replicas = tpu_context.num_replicas
with tf.name_scope('tpu_cross_replica_concat'):
# This creates a tensor that is like the input tensor but has an added
# replica dimension as the outermost dimension. On each replica it will
# contain the local values and zeros for all other values that need to be
# fetched from other replicas.
ext_tensor = tf.scatter_nd(
indices=[[xla.replica_id()]],
updates=[tensor],
shape=[num_replicas] + tensor.shape.as_list())
# As every value is only present on one replica and 0 in all others, adding
# them all together will result in the full tensor on all replicas.
ext_tensor = tf.tpu.cross_replica_sum(ext_tensor)
# Flatten the replica dimension.
# The first dimension size will be: tensor.shape[0] * num_replicas
# Using [-1] trick to support also scalar input.
return tf.reshape(ext_tensor, [-1] + ext_tensor.shape.as_list()[2:])
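# --- Usage sketch (not part of the original module) ---
# Evaluates the contrastive loss on a toy (2 * bsz, dim) hidden tensor with
# no TPU context; batch size, dimension and temperature are arbitrary here.
if __name__ == '__main__':
  tf.disable_eager_execution()  # no-op under TF1 graph mode, needed on TF2
  hidden = tf.random.normal([2 * 8, 32])  # two augmented views of 8 examples
  loss, logits_ab, labels = add_contrastive_loss(
      hidden, hidden_norm=True, temperature=0.5)
  with tf.Session() as sess:
    print('contrastive loss:', sess.run(loss))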
|
the-stack_0_26079
|
from .models import make_model
import numpy as np
import math
import scipy.io as sio
from sklearn.metrics import mean_squared_error
from skimage import measure
ft_epoch_arr = {'sigma15':27, 'sigma25':16, 'sigma30':12, 'sigma50':9, 'sigma75':7}
class Fine_tuning:
def __init__(self, clean_image, noisy_image, noise_sigma):
self.clean_img = np.float32(clean_image)
self.noisy_img = np.float32(noisy_image)
self.noise_sigma = noise_sigma
self.img_x = clean_image.shape[0]
self.img_y = clean_image.shape[1]
self.ep = ft_epoch_arr['sigma'+str(self.noise_sigma)]
self.mini_batch_size = 1
return
def get_PSNR(self, X, X_hat):
mse = mean_squared_error(X,X_hat)
test_PSNR = 10 * math.log10(1/mse)
return test_PSNR
def get_SSIM(self, X, X_hat):
test_SSIM = measure.compare_ssim(X, X_hat, dynamic_range=X.max() - X.min())
return test_SSIM
def preprocessing(self):
self.noisy_img /= 255.
self.clean_img /= 255.
self.X_data = (self.noisy_img - 0.5) / 0.2
self.X_data = self.X_data.reshape(1,self.img_x, self.img_y, 1)
self.Y_data = np.zeros((1,self.img_x, self.img_y,3))
self.Y_data[:,:,:,0] = self.clean_img
self.Y_data[:,:,:,1] = self.noisy_img
self.Y_data[:,:,:,2] = self.noise_sigma/255.
def generate_flipped_image_set(self, X_data):
if X_data.shape[3] == 1:
flipped_image_set = []
lr_flip = np.fliplr(X_data.reshape(self.img_x,self.img_y))
ud_flip = np.flipud(X_data.reshape(self.img_x,self.img_y))
lr_ud_flip = np.flipud(lr_flip)
flipped_image_set = X_data.reshape(1,self.img_x,self.img_y,X_data.shape[3])
flipped_image_set = np.vstack((flipped_image_set, lr_flip.reshape(1,self.img_x,self.img_y,X_data.shape[3])))
flipped_image_set = np.vstack((flipped_image_set, ud_flip.reshape(1,self.img_x,self.img_y,X_data.shape[3])))
flipped_image_set = np.vstack((flipped_image_set, lr_ud_flip.reshape(1,self.img_x,self.img_y,X_data.shape[3])))
else:
flipped_image_set = np.zeros((4,X_data.shape[1],X_data.shape[2],X_data.shape[3]))
for i in range(3):
origin = X_data[0,:,:,i]
lr_flip = np.fliplr(X_data[0,:,:,i])
ud_flip = np.flipud(X_data[0,:,:,i])
lr_ud_flip = np.flipud(np.fliplr(X_data[0,:,:,i]))
flipped_image_set[0,:,:,i] = origin
flipped_image_set[1,:,:,i] = lr_flip
flipped_image_set[2,:,:,i] = ud_flip
flipped_image_set[3,:,:,i] = lr_ud_flip
return flipped_image_set
def reverse_flipped_image_set(self,X_data):
origin_image = X_data[0]
reverse_lr_flip = np.fliplr(X_data[1])
reverse_ud_flip = np.flipud(X_data[2])
reverse_lr_ud_flip = np.flipud(np.fliplr(X_data[3]))
ensemble_image = (origin_image + reverse_lr_flip + reverse_ud_flip + reverse_lr_ud_flip)/4
return ensemble_image
def denoising(self):
Z_data_flip = self.noisy_img.reshape(1,self.img_x,self.img_y,1)
Z_data_flip = self.generate_flipped_image_set(Z_data_flip)
returned_score = self.model.predict(self.X_data_flip,batch_size=4, verbose=0)
returned_score = np.array(returned_score)
returned_score = returned_score.reshape(4,self.img_x,self.img_y,2)
denoised_test_image = returned_score[:,:,:,0] * (Z_data_flip[:,:,:,0]) + returned_score[:,:,:,1]
denoised_test_image = np.clip(denoised_test_image, 0, 1)
denoised_test_image = self.reverse_flipped_image_set(denoised_test_image)
PSNR = self.get_PSNR(self.clean_img,denoised_test_image)
SSIM = self.get_SSIM(self.clean_img,denoised_test_image)
return denoised_test_image, PSNR, SSIM
def fine_tuning(self):
self.preprocessing()
self.X_data_flip = self.X_data
self.X_data_flip = self.generate_flipped_image_set(self.X_data)
Y_data = self.Y_data
Y_data = self.generate_flipped_image_set(Y_data)
self.model = make_model(self.img_x, self.img_y)
self.model.load_weights('./weights/' + 'sigma' + str(self.noise_sigma) + '.hdf5')
self.model.fit(self.X_data_flip, Y_data, verbose=0, batch_size = self.mini_batch_size, epochs = self.ep)
denoised_test_image, PSNR, SSIM = self.denoising()
return denoised_test_image, PSNR, SSIM
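# --- Usage sketch (not part of the original module) ---
# Runs the pipeline on a synthetic clean/noisy pair so no image files are
# needed; pretrained weights are still assumed under ./weights/sigma25.hdf5.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    clean = rng.uniform(0, 255, size=(64, 64))
    noisy = np.clip(clean + rng.normal(0, 25, size=clean.shape), 0, 255)
    tuner = Fine_tuning(clean, noisy, noise_sigma=25)
    denoised, psnr, ssim = tuner.fine_tuning()
    print('PSNR: %.2f  SSIM: %.4f' % (psnr, ssim))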
|
the-stack_0_26081
|
# MIT licensed
# Copyright (c) 2020 lilydjwg <[email protected]>, et al.
# Copyright (c) 2020 Sunlei <[email protected]>
from xml.etree import ElementTree
from nvchecker.api import session
NAMESPACE = 'http://www.andymatuschak.org/xml-namespaces/sparkle'
async def get_version(name, conf, *, cache, **kwargs):
sparkle = conf['sparkle']
return await cache.get(sparkle, get_version_impl)
async def get_version_impl(sparkle):
res = await session.get(sparkle)
root = ElementTree.fromstring(res.body)
item = root.find('./channel/item[1]/enclosure')
version_string = item.get(f'{{{NAMESPACE}}}shortVersionString')
build_number = item.get(f'{{{NAMESPACE}}}version')
if (version_string and version_string.isdigit()) and (
build_number and not build_number.isdigit()
):
version_string, build_number = build_number, version_string
version = []
if version_string:
version.append(version_string)
if build_number and (build_number not in version):
version.append(build_number)
return '-'.join(version) if version else None
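# --- Usage note (not part of the original module) ---
# nvchecker loads this "sparkle" source via its TOML configuration, roughly
# like the entry below (the appcast URL is a placeholder):
#
#   [someapp]
#   source = "sparkle"
#   sparkle = "https://example.com/appcast.xml"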
|
the-stack_0_26082
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
class VmwareHcxScenarioTest(ScenarioTest):
def setUp(self):
# https://vcrpy.readthedocs.io/en/latest/configuration.html#request-matching
self.vcr.match_on = ['scheme', 'method', 'path', 'query'] # not 'host', 'port'
super(VmwareHcxScenarioTest, self).setUp()
@ResourceGroupPreparer(name_prefix='cli_test_vmware_hcx')
def test_vmware_hcx(self):
self.kwargs.update({
'loc': 'westcentralus',
'privatecloud': 'cloud1',
})
# create a private cloud
self.cmd('vmware private-cloud create -g {rg} -n {privatecloud} --location {loc} --sku av20 --cluster-size 4 --network-block 192.168.48.0/22 --nsxt-password 5rqdLj4GF3cePUe6( --vcenter-password UpfBXae9ZquZSDXk( --accept-eula')
# Create a HCX addon
self.cmd('az vmware addon hcx create -g {rg} -c {privatecloud} --offer "VMware MaaS Cloud Provider"')
# List all existing addon
count = len(self.cmd('vmware addon list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 1, 'addon count expected to be 1')
# hcx-enterprise-site list should report 1
count = len(self.cmd('vmware hcx-enterprise-site list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 1, 'hcx-enterprise-site count expected to be 1')
        # create an hcx-enterprise-site named myhcx
self.cmd('vmware hcx-enterprise-site create -g {rg} -c {privatecloud} -n myhcx')
# hcx-enterprise-site list should report 1
count = len(self.cmd('vmware hcx-enterprise-site list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 1, 'hcx-enterprise-site count expected to be 1')
self.cmd('vmware hcx-enterprise-site show -g {rg} -c {privatecloud} -n myhcx')
self.cmd('vmware hcx-enterprise-site delete -g {rg} -c {privatecloud} -n myhcx')
# hcx-enterprise-site list should report 1
count = len(self.cmd('vmware hcx-enterprise-site list -g {rg} -c {privatecloud}').get_output_in_json())
self.assertEqual(count, 1, 'hcx-enterprise-site count expected to be 1')
|
the-stack_0_26083
|
import json
import glob
import os
import textwrap
import math
import pandas as pd
import streamlit as st
from PIL import Image, ImageDraw, ImageFont
STATES_FOLDER = "data/states/"
def show_menu():
st.sidebar.title("Social Media Toolkit Generator")
st.sidebar.header("Defund the Police")
st.sidebar.markdown("“Defund the police” means reallocating or redirecting funding away from the police department to other government agencies funded by the local municipality.")
st.sidebar.markdown("The goal of this tool is to highlight how much money local communities spend on Police, and then how reallocating funds can make a direct impact into their community")
#TODO add more "apps" such as county compare tool
def draw_image(text ,bg_color,text_color,font):
    #TODO make an advanced panel for deep customizations
image_width = 600
image_height = 335
img = Image.new('RGB', (image_width, image_height), color = bg_color)
canvas = ImageDraw.Draw(img)
font = ImageFont.truetype(font, size=24)
pad = -25
#print(text)
for line in text:
#print(line)
#canvas.textsize(text, font=font)
#canvas.text((10,10), text, fill=(255, 255, 0))
text_width, text_height = canvas.textsize(line, font=font)
x_pos = int((image_width - text_width) / 2)
y_pos = int((image_height - text_height) / 2) + pad
canvas.text((x_pos, y_pos), line, font=font, fill=text_color)
pad += text_height + 5
return img
def create_budget_json(state,county):
# read budget.csv
budget_csv_path = STATES_FOLDER + state + "/" + county + "/budget.csv"
budget_df = pd.read_csv(budget_csv_path, index_col=False)
#st.write(budget_df)
# get police budget
police_df = budget_df.loc[budget_df["item"] == "Police"]
police_json = police_df.reset_index().to_json(orient="records")
police_data = json.loads(police_json)[0]
return police_data
def make_investment_image(investment,reinvest_money,bg_color,text_color,font):
if investment == "Education":
cpu_cost = 500.0
laptops = int(math.ceil( reinvest_money / cpu_cost))
laptops_string = str(f'{laptops:,}')
text = "That translates to "+ laptops_string + " laptops for our community"
wrapped_string = textwrap.wrap(text, width=30)
image = draw_image(wrapped_string,bg_color,text_color,font)
st.image(image, use_column_width=True)
st.write("*500 dollar laptops")
#TODO add in extra investments
def main():
show_menu()
st.header("Select Community")
# Select state
states = os.listdir(STATES_FOLDER)
state = st.selectbox("Select State", states)
# select county
counties = os.listdir(STATES_FOLDER + state)
county = st.selectbox("Select County", counties)
police_data = create_budget_json(state,county)
#st.write(police_data)
# Show budget for year
money = "$"+f'{police_data["budget"]:,}'
header_string = (
# "For "
# + str(police_data["year"])
# + " "
str(county)
+ " County, "
+ str(state)
+ " has a police budget of "
+ str(money)
)
wrapped_string = textwrap.wrap(header_string, width=30)
#st.header(wrapped_string)
fonts = glob.glob("fonts/*")
font = st.selectbox("Select Font", fonts)
bg_color = st.beta_color_picker('Background color', '#496D89')
st.write('The current background color is', bg_color)
text_color = st.beta_color_picker('Text color', '#FFFFFF')
st.write('The current text color is', text_color)
image = draw_image(wrapped_string,bg_color,text_color,font)
st.image(image, use_column_width=True)
st.write("source: " + str(police_data["source"]))
defund = st.slider("Defund %", 0, 100, 20)
defund_decmial = float(defund / 100)
reinvest_money = float(police_data["budget"]) * defund_decmial
reinvest_money_string = "$"+f'{int(reinvest_money):,}'
investments = ["Education","Healthcare", "Social Programs"]
realocate = st.selectbox("Reinvest", investments)
realoc_str = (
"By defunding the police by "
+ str(defund)
+ "% we can invest "
+ reinvest_money_string
+ " into "
+ realocate
)
wrapped_string = textwrap.wrap(realoc_str, width=30)
#st.header(realoc_str)
image = draw_image(wrapped_string,bg_color,text_color,font)
st.image(image, use_column_width=True)
#based on input show what we can do...
make_investment_image(realocate,reinvest_money,bg_color,text_color,font)
if __name__ == "__main__":
main()
|
the-stack_0_26085
|
# swift_build_support/products/product.py -----------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2021 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
import os
from . import product
from .. import cmake
from .. import shell
class CMakeProduct(product.Product):
def build_with_cmake(self, build_targets, build_type, build_args):
assert self.toolchain.cmake is not None
cmake_build = []
_cmake = cmake.CMake(self.args, self.toolchain)
if self.toolchain.distcc_pump:
cmake_build.append(self.toolchain.distcc_pump)
cmake_build.extend([self.toolchain.cmake, "--build"])
generator_output_path = ""
if self.args.cmake_generator == "Ninja":
generator_output_path = os.path.join(self.build_dir, "build.ninja")
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
if self.args.reconfigure or not os.path.isfile(cmake_cache_path) or \
(generator_output_path and not os.path.isfile(generator_output_path)):
if not os.path.exists(self.build_dir):
os.makedirs(self.build_dir)
# Use `cmake-file-api` in case it is available.
query_dir = os.path.join(self.build_dir, ".cmake", "api", "v1", "query")
if not os.path.exists(query_dir):
os.makedirs(query_dir)
open(os.path.join(query_dir, "codemodel-v2"), 'a').close()
open(os.path.join(query_dir, "cache-v2"), 'a').close()
env = None
if self.toolchain.distcc:
env = {
"DISTCC_HOSTS": "localhost,lzo,cpp"
}
with shell.pushd(self.build_dir):
shell.call([self.toolchain.cmake] + list(self.cmake_options) +
list(_cmake.common_options()) +
self.args.extra_cmake_options + [self.source_dir],
env=env)
if not self.args.skip_build or self.product_name() == "llvm":
if self.args.cmake_generator == "Xcode":
# Xcode generator uses "ALL_BUILD" instead of "all".
# Also, xcodebuild uses -target instead of bare names.
build_targets = build_targets[:]
build_targets = [val for target in build_targets
for val in ["-target",
target if target != "all"
else "ALL_BUILD"]]
# Xcode can't restart itself if it turns out we need to reconfigure.
# Do an advance build to handle that.
shell.call(cmake_build + [self.build_dir, "--config", build_type])
shell.call(cmake_build + [self.build_dir, "--config", build_type, "--"]
+ build_args + build_targets)
def test_with_cmake(self, executable_target, results_targets,
build_type, build_args):
assert self.toolchain.cmake is not None
cmake_build = []
if self.toolchain.distcc_pump:
cmake_build.append(self.toolchain.distcc_pump)
cmake_args = [self.toolchain.cmake, "--build", self.build_dir,
"--config", build_type, "--"]
cmake_build.extend(cmake_args + build_args)
def target_flag(target):
if self.args.cmake_generator == "Xcode":
return ["-target", target]
return [target]
if executable_target:
shell.call(cmake_build + target_flag(executable_target))
for target in results_targets:
if target:
test_target = target
print("--- %s ---" % target)
if test_target.startswith("check-swift") and self.args.test_paths:
test_target = test_target + "-custom"
shell.call(cmake_build + target_flag(test_target))
print("--- %s finished ---" % target)
def install_with_cmake(self, install_targets, install_destdir):
assert self.toolchain.cmake is not None
cmake_build = []
if self.toolchain.distcc_pump:
cmake_build.append(self.toolchain.distcc_pump)
cmake_args = [self.toolchain.cmake, "--build", self.build_dir, "--"]
cmake_build.extend(cmake_args + install_targets)
environment = {'DESTDIR': install_destdir}
shell.call(cmake_build, env=environment)
|
the-stack_0_26086
|
# [h] selectively copy-and-paste data from one glyph to others
# imports
from mojo.roboFont import CurrentFont, CurrentGlyph
from mojo.events import addObserver, removeObserver
from vanilla import *
from hTools2 import hDialog
from hTools2.modules.fontutils import get_glyphs
from hTools2.modules.anchors import transfer_anchors
from hTools2.modules.messages import no_glyph_selected, no_font_open
# object
class copyPasteGlyphDialog(hDialog):
"""A dialog to copy and paste glyphs, with a few special options.
.. image:: imgs/glyphs/copy-paste.png
"""
# attributes
source_font = None
source_glyph = None
# methods
def __init__(self):
self.title = "paste+"
self.width = 123
self.height = (self.button_height * 2) + (self.text_height * 5) + (self.padding_y * 4)
self.w = FloatingWindow((self.width, self.height), self.title)
x = self.padding_x
y = self.padding_y
# paste
self.w.button_copy = SquareButton(
(x, y,
-self.padding_x,
self.button_height),
"copy",
callback=self.copy_callback,
sizeStyle=self.size_style)
# options
y += (self.button_height + self.padding_y)
self.w.foreground = CheckBox(
(x, y,
-self.padding_x,
self.text_height),
"foreground",
value=True,
sizeStyle=self.size_style)
y += self.text_height
self.w.layers = CheckBox(
(x, y,
-self.padding_x,
self.text_height),
"layers",
value=True,
sizeStyle=self.size_style)
y += self.text_height
self.w.metrics = CheckBox(
(x, y,
-self.padding_x,
self.text_height),
"width",
value=True,
sizeStyle=self.size_style)
y += self.text_height
self.w.anchors = CheckBox(
(x, y,
-self.padding_x,
self.text_height),
"anchors",
value=True,
sizeStyle=self.size_style)
y += self.text_height
self.w.color = CheckBox(
(x, y,
-self.padding_x,
self.text_height),
"color",
value=True,
sizeStyle=self.size_style)
# paste
y += (self.text_height + self.padding_y)
self.w.button_paste = SquareButton(
(x, y,
-self.padding_x,
self.button_height),
"paste",
callback=self.paste_callback,
sizeStyle=self.size_style)
# open
self.w.open()
# callbacks
def copy_callback(self, sender):
f = CurrentFont()
glyph_name = get_glyphs(f)[0]
print('copied glyph %s' % glyph_name)
self.source_font = f
self.source_glyph = self.source_font[glyph_name]
print()
def paste_callback(self, sender):
f = CurrentFont()
if f is not None:
glyph_names = get_glyphs(f)
if len(glyph_names) > 0:
# get data
foreground = self.w.foreground.get()
layers = self.w.layers.get()
metrics = self.w.metrics.get()
anchors = self.w.anchors.get()
color = self.w.color.get()
# print info
bool_string = [ False, True ]
print('pasting data from glyph %s:\n' % self.source_glyph.name)
print('\tforeground: %s' % bool_string[foreground])
print('\tlayers: %s' % bool_string[layers])
print('\tmetrics: %s' % bool_string[metrics])
print('\tanchors: %s' % bool_string[anchors])
print('\tcolor: %s' % bool_string[color])
print()
print('\tpasting in', end=' ')
# copy data
for glyph_name in glyph_names:
print(glyph_name, end=' ')
# prepare undo
f[glyph_name].prepareUndo('paste from glyph')
# copy outlines in foreground layer
if foreground:
target_layer = f[glyph_name].getLayer('foreground')
pen = target_layer.getPointPen()
self.source_glyph.drawPoints(pen)
# copy all other layers
if layers:
for layer_name in self.source_font.layerOrder:
source_layer = self.source_glyph.getLayer(layer_name)
target_layer = f[glyph_name].getLayer(layer_name)
pen = target_layer.getPointPen()
source_layer.drawPoints(pen)
# copy glyph width
if metrics:
f[glyph_name].width = self.source_glyph.width
# copy anchors
if anchors:
transfer_anchors(self.source_glyph, f[glyph_name])
# copy mark color
if color:
f[glyph_name].mark = self.source_glyph.mark
# activate undo
f[glyph_name].performUndo()
# done with glyph
f[glyph_name].update()
# done
f.update()
print()
print('\n...done.\n')
# no glyph selected
else:
print(no_glyph_selected)
# no font open
else:
print(no_font_open)
|
the-stack_0_26089
|
# -*- coding:utf-8 -*-
name = "%(name)s"
version = "%(version)s"
## Subdirectory within which to release this package
category = "proj"
_requires = {
"any": [
"core_pipeline"
## Supported DCCs
## The "~" character denotes a "weak" requirement,
## meaning they aren't required by this project alone,
## but if explicitly requested then this must be their
## version.
],
    ## Requirements relative to a request
## E.g. if `ATC maya` is requested, the "maya"
## requirements are added to the list.
"maya": [
],
"nuke": [
],
"houdini": [
],
}
_environ = {
"any": {
# PROJECTS_PATH - core_pipeline
"PROJECT_NAME": "%(name)s",
"PROJECT_PATH": "{env.PROJECTS_PATH}/%(name)s",
},
## Global overrides for TDs and free-form scripts
## Normally, these files are included alongside a
## package, e.g. "{root}/python". These are different.
## These lack version or write-access control, and
## are intended for quick hacks and experimentation
## by artists not familiar or involved with Rez
## or overall package distribution.
"maya": {
}
}
## ----------------------
##
## INTERNAL
##
## ----------------------
## The command used to bundle payload with package.py
build_command = "python -m rezutil build {root}"
private_build_requires = ["python-2.7+<4", "rezutil-1"]
## Below are boilerplate functionality to enable the above, do not touch
late = locals()["late"]
@late()
def requires():
    """Requirements relative to a request
    This function merges the "any" requirements with e.g. "maya"
if "maya" is part of a request. Normally, every requirement
is included with every request, but in this case we wouldn't want
"maya" requirements included for e.g. "nuke" or "houdini" etc.
    The @late decorator makes this function get called at the time
of calling `rez env` whereby `request` contains the requests
made during that time.
"""
global this
global request
global in_context
requires = this._requires
result = requires["any"][:]
# Add request-specific requirements
if in_context():
for name, reqs in requires.items():
if name not in request:
continue
result += reqs
return result
def commands():
env = globals()["env"]
this = globals()["this"]
request = globals()["request"]
expandvars = globals()["expandvars"]
environ = this._environ
result = list(environ["any"].items())
# Add request-specific environments
for key, values in environ.items():
if key not in request:
continue
result += list(values.items())
for key, value in result:
if isinstance(value, (tuple, list)):
[ env[key].append(expandvars(v)) for v in value ]
else:
env[key] = expandvars(value)
|
the-stack_0_26090
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for `caso.record` module.
"""
import uuid
import caso
from caso import exception
from caso import record
from caso.tests import base
class TestCasoManager(base.TestCase):
def test_invalid_version(self):
r = record.CloudRecord(uuid.uuid4().hex,
"site-foo",
"name-foo",
uuid.uuid4().hex,
uuid.uuid4().hex,
"/Foo/User/Fqan")
self.assertRaises(exception.RecordVersionNotFound,
r.as_dict, version="0.0")
self.assertRaises(exception.RecordVersionNotFound,
r.as_json, version="0.0")
def test_required_fields(self):
server_id = uuid.uuid4().hex
site_name = "site-foo"
server_name = "name-foo"
server_user_id = uuid.uuid4().hex
server_project_id = uuid.uuid4().hex
fqan = "FooVO"
status = 'completed'
image_id = uuid.uuid4().hex
user_dn = "/Foo/bar/baz"
expected = {
'FQAN': fqan,
'GlobalUserName': user_dn,
'ImageId': image_id,
'LocalGroupId': server_project_id,
'LocalUserId': server_user_id,
'MachineName': server_name,
'SiteName': site_name,
'Status': status,
'VMUUID': server_id,
'CloudType': caso.user_agent,
}
expected_02 = {
'CpuCount': None,
'CpuDuration': None,
'Disk': None,
'EndTime': None,
'Memory': None,
'NetworkInbound': None,
'NetworkOutbound': None,
'NetworkType': None,
'StartTime': None,
'StorageRecordId': None,
'SuspendDuration': None,
'WallDuration': None,
}
expected_04 = {
'CloudComputeService': None,
'BenchmarkType': None,
'Benchmark': None,
'PublicIPCount': None,
}
r = record.CloudRecord(server_id,
site_name,
server_name,
server_user_id,
server_project_id,
fqan,
status=status,
image_id=image_id,
user_dn=user_dn)
d_02 = r.as_dict(version="0.2")
self.assertDictContainsSubset(expected, d_02)
self.assertDictContainsSubset(expected_02, d_02)
d_04 = r.as_dict(version="0.4")
self.assertDictContainsSubset(expected, d_04)
self.assertDictContainsSubset(expected_04, d_04)
|
the-stack_0_26091
|
import boto3
import os
ENVIRO = os.environ['environment']
ACCOUNT = os.environ['account']
def lambda_handler(event, context):
ec2 = boto3.resource('ec2')
instanceid = event['detail']['instance-id']
#instanceid = 'i-0496caa71efa41cbe' #In case manual adding is needed for testing
instance = ec2.Instance(id=instanceid)
for tag in instance.tags:
if 'XdConfig' in tag['Key']:
for tag in instance.tags:
if tag["Key"] == 'Name':
#Find instance name to work out environment
instance_name = tag['Value']
print ('Instance Name: ' + instance_name)
#End If
#End For
            # Convert the AWS account environment into an Environment tag as a backup if it cannot be calculated from the instance name
choices_env = {'prod': "Production", 'uat': "UAT", 'sit': "SIT", 'dev': "Development"}
environment_tag = choices_env.get(ENVIRO, 'default')
print ('Account Environment Tag: ' + environment_tag)
# Calculate environment from instance name
instance_name_env = instance_name[3:6]
choices_instanceenv = {'PRD': "Production", 'UAT': "UAT", 'SIT': "SIT", 'DEV': "Development"}
instanceenvironment_tag = choices_instanceenv.get(instance_name_env, environment_tag)
print ('Instance Environment Tag: ' + instanceenvironment_tag)
# Calculate Organisation from AWS account
choices_org = {'afb': "AfB"}
organisation_tag = choices_org.get(ACCOUNT, 'AWL')
print ('Organisation: ' + organisation_tag)
# Configure Tag values
mytags = [
{
"Key" : "Backup",
"Value" : "False"
},
{
"Key" : "Organisation",
"Value" : organisation_tag
},
{
"Key" : "BusinessOwner",
"Value" : "Chris Grey"
},
{ "Key": "BusinessUnit",
"Value": "Infrastructure"
},
{
"Key" : "ServiceOwner",
"Value" :"Travis De Coning"
},
{
"Key" : "ServiceLevel",
"Value" : "Gold"
},
{
"Key" : "CostCentre",
"Value" : "P023611"
},
{
"Key" : "Quadrant",
"Value" : "Q1"
},
{
"Key" : "ApplicationName",
"Value" : "Citrix"
},
{
"Key" : "ApplicationType",
"Value" :"Citrix"
},
{
"Key" : "Description",
"Value" : "Citrix Application Server"
},
{
"Key" : "Environment",
"Value" : instanceenvironment_tag
},
{
"Key" : "CreatedBy",
"Value" : "Citrix"
},
{
"Key" : "CreationDate",
"Value" : "CitrixDaily"
},
{
"Key" : "OperatingSystem",
"Value" : "Windows Server 2016"
},
{
"Key" : "Terraform",
"Value" : "False"
},
{
"Key" : "ssmMaintenanceWindow",
"Value" : "False"
}
]
# Set instance Tags
instance.create_tags(
Tags=mytags
)
print ('Tags Updated')
#End if
#End For
#End Function
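# --- Usage note (not part of the original function) ---
# The handler expects an EC2 instance state-change event from EventBridge;
# a minimal local smoke test could look like this (the instance id is a
# placeholder, and the 'environment'/'account' env vars plus AWS credentials
# must be set before import):
#
#   lambda_handler({"detail": {"instance-id": "i-0123456789abcdef0"}}, None)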
|
the-stack_0_26092
|
# -*- encoding: utf-8 -*-
"""
Tests for django.core.servers.
"""
from __future__ import unicode_literals
import os
try:
from urllib.request import urlopen, HTTPError
except ImportError: # Python 2
from urllib2 import urlopen, HTTPError
from django.core.exceptions import ImproperlyConfigured
from django.test import LiveServerTestCase
from django.core.servers.basehttp import WSGIServerException
from django.test.utils import override_settings
from django.utils.http import urlencode
from django.utils._os import upath
from .models import Person
TEST_ROOT = os.path.dirname(upath(__file__))
TEST_SETTINGS = {
'MEDIA_URL': '/media/',
'MEDIA_ROOT': os.path.join(TEST_ROOT, 'media'),
'STATIC_URL': '/static/',
'STATIC_ROOT': os.path.join(TEST_ROOT, 'static'),
}
class LiveServerBase(LiveServerTestCase):
urls = 'regressiontests.servers.urls'
fixtures = ['testdata.json']
@classmethod
def setUpClass(cls):
# Override settings
cls.settings_override = override_settings(**TEST_SETTINGS)
cls.settings_override.enable()
super(LiveServerBase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
# Restore original settings
cls.settings_override.disable()
super(LiveServerBase, cls).tearDownClass()
def urlopen(self, url):
return urlopen(self.live_server_url + url)
class LiveServerAddress(LiveServerBase):
"""
Ensure that the address set in the environment variable is valid.
Refs #2879.
"""
@classmethod
def setUpClass(cls):
# Backup original environment variable
address_predefined = 'DJANGO_LIVE_TEST_SERVER_ADDRESS' in os.environ
old_address = os.environ.get('DJANGO_LIVE_TEST_SERVER_ADDRESS')
# Just the host is not accepted
cls.raises_exception('localhost', ImproperlyConfigured)
# The host must be valid
cls.raises_exception('blahblahblah:8081', WSGIServerException)
# The list of ports must be in a valid format
cls.raises_exception('localhost:8081,', ImproperlyConfigured)
cls.raises_exception('localhost:8081,blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-', ImproperlyConfigured)
cls.raises_exception('localhost:8081-blah', ImproperlyConfigured)
cls.raises_exception('localhost:8081-8082-8083', ImproperlyConfigured)
# If contrib.staticfiles isn't configured properly, the exception
# should bubble up to the main thread.
old_STATIC_URL = TEST_SETTINGS['STATIC_URL']
TEST_SETTINGS['STATIC_URL'] = None
cls.raises_exception('localhost:8081', ImproperlyConfigured)
TEST_SETTINGS['STATIC_URL'] = old_STATIC_URL
# Restore original environment variable
if address_predefined:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = old_address
else:
del os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS']
@classmethod
def raises_exception(cls, address, exception):
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = address
try:
super(LiveServerAddress, cls).setUpClass()
raise Exception("The line above should have raised an exception")
except exception:
pass
def test_test_test(self):
# Intentionally empty method so that the test is picked up by the
        # test runner and the overridden setUpClass() method is executed.
pass
class LiveServerViews(LiveServerBase):
def test_404(self):
"""
Ensure that the LiveServerTestCase serves 404s.
Refs #2879.
"""
try:
self.urlopen('/')
except HTTPError as err:
self.assertEqual(err.code, 404, 'Expected 404 response')
else:
self.fail('Expected 404 response')
def test_view(self):
"""
Ensure that the LiveServerTestCase serves views.
Refs #2879.
"""
f = self.urlopen('/example_view/')
self.assertEqual(f.read(), b'example view')
def test_static_files(self):
"""
Ensure that the LiveServerTestCase serves static files.
Refs #2879.
"""
f = self.urlopen('/static/example_static_file.txt')
self.assertEqual(f.read().rstrip(b'\r\n'), b'example static file')
def test_media_files(self):
"""
Ensure that the LiveServerTestCase serves media files.
Refs #2879.
"""
f = self.urlopen('/media/example_media_file.txt')
self.assertEqual(f.read().rstrip(b'\r\n'), b'example media file')
def test_environ(self):
f = self.urlopen('/environ_view/?%s' % urlencode({'q': 'тест'}))
self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Ensure that fixtures are properly loaded and visible to the
live server thread.
Refs #2879.
"""
f = self.urlopen('/model_view/')
self.assertEqual(f.read().splitlines(), [b'jane', b'robert'])
def test_database_writes(self):
"""
Ensure that data written to the database by a view can be read.
Refs #2879.
"""
self.urlopen('/create_model_instance/')
self.assertQuerysetEqual(
Person.objects.all().order_by('pk'),
['jane', 'robert', 'emily'],
lambda b: b.name
)
|
the-stack_0_26093
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 10:42:02 2021
@author: syang
"""
import os
import numpy as np
import json
import cv2
# Use the same script for MOT16
DATA_PATH = '../../dataset/Kitti_left'
GT_FOLDER = 'reformed_labels/label_02_new'
OUT_PATH = os.path.join(DATA_PATH, 'annotations_origin')
CLASSES = ['Car', 'Pedestrian', 'Van', 'Misc', 'Cyclist', 'Truck', 'Person', 'Tram', 'DontCare']
#SPLITS = ['train_half', 'val_half', 'train'] # --> split training data to train_half and val_half.
SPLITS = ['val_half', 'train_half', 'train']
HALF_VIDEO = True
CREATE_SPLITTED_ANN = True
CREATE_SPLITTED_DET = True
if __name__ == '__main__':
if not os.path.exists(OUT_PATH):
os.makedirs(OUT_PATH)
for split in SPLITS:
data_path = os.path.join(DATA_PATH, GT_FOLDER)
out_path = os.path.join(OUT_PATH, '{}.json'.format(split))
out = {'images': [], 'annotations': [], 'videos': [],
'categories': [{'id': 0, 'name': 'Car'},
{'id': 1, 'name': 'Pedestrian'},
{'id': 2, 'name': 'Van'},
{'id': 3, 'name': 'Misc'},
{'id': 4, 'name': 'Cyclist'},
{'id': 5, 'name': 'Truck'},
{'id': 6, 'name': 'Person'},
{'id': 7, 'name': 'Tram'}]}
seqs = os.listdir(data_path)
image_cnt = 0
ann_cnt = 0
video_cnt = 0
for seq in sorted(seqs):
if '.txt' not in seq:
continue
seq_noext = os.path.splitext(seq)[0]
video_cnt += 1 # video sequence number.
out['videos'].append({'id': video_cnt, 'file_name': seq_noext})
ann_path = os.path.join(data_path, seq)
img_path = os.path.join(DATA_PATH, 'train/image_02/{}'.format(seq_noext))
images = os.listdir(img_path)
num_images = len([image for image in images if 'png' in image]) # half and half
if HALF_VIDEO and ('half' in split):
image_range = [0, num_images // 2] if 'train' in split else \
[num_images // 2 + 1, num_images - 1]
else:
image_range = [0, num_images - 1]
for i in range(num_images):
if i < image_range[0] or i > image_range[1]:
continue
img = cv2.imread(os.path.join(img_path, '{:06d}.png'.format(i)))
height, width = img.shape[:2]
image_info = {'file_name': 'image_02/{}/{:06d}.png'.format(seq_noext, i), # image name.
'id': image_cnt + i + 1, # image number in the entire training set.
'frame_id': i + 1 - image_range[0], # image number in the video sequence, starting from 1.
'prev_image_id': image_cnt + i if i > 0 else -1, # image number in the entire training set.
'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1,
'video_id': video_cnt,
'height': height, 'width': width}
out['images'].append(image_info)
print('{}: {} images'.format(seq_noext, num_images))
if split != 'test':
#det_path = os.path.join(seq_path, 'det/det.txt')
anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',', comments='#')
#dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',')
if CREATE_SPLITTED_ANN and ('half' in split):
anns_out = np.array([anns[i] for i in range(anns.shape[0])
if int(anns[i][0]) - 1 >= image_range[0] and
int(anns[i][0]) - 1 <= image_range[1]], np.float32)
anns_out[:, 0] -= image_range[0]
gt_out = os.path.join(img_path, 'gt_coco')
if not os.path.exists(gt_out):
os.mkdir(gt_out)
fout = open(os.path.join(gt_out, 'gt_{}.txt'.format(split)), 'w')
#fout.write('# frame, tracking_id, bbox(xmin,ymin,weight,height), class, -1, -1, -1\n')
obj_number = np.unique(anns_out[:,1])
#for o in anns_out[anns_out[:,1].argsort()]:
for num in obj_number:
if int(num) == -1:
continue
sub_anns = anns_out[np.where(anns_out[:,1] == num)]
for o in sub_anns:
fout.write('{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:f}\n'.format(
int(o[0]), int(o[1])+1, int(o[2]), int(o[3]), int(o[4]), int(o[5]),
1, int(o[6]), 1))
fout.close()
print('{} ann images'.format(int(anns[:, 0].max())))
for i in range(anns.shape[0]):
frame_id = int(anns[i][0])
if frame_id - 1 < image_range[0] or frame_id - 1 > image_range[1]:
continue
track_id = int(anns[i][1])
cat_id = int(anns[i][7])
ann_cnt += 1
category_id = int(anns[i][6])
if category_id == -1:
continue
iscrowd = 0
#iscrowd = 0
ann = {'id': ann_cnt,
'category_id': category_id,
'image_id': image_cnt + frame_id,
'track_id': track_id,
'bbox': anns[i][2:6].tolist(),
'conf': float(1),
'iscrowd': iscrowd,
'area': float(anns[i][4] * anns[i][5])}
out['annotations'].append(ann)
image_cnt += num_images
print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations'])))
json.dump(out, open(out_path, 'w'))
|
the-stack_0_26095
|
"""This module contains the general information for BiosVfWorkLoadConfig ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class BiosVfWorkLoadConfigConsts:
VP_WORK_LOAD_CONFIG_BALANCED = "Balanced"
VP_WORK_LOAD_CONFIG_I_O_SENSITIVE = "I/O Sensitive"
VP_WORK_LOAD_CONFIG_NUMA = "NUMA"
VP_WORK_LOAD_CONFIG_UMA = "UMA"
VP_WORK_LOAD_CONFIG_PLATFORM_DEFAULT = "platform-default"
class BiosVfWorkLoadConfig(ManagedObject):
"""This is BiosVfWorkLoadConfig class."""
consts = BiosVfWorkLoadConfigConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("BiosVfWorkLoadConfig", "biosVfWorkLoadConfig", "work-load-config", VersionMeta.Version204c, "InputOutput", 0x1f, [], ["admin"], ['biosPlatformDefaults', 'biosSettings'], [], ["Get", "Set"]),
"modular": MoMeta("BiosVfWorkLoadConfig", "biosVfWorkLoadConfig", "work-load-config", VersionMeta.Version2013e, "InputOutput", 0x1f, [], ["admin"], ['biosPlatformDefaults', 'biosSettings'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version204c, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version204c, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version204c, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_work_load_config": MoPropertyMeta("vp_work_load_config", "vpWorkLoadConfig", "string", VersionMeta.Version204c, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Balanced", "I/O Sensitive", "NUMA", "UMA", "platform-default"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version421a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"vp_work_load_config": MoPropertyMeta("vp_work_load_config", "vpWorkLoadConfig", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Balanced", "I/O Sensitive", "platform-default"], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpWorkLoadConfig": "vp_work_load_config",
"childAction": "child_action",
},
"modular": {
"dn": "dn",
"rn": "rn",
"status": "status",
"vpWorkLoadConfig": "vp_work_load_config",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.status = None
self.vp_work_load_config = None
self.child_action = None
ManagedObject.__init__(self, "BiosVfWorkLoadConfig", parent_mo_or_dn, **kwargs)
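# --- Usage sketch (not part of the original module) ---
# Hypothetical example of setting this BIOS token through an imcsdk handle;
# the endpoint, credentials and parent dn below are placeholders.
if __name__ == '__main__':
    from imcsdk.imchandle import ImcHandle
    handle = ImcHandle("192.0.2.10", "admin", "password")
    handle.login()
    mo = BiosVfWorkLoadConfig(
        parent_mo_or_dn="sys/rack-unit-1/bios/bios-settings",
        vp_work_load_config=BiosVfWorkLoadConfigConsts.VP_WORK_LOAD_CONFIG_BALANCED)
    handle.set_mo(mo)
    handle.logout()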
|
the-stack_0_26097
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operations for preparing useful quantum states."""
from typing import Iterable, Sequence, Set, Tuple, Union, cast
import numpy
import cirq
from openfermion import (
QuadraticHamiltonian,
gaussian_state_preparation_circuit,
slater_determinant_preparation_circuit)
from openfermioncirq import YXXY
def prepare_gaussian_state(qubits: Sequence[cirq.QubitId],
quadratic_hamiltonian: QuadraticHamiltonian,
occupied_orbitals: Sequence[int]=None,
initial_state: Union[int, Sequence[int]]=0
) -> cirq.OP_TREE:
"""Prepare a fermionic Gaussian state from a computational basis state.
A fermionic Gaussian state is an eigenstate of a quadratic Hamiltonian. If
the Hamiltonian conserves particle number, then it is a Slater determinant.
The algorithm used is described in arXiv:1711.05395. It assumes the
Jordan-Wigner transform.
Args:
qubits: The qubits to which to apply the circuit.
quadratic_hamiltonian: The Hamiltonian whose eigenstate is desired.
occupied_orbitals: A list of integers representing the indices of the
pseudoparticle orbitals to occupy in the Gaussian state. The
orbitals are ordered in ascending order of energy.
The default behavior is to fill the orbitals with negative energy,
i.e., prepare the ground state.
initial_state: The computational basis state that the qubits start in.
This can be either an integer or a sequence of integers.
If an integer, it is mapped to a computational basis state via
"big endian" ordering of the binary representation of the integer.
For example, the computational basis state on five qubits with
the first and second qubits set to one is 0b11000, which is 24
in decimal.
If a sequence of integers, then it contains the indices of the
qubits that are set to one (indexing starts from 0). For
example, the list [2, 3] represents qubits 2 and 3 being set to one.
Default is 0, the all zeros state.
"""
n_qubits = len(qubits)
circuit_description, start_orbitals = gaussian_state_preparation_circuit(
quadratic_hamiltonian, occupied_orbitals)
if isinstance(initial_state, int):
initially_occupied_orbitals = _occupied_orbitals(
initial_state, n_qubits)
else:
initially_occupied_orbitals = initial_state # type: ignore
# Flip bits so that the correct starting orbitals are occupied
yield (cirq.X(qubits[j]) for j in range(n_qubits)
if (j in initially_occupied_orbitals) != (j in start_orbitals))
yield _ops_from_givens_rotations_circuit_description(
qubits, circuit_description)
def prepare_slater_determinant(qubits: Sequence[cirq.QubitId],
slater_determinant_matrix: numpy.ndarray,
initial_state: Union[int, Sequence[int]]=0
) -> cirq.OP_TREE:
r"""Prepare a Slater determinant from a computational basis state.
A Slater determinant is described by an :math:`\eta \times N` matrix
:math:`Q` with orthonormal rows, where :math:`\eta` is the particle number
and :math:`N` is the total number of modes. The state corresponding to this
matrix is
.. math::
b^\dagger_1 \cdots b^\dagger_{\eta} \lvert \text{vac} \rangle,
where
.. math::
b^\dagger_j = \sum_{k = 1}^N Q_{jk} a^\dagger_k.
The algorithm used is described in arXiv:1711.05395. It assumes the
Jordan-Wigner transform.
Args:
qubits: The qubits to which to apply the circuit.
slater_determinant_matrix: The matrix :math:`Q` which describes the
Slater determinant to be prepared.
initial_state: The computational basis state that the qubits start in.
This can be either an integer or a container of integers.
If an integer, it is mapped to a computational basis state via
"big endian" ordering of the binary representation of the integer.
For example, the computational basis state on five qubits with
the first and second qubits set to one is 0b11000, which is 24
in decimal.
If a container of integers, then it contains the indices of the
qubits that are set to one (indexing starts from 0). For
example, the list [2, 3] represents qubits 2 and 3 being set to one.
Default is 0, the all zeros state.
"""
n_qubits = len(qubits)
circuit_description = slater_determinant_preparation_circuit(
slater_determinant_matrix)
n_occupied = slater_determinant_matrix.shape[0]
if isinstance(initial_state, int):
initially_occupied_orbitals = _occupied_orbitals(
initial_state, n_qubits)
else:
initially_occupied_orbitals = initial_state # type: ignore
# Flip bits so that the first n_occupied are 1 and the rest 0
yield (cirq.X(qubits[j]) for j in range(n_qubits)
if (j < n_occupied) != (j in initially_occupied_orbitals))
yield _ops_from_givens_rotations_circuit_description(
qubits, circuit_description)
def _occupied_orbitals(computational_basis_state: int, n_qubits) -> Set[int]:
"""Indices of ones in the binary expansion of an integer in big endian
order. e.g. 010110 -> [1, 3, 4]"""
bitstring = format(computational_basis_state, 'b').zfill(n_qubits)
return {j for j in range(len(bitstring)) if bitstring[j] == '1'}
def _ops_from_givens_rotations_circuit_description(
qubits: Sequence[cirq.QubitId],
circuit_description: Iterable[Iterable[
Union[str, Tuple[int, int, float, float]]]]) -> cirq.OP_TREE:
"""Yield operations from a Givens rotations circuit obtained from
OpenFermion.
"""
for parallel_ops in circuit_description:
for op in parallel_ops:
if op == 'pht':
yield cirq.X(qubits[-1])
else:
i, j, theta, phi = cast(Tuple[int, int, float, float], op)
yield YXXY(qubits[i], qubits[j]) ** (2 * theta / numpy.pi)
yield cirq.Z(qubits[j]) ** (phi / numpy.pi)
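# --- Usage sketch (not part of the original module) ---
# Prepares the lowest-energy orbital of a small particle-conserving quadratic
# Hamiltonian; the 2x2 Hermitian matrix is an arbitrary illustration, and the
# circuit constructor depends on the installed cirq version.
if __name__ == '__main__':
    hermitian_part = numpy.array([[1.0, 0.5j],
                                  [-0.5j, 2.0]])
    quad_ham = QuadraticHamiltonian(hermitian_part)
    qubits = cirq.LineQubit.range(2)
    ops = prepare_gaussian_state(qubits, quad_ham, occupied_orbitals=[0])
    circuit = cirq.Circuit(ops)  # cirq.Circuit.from_ops(ops) on older cirq
    print(circuit)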
|
the-stack_0_26098
|
'''
Created on May 19, 2013
@author: vinnie
'''
import os
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from scipy.misc import imread, imresize
from skimage.filters import canny
from scipy.ndimage.filters import sobel
# Good for the b/w test images used
MIN_CANNY_THRESHOLD = 30
MAX_CANNY_THRESHOLD = 100
def gradient_orientation(image):
'''
Calculate the gradient orientation for edge point in the image
'''
dx = sobel(image, axis=0, mode='constant')
dy = sobel(image, axis=1, mode='constant')
gradient = np.arctan2(dy,dx) * 180 / np.pi
return gradient
def build_r_table(image, origin):
'''
Build the R-table from the given shape image and a reference point
'''
edges = canny(image, low_threshold=MIN_CANNY_THRESHOLD,
high_threshold=MAX_CANNY_THRESHOLD)
gradient = gradient_orientation(edges)
r_table = defaultdict(list)
for (i,j),value in np.ndenumerate(edges):
if value:
r_table[gradient[i,j]].append((origin[0]-i, origin[1]-j))
return r_table
def accumulate_gradients(r_table, grayImage):
'''
Perform a General Hough Transform with the given image and R-table
'''
edges = canny(grayImage, low_threshold=MIN_CANNY_THRESHOLD,
high_threshold=MAX_CANNY_THRESHOLD)
plt.matshow(edges)
plt.show()
gradient = gradient_orientation(edges)
accumulator = np.zeros(grayImage.shape)
for (i,j),value in np.ndenumerate(edges):
if value:
for r in r_table[gradient[i,j]]:
accum_i, accum_j = i+r[0], j+r[1]
if accum_i < accumulator.shape[0] and accum_j < accumulator.shape[1]:
accumulator[accum_i, accum_j] += 1
return accumulator
def general_hough_closure(reference_image):
'''
Generator function to create a closure with the reference image and origin
at the center of the reference image
Returns a function f, which takes a query image and returns the accumulator
'''
referencePoint = (reference_image.shape[0]/2, reference_image.shape[1]/2)
r_table = build_r_table(reference_image, referencePoint)
def f(query_image):
return accumulate_gradients(r_table, query_image)
return f
def n_max(a, n):
'''
Return the N max elements and indices in a
'''
indices = a.ravel().argsort()[-n:]
indices = (np.unravel_index(i, a.shape) for i in indices)
return [(a[i], i) for i in indices]
def general_hough(gh, reference_image, query):
'''
Uses a GH closure to detect shapes in an image and create nice output
'''
query_image = imread(query, flatten=True)
query_image = imresize(query_image, 0.25)
accumulator = gh(query_image)
plt.clf()
plt.gray()
fig = plt.figure()
fig.add_subplot(2,2,1)
plt.title('Reference image')
plt.imshow(reference_image)
fig.add_subplot(2,2,2)
plt.title('Query image')
plt.imshow(query_image)
fig.add_subplot(2,2,3)
plt.title('Accumulator')
plt.imshow(accumulator)
fig.add_subplot(2,2,4)
plt.title('Detection')
plt.imshow(query_image)
# top 5 results in red
m = n_max(accumulator, 5)
y_points = [pt[1][0] for pt in m]
x_points = [pt[1][1] for pt in m]
plt.scatter(x_points, y_points, marker='o', color='r')
# top result in yellow
i,j = np.unravel_index(accumulator.argmax(), accumulator.shape)
plt.scatter([j], [i], marker='x', color='y')
d,f = os.path.split(query)[0], os.path.splitext(os.path.split(query)[1])[0]
plt.savefig(os.path.join(d, f + '_output.png'))
return
def main():
reference_image = imread("../data/similar_objects/1t.jpg", flatten=True)
reference_image = imresize(reference_image, 0.25)
detect_s = general_hough_closure(reference_image)
general_hough(detect_s, reference_image, "../data/similar_objects/1.JPG")
if __name__ == '__main__':
main()
|
the-stack_0_26099
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Columbia EECS E6893 Big Data Analytics
import datetime
import pandas as pd
from psaw import PushshiftAPI
api = PushshiftAPI()
# the tags to track
tags = ['jpm', 'jpmorgan', 'aapl', 'apple', 'pfe', 'pfizer', 'eth', 'ethereum', 'btc', 'bitcoin']
subs = ['news', 'worldnews', 'stocks', 'business', 'wallstreetbets', 'investing', 'stockmarket', 'crypto', 'cryptocurrency', 'bitcoin', 'etherium', 'dogecoin']
start_date = datetime.date(2019, 11, 1)
end_date = datetime.date(2021, 11, 30)
delta = end_date - start_date
if __name__ == '__main__':
try:
comments = []
for i in range(delta.days):
first_datetime = datetime.datetime.combine(start_date + datetime.timedelta(days=i), datetime.datetime.min.time())
second_datetime = datetime.datetime.combine(start_date + datetime.timedelta(days=i+1), datetime.datetime.min.time())
print('Dates: {0} to {1}'.format(first_datetime, second_datetime))
for sub in subs:
query = api.search_submissions(subreddit=sub, after=first_datetime, before=second_datetime, limit=500)
for comment in query:
if any(tag in comment.d_['title'] for tag in tags):
comment_time = datetime.datetime.utcfromtimestamp(int(comment.created_utc)).strftime('%Y-%m-%d %H:%M:%S')
comment_data = [comment_time, comment.d_['title'], comment.d_['score'], comment.d_['num_comments']]
comments.append(comment_data)
print(len(comments))
df = pd.DataFrame(comments, columns= ['Time', 'Comment', 'Score', 'Num Comments'])
df.to_csv('new_reddit_past_samples_try.csv', index=False)
except KeyboardInterrupt:
print('Interrupted')
df = pd.DataFrame(comments, columns= ['Time', 'Comment', 'Score', 'Num Comments'])
df.to_csv('new_reddit_past_samples_try.csv', index=False)
print('Complete')
|
the-stack_0_26101
|
# -*- coding: utf-8 -*-
"""
heroku3.api
~~~~~~~~~~
This module provides the basic API interface for Heroku.
"""
import sys
import json
from pprint import pprint # noqa
# Third party libraries
import requests
from requests.exceptions import HTTPError
# Project libraries
from .models import Plan, RateLimit
from .helpers import is_collection, validate_name
from .exceptions import InvalidNameException
from .models.app import App
from .models.key import Key
from .rendezvous import Rendezvous
from .structures import KeyedListResource, SSHKeyListResource
from .models.dyno import Dyno
from .models.addon import Addon
from .models.oauth import OAuthToken, OAuthClient, OAuthAuthorization
from .models.account import Account
from .models.invoice import Invoice
from .models.app_setup import AppSetup
from .models.configvars import ConfigVars
from .models.logsession import LogSession
from .models.account.feature import AccountFeature
if sys.version_info > (3, 0):
from urllib.parse import quote
else:
from urllib import quote # noqa
HEROKU_URL = "https://api.heroku.com"
HEROKU_HEADERS = {"Accept": "application/vnd.heroku+json; version=3.cedar-acm", "Content-Type": "application/json"}
class RateLimitExceeded(Exception):
pass
class MaxRangeExceeded(Exception):
pass
class HerokuCore(object):
"""The core Heroku class."""
def __init__(self, session=None):
super(HerokuCore, self).__init__()
if session is None:
session = requests.session()
#: The User's API Key.
self._api_key = None
self._api_key_verified = None
self._heroku_url = HEROKU_URL
self._session = session
self._ratelimit_remaining = None
self._last_request_id = None
# We only want JSON back.
self._session.headers.update(HEROKU_HEADERS)
def __repr__(self):
return "<heroku-core at 0x%x>" % (id(self))
def authenticate(self, api_key):
"""Logs user into Heroku with given api_key."""
self._api_key = api_key
# Attach auth to session.
self._session.auth = ("", self._api_key)
return self._verify_api_key()
@property
def is_authenticated(self):
if self._api_key_verified is None:
return self._verify_api_key()
else:
return self._api_key_verified
def _verify_api_key(self):
r = self._session.get(self._url_for("account/rate-limits"))
self._api_key_verified = True if r.ok else False
return self._api_key_verified
def _url_for(self, *args):
args = map(str, args)
return "/".join([self._heroku_url] + list(args))
@staticmethod
def _resource_serialize(o):
"""Returns JSON serialization of given object."""
return json.dumps(o)
@staticmethod
def _resource_deserialize(s):
"""Returns dict deserialization of a given JSON string."""
try:
return json.loads(s)
except ValueError:
raise ResponseError("The API Response was not valid.")
def _get_headers_for_request(
self,
legacy=False,
order_by=None,
limit=None,
valrange=None,
sort=None,
):
headers = {}
if legacy is True:
# Nasty patch session to fallback to old api
headers.update({"Accept": "application/json"})
else:
range_str = None
# "Range: name ..; order=desc,max=10;"#
if order_by or limit or valrange or sort:
range_str = ""
seperator = ""
if order_by:
range_str = "{0} ..;".format(order_by)
else:
range_str = "id ..;"
if sort is not None:
assert sort == "asc" or sort == "desc"
range_str += " order={0}".format(sort)
seperator = ","
else:
range_str += " "
if limit:
if limit > 1000:
raise MaxRangeExceeded(
"Your *limit* ({0}) argument is"
" greater than the maximum "
"allowed value of 1000".format(limit)
)
range_str += "{0}max={1}".format(seperator, limit)
range_str += ";"
# print(range_str)
if valrange:
# If given, This should override limit and order_by
range_str = valrange
if range_str is not None:
headers.update({"Range": range_str})
return headers
def _http_resource(
self,
method,
resource,
params=None,
data=None,
legacy=False,
order_by=None,
limit=None,
valrange=None,
sort=None,
):
"""Makes an HTTP request."""
if not is_collection(resource):
resource = [resource]
url = self._url_for(*resource)
headers = self._get_headers_for_request(
legacy=legacy, order_by=order_by, limit=limit, valrange=valrange, sort=sort
)
r = self._session.request(method, url, params=params, data=data, headers=headers)
if "ratelimit-remaining" in r.headers:
self._ratelimit_remaining = r.headers["ratelimit-remaining"]
if "Request-Id" in r.headers:
self._last_request_id = r.headers["Request-Id"]
# if 'Accept-Ranges' in r.headers:
# print("Accept-Ranges = {0}".format(r.headers['Accept-Ranges']))
if r.status_code == 422:
http_error = HTTPError(
"%s - %s Client Error: %s" % (self._last_request_id, r.status_code, r.content.decode("utf-8"))
)
http_error.response = r
raise http_error
if r.status_code == 429:
# Rate limit reached
raise RateLimitExceeded("You have exceeded your rate limit \n{0}".format(r.content.decode("utf-8")))
if (not str(r.status_code).startswith("2")) and (r.status_code not in [304]):
pass
r.raise_for_status()
return r
def _get_resource(self, resource, obj, params=None, **kwargs):
"""Returns a mapped object from an HTTP resource."""
r = self._http_resource("GET", resource, params=params)
return self._process_item(self._resource_deserialize(r.content.decode("utf-8")), obj, **kwargs)
def _process_item(self, item, obj, **kwargs):
return obj.new_from_dict(item, h=self, **kwargs)
def _get_resources(
self,
resource,
obj,
params=None,
map=None,
legacy=None,
order_by=None,
limit=None,
valrange=None,
sort=None,
**kwargs
):
"""Returns a list of mapped objects from an HTTP resource."""
if not order_by:
order_by = obj.order_by
return self._process_items(
self._get_data(
resource, params=params, legacy=legacy, order_by=order_by, limit=limit, valrange=valrange, sort=sort
),
obj,
map=map,
**kwargs
)
def _get_data(self, resource, params=None, legacy=None, order_by=None, limit=None, valrange=None, sort=None):
r = self._http_resource(
"GET", resource, params=params, legacy=legacy, order_by=order_by, limit=limit, valrange=valrange, sort=sort
)
items = self._resource_deserialize(r.content.decode("utf-8"))
if r.status_code == 206 and "Next-Range" in r.headers and not limit:
# We have unexpected chunked response - deal with it
valrange = r.headers["Next-Range"]
print(
"Warning Response was chunked, Loading the next Chunk using the following next-range header returned by Heroku '{0}'. WARNING - This breaks randomly depending on your order_by name. I think it's only guarenteed to work with id's - Looks to be a Heroku problem".format( # noqa
valrange
)
) # noqa
new_items = self._get_data(
resource, params=params, legacy=legacy, order_by=order_by, limit=limit, valrange=valrange, sort=sort
)
items.extend(new_items)
return items
def _process_items(self, d_items, obj, map=None, **kwargs):
if not isinstance(d_items, list):
print(
"Warning, Response for '{0}' was of type {1} - I was expecting a 'list'. This could mean the api has changed its response type for this request.".format( # noqa
obj, type(d_items) # noqa
)
) # noqa
if isinstance(d_items, dict):
print("As it's a dict, I'll try to process it anyway")
return self._process_item(d_items, obj, **kwargs)
items = [obj.new_from_dict(item, h=self, **kwargs) for item in d_items]
if map is None:
map = KeyedListResource
list_resource = map(items=items)
list_resource._h = self
list_resource._obj = obj
list_resource._kwargs = kwargs
return list_resource
class Heroku(HerokuCore):
"""The main Heroku class."""
def __init__(self, session=None):
super(Heroku, self).__init__(session=session)
def __repr__(self):
return "<heroku-client at 0x%x>" % (id(self))
def create_appsetup(self, source_blob, overrides=None, app=None):
"""
Creates an app-setup
"""
assert "url" in source_blob
payload = {"source_blob": source_blob}
if overrides:
payload.update({"overrides": overrides})
if app:
payload.update({"app": app})
r = self._http_resource(method="POST", resource=("app-setups",), data=self._resource_serialize(payload))
r.raise_for_status()
item = self._resource_deserialize(r.content.decode("utf-8"))
pprint(item)
return AppSetup.new_from_dict(item, h=self)
def get_appsetup(self, app_setup_id):
return self._get_resource(("app-setups/{0:s}".format(app_setup_id)), AppSetup)
def account(self):
return self._get_resource(("account"), Account)
def addons(self, app_id_or_name, **kwargs):
return self._get_resources(resource=("apps", app_id_or_name, "addons"), obj=Addon, **kwargs)
def addon_services(self, id_or_name=None, **kwargs):
if id_or_name is not None:
return self._get_resource(("addon-services/{0}".format(quote(id_or_name))), Plan)
else:
return self._get_resources(("addon-services"), Plan, **kwargs)
def apps(self, **kwargs):
return self._get_resources(("apps"), App, **kwargs)
def app(self, id_or_name):
return self._get_resource(("apps/{0:s}".format(id_or_name)), App)
def create_app(self, name=None, stack_id_or_name="cedar", region_id_or_name=None, organization=None):
"""Creates a new app."""
payload = {}
if organization:
payload["organization"] = organization
resource = ("organizations", "apps")
else:
resource = ("apps",)
if name:
if validate_name(name):
payload["name"] = name
else:
raise InvalidNameException(
"Name must start with a letter, end with a letter or digit and can only contain lowercase letters, digits, and dashes."
)
if stack_id_or_name:
payload["stack"] = stack_id_or_name
if region_id_or_name:
payload["region"] = region_id_or_name
try:
r = self._http_resource(method="POST", resource=resource, data=self._resource_serialize(payload))
r.raise_for_status()
item = self._resource_deserialize(r.content.decode("utf-8"))
app = App.new_from_dict(item, h=self)
except HTTPError as e:
if "Name is already taken" in str(e):
try:
app = self.app(name)
except: # noqa
raise
else:
print("Warning - {0:s}".format(str(e)))
else:
raise
return app
def keys(self, **kwargs):
return self._get_resources(("account/keys"), Key, map=SSHKeyListResource, **kwargs)
def invoices(self, **kwargs):
return self._get_resources(("account/invoices"), Invoice)
def labs(self, **kwargs):
return self.features(**kwargs)
def features(self, **kwargs):
return self._get_resources(("account/features"), AccountFeature, **kwargs)
def oauthauthorization(self, oauthauthorization_id):
return self._get_resource(("oauth", "authorizations", oauthauthorization_id), OAuthAuthorization)
def oauthauthorizations(self, **kwargs):
return self._get_resources(("oauth", "authorizations"), OAuthAuthorization, **kwargs)
def oauthauthorization_create(self, scope, oauthclient_id=None, description=None):
"""
Creates an OAuthAuthorization
"""
payload = {"scope": scope}
if oauthclient_id:
payload.update({"client": oauthclient_id})
if description:
payload.update({"description": description})
r = self._http_resource(
method="POST", resource=("oauth", "authorizations"), data=self._h._resource_serialize(payload)
)
r.raise_for_status()
item = self._resource_deserialize(r.content.decode("utf-8"))
        return OAuthAuthorization.new_from_dict(item, h=self)
def oauthauthorization_delete(self, oauthauthorization_id):
"""
Destroys the OAuthAuthorization with oauthauthorization_id
"""
r = self._http_resource(method="DELETE", resource=("oauth", "authorizations", oauthauthorization_id))
r.raise_for_status()
return r.ok
def oauthclient(self, oauthclient_id):
return self._get_resource(("oauth", "clients", oauthclient_id), OAuthClient)
def oauthclients(self, **kwargs):
return self._get_resources(("oauth", "clients"), OAuthClient, **kwargs)
def oauthclient_create(self, name, redirect_uri):
"""
Creates an OAuthClient with the given name and redirect_uri
"""
payload = {"name": name, "redirect_uri": redirect_uri}
r = self._http_resource(
method="POST", resource=("oauth", "clients"), data=self._h._resource_serialize(payload)
)
r.raise_for_status()
item = self._resource_deserialize(r.content.decode("utf-8"))
return OAuthClient.new_from_dict(item, h=self)
def oauthclient_delete(self, oauthclient_id):
"""
Destroys the OAuthClient with id oauthclient_id
"""
r = self._http_resource(method="DELETE", resource=("oauth", "clients", oauthclient_id))
r.raise_for_status()
return r.ok
def oauthtoken_create(self, client_secret=None, grant_code=None, grant_type=None, refresh_token=None):
"""
Creates an OAuthToken with the given optional parameters
"""
payload = {}
grant = {}
if client_secret:
payload.update({"client": {"secret": client_secret}})
if grant_code:
grant.update({"code": grant_code})
if grant_type:
grant.update({"type": grant_type})
if refresh_token:
payload.update({"refresh_token": {"token": refresh_token}})
if grant:
payload.update({"grant": grant})
r = self._http_resource(method="POST", resource=("oauth", "tokens"), data=self._h._resource_serialize(payload))
r.raise_for_status()
item = self._resource_deserialize(r.content.decode("utf-8"))
return OAuthToken.new_from_dict(item, h=self)
def run_command_on_app(
self, appname, command, size="standard-1x", attach=True, printout=True, env=None, timeout_secs=60
):
"""Run a remote command attach=True if you want to capture the output"""
if attach:
attach = True
payload = {"command": command, "attach": attach, "size": size}
if env:
payload["env"] = env
r = self._http_resource(
method="POST", resource=("apps", appname, "dynos"), data=self._resource_serialize(payload)
)
r.raise_for_status()
item = self._resource_deserialize(r.content.decode("utf-8"))
dyno = Dyno.new_from_dict(item, h=self)
if attach:
output = Rendezvous(dyno.attach_url, printout=printout, timeout_secs=timeout_secs).start()
return output, dyno
else:
return dyno
@property
def rate_limit(self):
return self._get_resource(("account/rate-limits"), RateLimit)
def ratelimit_remaining(self):
if self._ratelimit_remaining is not None:
return int(self._ratelimit_remaining)
else:
self.rate_limit
return int(self._ratelimit_remaining)
def stream_app_log(self, app_id_or_name, dyno=None, lines=100, source=None, timeout=None):
logger = self._app_logger(app_id_or_name, dyno=dyno, lines=lines, source=source, tail=True)
return logger.stream(timeout=timeout)
def get_app_log(self, app_id_or_name, dyno=None, lines=100, source=None, timeout=None):
logger = self._app_logger(app_id_or_name, dyno=dyno, lines=lines, source=source, tail=0)
return logger.get(timeout=timeout)
def update_appconfig(self, app_id_or_name, config):
payload = self._resource_serialize(config)
r = self._http_resource(method="PATCH", resource=("apps", app_id_or_name, "config-vars"), data=payload)
r.raise_for_status()
item = self._resource_deserialize(r.content.decode("utf-8"))
return ConfigVars.new_from_dict(item, h=self)
def _app_logger(self, app_id_or_name, dyno=None, lines=100, source=None, tail=0):
payload = {}
if dyno:
payload["dyno"] = dyno
if tail:
payload["tail"] = tail
if source:
payload["source"] = source
if lines:
payload["lines"] = lines
r = self._http_resource(
method="POST", resource=("apps", app_id_or_name, "log-sessions"), data=self._resource_serialize(payload)
)
r.raise_for_status()
item = self._resource_deserialize(r.content.decode("utf-8"))
return LogSession.new_from_dict(item, h=self, app=self)
@property
def last_request_id(self):
return self._last_request_id
class ResponseError(ValueError):
"""The API Response was unexpected."""
|
the-stack_0_26103
|
import pytest
def names(argvalue):
if isinstance(argvalue, (list,)):
return '-'
if argvalue.startswith("RRULE"):
return argvalue[6:]
return '-'
params = (
('rrule', 'dtstart', 'exdates', 'expected'),
[
[
"RRULE:FREQ=MONTHLY;BYMONTHDAY=15,30;COUNT=5",
"DTSTART;TZID=America/New_York:20070115T090000",
[],
[
"20070115T090000",
"20070130T090000",
"20070215T090000",
"20070315T090000",
"20070330T090000",
]
],
],
False,
names
)
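# 'params' is unpacked below as pytest.mark.parametrize(argnames, argvalues, indirect=False, ids=names).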
def list_rrule_no_tz(rrule, dtstart, begin, end, exdates, maxres):
import uICAL
rule = rrule.split(':', 1)[1]
start = dtstart.split(':', 1)[1]
excludes = [ex.split(':', 1)[1] for ex in exdates]
rr = uICAL.RRule(rule, start, begin=begin, end=end, exclude=excludes)
results = []
while rr.next():
results.append("%04d%02d%02dT%02d%02d%02d" % rr.now())
if len(results) == maxres:
break
return results
@pytest.mark.parametrize(*params)
def test_rrule_stepping_begin(rrule, dtstart, exdates, expected):
expected_all = expected
for i in range(1, len(expected_all)):
expected = expected_all[i:]
begin = expected_all[i]
results = list_rrule_no_tz(rrule, dtstart, begin, None, exdates, len(expected))
if len(results) == len(expected):
if expected[-1] == "...":
results[-1] = "..."
from itertools import count
for c, r, e in zip(count(), results, expected):
assert r == e, "Rolling index=%d, count=%d" % (i, c)
|
the-stack_0_26104
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import logging
import numpy as np
import os
import re
from typing import Callable, Sequence
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
from jax import api
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax.config import config
from jax.experimental import host_callback as hcb
from jax.lib import xla_bridge
config.parse_flags_with_absl()
FLAGS = config.FLAGS
def skip_if_jit_not_enabled():
if os.getenv("JAX_ENABLE_JIT_PRINT", "false") == "false":
raise SkipTest("print jit not enabled yet; use JAX_ENABLE_JIT_PRINT env.")
def supported_dtypes():
return sorted(jtu.supported_dtypes(), key=lambda x: np.dtype(x).name)
class _TestingOutputStream(object):
"""Use as `output_stream` for tests."""
def __init__(self):
self._output = []
self.testMethodName = None
def write(self, what: str) -> None:
print(f"output_stream[{self.testMethodName}]: {what}", end="")
self._output.append(what)
@property
def output(self):
return "".join(self._output)
def __str__(self):
return "TestingOutputStream"
def reset(self):
self._output = []
testing_stream = _TestingOutputStream()
def fun1(a):
y = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream)
y = hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream, result=y)
return y**2 # Some computation to make the gradient interesting
def fun1_equiv(a):  # Numerical equivalent of fun1
return (a * 2.)**2
def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase, expected: str, what: str):
"""A variant that preprocesses the string to eliminate non-determinism in
floating point values, and several uninteresting id_tap primitive params."""
# Sometimes we get floating points in the output; we round them
def repl_floats(match_group):
matched = match_group.group(0)
if matched == ".": return matched
# TODO: why can't we use here np.around?
x = np.around(float(matched), decimals=2)
return f"{x:.2f}"
what = re.sub(r"\-?\d*\.[\-\def]*", repl_floats, what)
what = re.sub(r"output_stream=[^\]\n]*", "", what)
what = re.sub(r"threshold=[^\]\n]*", "", what)
# Empty lines
what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE)
def repl_func(match_group):
matched = match_group.group(0)
if "function _print_consumer" in matched:
return "func=_print"
else:
return "..."
what = re.sub(r"func=(.*)", repl_func, what)
tst.assertMultiLineStrippedEqual(expected, what)
class HostCallbackTest(jtu.JaxTestCase):
def setUp(self):
testing_stream.reset()
testing_stream.testMethodName = self._testMethodName
self.old_flags = os.getenv("XLA_FLAGS", "")
def tearDown(self) -> None:
if os.getenv("XLA_FLAGS") != self.old_flags:
os.environ["XLA_FLAGS"] = self.old_flags
xla_bridge.get_backend.cache_clear()
def helper_set_devices(self, nr_devices):
flags_str = os.getenv("XLA_FLAGS", "")
os.environ["XLA_FLAGS"] = (
flags_str +
" --xla_force_host_platform_device_count={}".format(nr_devices))
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
return api.devices()
def helper_set_hlo_dump(self):
flags_str = os.getenv("XLA_FLAGS", "")
os.environ["XLA_FLAGS"] = f"{flags_str} --xla_dump_to=/tmp/xla_dump"
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
def test_eval(self):
    # TODO: re-enable jaxpr golden tests when changing host_callback
#assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(fun1)(5.)))
with hcb.outfeed_receiver():
self.assertAllClose((5. * 2.) ** 2, fun1(5.))
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
what: y * 3
30.00""", testing_stream.output)
testing_stream.reset()
def test_with_tuple_results(self):
def func2(x):
x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream)
return x1 + y1
#assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(func2)(3.)))
with hcb.outfeed_receiver():
self.assertEqual(3. * (2. + 3.), func2(3.))
assertMultiLineStrippedEqual(self, """
[ 6.00
9.00 ]""", testing_stream.output)
testing_stream.reset()
def test_with_dict_results(self):
def func2(x):
res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream)
return res["a"] + res["b"]
with hcb.outfeed_receiver():
self.assertEqual(3. * (2. + 3.), func2(3.))
assertMultiLineStrippedEqual(self, """
{ a=6.00
b=9.00 }""", testing_stream.output)
testing_stream.reset()
def test_with_result(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream)
return x1
with hcb.outfeed_receiver():
self.assertEqual(3. * 4., func2(3.))
assertMultiLineStrippedEqual(self, """
[ 6.00
9.00 ]""", testing_stream.output)
testing_stream.reset()
def test_eval_tap_exception(self):
# Simulate a tap error
def tap_err(*args, **kwargs):
raise NotImplementedError
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1, what="err")
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaises(hcb.TapFunctionException):
with hcb.outfeed_receiver():
_ = func(0)
# We should have received everything before the error
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
testing_stream.reset()
def test_jit_simple(self):
jit_fun1 = api.jit(lambda x: 3. * hcb.id_print(
2. * x, what="here", output_stream=testing_stream))
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
res = jit_fun1(5.)
self.assertAllClose(6. * 5., res)
assertMultiLineStrippedEqual(self, """
what: here
10.00""", testing_stream.output)
testing_stream.reset()
def test_jit_constant(self):
def func(x):
return hcb.id_print(42, result=x, output_stream=testing_stream)
#assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(api.jit(func))(5)))
with hcb.outfeed_receiver():
self.assertAllClose(5, api.jit(func)(5))
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
testing_stream.reset()
def test_jit_sequence1(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
logging.info("%s: %s", self._testMethodName,
api.make_jaxpr(func)(1))
logging.info("%s: %s", self._testMethodName,
api.xla_computation(func)(1).as_hlo_text())
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertEqual(2, api.jit(func)(1))
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2""", testing_stream.output)
testing_stream.reset()
def test_jit2(self):
"""A sequence of JIT."""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
return x2
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertEqual(2, api.jit(func)(1))
self.assertEqual(11, api.jit(func)(10))
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
testing_stream.reset()
def test_jit_nested(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
def func_nested(x):
x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream)
return x2
x3 = api.jit(func_nested)(x1)
return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream)
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertEqual(3, api.jit(func)(1))
assertMultiLineStrippedEqual(self, """
where: 1
1
where: nested
2
where: 3
3""", testing_stream.output)
testing_stream.reset()
def test_jit_devices(self):
"""Running on multiple devices."""
devices = api.local_devices()
logging.info(f"{self._testMethodName}: has devices {devices}")
def func(x, device_id):
x1 = hcb.id_print(x, dev=str(device_id), output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream)
return x2
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
for d in devices:
self.assertEqual(112, api.jit(func, device=d, static_argnums=1)(111, d.id))
logging.info(f"{self._testMethodName}: found output {testing_stream.output}")
self.assertEqual(len(devices), len(re.findall(r"111", testing_stream.output)))
self.assertEqual(len(devices), len(re.findall(r"112", testing_stream.output)))
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_pytree(self, with_jit=False):
def func(x, what=""):
"""Returns some pytrees depending on x"""
if what == "pair_1_x":
return (1, x)
elif what == "pair_x_2x":
return (x, 2 * x)
elif what == "dict":
return dict(a=2 * x, b=3 * x)
else:
assert False
tap_count = 0
def tap_func(a, what=""):
nonlocal tap_count
tap_count += 1
self.assertEqual(func(5, what), a)
transform = api.jit if with_jit else lambda f: f
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
for what in ("pair_1_x", "pair_x_2x", "dict"):
self.assertEqual(func(10, what),
transform(lambda x: hcb.id_tap(tap_func, func(x, what),
result=func(x * 2, what),
what=what))(5))
# Wait for receivers to be done
self.assertEqual(3, tap_count)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_cond(self, with_jit=False):
"""A conditional"""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="cond_t", output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="cond_f", result=x, output_stream=testing_stream),
x2 + 1)
x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream)
return x5
transform = api.jit if with_jit else lambda f: f
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertEqual(4, transform(func)(1))
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: cond_f
-1
where: end
4""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_while_cond(self, with_jit=False):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(x):
x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="w_b_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="w_b_f",
result=x, output_stream=testing_stream),
x3 + 1)
return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream)
x10 = lax.while_loop(lambda x: x <= 3, body, x2)
res = hcb.id_print(x10, where="end", output_stream=testing_stream)
return res
transform = api.jit if with_jit else lambda f: f
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertEqual(4, transform(func)(1))
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: w_b_1
2
where: w_b_t
3
where: w_b_2
3
where: w_b_1
3
where: w_b_f
-1
where: w_b_2
4
where: end
4""", testing_stream.output)
testing_stream.reset()
def test_jit_while_pred_tap(self):
"""While with printing in the conditional."""
def func(x):
x1 = hcb.id_print(x, where="1")
x10 = lax.while_loop(lambda x: hcb.id_print(x < 3,
where="w_p",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x10, where="3", output_stream=testing_stream)
return res
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertEqual(3, api.jit(func)(1))
assertMultiLineStrippedEqual(self,
"""
where: w_p
True
where: w_b
2
where: w_p
True
where: w_b
3
where: w_p
False
where: 3
3""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_scan_cond(self, with_jit=False):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(c, x):
x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream),
x3 + 1)
return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream))
_, x10 = lax.scan(body, x2, jnp.arange(3))
res = hcb.id_print(x10, where="10", output_stream=testing_stream)
return res
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
if with_jit:
func = api.jit(func)
res = func(1)
self.assertAllClose(jnp.array([1, 2, 3]), res)
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: s_1
0
where: s_t
1
where: s_2
1
where: s_1
1
where: s_f
-1
where: s_2
2
where: s_1
2
where: s_t
3
where: s_2
3
where: 10
[1 2 3]""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_shape_{shape}_dtype_{dtype}_nr_args={nr_args}",
shape=shape,
dtype=dtype,
nr_args=nr_args) for nr_args in [1, 2]
for shape in [(), (2,), (2, 3), (2, 3, 4)]
for dtype in supported_dtypes()))
def test_jit_types(self, nr_args=2, dtype=jnp.int16, shape=(2,)):
if dtype in (jnp.complex64, jnp.complex128, jnp.bool_):
raise SkipTest(f"id_print jit not implemented for {dtype}.")
if jtu.device_under_test() == "tpu":
if dtype in (jnp.int16,):
raise SkipTest(f"transfering {dtype} not supported on TPU")
args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)]
if nr_args > 1:
args = args * nr_args
jit_fun1 = api.jit(lambda xs: hcb.id_print(
xs,
a_new_test="************",
testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}"))
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
_ = jit_fun1(args)
# self.assertAllClose(args, res)
def test_jit_large(self):
arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1))
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
api.jit(hcb.id_print)(arg)
def test_jit_several_together(self):
arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5))
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
api.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32))
def test_jit_interleaving(self):
# Several jit's without data dependencies; they may interfere
count = 0 # Count tap invocations
nr_arrays = 5
def tap_func(arg, **kwargs):
nonlocal count
assert len(arg) == nr_arrays
count += 1
# This is the function that we'll run multiple times
def func(x, count):
for i in range(count):
x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)], i=i)[-1]
return x
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
x = jnp.array(1, dtype=np.int32)
res = 0
for i in range(10):
# No dependencies between the jit invocations
res += api.jit(lambda x: func(x, 10))(x)
self.assertEqual(100, count)
def test_jit_tap_exception(self):
# Simulate a tap error
def tap_err(*args, **kwargs):
raise NotImplementedError
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1, what="err")
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaises(hcb.TapFunctionException):
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
res = api.jit(func)(0)
# Even though the receiver thread raised, the main thread should still
# return 3.
self.assertEqual(3, res)
# We should have received all others
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
testing_stream.reset()
def test_jit_unknown_tap(self):
# Simulate an unknown tap function
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(hcb._unknown_testing_consumer, x1 + 1, what="err")
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaises(hcb.TapFunctionException):
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
res = api.jit(func)(0)
# Even though the receiver thread raised, the main thread should still
# return 3.
self.assertEqual(3, res)
# We should have received all others
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
testing_stream.reset()
# On CPU and GPU the device code blocks
# On GPU it seems that there is a 5 min timeout?
# On TPU the client does not block, but messes up the rest somehow
@jtu.skip_on_devices("cpu", "gpu", "tpu")
def test_jit_receiver_ends_prematurely(self):
# Simulate an unknown tap function
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(hcb._end_consumer, result=x1 + 1) # Will end the consumer loop
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
_ = api.jit(func)(0)
assert False # It seems that the previous jit blocks above
def test_jit_error_no_consumer(self):
# Check for errors if starting jit without a consumer active
with self.assertRaisesRegex(ValueError, "outfeed_receiver is not started"):
api.jit(lambda x: hcb.id_print(x))(0)
def test_while(self):
"""Executing while, even without JIT uses compiled code"""
y = jnp.ones(5) # captured const
def func(x):
return lax.while_loop(
lambda c: c[1] < 5,
lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
(x, 1))
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
func(y)
assertMultiLineStrippedEqual(self, """
1
2
3
4""", testing_stream.output)
testing_stream.reset()
def test_while_error_no_receiver(self):
"""Executing while needs the receiver"""
y = jnp.ones(5) # captured const
def func(x):
return lax.while_loop(
lambda c: c[1] < 5,
lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
(x, 1))
with self.assertRaisesRegex(ValueError, ".*outfeed_receiver.*not started"):
func(y).block_until_ready()
def test_jvp(self):
jvp_fun1 = lambda x, xt: api.jvp(fun1, (x,), (xt,))
#assertMultiLineStrippedEqual(self, "",
# str(api.make_jaxpr(jvp_fun1)(jnp.float32(5.), jnp.float32(0.1))))
with hcb.outfeed_receiver():
res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1))
self.assertAllClose(100., res_primals, check_dtypes=False)
self.assertAllClose(4., res_tangents, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
transforms: ({'name': 'jvp'},) what: a * 2
0.20
what: y * 3
30.00
transforms: ({'name': 'jvp'},) what: y * 3
0.60""", testing_stream.output)
testing_stream.reset()
def test_grad_primal_unused(self):
# The output of id_print is not needed for backwards pass
def func(x):
return 2. * hcb.id_print(x * 3., what="x * 3", output_stream=testing_stream)
grad_func = api.grad(func)
with hcb.outfeed_receiver():
assertMultiLineStrippedEqual(self, """
{ lambda ; a.
let
in (6.00,) }""", str(api.make_jaxpr(grad_func)(5.)))
# Just making the Jaxpr invokes the id_print once
assertMultiLineStrippedEqual(self, """
transforms: ({'name': 'jvp'}, {'name': 'transpose'}) what: x * 3
2.00""", testing_stream.output)
testing_stream.reset()
with hcb.outfeed_receiver():
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(6., res_grad, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: x * 3
15.00
transforms: ({'name': 'jvp'}, {'name': 'transpose'}) what: x * 3
2.00""", testing_stream.output)
testing_stream.reset()
def test_grad_simple(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream)
grad_func = api.grad(func)
#assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(grad_func)(5.)))
with hcb.outfeed_receiver():
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
what: y * 3
30.00
transforms: ({'name': 'jvp'}, {'name': 'transpose'}) what: y * 3
5.00
transforms: ({'name': 'jvp'}, {'name': 'transpose'}) what: x * 2
15.00""", testing_stream.output)
testing_stream.reset()
def test_grad_double(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * (y * 3.)
grad_func = api.grad(api.grad(func))
with hcb.outfeed_receiver():
_ = api.make_jaxpr(grad_func)(5.)
      # Just making the Jaxpr invokes the id_print twice
assertMultiLineStrippedEqual(self, """
transforms: ({'name': 'jvp'}, {'name': 'transpose'}) what: x * 2
3.00
transforms: ({'name': 'jvp'}, {'name': 'transpose'}, {'name': 'jvp'}, {'name': 'transpose'}) what: x * 2
2.00""", testing_stream.output)
testing_stream.reset()
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(12., res_grad, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
transforms: ({'name': 'jvp'}, {'name': 'transpose'}) what: x * 2
15.00
transforms: ({'name': 'jvp'}, {'name': 'transpose'}, {'name': 'jvp'}, {'name': 'transpose'}) what: x * 2
2.00
transforms: ({'name': 'jvp'}, {'name': 'transpose'}) what: x * 2
3.00""", testing_stream.output)
testing_stream.reset()
def test_vmap(self):
vmap_fun1 = api.vmap(fun1)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
#assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(vmap_fun1)(vargs)))
with hcb.outfeed_receiver():
_ = vmap_fun1(vargs)
assertMultiLineStrippedEqual(self, """
transforms: ({'name': 'batch', 'batch_dims': (0,)},) what: a * 2
[ 8.00 10.00]
transforms: ({'name': 'batch', 'batch_dims': (0, 0)},) what: y * 3
[24.00 30.00]""", testing_stream.output)
testing_stream.reset()
def test_vmap_not_batched(self):
x = 3.
def func(y):
# x is not mapped, y is mapped
_, y = hcb.id_print((x, y), output_stream=testing_stream)
return x + y
vmap_func = api.vmap(func)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
#assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(vmap_func)(vargs)))
with hcb.outfeed_receiver():
_ = vmap_func(vargs)
assertMultiLineStrippedEqual(self, """
transforms: ({'name': 'batch', 'batch_dims': (None, 0)},)
[ 3.00
[4.00 5.00] ]""", testing_stream.output)
testing_stream.reset()
def test_double_vmap(self):
# A 2D tensor with x[i, j] = i + j using 2 vmap
def sum(x, y):
return hcb.id_print(x + y, output_stream=testing_stream)
def sum_rows(xv, y):
return api.vmap(sum, in_axes=(0, None))(xv, y)
def sum_all(xv, yv):
return api.vmap(sum_rows, in_axes=(None, 0))(xv, yv)
xv = jnp.arange(5, dtype=np.int32)
yv = jnp.arange(3, dtype=np.int32)
#assertMultiLineStrippedEqual(self, "", str(api.make_jaxpr(sum_all)(xv, yv)))
with hcb.outfeed_receiver():
_ = sum_all(xv, yv)
assertMultiLineStrippedEqual(self, """
transforms: ({'name': 'batch', 'batch_dims': (0,)}, {'name': 'batch', 'batch_dims': (0,)})
[[0 1 2 3 4]
[1 2 3 4 5]
[2 3 4 5 6]]""", testing_stream.output)
testing_stream.reset()
def test_vmap_while(self):
"""Vmap of while."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: x < 2,
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertAllClose(np.array([2, 2, 2, 3, 4]), api.jit(api.vmap(func))(inputs),
check_dtypes=False)
assertMultiLineStrippedEqual(self, """
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: 1
[0 1 2 3 4]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: w_b
[1 2 3 4 5]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: w_b
[2 3 3 4 5]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: 3
[2 2 2 3 4]""", testing_stream.output)
testing_stream.reset()
def test_vmap_while_tap_cond(self):
"""Vmap of while, with a tap in the conditional."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertAllClose(np.array([2, 2, 2, 3, 4]), api.jit(api.vmap(func))(inputs),
check_dtypes=False)
assertMultiLineStrippedEqual(self, """
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: 1
[0 1 2 3 4]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: w_c
[ True True False False False]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: w_b
[1 2 3 4 5]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: w_c
[ True False False False False]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: w_b
[2 3 3 4 5]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: w_c
[False False False False False]
transforms: ({'name': 'batch', 'batch_dims': (0,)},) where: 3
[2 2 2 3 4]""", testing_stream.output)
testing_stream.reset()
def test_pmap(self):
vargs = 2. + jnp.arange(api.local_device_count(), dtype=jnp.float32)
pmap_fun1 = api.pmap(fun1, axis_name="i")
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
res = pmap_fun1(vargs)
expected_res = jnp.stack([fun1_equiv(2. + a) for a in range(api.local_device_count())])
self.assertAllClose(expected_res, res, check_dtypes=False)
def test_pmap_error_no_receiver(self):
# Check for errors if starting jit without a consumer active
vargs = 2. + jnp.arange(api.local_device_count(), dtype=jnp.float32)
with self.assertRaisesRegex(ValueError, "outfeed_receiver is not started"):
api.pmap(lambda x: hcb.id_print(x))(vargs)
def test_mask(self):
# TODO(necula)
raise SkipTest("masking has regressed")
@partial(api.mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
return jnp.sum(hcb.id_print(x, what="x", output_stream=testing_stream))
args = [jnp.arange(4)], dict(n=np.int64(2))
assertMultiLineStrippedEqual(self, """
{ lambda c f ; a b.
let d = lt c b
e = id_tap[ func=_print
logical_shapes=[(Traced<ShapedArray(int32[]):JaxprTrace(level=0/0)>,)]
transforms=('mask',)
what=x ] a
g = select d e f
h = reduce_sum[ axes=(0,) ] g
in (h,) }""", str(api.make_jaxpr(padded_sum)(*args)))
_ = padded_sum(*args)
self.assertMultiLineStrippedEqual("""
logical_shapes: [(2,)] transforms: ('mask',) what: x
[0 1 2 3]
""", testing_stream.output)
testing_stream.reset()
class OutfeedRewriterTest(jtu.JaxTestCase):
def assertRewrite(self, expected: str, func: Callable, args: Sequence,
has_input_token=True, has_output_token=True):
"""Check that the rewrite of func(*args) matches expected."""
_ = api.make_jaxpr(func)(*args)
# TODO: re-enable when we change the host_callback rewriter
#assertMultiLineStrippedEqual(self, expected,
# str(hcb._rewrite_typed_jaxpr(jaxpr, has_input_token, has_output_token)[0]))
def test_no_outfeed(self):
self.assertRewrite("""
{ lambda ; a.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_input_token=False, has_output_token=False)
self.assertRewrite("""
{ lambda ; a d.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_output_token=False)
self.assertRewrite("""
{ lambda ; a d.
let b = mul a a
c = add a b
in (c, d) }""", lambda x: x + x * x, [0])
def test_simple_outfeed(self):
self.assertRewrite("""
{ lambda ; a d.
let b = add a a
c e = id_tap[ arg_treedef=*
func=_print
] b d
in (c, e) }""", lambda x: hcb.id_print(x + x), [0])
def test_cond(self):
y = jnp.ones(5) # captured const
def func(x, z):
return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)),
z, lambda a: (hcb.id_print(a), y))
self.assertRewrite("""
{ lambda e f ; a b i.
let c = gt b 0
d = convert_element_type[ new_dtype=int32
old_dtype=bool ] c
g h j = cond[ branches=( { lambda ; f_ e a b c g.
let d h = id_tap[ arg_treedef=*
func=_print
] c g
in (d, e, h) }
{ lambda ; d g_ a b c h.
let
in (a, d, h) } )
linear=(False, False, False, False, False, False) ] d e f 1 2 b i
in (g, h, j) }""", func, [y, 5])
def test_while(self):
ct_body = jnp.ones(5, np.float32) # captured const for the body
ct_cond = jnp.ones(5, np.float32) # captured const for the conditional
def func(x):
return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond),
lambda c: (ct_body, hcb.id_print(c[1]) + 1.),
(x, np.float32(1.)))
# TODO: we should not need to start a receiver here!!! I believe this is
# because of the partial evaluation of while, which calls impl, which
# uses JIT.
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertRewrite("""
{ lambda b c ; a f.
let d e g = while[ body_jaxpr={ lambda ; c a b f.
let d g = id_tap[ arg_treedef=*
func=_print
] b f
e = add d 1.00
in (c, e, g) }
body_nconsts=1
cond_jaxpr={ lambda ; c a b g.
let d = add a c
e = reduce_sum[ axes=(0,) ] d
f = lt b e
in (f,) }
cond_nconsts=1 ] b c a 1.00 f
in (d, e, g) }""", func, [ct_body])
def test_while_pred_outfeed(self):
"""A while with outfeed in the pred."""
ct_body = jnp.ones(5) # captured const for the body
ct_cond = jnp.ones(2) # captured const for the conditional
def func(x):
return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5,
lambda c: (ct_body, hcb.id_print(c[1]) + 1),
(x, 1))
# TODO: we should not need to start a receiver here!!! I believe this is
# because of the partial evaluation of while, which calls impl, which
# uses JIT.
with hcb.outfeed_receiver(receiver_name=self._testMethodName):
self.assertRewrite("""
{ lambda b c ; a f.
let h i = xla_call[ call_jaxpr={ lambda ; c a b g.
let d e h = id_tap[ arg_treedef=*
func=_print
nr_untapped=1
] c b g
f = lt e 5
in (f, h) }
name=cond_before ] b a 1 f
y d e g = while[ body_jaxpr={ lambda ; n o p q r s.
let t u v = xla_call[ call_jaxpr={ lambda ; c a b f.
let d g = id_tap[ arg_treedef=*
func=_print
] b f
e = add d 1
in (c, e, g) }
name=body ] o q r s
w x = xla_call[ call_jaxpr={ lambda ; c a b g.
let d e h = id_tap[ arg_treedef=*
func=_print
nr_untapped=1
] c b g
f = lt e 5
in (f, h) }
name=cond_body ] n t u v
in (w, t, u, x) }
body_nconsts=2
cond_jaxpr={ lambda ; j k l m.
let
in (j,) }
cond_nconsts=0 ] b c h a 1 i
in (d, 5, g) }""", func, [ct_body])
def test_scan(self):
y = jnp.ones(5) # captured const
def func(x):
return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x)
self.assertRewrite("""
{ lambda b ; a f.
let c d g e = scan[ jaxpr={ lambda ; f a b g c.
let d e h = id_tap[ arg_treedef=PyTreeDef(tuple, [*,*])
func=_print
] a b g
in (d, e, h, f) }
length=5
linear=(False, False, False, False, False)
num_carry=3
num_consts=1
reverse=False ] b 1 2 f a
in (c, d, e, g) }""", func, [y])
if __name__ == "__main__":
absltest.main()
|
the-stack_0_26105
|
def getMAFFromSNPsAllele(SNPsAlleleFileName, outputFileName):
# Gets the MAF for each SNP from a file with SNP locations and alleles
# SNPsAlleleFile has the following columns:
# 1. SNP chromosome
# 2. SNP position
# 3. Allele on read
    # The SNPs allele file may list the same SNP repeatedly, once for every read that covers it
    # ASSUMES THAT THE SNPs ALLELE FILE IS SORTED BY SNP CHROMOSOME, SNP POSITION
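    # Illustrative example (hypothetical data): three reads covering chr1:12345
    # with alleles 0, 1, 0 give numAltAllele = 1 and numRefAllele = 2, so the
    # minor (here: alternate) allele frequency written out is 1/3 ~= 0.333.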
SNPsAlleleFileName = gzip.open(SNPsAlleleFileName)
outputFile = open(outputFileName, 'w+')
lastPosition = ("", 0)
alleles = []
for line in SNPsAlleleFileName:
# Iterate through the SNPs on the reads and find the MAF for each SNP
lineElements = line.strip().split("\t")
currentPosition = (lineElements[0], int(lineElements[1]))
if currentPosition != lastPosition:
# At a new SNP, so record the information from the last SNP
if len(alleles) > 0:
# Not at the beginning of the file
numAltAllele = sum(alleles)
numRefAllele = len(alleles) - numAltAllele
MAF = 0
if numRefAllele > numAltAllele:
# The alternate allele is the minor allele
MAF = float(numAltAllele)/float(len(alleles))
else:
MAF = float(numRefAllele)/float(len(alleles))
outputFile.write(lastPosition[0] + "\t" + str(lastPosition[1]) + "\t" + str(MAF) + "\n")
lastPosition = currentPosition
alleles = []
alleles.append(float(lineElements[2]))
numAltAllele = sum(alleles)
numRefAllele = len(alleles) - numAltAllele
MAF = 0
if numRefAllele > numAltAllele:
# The alternate allele is the minor allele
MAF = float(numAltAllele)/float(len(alleles))
else:
MAF = float(numRefAllele)/float(len(alleles))
outputFile.write(lastPosition[0] + "\t" + str(lastPosition[1]) + "\t" + str(MAF) + "\n")
SNPsAlleleFileName.close()
outputFile.close()
if __name__=="__main__":
import sys
import math
import gzip
SNPsAlleleFileName = sys.argv[1]
outputFileName = sys.argv[2]
getMAFFromSNPsAllele(SNPsAlleleFileName, outputFileName)
|
the-stack_0_26107
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara
import aesara.tensor as at
import cloudpickle
import numpy as np
import pytest
from aesara.tensor.random.op import RandomVariable
import pymc as pm
from pymc.distributions.distribution import get_moment
from pymc.initial_point import make_initial_point_fn, make_initial_point_fns_per_chain
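# The two helpers below map values between a random variable's constrained
# (untransformed) space and the unconstrained space PyMC samples in:
# transform_fwd applies the RV's transform, transform_back inverts it.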
def transform_fwd(rv, expected_untransformed):
return rv.tag.value_var.tag.transform.forward(expected_untransformed, *rv.owner.inputs).eval()
def transform_back(rv, transformed) -> np.ndarray:
return rv.tag.value_var.tag.transform.backward(transformed, *rv.owner.inputs).eval()
class TestInitvalAssignment:
def test_dist_warnings_and_errors(self):
with pytest.warns(FutureWarning, match="argument is deprecated and has no effect"):
rv = pm.Exponential.dist(lam=1, testval=0.5)
assert not hasattr(rv.tag, "test_value")
with pytest.raises(TypeError, match="Unexpected keyword argument `initval`."):
pm.Normal.dist(1, 2, initval=None)
pass
def test_new_warnings(self):
with pm.Model() as pmodel:
with pytest.warns(FutureWarning, match="`testval` argument is deprecated"):
rv = pm.Uniform("u", 0, 1, testval=0.75)
initial_point = pmodel.compute_initial_point(seed=0)
assert initial_point["u_interval__"] == transform_fwd(rv, 0.75)
assert not hasattr(rv.tag, "test_value")
pass
def test_valid_string_strategy(self):
with pm.Model() as pmodel:
pm.Uniform("x", 0, 1, size=2, initval="unknown")
with pytest.raises(ValueError, match="Invalid string strategy: unknown"):
pmodel.compute_initial_point(seed=0)
class TestInitvalEvaluation:
def test_make_initial_point_fns_per_chain_checks_kwargs(self):
with pm.Model() as pmodel:
A = pm.Uniform("A", 0, 1, initval=0.5)
B = pm.Uniform("B", lower=A, upper=1.5, transform=None, initval="moment")
with pytest.raises(ValueError, match="Number of initval dicts"):
make_initial_point_fns_per_chain(
model=pmodel,
overrides=[{}, None],
jitter_rvs={},
chains=1,
)
pass
def test_dependent_initvals(self):
with pm.Model() as pmodel:
L = pm.Uniform("L", 0, 1, initval=0.5)
U = pm.Uniform("U", lower=9, upper=10, initval=9.5)
B1 = pm.Uniform("B1", lower=L, upper=U, initval=5)
B2 = pm.Uniform("B2", lower=L, upper=U, initval=(L + U) / 2)
ip = pmodel.compute_initial_point(seed=0)
assert ip["L_interval__"] == 0
assert ip["U_interval__"] == 0
assert ip["B1_interval__"] == 0
assert ip["B2_interval__"] == 0
# Modify initval of L and re-evaluate
pmodel.initial_values[U] = 9.9
ip = pmodel.compute_initial_point(seed=0)
assert ip["B1_interval__"] < 0
assert ip["B2_interval__"] == 0
pass
def test_nested_initvals(self):
# See issue #5168
with pm.Model() as pmodel:
one = pm.LogNormal("one", mu=np.log(1), sigma=1e-5, initval="prior")
two = pm.Lognormal("two", mu=np.log(one * 2), sigma=1e-5, initval="prior")
three = pm.LogNormal("three", mu=np.log(two * 2), sigma=1e-5, initval="prior")
four = pm.LogNormal("four", mu=np.log(three * 2), sigma=1e-5, initval="prior")
five = pm.LogNormal("five", mu=np.log(four * 2), sigma=1e-5, initval="prior")
six = pm.LogNormal("six", mu=np.log(five * 2), sigma=1e-5, initval="prior")
ip_vals = list(make_initial_point_fn(model=pmodel, return_transformed=True)(0).values())
assert np.allclose(np.exp(ip_vals), [1, 2, 4, 8, 16, 32], rtol=1e-3)
ip_vals = list(make_initial_point_fn(model=pmodel, return_transformed=False)(0).values())
assert np.allclose(ip_vals, [1, 2, 4, 8, 16, 32], rtol=1e-3)
pmodel.initial_values[four] = 1
ip_vals = list(make_initial_point_fn(model=pmodel, return_transformed=True)(0).values())
assert np.allclose(np.exp(ip_vals), [1, 2, 4, 1, 2, 4], rtol=1e-3)
ip_vals = list(make_initial_point_fn(model=pmodel, return_transformed=False)(0).values())
assert np.allclose(ip_vals, [1, 2, 4, 1, 2, 4], rtol=1e-3)
def test_initval_resizing(self):
with pm.Model() as pmodel:
data = aesara.shared(np.arange(4))
rv = pm.Uniform("u", lower=data, upper=10, initval="prior")
ip = pmodel.compute_initial_point(seed=0)
assert np.shape(ip["u_interval__"]) == (4,)
data.set_value(np.arange(5))
ip = pmodel.compute_initial_point(seed=0)
assert np.shape(ip["u_interval__"]) == (5,)
pass
def test_seeding(self):
with pm.Model() as pmodel:
pm.Normal("A", initval="prior")
pm.Uniform("B", initval="prior")
pm.Normal("C", initval="moment")
ip1 = pmodel.compute_initial_point(seed=42)
ip2 = pmodel.compute_initial_point(seed=42)
ip3 = pmodel.compute_initial_point(seed=15)
assert ip1 == ip2
assert ip3 != ip2
pass
def test_untransformed_initial_point(self):
with pm.Model() as pmodel:
pm.Flat("A", initval="moment")
pm.HalfFlat("B", initval="moment")
fn = make_initial_point_fn(model=pmodel, jitter_rvs={}, return_transformed=False)
iv = fn(0)
assert iv["A"] == 0
assert iv["B"] == 1
pass
def test_adds_jitter(self):
with pm.Model() as pmodel:
A = pm.Flat("A", initval="moment")
B = pm.HalfFlat("B", initval="moment")
C = pm.Normal("C", mu=A + B, initval="moment")
fn = make_initial_point_fn(model=pmodel, jitter_rvs={B}, return_transformed=True)
iv = fn(0)
# Moment of the Flat is 0
assert iv["A"] == 0
# Moment of the HalfFlat is 1, but HalfFlat is log-transformed by default
# so the transformed initial value with jitter will be zero plus a jitter between [-1, 1].
b_transformed = iv["B_log__"]
b_untransformed = transform_back(B, b_transformed)
assert b_transformed != 0
assert -1 < b_transformed < 1
# C is centered on 0 + untransformed initval of B
assert np.isclose(iv["C"], np.array(0 + b_untransformed, dtype=aesara.config.floatX))
# Test jitter respects seeding.
assert fn(0) == fn(0)
assert fn(0) != fn(1)
def test_respects_overrides(self):
with pm.Model() as pmodel:
A = pm.Flat("A", initval="moment")
B = pm.HalfFlat("B", initval=4)
C = pm.Normal("C", mu=A + B, initval="moment")
fn = make_initial_point_fn(
model=pmodel,
jitter_rvs={},
return_transformed=True,
overrides={
A: at.as_tensor(2, dtype=int),
B: 3,
C: 5,
},
)
iv = fn(0)
assert iv["A"] == 2
assert np.isclose(iv["B_log__"], np.log(3))
assert iv["C"] == 5
def test_string_overrides_work(self):
with pm.Model() as pmodel:
A = pm.Flat("A", initval=10)
B = pm.HalfFlat("B", initval=10)
C = pm.HalfFlat("C", initval=10)
fn = make_initial_point_fn(
model=pmodel,
jitter_rvs={},
return_transformed=True,
overrides={
"A": 1,
"B": 1,
"C_log__": 0,
},
)
iv = fn(0)
assert iv["A"] == 1
assert np.isclose(iv["B_log__"], 0)
assert iv["C_log__"] == 0
class TestMoment:
def test_basic(self):
# Standard distributions
rv = pm.Normal.dist(mu=2.3)
np.testing.assert_allclose(get_moment(rv).eval(), 2.3)
# Special distributions
rv = pm.Flat.dist()
assert get_moment(rv).eval() == np.zeros(())
rv = pm.HalfFlat.dist()
assert get_moment(rv).eval() == np.ones(())
rv = pm.Flat.dist(size=(2, 4))
assert np.all(get_moment(rv).eval() == np.zeros((2, 4)))
rv = pm.HalfFlat.dist(size=(2, 4))
assert np.all(get_moment(rv).eval() == np.ones((2, 4)))
@pytest.mark.parametrize("rv_cls", [pm.Flat, pm.HalfFlat])
def test_numeric_moment_shape(self, rv_cls):
rv = rv_cls.dist(shape=(2,))
assert not hasattr(rv.tag, "test_value")
assert tuple(get_moment(rv).shape.eval()) == (2,)
@pytest.mark.parametrize("rv_cls", [pm.Flat, pm.HalfFlat])
def test_symbolic_moment_shape(self, rv_cls):
s = at.scalar()
rv = rv_cls.dist(shape=(s,))
assert not hasattr(rv.tag, "test_value")
assert tuple(get_moment(rv).shape.eval({s: 4})) == (4,)
pass
@pytest.mark.parametrize("rv_cls", [pm.Flat, pm.HalfFlat])
def test_moment_from_dims(self, rv_cls):
with pm.Model(
coords={
"year": [2019, 2020, 2021, 2022],
"city": ["Bonn", "Paris", "Lisbon"],
}
):
rv = rv_cls("rv", dims=("year", "city"))
assert not hasattr(rv.tag, "test_value")
assert tuple(get_moment(rv).shape.eval()) == (4, 3)
pass
def test_moment_not_implemented_fallback(self):
class MyNormalRV(RandomVariable):
name = "my_normal"
ndim_supp = 0
ndims_params = [0, 0]
dtype = "floatX"
@classmethod
def rng_fn(cls, rng, mu, sigma, size):
return np.pi
class MyNormalDistribution(pm.Normal):
rv_op = MyNormalRV()
with pm.Model() as m:
x = MyNormalDistribution("x", 0, 1, initval="moment")
with pytest.warns(
UserWarning, match="Moment not defined for variable x of type MyNormalRV"
):
res = m.compute_initial_point()
assert np.isclose(res["x"], np.pi)
def test_pickling_issue_5090():
with pm.Model() as model:
pm.Normal("x", initval="prior")
ip_before = model.compute_initial_point(seed=5090)
model = cloudpickle.loads(cloudpickle.dumps(model))
ip_after = model.compute_initial_point(seed=5090)
assert ip_before["x"] == ip_after["x"]
|
the-stack_0_26108
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import inspect
import asyncio
from aioutils import Bag, OrderedBag, Group
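# These tests drive aioutils through a generator wrapper: test_bag checks that Bag yields
# every result no matter in which order the coroutines finish, while test_orderedbag checks
# that OrderedBag preserves the original spawn order.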
def test_bag():
chars = 'abcdefg'
def g():
b = Bag()
@asyncio.coroutine
def f(c):
yield from asyncio.sleep(random.random()/10)
b.put(c)
def schedule():
for c in chars:
b.spawn(f(c))
b.join()
b.schedule(schedule)
yield from b.yielder()
chars2 = g()
assert inspect.isgenerator(chars2)
chars2 = list(chars2)
assert set(chars) == set(chars2)
def test_orderedbag():
chars = 'abcdefg'
def g():
b = OrderedBag(Group())
@asyncio.coroutine
def f(c):
yield from asyncio.sleep(random.random()*0.1)
b.put(c)
def schedule():
for c in chars:
b.spawn(f(c))
b.join()
b.schedule(schedule)
yield from b.yielder()
chars2 = g()
assert inspect.isgenerator(chars2)
chars2 = list(chars2)
for c1, c2 in zip(chars, chars2):
assert c1 == c2
if __name__ == '__main__':
test_bag()
test_orderedbag()
|
the-stack_0_26116
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.util.html import get_content
from ykdl.util.match import matchall, match1
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from ykdl.compact import urlencode, compact_bytes
from .util import get_macid, md5, md5x, cmd5x
import json
import time
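# The helpers below query three different iqiyi playback APIs (tmts, dash and vps).
# Each request is signed with a token built by the md5/md5x/cmd5x helpers before the
# JSON response is fetched and parsed.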
def gettmts(tvid, vid):
tm = int(time.time() * 1000)
key = 'd5fb4bd9d50c4be6948c97edd7254b0e'
host = 'https://cache.m.iqiyi.com'
params = {
'src': '76f90cbd92f94a2e925d83e8ccd22cb7',
'sc': md5(str(tm) + key + vid),
't': tm
}
src = '/tmts/{}/{}/?{}'.format(tvid, vid, urlencode(params))
req_url = '{}{}'.format(host, src)
html = get_content(req_url)
return json.loads(html)
def getdash(tvid, vid, bid=500):
tm = int(time.time() * 1000)
host = 'https://cache.video.iqiyi.com'
params = {
'tvid': tvid,
'bid': bid,
'vid': vid,
'src': '01010031010000000000',
'vt': 0,
'rs': 1,
'uid': '',
'ori': 'pcw',
'ps': 0,
'tm': tm,
'qd_v': 1,
'k_uid': get_macid(),
'pt': 0,
'd': 0,
's': '',
'lid': '',
'cf': '',
'ct': '',
'authKey': cmd5x('0{}{}'.format(tm, tvid)),
'k_tag': 1,
'ost': 0,
'ppt': 0,
'locale': 'zh_cn',
'pck': '',
'k_err_retries': 0,
'ut': 0
}
src = '/dash?{}'.format(urlencode(params))
vf = cmd5x(src)
req_url = '{}{}&vf={}'.format(host, src, vf)
html = get_content(req_url)
return json.loads(html)
def getvps(tvid, vid):
tm = int(time.time() * 1000)
host = 'http://cache.video.qiyi.com'
params = {
'tvid': tvid,
'vid': vid,
'v': 0,
'qypid': '{}_12'.format(tvid),
'src': '01012001010000000000',
't': tm,
'k_tag': 1,
'k_uid': get_macid(),
'rs': 1,
}
src = '/vps?{}'.format(urlencode(params))
vf = md5x(src)
req_url = '{}{}&vf={}'.format(host, src, vf)
html = get_content(req_url)
return json.loads(html)
class Iqiyi(VideoExtractor):
name = u"爱奇艺 (Iqiyi)"
ids = ['4k','BD', 'TD', 'HD', 'SD', 'LD']
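# vd_2_id inverts the quality table below: every numeric 'vd' code reported by the API is
# mapped to its stream id (e.g. 5, 18 and 600 all map to 'BD').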
vd_2_id = dict(sum([[(vd, id) for vd in vds] for id, vds in {
'4k': [10, 19],
'BD': [5, 18, 600],
'TD': [4, 17, 500],
'HD': [2, 14, 21, 300],
'SD': [1, 200],
'LD': [96, 100]
}.items()], []))
id_2_profile = {
'4k': '4k',
'BD': '1080p',
'TD': '720p',
'HD': '540p',
'SD': '360p',
'LD': '210p'
}
def prepare(self):
info = VideoInfo(self.name)
if self.url and not self.vid:
vid = matchall(self.url, ['curid=([^_]+)_([\w]+)'])
if vid:
self.vid = vid[0]
info_u = 'http://pcw-api.iqiyi.com/video/video/playervideoinfo?tvid=' + self.vid[0]
try:
info_json = json.loads(get_content(info_u))
info.title = info_json['data']['vn']
except:
self.vid = None
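# Fall back to scraping the page: extract tvId/vid (and the title) from the embedded
# player info, retrying against the canonical www.iqiyi.com URL when the ids are missing.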
def get_vid():
html = get_content(self.url)
video_info = match1(html, ":video-info='(.+?)'")
if video_info:
video_info = json.loads(video_info)
self.vid = str(video_info['tvId']), str(video_info['vid'])
info.title = video_info['name']
else:
tvid = match1(html,
'tvId:\s*"([^"]+)',
'data-video-tvId="([^"]+)',
'''\['tvid'\]\s*=\s*"([^"]+)''',
'"tvId":\s*([^,]+)')
videoid = match1(html,
'data-video-vid="([^"]+)',
'vid:\s*"([^"]+)',
'''\['vid'\]\s*=\s*"([^"]+)''',
'"vid":\s*([^,]+)')
if not (tvid and videoid):
url = match1(html, '(www\.iqiyi\.com/v_\w+\.html)')
if url:
self.url = 'https://' + url
return get_vid()
self.vid = (tvid, videoid)
info.title = match1(html, '<title>([^<]+)').split('-')[0]
if self.url and not self.vid:
get_vid()
tvid, vid = self.vid
assert tvid and vid, 'can\'t play this video!!'
def push_stream_vd(vs):
vd = vs['vd']
stream = self.vd_2_id[vd]
if not stream in info.streams:
info.stream_types.append(stream)
elif int(vd) < 10:
return
m3u8 = vs['m3utx']
stream_profile = self.id_2_profile[stream]
info.streams[stream] = {
'video_profile': stream_profile,
'container': 'm3u8',
'src': [m3u8],
'size': 0
}
def push_stream_bid(bid, container, fs_array, size):
stream = self.vd_2_id[bid]
if stream in info.streams:
return
real_urls = []
for seg_info in fs_array:
url = url_prefix + seg_info['l']
json_data = json.loads(get_content(url))
down_url = json_data['l']
real_urls.append(down_url)
info.stream_types.append(stream)
stream_profile = self.id_2_profile[stream]
info.streams[stream] = {
'video_profile': stream_profile,
'container': container,
'src': real_urls,
'size': size
}
try:
# try use tmts first
# less http requests, get results quickly
tmts_data = gettmts(tvid, vid)
self.logger.debug('tmts_data:\n' + str(tmts_data))
assert tmts_data['code'] == 'A00000', 'can\'t play this video!!'
vs_array = tmts_data['data']['vidl']
for vs in vs_array:
push_stream_vd(vs)
vip_conf = tmts_data['data'].get('ctl', {}).get('configs')
if vip_conf:
for vds in (('10', '19'), ('18', '5')):
for vd in vds:
if vd in vip_conf:
tmts_data = gettmts(tvid, vip_conf[vd]['vid'])
if tmts_data['code'] == 'A00000':
push_stream_vd(tmts_data['data'])
break
except:
try:
# use vps as preferred fallback
vps_data = getvps(tvid, vid)
self.logger.debug('vps_data:\n' + str(vps_data))
assert vps_data['code'] == 'A00000', 'can\'t play this video!!'
url_prefix = vps_data['data']['vp']['du']
vs_array = vps_data['data']['vp']['tkl'][0]['vs']
for vs in vs_array:
bid = vs['bid']
fs_array = vs['fs']
size = vs['vsize']
push_stream_bid(bid, 'flv', fs_array, size)
except:
# use dash as fallback
for bid in (500, 300, 200, 100):
dash_data = getdash(tvid, vid, bid)
self.logger.debug('dash_data:\n' + str(dash_data))
assert dash_data['code'] == 'A00000', 'can\'t play this video!!'
url_prefix = dash_data['data']['dd']
streams = dash_data['data']['program']['video']
for stream in streams:
if 'fs' in stream:
_bid = stream['bid']
container = stream['ff']
fs_array = stream['fs']
size = stream['vsize']
break
push_stream_bid(_bid, container, fs_array, size)
info.stream_types = sorted(info.stream_types, key=self.ids.index)
return info
def prepare_list(self):
html = get_content(self.url)
return matchall(html, ['data-tvid=\"([^\"]+)\" data-vid=\"([^\"]+)\"'])
site = Iqiyi()
|
the-stack_0_26117
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Modules Config Mapping according to specific backend."""
import copy
import zeus
from zeus.common.config import Config
class ConfigBackendMapping(object):
"""Config mapping according to backend.
:param module_type: module type in trainer, 'optim', 'loss' or 'lr_scheduler'
:type module_type: str
"""
def __init__(self, type_dict, params_dict):
"""Init config backend mapping."""
self.type_mapping_dict = copy.deepcopy(type_dict)
self.params_mapping_dict = copy.deepcopy(params_dict)
self.backend_type = None
if zeus.is_torch_backend():
self.backend_type = 'torch'
elif zeus.is_tf_backend():
self.backend_type = 'tf'
elif zeus.is_ms_backend():
self.backend_type = 'ms'
else:
raise ValueError('Backend type must be torch, tf or ms.')
def backend_mapping(self, config):
"""Map config to specific backend.
:param config: original config from config file
:type config: Config or dict
:return: config after mapping to backend
:rtype: Config
"""
origin_config = Config(copy.deepcopy(config))
type = origin_config.type
if type not in self.type_mapping_dict:
return config
params = origin_config.get('params', {})
backend_config = Config()
backend_config.type = self.type_mapping_dict[type][self.backend_type]
backend_config.params = Config()
mapping_params = self.params_mapping_dict.get(type, {})
for key, value in params.items():
if key in mapping_params:
mapping_key = mapping_params[key][self.backend_type]
else:
mapping_key = key
if mapping_key is not None:
if isinstance(value, dict) and 'type' in value:
backend_config.params[mapping_key] = self.backend_mapping(value)
else:
backend_config.params[mapping_key] = value
return Config(backend_config)
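# Minimal usage sketch (the type/param tables here are hypothetical, not shipped defaults;
# a zeus backend must already be set):
#   mapping = ConfigBackendMapping(
#       type_dict={'Adam': {'torch': 'Adam', 'tf': 'AdamOptimizer', 'ms': 'Adam'}},
#       params_dict={'Adam': {'lr': {'torch': 'lr', 'tf': 'learning_rate', 'ms': 'learning_rate'}}})
#   backend_config = mapping.backend_mapping({'type': 'Adam', 'params': {'lr': 0.001}})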
|
the-stack_0_26118
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Repository datasets management."""
import re
import shutil
import urllib
from collections import OrderedDict
from pathlib import Path
from typing import List, Optional
import click
import git
import patoolib
import requests
from renku.core import errors
from renku.core.commands.format.dataset_files import DATASET_FILES_FORMATS
from renku.core.commands.format.dataset_tags import DATASET_TAGS_FORMATS
from renku.core.commands.format.datasets import DATASETS_FORMATS
from renku.core.commands.providers import ProviderFactory
from renku.core.errors import DatasetNotFound, InvalidAccessToken, OperationError, ParameterError, UsageError
from renku.core.management import LocalClient
from renku.core.management.command_builder import inject
from renku.core.management.command_builder.command import Command
from renku.core.management.datasets import DATASET_METADATA_PATHS
from renku.core.metadata.immutable import DynamicProxy
from renku.core.models.dataset import (
Dataset,
DatasetDetailsJson,
DatasetsProvenance,
DatasetTag,
Url,
generate_default_name,
get_dataset_data_dir,
)
from renku.core.models.provenance.agent import Person
from renku.core.models.tabulate import tabulate
from renku.core.utils import communication
from renku.core.utils.doi import is_doi
from renku.core.utils.urls import remove_credentials
@inject.autoparams()
def _list_datasets(datasets_provenance: DatasetsProvenance, format=None, columns=None):
"""List all datasets."""
if format is None:
return list(datasets_provenance.datasets)
if format not in DATASETS_FORMATS:
raise UsageError("format not supported")
return DATASETS_FORMATS[format](datasets_provenance.datasets, columns=columns)
def list_datasets():
"""Command for listing datasets."""
return Command().command(_list_datasets).with_database().require_migration()
@inject.autoparams()
def create_dataset_helper(
name,
client: LocalClient,
title=None,
description="",
creators=None,
keywords=None,
images=None,
safe_image_paths=None,
):
"""Create a dataset in the repository."""
if not creators:
creators = [Person.from_git(client.repo)]
else:
creators, _ = _construct_creators(creators)
dataset = client.create_dataset(
name=name,
title=title,
description=description,
creators=creators,
keywords=keywords,
images=images,
safe_image_paths=safe_image_paths,
)
return dataset
def create_dataset():
"""Return a command for creating an empty dataset in the current repo."""
command = Command().command(create_dataset_helper).lock_dataset().with_database(write=True)
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
@inject.autoparams()
def _edit_dataset(
name,
title,
description,
creators,
client: LocalClient,
datasets_provenance: DatasetsProvenance,
keywords=None,
images=None,
skip_image_update=False,
safe_image_paths=None,
):
"""Edit dataset metadata."""
possible_updates = {
"creators": creators,
"description": description,
"keywords": keywords,
"title": title,
}
creators, no_email_warnings = _construct_creators(creators, ignore_email=True)
title = title.strip() if isinstance(title, str) else ""
dataset = client.get_dataset(name=name)
updated = {k: v for k, v in possible_updates.items() if v}
if updated:
dataset.update_metadata(creators=creators, description=description, keywords=keywords, title=title)
if skip_image_update:
images_updated = False
else:
safe_image_paths.append(client.path)
images_updated = client.set_dataset_images(dataset, images, safe_image_paths)
if images_updated:
updated["images"] = [{"content_url": i.content_url, "position": i.position} for i in dataset.images]
if not updated:
return [], no_email_warnings
datasets_provenance.add_or_update(dataset, creator=Person.from_client(client))
return updated, no_email_warnings
def edit_dataset():
"""Command for editing dataset metadata."""
command = Command().command(_edit_dataset).lock_dataset().with_database(write=True)
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
@inject.autoparams()
def _show_dataset(name, client: LocalClient):
"""Show detailed dataset information."""
dataset = client.get_dataset(name)
return DatasetDetailsJson().dump(dataset)
def show_dataset():
"""Command for showing detailed dataset information."""
return Command().command(_show_dataset).with_database().require_migration()
def _construct_creators(creators, ignore_email=False):
from collections.abc import Iterable
creators = creators or ()
if not isinstance(creators, Iterable) or isinstance(creators, str):
raise errors.ParameterError("Invalid type")
people = []
no_email_warnings = []
for creator in creators:
if isinstance(creator, str):
person = Person.from_string(creator)
elif isinstance(creator, dict):
person = Person.from_dict(creator)
else:
raise errors.ParameterError("Invalid type")
message = 'A valid format is "Name <email> [affiliation]"'
if not person.name: # pragma: no cover
raise errors.ParameterError(f'Name is invalid: "{creator}".\n{message}')
if not person.email:
if not ignore_email: # pragma: no cover
raise errors.ParameterError(f'Email is invalid: "{creator}".\n{message}')
else:
no_email_warnings.append(creator)
people.append(person)
return people, no_email_warnings
@inject.autoparams()
def _add_to_dataset(
urls,
name,
client: LocalClient,
external=False,
force=False,
overwrite=False,
create=False,
sources=(),
destination="",
ref=None,
with_metadata=None,
extract=False,
all_at_once=False,
destination_names=None,
total_size=None,
repository=None,
clear_files_before=False,
):
"""Add data to a dataset."""
if len(urls) == 0:
raise UsageError("No URL is specified")
if sources and len(urls) > 1:
raise UsageError('Cannot use "--source" with multiple URLs.')
if total_size is None:
total_size = 0
for url in urls:
try:
with requests.get(url, stream=True, allow_redirects=True) as r:
total_size += int(r.headers.get("content-length", 0))
except requests.exceptions.RequestException:
pass
usage = shutil.disk_usage(client.path)
if total_size > usage.free:
mb = 2 ** 20
message = "Insufficient disk space (required: {:.2f} MB" "/available: {:.2f} MB). ".format(
total_size / mb, usage.free / mb
)
raise OperationError(message)
try:
with client.with_dataset(name=name, create=create) as dataset:
client.add_data_to_dataset(
dataset,
urls=urls,
external=external,
force=force,
overwrite=overwrite,
sources=sources,
destination=destination,
ref=ref,
extract=extract,
all_at_once=all_at_once,
destination_names=destination_names,
repository=repository,
clear_files_before=clear_files_before,
)
if with_metadata:
dataset.update_metadata_from(with_metadata)
return dataset
except DatasetNotFound:
raise DatasetNotFound(
message='Dataset "{0}" does not exist.\n'
'Use "renku dataset create {0}" to create the dataset or retry '
'"renku dataset add {0}" command with "--create" option for '
"automatic dataset creation.".format(name)
)
except (FileNotFoundError, git.exc.NoSuchPathError) as e:
raise ParameterError("Could not find paths/URLs: \n{0}".format("\n".join(urls))) from e
def add_to_dataset():
"""Create a command for adding data to datasets."""
command = Command().command(_add_to_dataset).lock_dataset().with_database(write=True)
return command.require_migration().with_commit(raise_if_empty=True, commit_only=DATASET_METADATA_PATHS)
@inject.autoparams()
def _list_files(
client: LocalClient, datasets=None, creators=None, include=None, exclude=None, format=None, columns=None
):
"""List dataset files."""
records = _filter(names=datasets, creators=creators, include=include, exclude=exclude, immutable=True)
for record in records:
record.title = record.dataset.title
record.dataset_name = record.dataset.name
record.dataset_id = record.dataset.id
record.creators_csv = record.dataset.creators_csv
record.creators_full_csv = record.dataset.creators_full_csv
record.full_path = client.path / record.entity.path
record.path = record.entity.path
record.name = Path(record.entity.path).name
record.added = record.date_added
if format is None:
return records
if format not in DATASET_FILES_FORMATS:
raise UsageError("format not supported")
return DATASET_FILES_FORMATS[format](records, columns=columns)
def list_files():
"""Command for listing dataset files."""
return Command().command(_list_files).with_database().require_migration()
@inject.autoparams()
def _file_unlink(name, include, exclude, client: LocalClient, datasets_provenance: DatasetsProvenance, yes=False):
"""Remove matching files from a dataset."""
if not include and not exclude:
raise ParameterError(
(
"include or exclude filters not found.\n"
"Check available filters with 'renku dataset unlink --help'\n"
"Hint: 'renku dataset unlink my-dataset -I path'"
)
)
dataset = client.get_dataset(name=name)
if not dataset:
raise ParameterError("Dataset does not exist.")
records = _filter(names=[name], include=include, exclude=exclude)
if not records:
raise ParameterError("No records found.")
if not yes:
prompt_text = (
f'You are about to remove following from "{name}" dataset.'
+ "\n"
+ "\n".join([str(record.entity.path) for record in records])
+ "\nDo you wish to continue?"
)
communication.confirm(prompt_text, abort=True, warning=True)
for file in records:
dataset.unlink_file(file.entity.path)
datasets_provenance.add_or_update(dataset, creator=Person.from_client(client))
return records
def file_unlink():
"""Command for removing matching files from a dataset."""
command = Command().command(_file_unlink).lock_dataset().with_database(write=True)
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
@inject.autoparams()
def _remove_dataset(name, client: LocalClient, datasets_provenance: DatasetsProvenance):
"""Delete a dataset."""
dataset = client.get_dataset(name=name, strict=True)
datasets_provenance.remove(dataset=dataset)
def remove_dataset():
"""Command for deleting a dataset."""
command = Command().command(_remove_dataset).lock_dataset().with_database(write=True)
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
@inject.autoparams()
def _export_dataset(name, provider_name, publish, tag, client: LocalClient, **kwargs):
"""Export data to 3rd party provider.
:raises: ``ValueError``, ``HTTPError``, ``InvalidAccessToken``,
``DatasetNotFound``
"""
provider_name = provider_name.lower()
# TODO: all these callbacks are ugly, improve in #737
config_key_secret = "access_token"
dataset_ = client.get_dataset(name, strict=True)
try:
provider = ProviderFactory.from_id(provider_name)
except KeyError:
raise ParameterError("Unknown provider.")
provider.set_parameters(**kwargs)
selected_tag = None
selected_commit = client.repo.head.commit
if tag:
selected_tag = next((t for t in dataset_.tags if t.name == tag), None)
if not selected_tag:
raise ValueError("Tag {} not found".format(tag))
selected_commit = selected_tag.commit
elif dataset_.tags and len(dataset_.tags) > 0:
tag_result = _prompt_tag_selection(dataset_.tags)
if tag_result:
selected_tag = tag_result
selected_commit = tag_result.commit
# FIXME: This won't work and needs to be fixed in #renku-python/issues/2210
# If the tag is created automatically for imported datasets, it
# does not have the dataset yet and we need to use the next commit
with client.with_commit(selected_commit):
test_ds = client.get_dataset(name)
if not test_ds:
commits = client.dataset_commits(dataset_)
next_commit = selected_commit
for commit in commits:
if commit.hexsha == selected_commit:
selected_commit = next_commit.hexsha
break
next_commit = commit
with client.with_commit(selected_commit):
dataset_ = client.get_dataset(name)
if not dataset_:
raise DatasetNotFound(name=name)
dataset_.data_dir = get_dataset_data_dir(client, dataset_)
access_token = client.get_value(provider_name, config_key_secret)
exporter = provider.get_exporter(dataset_, access_token=access_token)
if access_token is None:
access_token = _prompt_access_token(exporter)
if access_token is None or len(access_token) == 0:
raise InvalidAccessToken()
client.set_value(provider_name, config_key_secret, access_token, global_only=True)
exporter.set_access_token(access_token)
try:
destination = exporter.export(publish=publish, tag=selected_tag, client=client)
except errors.AuthenticationError:
client.remove_value(provider_name, config_key_secret, global_only=True)
raise
communication.echo(f"Exported to: {destination}")
def export_dataset():
"""Command for exporting a dataset to 3rd party provider."""
command = Command().command(_export_dataset).with_database()
return command.require_migration().require_clean()
@inject.autoparams()
def _import_dataset(
uri, client: LocalClient, name="", extract=False, yes=False, previous_dataset=None, delete=False, gitlab_token=None
):
"""Import data from a 3rd party provider or another renku project."""
provider, err = ProviderFactory.from_uri(uri)
if err and provider is None:
raise ParameterError(f"Could not process '{uri}'.\n{err}")
try:
record = provider.find_record(uri, gitlab_token=gitlab_token)
dataset = record.as_dataset(client)
files = record.files_info
total_size = 0
if not yes:
communication.echo(
tabulate(
files,
headers=OrderedDict(
(
("checksum", "checksum"),
("filename", "name"),
("size_in_mb", "size (mb)"),
("filetype", "type"),
)
),
floatfmt=".2f",
)
)
text_prompt = "Do you wish to download this version?"
if not record.is_last_version(uri):
text_prompt = f"Newer version found at {record.latest_uri}\n{text_prompt}"
communication.confirm(text_prompt, abort=True, warning=True)
for file_ in files:
if file_.size_in_mb is not None:
total_size += file_.size_in_mb
total_size *= 2 ** 20
except KeyError as e:
raise ParameterError(f"Could not process '{uri}'.\nUnable to fetch metadata: {e}")
except LookupError as e:
raise ParameterError(f"Could not process '{uri}'.\nReason: {e}")
if not files:
raise ParameterError(f"Dataset '{uri}' has no files.")
if not provider.is_git_based:
if not name:
name = generate_default_name(dataset.title, dataset.version)
dataset.same_as = Url(url_id=remove_credentials(uri))
if is_doi(dataset.identifier):
dataset.same_as = Url(url_str=urllib.parse.urljoin("https://doi.org", dataset.identifier))
urls, names = zip(*[(f.source, f.filename) for f in files])
dataset = _add_to_dataset(
urls=urls,
name=name,
create=not previous_dataset,
with_metadata=dataset,
force=True,
extract=extract,
all_at_once=True,
destination_names=names,
total_size=total_size,
overwrite=True,
clear_files_before=True,
)
if previous_dataset:
dataset = _update_metadata(dataset, previous_dataset, delete, dataset.same_as)
if dataset.version:
tag_name = re.sub("[^a-zA-Z0-9.-_]", "_", dataset.version)
_tag_dataset_helper(
dataset=dataset,
tag=tag_name,
description=f"Tag {dataset.version} created by renku import",
update_provenance=False,
)
else:
name = name or dataset.name
dataset.same_as = Url(url_id=record.latest_uri)
if not dataset.data_dir:
raise OperationError(f"Data directory for dataset must be set: {dataset.name}")
sources = []
if record.datadir_exists:
sources = [f"{dataset.data_dir}/**"]
for file in dataset.files:
try:
Path(file.entity.path).relative_to(dataset.data_dir)
except ValueError: # Files that are not in dataset's data directory
sources.append(file.entity.path)
new_dataset = _add_to_dataset(
urls=[record.project_url],
name=name,
sources=sources,
with_metadata=dataset,
create=not previous_dataset,
overwrite=True,
repository=record.repository,
clear_files_before=True,
)
if previous_dataset:
_update_metadata(new_dataset, previous_dataset, delete, dataset.same_as)
if provider.supports_images:
record.import_images(dataset)
def import_dataset():
"""Create a command for importing datasets."""
command = Command().command(_import_dataset).lock_dataset().with_database(write=True)
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
@inject.autoparams()
def _update_metadata(new_dataset: Dataset, previous_dataset, delete, same_as, client: LocalClient):
"""Update metadata and remove files that exists in ``previous_dataset`` but not in ``new_dataset``."""
current_paths = set(str(f.entity.path) for f in new_dataset.files)
# NOTE: remove files not present in the dataset anymore
for file in previous_dataset.files:
if str(file.entity.path) in current_paths:
continue
if delete:
client.remove_file(client.path / file.entity.path)
new_dataset.same_as = same_as
# NOTE: Remove derived_from because this is an updated and imported dataset
new_dataset.derived_from = None
return new_dataset
@inject.autoparams()
def _update_datasets(names, creators, include, exclude, ref, delete, client: LocalClient, external=False):
"""Update dataset files."""
ignored_datasets = []
if (include or exclude) and names and any(d.same_as for d in client.datasets.values() if d.name in names):
raise errors.UsageError("--include/--exclude is incompatible with datasets created by 'renku dataset import'")
names_provided = bool(names)
# NOTE: update imported datasets
if not include and not exclude:
for dataset in client.datasets.values():
if names and dataset.name not in names or not dataset.same_as:
continue
uri = dataset.same_as.url
if isinstance(uri, dict):
uri = uri.get("@id")
provider, err = ProviderFactory.from_uri(uri)
if not provider:
continue
record = provider.find_record(uri)
if record.is_last_version(uri) and record.version == dataset.version:
continue
uri = record.latest_uri
# NOTE: set extract to false if there are any archives present in the dataset
extract = True
for f in dataset.files:
try:
patoolib.get_archive_format(f.entity.path)
except patoolib.util.PatoolError:
continue
else:
extract = False
break
_import_dataset(
uri=uri, name=dataset.name, extract=extract, yes=True, previous_dataset=dataset, delete=delete
)
communication.echo(f"Updated dataset '{dataset.name}' from remote provider")
if names:
names.remove(dataset.name)
ignored_datasets.append(dataset.name)
else:
ignored_datasets = [d.name for d in client.datasets.values() if d.same_as]
if names_provided and not names:
return
records = _filter(names=names, creators=creators, include=include, exclude=exclude, ignore=ignored_datasets)
if not records:
if ignored_datasets:
return
raise ParameterError("No files matched the criteria.")
possible_updates = []
unique_remotes = set()
external_files = []
local_files = []
for file in records:
if file.based_on:
possible_updates.append(file)
unique_remotes.add(file.based_on.url)
elif file.is_external:
external_files.append(file)
else:
local_files.append(file)
if ref and len(unique_remotes) > 1:
raise ParameterError(
"Cannot use '--ref' with more than one Git repository.\n"
"Limit list of files to be updated to one repository. See 'renku dataset update -h' for more information."
)
if external_files:
if external:
client.update_external_files(external_files)
else:
communication.echo("To update external files run update command with '--external' flag.")
updated_files = []
deleted_files = []
if possible_updates:
updated_files, deleted_files = client.update_dataset_git_files(files=possible_updates, ref=ref, delete=delete)
if local_files:
updated, deleted = client.update_dataset_local_files(records=local_files, delete=delete)
updated_files.extend(updated)
deleted_files.extend(deleted)
if deleted_files and not delete:
communication.echo("Some files are deleted. To also delete them from datasets' metadata use '--delete' flag.")
message = f"Updated {len(updated_files)} files"
if delete:
message += f" and deleted {len(deleted_files)} files"
communication.echo(message)
def update_datasets():
"""Command for updating datasets."""
command = Command().command(_update_datasets).lock_dataset().with_database(write=True)
return command.require_migration().require_clean().with_commit(commit_only=DATASET_METADATA_PATHS)
def _include_exclude(file_path, include=None, exclude=None):
"""Check if file matches one of include filters and not in exclude filter.
:param file_path: Path to the file.
:param include: Tuple containing patterns to which include from result.
:param exclude: Tuple containing patterns to which exclude from result.
"""
if exclude is not None and exclude:
for pattern in exclude:
if file_path.match(pattern):
return False
if include is not None and include:
for pattern in include:
if file_path.match(pattern):
return True
return False
return True
@inject.autoparams()
def _filter(
client: LocalClient, names=None, creators=None, include=None, exclude=None, ignore=None, immutable=False
) -> List[DynamicProxy]:
"""Filter dataset files by specified filters.
:param names: Filter by specified dataset names.
:param creators: Filter by creators.
:param include: Include files matching file pattern.
:param exclude: Exclude files matching file pattern.
:param ignore: Ignored datasets.
:param immutable: Return immutable copies of dataset objects.
"""
if isinstance(creators, str):
creators = set(creators.split(","))
if isinstance(creators, list) or isinstance(creators, tuple):
creators = set(creators)
records = []
unused_names = set(names)
for dataset in client.datasets.values():
if not immutable:
dataset = dataset.copy()
if (not names or dataset.name in names) and (not ignore or dataset.name not in ignore):
if unused_names:
unused_names.remove(dataset.name)
for file in dataset.files:
record = DynamicProxy(file)
record.dataset = dataset
record.client = client
path = Path(record.entity.path)
match = _include_exclude(path, include, exclude)
if creators:
dataset_creators = {c.name for c in dataset.creators}
match = match and creators.issubset(dataset_creators)
if match:
records.append(record)
if unused_names:
unused_names = ", ".join(unused_names)
raise ParameterError(f"Dataset does not exist: {unused_names}")
return sorted(records, key=lambda r: r.date_added)
@inject.autoparams()
def _tag_dataset(name, tag, description, client: LocalClient, update_provenance=True, force=False):
"""Creates a new tag for a dataset."""
dataset = client.get_dataset(name, strict=True)
_tag_dataset_helper(
dataset=dataset, tag=tag, description=description, update_provenance=update_provenance, force=force
)
@inject.autoparams()
def _tag_dataset_helper(
dataset,
tag,
description,
client: LocalClient,
datasets_provenance: DatasetsProvenance,
update_provenance=True,
force=False,
):
try:
client.add_dataset_tag(dataset, tag, description, force)
except ValueError as e:
raise ParameterError(e)
else:
if update_provenance:
datasets_provenance.add_or_update(dataset)
def tag_dataset():
"""Command for creating a new tag for a dataset."""
command = Command().command(_tag_dataset).lock_dataset().with_database(write=True)
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
@inject.autoparams()
def _remove_dataset_tags(name, tags, client: LocalClient, datasets_provenance: DatasetsProvenance):
"""Removes tags from a dataset."""
dataset = client.get_dataset(name, strict=True)
try:
client.remove_dataset_tags(dataset, tags)
except ValueError as e:
raise ParameterError(e)
else:
datasets_provenance.add_or_update(dataset)
def remove_dataset_tags():
"""Command for removing tags from a dataset."""
command = Command().command(_remove_dataset_tags).lock_dataset().with_database(write=True)
return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
@inject.autoparams()
def _list_tags(name, format, client: LocalClient):
"""List all tags for a dataset."""
dataset = client.get_dataset(name, strict=True)
tags = sorted(dataset.tags, key=lambda t: t.date_created)
return DATASET_TAGS_FORMATS[format](tags)
def list_tags():
"""Command for listing a dataset's tags."""
return Command().command(_list_tags).with_database().require_migration()
def _prompt_access_token(exporter):
"""Prompt user for an access token for a provider.
:return: The new access token
"""
text_prompt = "You must configure an access token\n"
text_prompt += "Create one at: {0}\n".format(exporter.access_token_url())
text_prompt += "Access token"
return communication.prompt(text_prompt, type=str)
def _prompt_tag_selection(tags) -> Optional[DatasetTag]:
"""Prompt user to chose a tag or <HEAD>."""
# Prompt user to select a tag to export
tags = sorted(tags, key=lambda t: t.date_created)
text_prompt = "Tag to export: \n\n<HEAD>\t[1]\n"
text_prompt += "\n".join("{}\t[{}]".format(t.name, i) for i, t in enumerate(tags, start=2))
text_prompt += "\n\nTag"
selection = communication.prompt(text_prompt, type=click.IntRange(1, len(tags) + 1), default=1)
if selection > 1:
return tags[selection - 2]
return None
|
the-stack_0_26119
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from .test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = OpenAIGPTTokenizer
rust_tokenizer_class = OpenAIGPTTokenizerFast
test_rust_tokenizer = True
test_seq2seq = False
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
fp.write("\n".join(merges))
def get_input_output_texts(self, tokenizer):
return "lower newer", "lower newer"
def test_full_tokenizer(self):
tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def test_padding(self, max_length=15):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Simple input
s = "This is a simple input"
s2 = ["This is a simple input 1", "This is a simple input 2"]
p = ("This is a simple input", "This is a pair")
p2 = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(
ValueError,
tokenizer_r.batch_encode_plus,
s2,
max_length=max_length,
padding="max_length",
)
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(
ValueError,
tokenizer_r.batch_encode_plus,
p2,
max_length=max_length,
padding="max_length",
)
# tokenizer has no padding token
def test_padding_different_model_input_name(self):
pass
|
the-stack_0_26121
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os_faults.drivers.services import process
class LinuxService(process.ServiceAsProcess):
"""Linux service
Service that is defined in init.d and can be controlled by `service`
CLI tool.
**Example configuration:**
.. code-block:: yaml
services:
app:
driver: linux_service
args:
linux_service: app
grep: my_app
port: ['tcp', 4242]
parameters:
- **linux_service** - name of a service
- **grep** - regexp for grep to find process PID
- **port** - tuple with two values - protocol, port number (optional)
"""
NAME = 'linux_service'
DESCRIPTION = 'Service in init.d'
CONFIG_SCHEMA = {
'type': 'object',
'properties': {
'linux_service': {'type': 'string'},
'grep': {'type': 'string'},
'port': process.PORT_SCHEMA,
},
'required': ['grep', 'linux_service'],
'additionalProperties': False,
}
def __init__(self, *args, **kwargs):
super(LinuxService, self).__init__(*args, **kwargs)
self.linux_service = self.config['linux_service']
self.restart_cmd = 'service {} restart'.format(self.linux_service)
self.terminate_cmd = 'service {} stop'.format(self.linux_service)
self.start_cmd = 'service {} start'.format(self.linux_service)
|
the-stack_0_26122
|
"""
This file manages the view and handles the user's button clicks.
See the imported modules below for the files which handle the mathematical operations.
"""
from tkinter import *
import math
#import my modules
import basic_op
import scientific
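# basic_op handles the operation queue (myOperations) and the basic arithmetic,
# scientific handles sin/cos/tan, square root and fraction.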
# to generate a window with tkinter
window = Tk()
window.configure()
window.title("Elo's calculator")
calc_input=""
def set_message(type_mess=""):
"""
This function manages all messages shown to the user to help them use the app correctly.
"""
if type_mess=="":
mess=""
if type_mess == "negative_error" :
mess = "\n please enter value before \n - \n"
if type_mess == "how_to_use_pourcent":
mess="\n pourcent using: \n first enter the %value + % + the basis_value \n like: 50% /of 32 \n - \n"
if type_mess == "rounded_result":
mess="\n Please note: \n if necessary, result is rounded 5 numbers after comma: \n 0.12345678 -> 0.12346 \n - \n"
if type_mess == "fraction_method":
mess="\n fraction method: enter first your value, then 1/x button \n "
message_text.set(mess)
def input_key(input_value):
"""
This function concatenates the input value:
1/ clears the message if there is one
2/ catches the input value and concatenates single digits to display the desired complete number in "e"
"""
set_message()
if input_value == "pi":
input_value = math.pi
value = input_value
current = e.get()
e.delete(0,END)
e.insert(0,str(current) + str(value))
actual_value=e.get()
def input_neg_value(e_value):
"""
This function manages the "+/-" button:
1/ if there is no value in "e", sends a message about how to use it
2/ if a value has already been entered: negates the current value and displays it in "e"
"""
if e_value == "":
set_message("negative_error")
else:
current_value= e.get()
neg_value = float(current_value)*-1
e.delete(0,END)
e.insert(0,str(neg_value))
actual_value=e.get()
def get_e_value():
"""
This function catches the current value displayed in "e"
and returns it to be used by the calculator's functions
"""
actual_nbr = e.get()
return actual_nbr
def input_operator(operator,actualValue):
"""
This function catches the operator entered and the current value in "e":
1/ clears the message if there is one
2/ sends a help message when the percent operator is used
3/ adds the current value to the myOperations array by calling put_in_myOperations(actualValue) in basic_op.py
4/ prints the operator in the display area named "calc_input_text"
5/ clears the "e" display (the usual behaviour of a calculator)
6/ sends the operator to the function make_operation in basic_op.py
"""
set_message()
global calc_input
if operator=="pourcent":
set_message("how_to_use_pourcent")
basic_op.put_in_myOperations(actualValue)
calc_input += e.get()
if operator=="pourcent":
operator = "% of "
calc_input += operator
calc_input_text.set(calc_input)
e.delete(0,END)
basic_op.make_operation(operator)
def input_eqal(actualValue):
"""
This function returns the result of the operation:
1/ sets a message informing the user that the result is rounded
2/ catches the current value and adds it to the "myOperations" array
3/ stores the result of the operation in a variable named "result" and displays it in the areas named "e" and "result_text"
"""
set_message("rounded_result")
global calc_input
calc_input += e.get()
calc_input_text.set(calc_input)
basic_op.put_in_myOperations(actualValue)
result=basic_op.calc_all_myOperations()
result_text.set(result)
calc_result=str(result)
calc_input += ("="+ calc_result + "/ " )
calc_input_text.set(calc_input)
e.insert(0,result)
e.delete(0,END)
e.insert(0, result)
def input_clear():
"""
This function clears all operations and data entry:
1/ clears the message if there is one
2/ clears all the areas which display values, results and operations
3/ uses the function clear_myOperations in basic_op.py to empty the 'myOperations' array
"""
set_message()
global calc_input
calc_input = ""
calc_input_text.set(calc_input)
result_text.set(calc_input)
e.delete(0,END)
basic_op.clear_myOperations()
def input_sci_op(input_sci_value, actual_value):
"""
This function manages scientific operations such as tan, cos, sin, square root ("racine") and fraction:
1/ clears the message if there is one
2/ sets a help message when the user makes a fraction
3/ sends the current value in "e" to the function scientific_op() in scientific.py and gets the result back
4/ manages the display, replacing the string "racine" with the symbol √ and "fraction" with 1/ before showing the complete operation and the result in the relevant areas
"""
set_message()
if input_sci_value == "fraction":
set_message("fraction_method")
value = actual_value
result = scientific.scientific_op(input_sci_value, value)
if input_sci_value == "racine":
input_sci_value = "√"
elif input_sci_value == "fraction":
input_sci_value = "1/"
calc_input = input_sci_value + " (" +value + ") = "
calc_input_text.set(calc_input)
result_text.set(result)
"""
The script below creates the graphical interface:
all the common buttons of a calculator
areas that display operations and results
front-end: view settings such as width, height, background, font, etc.
"""
#row 0 : to close
button_close =Button(window, text="Close", width=10, height=2, bg="#86ADB1", command=window.quit).grid(row=0, column=5)
#row 1 : usual calculator display, here called "e"
e = Entry(window,width =35,justify=RIGHT, borderwidth=2, font=25)
e.grid(row=1,column=0,columnspan=6, padx=20, pady=20)
#row 2 & 3 : special display + row 4 :space
calc_input_text = StringVar()
Label(window, textvariable=calc_input_text,width=50,bg="#ADADAD", font=20, justify=RIGHT).grid(row=2, column=0, columnspan=6)
result_text = StringVar()
Label(window, textvariable=result_text, width=50, height=3, font=30 , bg="#656666", fg="#DBFD00", justify=RIGHT).grid(row=3, column=0, columnspan=6)
#rows 5 -> 9: buttons
button_sin =Button(window, text="sin", width=5, height=3, bg="#2D4044",fg="white", font="bold", command=lambda: input_sci_op("sin", get_e_value())).grid(row=5, column=0, pady=1, padx=1)
button_fraction =Button(window, text="1/x", width=5, height=3, bg="#2D4044",fg="white", font="bold",command=lambda: input_sci_op("fraction", get_e_value())).grid(row=5, column=1, pady=1, padx=1 )
button_pourcent =Button(window, text="%", width=5, height=3, bg="#2D4044",fg="white", font="bold",command=lambda: input_operator("pourcent", get_e_value())).grid(row=5, column=2, pady=1, padx=1)
button_negative =Button(window, text="+/-", width=5, height=3, bg="#95BFC3", font="bold",command=lambda: input_neg_value(get_e_value())).grid(row=5, column=3, pady=1, padx=1)
button_clear =Button(window, text="A/C", width=20, height=3, bg="#232424", fg="#DBFD00", font="bold",command=lambda: input_clear()).grid(row=5, column=4,columnspan=2, pady=1, padx=1 )
button_tan =Button(window, text="tan", width=5, height=3, bg="#2D4044",fg="white", font="bold",command=lambda: input_sci_op("tan", get_e_value())).grid(row=6, column=0)
button_7 =Button(window, text=" 7 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("7")).grid(row=6, column=1)
button_8 =Button(window, text=" 8 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("8")).grid(row=6, column=2)
button_9 =Button(window, text=" 9 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("9")).grid(row=6, column=3)
button_divide =Button(window, text=" / ", width=5, height=3, bg="#232424", fg="#DBFD00", font="bold",command=lambda: input_operator("/", get_e_value())).grid(row=6, column=4)
button_eqal =Button(window, text=" = ", width=13, height=16, bg="#DBFD00", font=('Helvetica',12,'bold'), command=lambda: input_eqal(get_e_value())).grid(row=6, column=5,rowspan=4 )
button_cos =Button(window, text="cos", width=5, height=3, bg="#2D4044",fg="white", font="bold",command=lambda: input_sci_op("cos", get_e_value())).grid(row=7, column=0)
button_4 =Button(window, text=" 4 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("4")).grid(row=7, column=1)
button_5 =Button(window, text=" 5 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("5")).grid(row=7, column=2)
button_6 =Button(window, text=" 6 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("6")).grid(row=7, column=3)
button_multiply =Button(window, text=" x ", width=5, height=3, bg="#232424", fg="#DBFD00", font="bold",command=lambda: input_operator("*", get_e_value())).grid(row=7, column=4)
button_pi =Button(window, text=" π ", width=5, height=3, bg="#2D4044",fg="white", font="bold",command=lambda: input_key("pi")).grid(row=8, column=0)
button_1 =Button(window, text=" 1 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("1")).grid(row=8, column=1)
button_2 =Button(window, text=" 2 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("2")).grid(row=8, column=2)
button_3 =Button(window, text=" 3 ", width=5, height=3, bg="#527F87", font="bold",command=lambda: input_key("3")).grid(row=8, column=3)
button_substract =Button(window, text=" - ", width=5, height=3, bg="#232424", fg="#DBFD00", font="bold",command=lambda: input_operator("-", get_e_value())).grid(row=8, column=4)
button_racine =Button(window, text=" √ ", width=5, height=3, bg="#2D4044",fg="white", font="bold",command=lambda: input_sci_op("racine", get_e_value())).grid(row=9, column=0)
button_0 =Button(window, text=" 0 ", width=15, height=3, bg="#527F87", font="bold",command=lambda: input_key("0")).grid(row=9, column=1, columnspan=2)
button_virg =Button(window, text=" . ", width=5, height=3,bg="#95BFC3", font="bold",command=lambda: input_key(".")).grid(row=9, column=3)
button_add =Button(window, text=" + ", width=5, height=3, bg="#232424", fg="#DBFD00", font="bold",command=lambda: input_operator("+", get_e_value())).grid(row=9, column=4)
message_text = StringVar()
Label(window, textvariable=message_text, font=('Helvetica',12,'bold'),fg="#527F87", justify=CENTER).grid(row=10, column=0, columnspan=6)
#end of graphic interface
window.mainloop()
|
the-stack_0_26126
|
#!/bin/sh
''''exec nosetests -s -- "$0" ${1+"$@"} # '''
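# The two lines above form a shell/Python polyglot: run as a shell script, the file
# re-executes itself through nosetests; Python only sees a harmless string literal.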
import numpy as np
from .utils import *
from .planets import *
from .event_generator import *
def sandbox_test():
np.set_printoptions(threshold=1000)
print(jd_now())
jd_start = iso2jd('2014-10-01 7:40:00')
rx_periods = Mercury(jd_start).retrogrades_within_period(jd_start, jd_start+30)
print(rx_periods)
print(list(map(lambda x: jd2iso(x['jd']), rx_periods)))
print(jd2iso(Mercury(jd_start).next_rx_event()['jd']))
jd_start = jd_now()
jd_end = jd_start + 365*0.5
#generate_event_table(jd_start, jd_end, [Jupiter(), Saturn()], [(0,'conjunction',None)], compute_ingresses=False)
generate_event_table(jd_start, jd_end)
#for event in get_events(jd_now(), jd_now()+400, planet='mercury', type='retrograde'): print(event, '\n')
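# Brute-force helper: scans planetary speeds day by day over roughly 100 years and
# reports each body's minimum and maximum speed.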
def compute_min_max_speeds():
for p in [Moon(), Sun(), Mercury(), Venus(), Mars(), Jupiter(), Saturn()]:
min = 1000
max = 0
jd = jd_now()
while jd < jd_now()+365*100:
if p.speed(jd) > max:
max = p.speed(jd)
if p.speed(jd) < min:
min = p.speed(jd)
jd += 1
print(p, min, max)
|
the-stack_0_26127
|
"""
Copyright 2020 Marc Steele
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from wave_chunk_parser.chunks import CartChunk, CartTimer
from datetime import datetime
from wave_chunk_parser.exceptions import InvalidHeaderException, InvalidTimerException
import json
from parameterized import parameterized
from typing import List, Tuple
from unittest import TestCase
class TestCartChunk(TestCase):
@parameterized.expand(
[
(
"./tests/files/cart_no_tag.blob",
0,
"0101",
"A cart with no tag text",
"Some artist",
"TAGLESS",
"Free Spirit",
"DEMO",
"Demo and sample files",
"Nom Nom Nom!",
datetime(1900, 1, 1, 0, 0),
datetime(2099, 12, 31, 23, 59, 59),
"Hand Crafted",
"MK1 Eyeball",
"Some stuff goes in here....",
32768,
[("MRK ", 112000), ("SEC1", 152533), ("EOD", 201024)],
"http://www.example.com/",
None,
),
(
"./tests/files/cart_long.blob",
0,
"0101",
"This is a cart with a really long title that should be trunkated",
"This is a cart with a really long artist name that should be tru",
"LONGCART",
"Biscuit Muncher",
"DEMO",
"Demo and sample files",
"Nom Nom Nom!",
datetime(1900, 1, 1, 0, 0),
datetime(2099, 12, 31, 23, 59, 59),
"Hand Crafted",
"MK1 Eyeball",
"Some stuff goes in here....",
32768,
[("MRK ", 112000), ("SEC1", 152533), ("EOD", 201024)],
"http://www.example.com/",
"A load of junk goes in here.\r\n",
),
]
)
def test_read_valid_data_chunk(
self,
file_name: str,
chunk_offset: int,
expected_version: str,
expected_title: str,
expected_artist: str,
expected_cut_id: str,
expected_client_id: str,
expected_category: str,
expected_classification: str,
expected_out_cue: str,
expected_start_date: datetime,
expected_end_date: datetime,
expected_producer_app: str,
expected_producer_app_version: str,
expected_user_defined: str,
expected_ref_0db: int,
expected_timers: List[Tuple[str, int]],
expected_url: str,
expected_tag_text: str,
):
"""
The cart chunk can be read correctly.
"""
# Arrange
with open(file_name, "rb") as file:
# Act
chunk = CartChunk.from_file(file, chunk_offset)
# Assert
self.assertIsNotNone(chunk)
self.assertEqual(chunk.get_name, b"cart")
self.assertEqual(chunk.version, expected_version)
self.assertEqual(chunk.title, expected_title)
self.assertEqual(chunk.artist, expected_artist)
self.assertEqual(chunk.cut_id, expected_cut_id)
self.assertEqual(chunk.client_id, expected_client_id)
self.assertEqual(chunk.category, expected_category)
self.assertEqual(chunk.classification, expected_classification)
self.assertEqual(chunk.out_cue, expected_out_cue)
self.assertEqual(chunk.start_date, expected_start_date)
self.assertEqual(chunk.end_date, expected_end_date)
self.assertEqual(chunk.producer_app, expected_producer_app)
self.assertEqual(chunk.producer_app_version, expected_producer_app_version)
self.assertEqual(chunk.user_defined, expected_user_defined)
self.assertEqual(chunk.ref_0db, expected_ref_0db)
self.assertEqual(len(chunk.timers), len(expected_timers))
for (expected_name, expected_time) in expected_timers:
self.assertTrue(
[
timer.name == expected_name and timer.time == expected_time
for timer in chunk.timers
]
)
self.assertEqual(chunk.url, expected_url)
self.assertEqual(chunk.tag_text, expected_tag_text)
@parameterized.expand(
[
(
"./tests/files/valid_no_markers.wav",
12,
)
]
)
def test_read_wrong_chunk(self, file_name: str, chunk_offset: int):
"""
An appropriate error is raised if the wrong chunk is read.
"""
# Arrange
with open(file_name, "rb") as file:
# Act
with self.assertRaises(InvalidHeaderException) as context:
CartChunk.from_file(file, chunk_offset)
# Assert
self.assertIn("Cart chunk must start with cart", context.exception)
@parameterized.expand(
[
("./tests/files/cart_long.json", "./tests/files/cart_long.blob"),
("./tests/files/cart_no_tag.json", "./tests/files/cart_no_tag.blob"),
]
)
def test_encode_chunk(self, json_filename: str, blob_filename: str):
"""
Encode a cart chunk.
"""
# Arrange
with open(json_filename, "r") as json_file:
fields = json.load(json_file)
timers = []
for timer_parts in fields["timers"]:
timers.append(CartTimer(timer_parts["name"], timer_parts["time"]))
chunk = CartChunk(
fields["version"],
fields["title"],
fields["artist"],
fields["cut_id"],
fields["client_id"],
fields["category"],
fields["classification"],
fields["out_cue"],
datetime.strptime(fields["start_date"], CartChunk.FORMAT_DATE_TIME),
datetime.strptime(fields["end_date"], CartChunk.FORMAT_DATE_TIME),
fields["producer_app"],
fields["producer_app_version"],
fields["user_defined"],
int(fields["ref_0db"]),
timers,
fields["url"],
fields["tag_text"],
)
with open(blob_filename, "rb") as blob_file:
expected_blob = blob_file.read()
# Act
blob = chunk.to_bytes()
# Assert
self.assertEqual(len(blob), len(expected_blob))
self.assertEqual(blob, expected_blob)
def test_cart_bad_length(self):
"""
An error is raised when an invalid cart length is supplied.
"""
# Arrange
with open("./tests/files/cart_bad_length.blob", "rb") as cart_file:
# Act
with self.assertRaises(InvalidHeaderException) as context:
CartChunk.from_file(cart_file, 0)
# Assert
        self.assertIn(
            "Cart chunk is not long enough. Must be a minimum of 2048 bytes",
            str(context.exception),
        )
@parameterized.expand(
[
("ERR0", 0, "ERR is not a valid timer prefix"),
("MRKs", 0, "MRK timers cannot have start or end suffixes"),
("MRKe", 0, "MRK timers cannot have start or end suffixes"),
("AUD1", 0, "AUD timers cannot be enumerated"),
]
)
def test_invalid_timer_names(self, name: str, time: int, expected_error: str):
"""
Invalid timer names raise an error.
"""
# Arrange
# Act
with self.assertRaises(InvalidTimerException) as context:
CartTimer(name, time)
# Assert
        self.assertIn(expected_error, str(context.exception))
|
the-stack_0_26128
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Job managed by the Job Manager."""
import warnings
import logging
from typing import List, Optional, Union
from concurrent.futures import ThreadPoolExecutor
from threading import Lock
from qiskit.providers.ibmq import IBMQBackend
from qiskit.qobj import QasmQobj, PulseQobj
from qiskit.result import Result
from qiskit.providers.jobstatus import JobStatus
from qiskit.providers.exceptions import JobError
from qiskit.providers.ibmq.apiconstants import ApiJobShareLevel, API_JOB_FINAL_STATES
from ..job.ibmqjob import IBMQJob
from ..job.exceptions import IBMQJobTimeoutError
from ..exceptions import IBMQBackendJobLimitError
logger = logging.getLogger(__name__)
class ManagedJob:
"""Job managed by the Job Manager."""
def __init__(
self,
start_index: int,
experiments_count: int,
job: Optional[IBMQJob] = None
):
"""ManagedJob constructor.
Args:
start_index: Starting index of the experiment set.
experiments_count: Number of experiments.
job: Job to be managed, or ``None`` if not already known.
"""
self.start_index = start_index
self.end_index = start_index + experiments_count - 1
self.future = None
# Properties that may be populated by the future.
self.job = job # type: Optional[IBMQJob]
self.submit_error = None # type: Optional[Exception]
def submit(
self,
qobj: Union[QasmQobj, PulseQobj],
job_name: str,
backend: IBMQBackend,
executor: ThreadPoolExecutor,
submit_lock: Lock,
job_share_level: ApiJobShareLevel,
job_tags: Optional[List[str]] = None
) -> None:
"""Submit the job.
Args:
qobj: Qobj to run.
job_name: Name of the job.
backend: Backend to execute the experiments on.
executor: The thread pool used to submit the job.
submit_lock: Lock used to synchronize job submission.
job_share_level: Job share level.
job_tags: Tags to be assigned to the job.
"""
# Submit the job in its own future.
logger.debug("Submitting job %s in future", job_name)
self.future = executor.submit(
self._async_submit, qobj=qobj, job_name=job_name, backend=backend,
submit_lock=submit_lock, job_share_level=job_share_level, job_tags=job_tags)
logger.debug("Job %s future obtained", job_name)
def _async_submit(
self,
qobj: Union[QasmQobj, PulseQobj],
job_name: str,
backend: IBMQBackend,
submit_lock: Lock,
job_share_level: ApiJobShareLevel,
job_tags: Optional[List[str]] = None
) -> None:
"""Run a Qobj asynchronously and populate instance attributes.
Args:
qobj: Qobj to run.
job_name: Name of the job.
backend: Backend to execute the experiments on.
submit_lock: Lock used to synchronize job submission.
job_share_level: Job share level.
job_tags: Tags to be assigned to the job.
"""
# pylint: disable=missing-raises-doc
logger.debug("Job %s waiting for submit lock.", job_name)
submit_lock.acquire()
logger.debug("Job %s got the submit lock.", job_name)
try:
while self.job is None:
try:
self.job = backend.run(
qobj=qobj,
job_name=job_name,
job_share_level=job_share_level.value,
job_tags=job_tags)
except IBMQBackendJobLimitError:
final_states = [state.value for state in API_JOB_FINAL_STATES]
oldest_running = backend.jobs(limit=1, descending=False,
db_filter={"status": {"nin": final_states}})
if oldest_running:
oldest_running = oldest_running[0]
logger.warning("Job limit reached, waiting for job %s to finish "
"before submitting the next one.",
oldest_running.job_id())
try:
oldest_running.wait_for_final_state(timeout=300)
except Exception as err: # pylint: disable=broad-except
# Don't kill the submit if unable to wait for old job.
logger.debug("An error occurred while waiting for "
"job %s to finish: %s", oldest_running.job_id(), err)
except Exception as err: # pylint: disable=broad-except
warnings.warn("Unable to submit job for experiments {}-{}: {}".format(
self.start_index, self.end_index, err))
self.submit_error = err
finally:
submit_lock.release()
logger.debug("Job %s released the submit lock.", job_name)
def status(self) -> Optional[JobStatus]:
"""Query the server for job status.
Returns:
Current job status, or ``None`` if an error occurred.
"""
if self.submit_error is not None:
return None
if self.job is None:
# Job not yet submitted
return JobStatus.INITIALIZING
try:
return self.job.status()
except JobError as err:
warnings.warn(
"Unable to retrieve job status for experiments {}-{}, job ID={}: {} ".format(
self.start_index, self.end_index, self.job.job_id(), err))
return None
def result(
self,
timeout: Optional[float] = None,
partial: bool = False,
refresh: bool = False
) -> Optional[Result]:
"""Return the result of the job.
Args:
timeout: Number of seconds to wait for job.
partial: If ``True``, attempt to retrieve partial job results.
refresh: If ``True``, re-query the server for the result. Otherwise
return the cached value.
Returns:
Job result or ``None`` if result could not be retrieved.
Raises:
IBMQJobTimeoutError: If the job does not return results before a
specified timeout.
"""
result = None
if self.job is not None:
try:
result = self.job.result(timeout=timeout, partial=partial, refresh=refresh)
except IBMQJobTimeoutError:
raise
except JobError as err:
warnings.warn(
"Unable to retrieve job result for experiments {}-{}, job ID={}: {} ".format(
self.start_index, self.end_index, self.job.job_id(), err))
return result
def error_message(self) -> Optional[str]:
"""Provide details about the reason of failure.
Returns:
An error report if the job failed or ``None`` otherwise.
"""
if self.job is None:
return None
try:
return self.job.error_message()
except JobError:
return "Unknown error."
def cancel(self) -> None:
"""Attempt to cancel the job."""
cancelled = False
cancel_error = "Unknown error"
try:
cancelled = self.job.cancel()
except JobError as err:
cancel_error = str(err)
if not cancelled:
logger.warning("Unable to cancel job %s for experiments %d-%d: %s",
self.job.job_id(), self.start_index, self.end_index, cancel_error)
def qobj(self) -> Optional[Union[QasmQobj, PulseQobj]]:
"""Return the Qobj for this job.
Returns:
The Qobj for this job or ``None`` if the Qobj could not be retrieved.
"""
if self.job is None:
return None
try:
return self.job.qobj()
except JobError as err:
warnings.warn(
"Unable to retrieve qobj for experiments {}-{}, job ID={}: {} ".format(
self.start_index, self.end_index, self.job.job_id(), err))
return None
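# Illustrative usage sketch: ManagedJob is normally driven by the IBMQJobManager,
# but the flow looks roughly like the commented calls below. `backend` and `qobj`
# are assumed to come from an already-authenticated provider; the job name, share
# level and timeout values are arbitrary.
#
#   from concurrent.futures import ThreadPoolExecutor
#   from threading import Lock
#
#   managed = ManagedJob(start_index=0, experiments_count=len(qobj.experiments))
#   managed.submit(qobj=qobj, job_name="batch_0", backend=backend,
#                  executor=ThreadPoolExecutor(max_workers=1), submit_lock=Lock(),
#                  job_share_level=ApiJobShareLevel.NONE)
#   print(managed.status())      # JobStatus.INITIALIZING until submission completes
#   result = managed.result(timeout=300)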
|
the-stack_0_26130
|
# -*- encoding=utf-8 -*-
import os
# Import the Thrift Python modules
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# Import the HBase Python modules generated from the Thrift IDL
from mythrift.hbase import THBaseService
from mythrift.hbase.ttypes import *
from mythrift.hbase.ttypes import TResult
# Create a socket connection to the HBase Thrift server (cs1:9090)
transport = TSocket.TSocket('cs1', 9090)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = THBaseService.Client(protocol)
# Open the transport
transport.open()
## put operation
# table = b'ns1:t1'
# row = b'row1'
# v1 = TColumnValue(b'f1', b'id', b'101')
# v2 = TColumnValue(b'f1', b'name', b'tomas')
# v3 = TColumnValue(b'f1', b'age', b'12')
# vals = [v1, v2, v3]
# put = TPut(row, vals)
# client.put(table, put)
# print("okkkk!!")
# transport.close()
# #get
# table = b'ns1:t1'
# rowkey=b"row1"
# col_id = TColumn(b"f1",b"id")
# col_name = TColumn(b"f1",b"name")
# col_age = TColumn(b"f1",b"age")
#
# cols = [col_id,col_name,col_age]
# get = TGet(rowkey,cols)
# res = client.get(table,get)
# print(bytes.decode(res.columnValues[0].qualifier))
# print(bytes.decode(res.columnValues[0].family))
# print(res.columnValues[0].timestamp)
# print(bytes.decode(res.columnValues[0].value))
# #delete
# table = b'ns1:t1'
# rowkey = b"row1"
# col_id = TColumn(b"f1", b"id")
# col_name = TColumn(b"f1", b"name")
# col_age = TColumn(b"f1", b"age")
# cols = [col_id, col_name]
#
# # Construct the delete object
# delete = TDelete(rowkey,cols)
# res = client.deleteSingle(table, delete)
# transport.close()
# print("ok")
# Scan
table = b'ns1:t12'
startRow = b'1530357094900-43dwMLjxI5-0'
stopRow = b'1530357183537-43dwMLjxI5-6'
payload = TColumn(b"f1", b"payload")
cols = [payload]
# Bounded scan from startRow to stopRow (replaced by the unbounded scan below):
scan = TScan(startRow=startRow, stopRow=stopRow, columns=cols)
# If stopRow is omitted, the scan runs to the end of the table.
scan = TScan(startRow=startRow, columns=cols)
r = client.getScannerResults(table, scan, 100)
for x in r:
print("============")
print(bytes.decode(x.columnValues[0].qualifier))
print(bytes.decode(x.columnValues[0].family))
print(x.columnValues[0].timestamp)
print(bytes.decode(x.columnValues[0].value))
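# Close the transport when the scan is finished (mirrors the commented examples above).
transport.close()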
|
the-stack_0_26135
|
def convert_time(time):
"""convert time from hh:dd to minutes after 00:00"""
hours, minutes = time.split(":")
return int(hours) * 60 + int(minutes)
class Node:
def __init__(self, data):
self.data = data
self.job_name = data[0] # BEWARE - this doesn't update if data is updated
self.start_time = convert_time(data[1])
self.end_time = convert_time(data[2])
self.left_child = None
self.right_child = None
def __str__(self):
return f"Job:\t\t{self.data[0]}\nStart time:\t{self.data[1]}\nEnd time:\t{self.data[2]}\n"
class BinarySearchTree:
def __init__(self, data=None):
self.root = None
def insert(self, data):
new_node = Node(data)
if not self.root:
self.root = new_node
print(f"{new_node.job_name} added sucessfully.")
return
self._insert(self.root, new_node)
def _insert(self, current, new_node):
if new_node.end_time <= current.start_time:
if not current.left_child:
current.left_child = new_node
print(f"{new_node.job_name} added sucessfully.")
else:
self._insert(current.left_child, new_node)
elif new_node.start_time >= current.end_time:
if not current.right_child:
current.right_child = new_node
print(f"{new_node.job_name} added sucessfully.")
else:
self._insert(current.right_child, new_node)
else:
print(f"***{new_node.job_name} NOT added due to time slot conflicts.***")
def in_order(self):
self._in_order(self.root)
def _in_order(self, node):
if node:
self._in_order(node.left_child)
print(node)
self._in_order(node.right_child)
def min_right_subtree(self, node):
node = node.right_child
while node.left_child:
node = node.left_child
return node.data
def delete(self, data):
self._delete(self.root, None, None, data)
def _delete(self, current, parent, is_left, data):
target_node = Node(data)
if current:
if target_node.data == current.data:
# if no children
if not current.left_child and not current.right_child:
# if not root
if parent:
if is_left:
parent.left_child = None
else:
parent.right_child = None
# if root
else:
self.root = None
# if left child only
elif current.left_child and not current.right_child:
# if not root
if parent:
if is_left:
parent.left_child = current.left_child
else:
parent.right_child = current.left_child
# if root
else:
self.root = current.left_child
current.left_child = None
# if right child only
elif not current.left_child and current.right_child:
# if not root
if parent:
if is_left:
parent.left_child = current.right_child
else:
parent.right_child = current.right_child
# if root
else:
self.root = current.right_child
current.right_child = None
# if two children
else:
# get data of leftmost value of right subtree
min_right_subtree_data = self.min_right_subtree(current)
# delete node with that data (has at most one child so handled by above)
self.delete(min_right_subtree_data)
# replace current node data with that data
current.data = min_right_subtree_data
# if node is to left
elif target_node.end_time <= current.start_time:
# move left
self._delete(current.left_child, current, True, data)
# if node is to right
elif target_node.start_time >= current.end_time:
# move right
self._delete(current.right_child, current, False, data)
# if current == None
else:
raise ValueError(f"{target_node.job_name} not in job tree")
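if __name__ == "__main__":
    # Small illustrative demo; the job names and time slots below are arbitrary.
    schedule = BinarySearchTree()
    schedule.insert(("Backup", "01:00", "02:00"))
    schedule.insert(("Reports", "02:30", "03:00"))
    schedule.insert(("Overlap", "01:30", "02:15"))  # rejected: clashes with Backup
    schedule.in_order()
    schedule.delete(("Backup", "01:00", "02:00"))
    schedule.in_order()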
|
the-stack_0_26136
|
import cv2
import numpy as np
import numpy.linalg as LA
from queue import SimpleQueue
from math import ceil
from time import time
from importlib import reload
import matplotlib.pyplot as plt
from IPython.display import Video
from warnings import simplefilter
from scipy.linalg import sqrtm
import torch
from torchvision import transforms
from torchvision.io import read_video, read_video_timestamps, write_video
from torchvision.utils import save_image
import kornia as K
import kornia.feature as KF
from kornia_moons.feature import *
from kornia.contrib import ImageStitcher
from kornia.geometry.transform import warp_perspective, get_perspective_transform
simplefilter("ignore", UserWarning)
# read video
fname = "./deep-stabilization/dvs/video/s_114_outdoor_running_trail_daytime/ControlCam_20200930_104820.mp4"
video_frames, _, meta = read_video(fname, end_pts=15, pts_unit="sec")
print(video_frames.shape)
print(meta)
# select frame
img1 = video_frames[:-1].permute(0,3,1,2).float() / 255
img2 = video_frames[1:].permute(0,3,1,2).float() / 255
feature1 = transforms.Resize((1080//8,1920//8))(img1)
feature2 = transforms.Resize((1080//8,1920//8))(img2)
# find match point
# matcher = KF.LocalFeatureMatcher(
# KF.SIFTFeature(100, device="cuda"),
# KF.DescriptorMatcher('smnn', 0.8)
# )
loftr = KF.LoFTR('outdoor').cuda()
t_all = 0
mkpts0 = []
mkpts1 = []
batch_idx = []
for x in range(ceil(len(feature1)/32)):
f1 = feature1[32*x:32*(x+1)].cuda()
f2 = feature2[32*x:32*(x+1)].cuda()
input_dict = {"image0": K.color.rgb_to_grayscale(f1).cuda(), # LofTR works on grayscale images only
"image1": K.color.rgb_to_grayscale(f2).cuda()}
with torch.no_grad():
t = time()
# correspondences = matcher(input_dict)
correspondences = loftr(input_dict)
t_all += time()-t
del f1, f2, input_dict
th = torch.quantile(correspondences["confidence"], 0.8)
idx = correspondences["confidence"] >= th
print("keypoints count: ", idx.sum().item())
mkpts0.append(correspondences['keypoints0'][idx].cpu().numpy())
mkpts1.append(correspondences['keypoints1'][idx].cpu().numpy())
batch_idx.append((correspondences['batch_indexes'][idx]+32*x).cpu().numpy())
mkpts0 = np.vstack(mkpts0)
mkpts1 = np.vstack(mkpts1)
batch_idx = np.hstack(batch_idx)
print(mkpts0.shape)
print(batch_idx.shape)
print(np.max(batch_idx))
print("Get matching points: {:5.3f} sec".format(t_all))
print("number of keypoints: {:5.2f}".format(len(correspondences["keypoints0"])/32))
q = SimpleQueue()
H = np.eye(3)
count = 0
Hs = []
for x in range(len(feature1)):
t=time()
try:
H1, _ = cv2.findHomography(mkpts0[batch_idx==x], mkpts1[batch_idx==x], cv2.USAC_MAGSAC, 0.5, 0.999, 100000)
except cv2.error as e:
H1 = np.eye(3)
try:
H = H1 @ H
except:
pass
q.put_nowait(H1)
t_all += time()-t
count += 1
if count >= 16:
try:
H = H @ LA.inv(q.get())
except:
pass
Hs.append(sqrtm(sqrtm(sqrtm(sqrtm(H)))).real)
frames = []
for img, H in zip(img1, Hs):
frames.append(cv2.warpAffine(img.permute(1,2,0).numpy(), H[:2], (1920, 1080)))
frames = (torch.from_numpy(np.stack(frames)) * 255).type(torch.uint8)
print(frames.shape)
write_video("test.mp4", frames, fps=meta["video_fps"])
# plt.imsave("res.png", res)
# plt.imsave("img1.png", img1.permute(1,2,0).numpy())
# plt.imsave("img2.png", img2.permute(1,2,0).numpy())
|
the-stack_0_26137
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class EmailVerificationRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'token': 'string'
}
attribute_map = {
'token': 'token'
}
def __init__(self, token=None): # noqa: E501
"""EmailVerificationRequest - a model defined in Swagger""" # noqa: E501
self._token = None
self.discriminator = None
self.token = token
@property
def token(self):
"""Gets the token of this EmailVerificationRequest. # noqa: E501
The verification token that was sent to the user # noqa: E501
:return: The token of this EmailVerificationRequest. # noqa: E501
:rtype: string
"""
return self._token
@token.setter
def token(self, token):
"""Sets the token of this EmailVerificationRequest.
The verification token that was sent to the user # noqa: E501
:param token: The token of this EmailVerificationRequest. # noqa: E501
:type: string
"""
if token is None:
raise ValueError("Invalid value for `token`, must not be `None`") # noqa: E501
self._token = token
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EmailVerificationRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_26138
|
from functools import partial
import arcade
from . import background
from .models import Game
from .button import ActionButton
def check_mouse_press_for_buttons(x, y, button_list):
""" Given an x, y, see if we need to register any button clicks. """
for button in button_list:
if x > button.center_x + button.width / 2:
continue
if x < button.center_x - button.width / 2:
continue
if y > button.center_y + button.height / 2:
continue
if y < button.center_y - button.height / 2:
continue
button.on_press()
def check_mouse_release_for_buttons(x, y, button_list):
""" If a mouse button has been released, see if we need to process
any release events. """
for button in button_list:
if button.pressed:
button.on_release()
class GameWindow(arcade.Window):
"""
Main window for the game.
"""
def __init__(self, width, height, title, fullscreen, resizable, antialiasing):
""" Main window's initializer"""
super().__init__(width, height, title, fullscreen, resizable, antialiasing=antialiasing)
# All variables are later defined in setup.
# This is so we can restart the game.
self.background: background.Background
self.button_list = None
def setup(self):
""" Set up the game and initialize the variables.
        NOTE: This is run here so we can restart the game."""
self.background = background.StartBackground() # We always start in the start menu
self.game = Game.init_game()
self.button_list = [
ActionButton(
action=partialmethod(self.game.buy_cow, "Meat Cows"),
center_x=30,
center_y=10,
width=80,
height=30,
text="Buy Meat Cow"
),
ActionButton(
action=partialmethod(self.game.buy_cow, "Milk Cows"),
center_x=30,
center_y=50,
width=80,
height=30,
text="Buy Milk Cow"
),
ActionButton(
action=partialmethod(self.game.buy_cow, "Meat Burp"),
center_x=30,
center_y=70,
width=80,
height=30,
text="Buy Burp Cow"
),
ActionButton(
action=partialmethod(self.game.sell_cow, "Meat Cows"),
center_x=100,
center_y=10,
width=80,
height=30,
text="Buy Meat Cow"
),
ActionButton(
action=partialmethod(self.game.sell_cow, "Milk Cows"),
center_x=100,
center_y=50,
width=80,
height=30,
text="Buy Milk Cow"
),
ActionButton(
action=partialmethod(self.game.sell_cow, "Meat Burp"),
center_x=100,
center_y=70,
width=80,
height=30,
text="Buy Burp Cow"
),
]
# Events
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.background.draw()
for button in self.button_list:
button.draw()
def update(self, delta_time):
"""
All the logic to move, and the game logic goes here.
Normally, you'll call update() on the sprite lists that
need it.
"""
self.game.update(delta_time)
def on_key_press(self, key, key_modifiers):
"""
Called whenever a key on the keyboard is pressed.
For a full list of keys, see:
http://arcade.academy/arcade.key.html
"""
# If we're in the start "menu":
if isinstance(self.background, background.StartBackground):
self.background = background.GameBackground()
def on_mouse_press(self, x, y, button, key_modifiers):
"""
Called when the user presses a mouse button.
"""
# If we're in the start "menu":
if isinstance(self.background, background.StartBackground):
self.background = background.GameBackground()
check_mouse_press_for_buttons(x, y, self.button_list)
def on_mouse_release(self, x, y, button, key_modifiers):
"""
Called when a user releases a mouse button.
"""
check_mouse_release_for_buttons(x, y, self.button_list)
|
the-stack_0_26139
|
import pytest
import numpy as np
from numpy import deg2rad
from pytest import approx
from ..functions import (
jacobian, gm_reduce_single, mod_bearing, mod_elevation, gauss2sigma,
rotx, roty, rotz, cart2sphere, cart2angles, pol2cart, sphere2cart)
from ..types.array import StateVector, StateVectors
from ..types.state import State, GaussianState
def test_jacobian():
""" jacobian function test """
# State related variables
state_mean = StateVector([[3.0], [1.0]])
def f(x):
return np.array([[1, 1], [0, 1]])@x.state_vector
jac = jacobian(f, State(state_mean))
assert np.allclose(jac, np.array([[1, 1], [0, 1]]))
def test_jacobian2():
""" jacobian function test """
# Sample functions to compute Jacobian on
def fun(x):
""" function for testing scalars i.e. scalar input, scalar output"""
return 2*x.state_vector**2
def fun1d(ins):
""" test function with vector input, scalar output"""
out = 2*ins.state_vector[0]+3*ins.state_vector[1]
return out
def fun2d(vec):
""" test function with 2d input and 2d output"""
out = np.empty((2, 1))
out[0] = 2*vec.state_vector[0]**2 + 3*vec.state_vector[1]**2
out[1] = 2*vec.state_vector[0]+3*vec.state_vector[1]
return out
x = 3
jac = jacobian(fun, State(StateVector([[x]])))
assert np.allclose(jac, 4*x)
x = StateVector([[1], [2]])
# Tolerance value to use to test if arrays are equal
tol = 1.0e-5
jac = jacobian(fun1d, State(x))
T = np.array([2.0, 3.0])
FOM = np.where(np.abs(jac-T) > tol)
# Check # of array elements bigger than tol
assert len(FOM[0]) == 0
jac = jacobian(fun2d, State(x))
T = np.array([[4.0*x[0], 6*x[1]],
[2, 3]])
FOM = np.where(np.abs(jac - T) > tol)
# Check # of array elements bigger than tol
assert len(FOM[0]) == 0
def test_jacobian_large_values():
# State related variables
state = State(StateVector([[1E10], [1.0]]))
def f(x):
return x.state_vector**2
jac = jacobian(f, state)
assert np.allclose(jac, np.array([[2e10, 0.0], [0.0, 2.0]]))
def test_gm_reduce_single():
means = StateVectors([StateVector([1, 2]), StateVector([3, 4]), StateVector([5, 6])])
covars = np.stack([[[1, 1], [1, 0.7]],
[[1.2, 1.4], [1.3, 2]],
[[2, 1.4], [1.2, 1.2]]], axis=2)
weights = np.array([1, 2, 5])
mean, covar = gm_reduce_single(means, covars, weights)
assert np.allclose(mean, np.array([[4], [5]]))
assert np.allclose(covar, np.array([[3.675, 3.35],
[3.2, 3.3375]]))
def test_bearing():
bearing_in = [10., 170., 190., 260., 280., 350., 705]
rad_in = deg2rad(bearing_in)
bearing_out = [10., 170., -170., -100., -80., -10., -15.]
rad_out = deg2rad(bearing_out)
for ind, val in enumerate(rad_in):
assert rad_out[ind] == approx(mod_bearing(val))
def test_elevation():
elev_in = [10., 80., 110., 170., 190., 260., 280]
rad_in = deg2rad(elev_in)
elev_out = [10., 80., 70., 10., -10., -80., -80.]
rad_out = deg2rad(elev_out)
for ind, val in enumerate(rad_in):
assert rad_out[ind] == approx(mod_elevation(val))
def test_gauss2sigma_float():
mean = 1.0
covar = 2.0
state = GaussianState([[mean]], [[covar]])
sigma_points_states, mean_weights, covar_weights = gauss2sigma(state, kappa=0)
for n, sigma_point_state in zip((0, 1, -1), sigma_points_states):
assert sigma_point_state.state_vector[0, 0] == approx(mean + n*covar**0.5)
def test_gauss2sigma_int():
mean = 1
covar = 2.0
state = GaussianState([[mean]], [[covar]])
sigma_points_states, mean_weights, covar_weights = gauss2sigma(state, kappa=0)
for n, sigma_point_state in zip((0, 1, -1), sigma_points_states):
# Resultant sigma points are still ints
assert sigma_point_state.state_vector[0, 0] == int(mean + n*covar**0.5)
assert isinstance(sigma_point_state.state_vector[0, 0], np.integer)
@pytest.mark.parametrize(
"angle",
[
(
np.array([np.pi]), # angle
np.array([np.pi / 2]),
np.array([-np.pi]),
np.array([-np.pi / 2]),
np.array([np.pi / 4]),
np.array([-np.pi / 4]),
np.array([np.pi / 8]),
np.array([-np.pi / 8]),
)
]
)
def test_rotations(angle):
c, s = np.cos(angle), np.sin(angle)
zero = np.zeros_like(angle)
one = np.ones_like(angle)
assert np.array_equal(rotx(angle), np.array([[one, zero, zero],
[zero, c, -s],
[zero, s, c]]))
assert np.array_equal(roty(angle), np.array([[c, zero, s],
[zero, one, zero],
[-s, zero, c]]))
assert np.array_equal(rotz(angle), np.array([[c, -s, zero],
[s, c, zero],
[zero, zero, one]]))
@pytest.mark.parametrize(
"x, y, z",
[ # Cartesian values
(1., 0., 0.),
(0., 1., 0.),
(0., 0., 1.),
(1., 1., 0.),
(1., 0., 1.),
(0., 1., 1.),
(1., 1., 1.)
]
)
def test_cart_sphere_inversions(x, y, z):
rho, phi, theta = cart2sphere(x, y, z)
# Check sphere2cart(cart2sphere(cart)) == cart
assert np.allclose(np.array([x, y, z]), sphere2cart(rho, phi, theta))
# Check cart2angle == cart2sphere for angles
assert np.allclose(np.array([phi, theta]), cart2angles(x, y, z))
# Check that pol2cart(cart2angle(cart)) == cart
# note, this only works correctly when z==0
if z == 0:
assert np.allclose(np.array([x, y]), pol2cart(rho, phi))
|
the-stack_0_26140
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
camera = vtk.vtkCamera()
ren1.SetActiveCamera(camera)
# create a sphere source and actor
#
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(80)
sphere.SetPhiResolution(40)
sphere.SetRadius(1)
sphere.SetCenter(0,0,0)
sphere2 = vtk.vtkSphereSource()
sphere2.SetThetaResolution(80)
sphere2.SetPhiResolution(40)
sphere2.SetRadius(0.5)
sphere2.SetCenter(1,0,0)
sphere3 = vtk.vtkSphereSource()
sphere3.SetThetaResolution(80)
sphere3.SetPhiResolution(40)
sphere3.SetRadius(0.5)
sphere3.SetCenter(-1,0,0)
sphere4 = vtk.vtkSphereSource()
sphere4.SetThetaResolution(80)
sphere4.SetPhiResolution(40)
sphere4.SetRadius(0.5)
sphere4.SetCenter(0,1,0)
sphere5 = vtk.vtkSphereSource()
sphere5.SetThetaResolution(80)
sphere5.SetPhiResolution(40)
sphere5.SetRadius(0.5)
sphere5.SetCenter(0,-1,0)
appendData = vtk.vtkAppendPolyData()
appendData.AddInputConnection(sphere.GetOutputPort())
appendData.AddInputConnection(sphere2.GetOutputPort())
appendData.AddInputConnection(sphere3.GetOutputPort())
appendData.AddInputConnection(sphere4.GetOutputPort())
appendData.AddInputConnection(sphere5.GetOutputPort())
depthSort = vtk.vtkDepthSortPolyData()
depthSort.SetInputConnection(appendData.GetOutputPort())
depthSort.SetDirectionToBackToFront()
depthSort.SetVector(1,1,1)
depthSort.SetCamera(camera)
depthSort.SortScalarsOn()
depthSort.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(depthSort.GetOutputPort())
mapper.SetScalarRange(0,depthSort.GetOutput().GetNumberOfCells())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetOpacity(0.5)
actor.GetProperty().SetColor(1,0,0)
actor.RotateX(-72)
depthSort.SetProp3D(actor)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(actor)
ren1.SetBackground(1,1,1)
renWin.SetSize(300,200)
# render the image
#
ren1.ResetCamera()
ren1.GetActiveCamera().Zoom(2.2)
renWin.Render()
# prevent the tk window from showing up then start the event loop
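# To view the scene interactively outside the test harness, uncomment:
# iren.Initialize()
# iren.Start()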
# --- end of script --
|
the-stack_0_26142
|
import time
import joblib
import os
import torch
import numpy as np
from numpy.linalg import inv
import core
import matplotlib.pyplot as plt
import math
import visilibity as vis
from scipy.stats import norm
from gym.utils.seeding import _int_list_from_bigint, hash_seed
from functools import partial
from multiprocessing import Pool
from statsmodels.stats.weightstats import DescrStatsW
DET_STEP = 100
DET_STEP_FRAC = 71.0
ACTION = np.array([[-DET_STEP,0],
[-DET_STEP_FRAC,DET_STEP_FRAC],
[0,DET_STEP],
[DET_STEP_FRAC,DET_STEP_FRAC],
[DET_STEP,0],
[DET_STEP_FRAC,-DET_STEP_FRAC],
[0,-DET_STEP],
[-DET_STEP_FRAC,-DET_STEP_FRAC]])
EPSILON = 0.0000001
def make_env(random_ng, num_obs):
"""Create radiation source search environment"""
import gym
init_dims = {'bbox': [[0.0,0.0],[2700.0,0.0],[2700.0,2700.0],[0.0,2700.0]],
'area_obs':[200.0,500.0], 'coord_noise':False,
'obstruct':num_obs, 'seed' : random_ng}
env_name = 'gym_rad_search:RadSearch-v0'
env = gym.make(env_name,**init_dims)
return env
def select_model(fpath, ac_kwargs, bp_kwargs, grad_kwargs, model='rl',
bkg=None, FIM_step=None,search_area_max=None, scale=None,env=None):
"""
Sets which algorithm will be used for action selection (get_action fcn.).
See "gs" conditional branch for further comments
Choices are:
gs: gradient search
bpf-a2c: bootstrap particle filter with actor critic
rad-a2c: particle filter GRU with actor critic
rid-fim: bootstrap particle filter with renyi information divergence-fisher information matrix controller
"""
from gym.spaces import Box, Discrete
fname = os.path.join(fpath, 'pyt_save', 'model'+'.pt')
#print('\n\nLoading from %s.\n\n'%fname)
obs_space = Box(0, np.inf, shape=(11,), dtype=np.float32)
act_space = Discrete(8)
bp_kwargs['bkg'] = bkg
bp_kwargs['scale'] = scale
if model =='gs':
#Instantiate model
grad_act = core.GradSearch(**grad_kwargs)
fim = np.zeros((3,3))
x_est_glob = np.zeros((3,))
def get_action(x,est=False,FIM=False,act=False,hidden=None,done=False,post=0,step=None,init_est=False):
"""
Args:
If est is true:
x (list): contains the raw (unprocessed) radiation measurement and detector coordinates
If init_est is true:
x (list): in order, contains the location prediction, unnormalized detector coordinates,
background rate, scaling matrix, prior probabilities for BPF estimates,
standardized observation
If act is true:
x (list): in order, contains the location prediction, raw (unprocessed) radiation measurement and detector coordinates,
processed radiation measurement and detector coordinates, background rate, scaling matrix
"""
nonlocal x_est_glob
if est:
#Location prediction if applicable
return x_est_glob.copy()
elif FIM or FIM is 0:
#Calculate Fisher information if applicable
return fim
elif init_est:
#Initial location prediction if applicable
return fim,x_est_glob.copy()
elif act:
#Action selection
return grad_act.step(x[1]), x_est_glob, 0
else:
#Reset model if when doing multiple runs in same environment
x_est_glob = np.zeros((3,))
elif model == 'bpf-a2c':
ac = core.RNNModelActorCritic(obs_space,act_space,**ac_kwargs)
ac.load_state_dict(torch.load(fname))
ac.model = core.ParticleFilter(**bp_kwargs)
ac.eval()
fim = np.zeros((3,3))
def get_action(x,est=False,FIM=False,act=False,hidden=None,done=False,post=0,step=None,init_est=False):
if est:
return ac.model.track(x)
            elif FIM is not False:
pred = ac.model.xp_prev[:,FIM,:].copy()
pred[:,0] = pred[:,0]*1e4
denom = np.sum(np.square(x[1][1:] - pred[:,1:]),axis=1)
grad_xy = (2*(x[1][1:] - pred[:,1:]))*(pred[:,0] / np.square(denom))[:,None]
grad_I = 1 / denom
grad = np.hstack((grad_I[:,None], grad_xy))
J = np.einsum('ij,ikl->ijk',grad,grad[:,:,None])* np.tile((1/(pred[:,0]/denom + x[2]))[:,None,None],(3,3))
return (((J@x[3])*(ac.model.wp_prev[:,FIM,None,:])).sum(axis=0)).squeeze()
elif init_est:
pred = ac.model.xp_init.copy()
pred[:,0] = pred[:,0]*1e4
denom = np.sum(np.square(x[1] - pred[:,1:]),axis=1)
grad_xy = (2*(x[1][1:] - pred[:,1:]))*(pred[:,0] / np.square(denom))[:,None]
grad_I = 1 / denom
grad = np.hstack((grad_I[:,None], grad_xy))
J = np.einsum('ij,ikl->ijk',grad,grad[:,:,None])* np.tile((1/((pred[:,0]/denom) + x[2]))[:,None,None],(3,3))
return (J@(x[3]@x[4])).mean(axis=0).squeeze(),(ac.model.xp_init * ac.model.wp_init).sum(axis=0)
elif act:
if hidden is None:
hidden = ac.reset_hidden()
with torch.no_grad():
x_obs = torch.FloatTensor(np.append(x[2],x[0][1:]/search_area_max)[None,None,:])
action, hidden, _ = ac.pi._distribution(x_obs,hidden=hidden[1])
act = action.sample().item()
sim_det = sim_step(act,x[1][1:])
pred = ac.model.xp_prev[:,step,:].copy()
pred[:,0] = pred[:,0]*1e4
denom = np.sum(np.square(sim_det - pred[:,1:]),axis=1)
grad_xy = (2*(sim_det - pred[:,1:]))*(pred[:,0] / np.square(denom))[:,None]
grad_I = 1 / denom
grad = np.hstack((grad_I[:,None], grad_xy))
J = np.einsum('ij,ikl->ijk',grad,grad[:,:,None])* np.tile((1/(pred[:,0]/denom + x[3]))[:,None,None],(3,3))
J_tot = np.trace(((J@x[4])*(ac.model.wp_prev[:,step,None,:])).sum(axis=0).squeeze())
return act, J_tot,(None,hidden)
else:
ac.model = core.ParticleFilter(**bp_kwargs)
elif model == 'rad-a2c':
ac = core.RNNModelActorCritic(obs_space,act_space,**ac_kwargs)
ac.load_state_dict(torch.load(fname))
ac.eval()
fim = np.zeros((3,3))
x_est_glob = np.zeros((3,))
def get_action(x,est=False,FIM=False,act=False,hidden=None,done=False,post=0,step=None,init_est=False):
nonlocal x_est_glob
if est:
return x_est_glob.copy()
            elif FIM is not False:
return fim
elif init_est:
hidden = ac.reset_hidden()[0]
with torch.no_grad():
x_est_glob[1:],_ = ac.model( torch.as_tensor(x[5][:3], dtype=torch.float32).unsqueeze(0),hidden)
return fim,x_est_glob.copy()
elif act:
if hidden is None:
hidden = ac.reset_hidden()
with torch.no_grad():
action, _, _, hidden, x_est_glob[1:] = ac.act(x[2],hidden=hidden)
return action, x_est_glob, hidden
else:
x_est_glob = np.zeros((3,))
elif model == 'rid-fim':
ac = core.FIC(**bp_kwargs)
ac.FIM_step = FIM_step
def get_action(x,est=False,FIM=False,act=False,hidden=None,done=False,post=0,step=None,init_est=False):
if done:
print('Tracking marg!')
ac.bpf.marg_mp(5000)
ac.bpf.plot_marg(x)
elif est:
return ac.bpf.track(x)
elif init_est:
pred = ac.bpf.xp_init.copy()
pred[:,0] = pred[:,0]*1e4
denom = np.sum(np.square(x[1][1:] - pred[:,1:]),axis=1)
grad_xy = (2*(x[1][1:] - pred[:,1:]))*(pred[:,0] / np.square(denom))[:,None]
grad_I = 1 / denom
grad = np.hstack((grad_I[:,None], grad_xy))
J = np.einsum('ij,ikl->ijk',grad,grad[:,:,None])* np.tile((1/((pred[:,0]/denom) + x[2]))[:,None,None],(ac.s_size,ac.s_size))
return (((J@(x[3]@x[4])).mean(axis=0)).squeeze(),(ac.bpf.xp_init * ac.bpf.wp_init).sum(axis=0))
            elif FIM is not False:
pred = ac.bpf.xp_prev[:,FIM,:].copy()
pred[:,0] = pred[:,0]*1e4
denom = np.sum(np.square(x[1][1:] - pred[:,1:]),axis=1)
grad_xy = (2*(x[1][1:] - pred[:,1:]))*(pred[:,0] / np.square(denom))[:,None]
grad_I = 1 / denom
grad = np.hstack((grad_I[:,None], grad_xy))
J = np.einsum('ij,ikl->ijk',grad,grad[:,:,None])* np.tile((1/(pred[:,0]/denom + x[2]))[:,None,None],(ac.s_size,ac.s_size))
return (((J@x[3])*(ac.bpf.wp_prev[:,FIM,None,:])).sum(axis=0)).squeeze()
elif act:
ret = ac.optim_action(np.append(x[1][0],x[1][1:]),x[0],step=step)
return ret[0],ret[1], None
elif post:
probs = np.zeros((ac.bpf.state_dim,ac.bpf.state_dim))
covar = np.diag(np.square(1/post[1]))
for jj in range(ac.bpf.nPart):
mu = (ac.bpf.xp_prev[jj,post[0],None] - ac.bpf.xp_prev[:,post[0]-1]).squeeze()
w_T = ac.bpf.wp_prev[:,post[0]-1].T
p_x = norm.pdf(mu/post[1])/post[1]
phi = (w_T @ p_x).squeeze()
grad = 1*(w_T @ (p_x * (mu @ covar))).squeeze()
grad_op = np.outer(grad,grad)
probs += ((grad_op)/(phi[:,None]**2)) * ac.bpf.wp_prev[jj,post[0]]
return probs
else:
ac.bpf = core.ParticleFilter(**bp_kwargs)
else:
raise ValueError('Invalid model type!')
return get_action
def set_vis_coord(point, coords):
point.set_x(coords[0])
point.set_y(coords[1])
return point
def sim_step(act,det):
return det + ACTION[act]
def refresh_env(env_dict,env,n,num_obs=0):
"""
Load saved test environment parameters from dictionary
into the current instantiation of environment
"""
key = 'env_'+str(n)
env.src_coords = env_dict[key][0]
env.det_coords = env_dict[key][1].copy()
env.intensity = env_dict[key][2]
env.bkg_intensity = env_dict[key][3]
env.source = set_vis_coord(env.source,env.src_coords)
env.detector = set_vis_coord(env.detector,env.det_coords)
if num_obs > 0:
env.obs_coord = env_dict[key][4]
env.num_obs = len(env_dict[key][4])
env.poly = []
env.line_segs = []
for obs in env.obs_coord:
geom = [vis.Point(float(obs[0][jj][0]),float(obs[0][jj][1])) for jj in range(len(obs[0]))]
poly = vis.Polygon(geom)
env.poly.append(poly)
env.line_segs.append([vis.Line_Segment(geom[0],geom[1]),vis.Line_Segment(geom[0],geom[3]),
vis.Line_Segment(geom[2],geom[1]),vis.Line_Segment(geom[2],geom[3])])
env.env_ls = [solid for solid in env.poly]
env.env_ls.insert(0,env.walls)
env.world = vis.Environment(env.env_ls)
# Check if the environment is valid
assert env.world.is_valid(EPSILON), "Environment is not valid"
env.vis_graph = vis.Visibility_Graph(env.world, EPSILON)
o, _, _, _ = env.step(-1)
env.det_sto = [env_dict[key][1].copy()]
env.src_sto = [env_dict[key][0].copy()]
env.meas_sto = [o[0].copy()]
env.prev_det_dist = env.world.shortest_path(env.source,env.detector,env.vis_graph,EPSILON).length()
env.iter_count = 1
return o, env
def calc_stats(results,mc=None,plot=False,snr=None,control=None,obs=None):
"""Calculate results from the evaluation"""
stats = np.zeros((len(results[0]),len(results[0][0][1]),3))
keys = results[0][0][1].keys()
num_elem = 101
d_count_dist = np.zeros((len(results[0]),2,num_elem))
for jj, data in enumerate(results[0]):
for ii, key in enumerate(keys):
if 'Count' in key:
stats[jj,ii,0:2] = data[1][key] if data[1][key].size > 0 else np.nan
elif 'LocEstErr' in key:
stats[jj,ii,0] = np.mean(data[1][key]) if data[1][key].size > 0 else np.nan
stats[jj,ii,1] = np.var(data[1][key])/data[1][key].shape[0] if data[1][key].size > 0 else np.nan
else:
stats[jj,ii,0] = np.median(data[1][key]) if data[1][key].size > 0 else np.nan
stats[jj,ii,1] = np.var(data[1][key])/data[1][key].shape[0] if data[1][key].size > 0 else np.nan
stats[jj,ii,2] = data[1][key].shape[0]
            if key == 'dEpLen':  # and isinstance(data[0],np.ndarray):
uni,counts = np.unique(data[1][key],return_counts=True)
sort_idx = np.argsort(counts)
if len(sort_idx) > num_elem:
d_count_dist[jj,0,:] = uni[sort_idx][-num_elem:]
d_count_dist[jj,1,:] = counts[sort_idx][-num_elem:]
else:
d_count_dist[jj,0,num_elem-len(sort_idx):] = uni[sort_idx][-num_elem:]
d_count_dist[jj,1,num_elem-len(sort_idx):] = counts[sort_idx][-num_elem:]
for ii, key in enumerate(keys):
if key in ['dIntDist','ndIntDist', 'dBkgDist','ndBkgDist','dEpRet','ndEpRet','ndEpLen','TotEpLen']:
pass
else:
if 'LocEstErr' in key:
tot_mean = np.mean(stats[:,ii,0])
std_error = math.sqrt(np.nansum(stats[:,ii,1]/stats[:,ii,2]))
#print('Mean '+ key +': ' +str(np.round(tot_mean,decimals=2))+ ' +/- ' +str(np.round(std_error,3)))
else:
if np.nansum(stats[:,ii,0]) > 1:
d1 = DescrStatsW(stats[:,ii,0], weights=stats[:,ii,2])
lp_w, weight_med, hp_w = d1.quantile([0.025,0.5,0.975],return_pandas=False)
q1, q3 = d1.quantile([0.25,0.75],return_pandas=False)
print('Weighted Median '+ key +': ' +str(np.round(weight_med,decimals=2))+ ' Weighted Percentiles (' +str(np.round(lp_w,3))+','+str(np.round(hp_w,3))+')')
return stats, d_count_dist
def run_policy(env, env_set, render=True, save_gif=False, save_path=None,
MC=1, control='fic', fish_analysis=False, ac_kwargs=None,
bp_kwargs=None, grad_kwargs=None,tot_ep=1, n=0):
assert env is not None, \
"Environment not found!\n\n It looks like the environment wasn't saved, " + \
"and we can't run the agent in it. :( \n\n Check out the readthedocs " + \
"page on Experiment Outputs for how to handle this situation."
#Setup intial data structures
ep_ret_ls = []
loc_est_ls = []
FIM_bound = []
render_num = []
mc_stats = {}
done_count = 0
repl = 0
mc = 0
seq_sto = {}
done_dist_int, done_dist_bkg, not_done_dist_int, not_done_dist_bkg = np.array([]), np.array([]), np.array([]), np.array([])
tot_ep_len, d_ep_ret, nd_ep_ret, d_ep_len, nd_ep_len = np.array([]), np.array([]), np.array([]), np.array([]), np.array([])
FIM_bound = [[] for _ in range(MC)]
loc_est_ls = [[] for _ in range(MC)]
J_score_ls = [[] for _ in range(MC)]
det_ls = [[] for _ in range(MC)]
loc_est_err = np.array([])
#Set A2C hidden state to initial condition
hidden = None
#Scaling and prior probabilities for PCRB calculation
scale_mat = np.diag(np.array([1e10,1,1]))
uni_probs = np.diag(np.array([(1/(1e3-1e2)),
(1/(25e2+0)),
(1/(25e2+0))]))
#Variances for PCRB calcs.
sigma_mat = np.array([bp_kwargs['noise_params'][1],
bp_kwargs['noise_params'][0],
bp_kwargs['noise_params'][0]])
pro_covar = inv(np.diag(np.square(sigma_mat)))
#Reset environment and then replace the original parameter with test set parameters
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
o, env = refresh_env(env_set,env,n,num_obs=len(env.obs_coord))
#Instantiate and update running standardization module
stat_buff = core.StatBuff()
stat_buff.update(o[0])
try:
        max_ep_len = env._max_episode_steps
    except AttributeError:
        max_ep_len = env.env._max_episode_steps
#Set the algorithm that will be used for action selection
get_action = select_model(save_path, ac_kwargs, bp_kwargs, grad_kwargs, model=control,
bkg=env.bkg_intensity, FIM_step=env.FIM_step, search_area_max=env.search_area[2][1],
scale=scale_mat,env=env)
#Make initial location prediction if applicable
x_est = get_action(np.append((env.meas_sto[ep_len]),env.det_sto[ep_len]),est=True)
obs_std = o
obs_std[0] = np.clip((o[0]-stat_buff.mu)/stat_buff.sig_obs,-8,8)
#Get initial FIM calculation if applicable
est_init_bnd, x_est_init = get_action([x_est,env.det_sto[ep_len],env.bkg_intensity,scale_mat,uni_probs[None,:],obs_std],init_est=True)
loc_est_ls[mc].append(x_est_init)
FIM_bound[mc].append(est_init_bnd)
while mc < MC: #Perform Monte Carlo runs
loc_est_ls[mc].append(x_est)
#Get action, fisher score, and hidden state when applicable
action, score, hidden = get_action([x_est,np.append(env.meas_sto[ep_len],env.det_sto[ep_len]),obs_std,env.bkg_intensity,scale_mat],
hidden=hidden,act=True,step=ep_len)
#Take action in environment and get new observation
o, r, d, _ = env.step(action)
#Update running statistics
stat_buff.update(o[0])
obs_std = o
obs_std[0] = np.clip((o[0]-stat_buff.mu)/stat_buff.sig_obs,-8,8)
ep_ret += r
ep_len += 1
J_score_ls[mc].append(score)
ep_ret_ls.append(ep_ret)
#Next location prediction
x_est = get_action(np.append((env.meas_sto[ep_len]),env.det_sto[ep_len]),est=True)
if fish_analysis:
#Calculate PCRB if an algorithm is using the bootstrap particle filter
R_t = get_action([x_est,env.det_sto[ep_len],env.bkg_intensity,scale_mat],FIM=ep_len)
rec_bpf = pro_covar + R_t - np.square(pro_covar) @ inv(FIM_bound[mc][ep_len-1] + pro_covar)
FIM_bound[mc].append(rec_bpf)
if d or (ep_len == max_ep_len):
if control == 'rad-a2c':
loc_est_ls[mc].append(x_est)
loc_est_ls[mc] = np.delete(loc_est_ls[mc],1,axis=0) * env.search_area[2][1]
loc_est_err =np.append(loc_est_err, math.sqrt(np.sum(np.square(loc_est_ls[mc][:,1:] - env.src_coords),axis=1).mean()))
else:
loc_est_err = np.append(loc_est_err,math.sqrt(np.sum(np.square(np.array(loc_est_ls[mc])[:,1:] - env.src_coords),axis=1).mean()))
det_ls[mc].append(np.array(env.det_sto))
if mc < 1:
if d:
done_dist_int = np.append(done_dist_int,env.intensity)
done_dist_bkg = np.append(done_dist_bkg,env.bkg_intensity)
else:
not_done_dist_int = np.append(not_done_dist_int,env.intensity)
not_done_dist_bkg = np.append(not_done_dist_bkg,env.bkg_intensity)
tot_ep_len = np.append(tot_ep_len,ep_len)
if d:
done_count += 1
d_ep_len = np.append(d_ep_len,ep_len)
d_ep_ret = np.append(d_ep_ret,ep_ret)
else:
nd_ep_len = np.append(nd_ep_len,ep_len)
nd_ep_ret = np.append(nd_ep_ret,ep_ret)
if render and n==(tot_ep-1) and repl < 1:
#Save trajectory for future rendering
seq_sto['Ep'+str(mc)+'_rew'] = ep_ret_ls
seq_sto['Ep'+str(mc)+'_meas'] = env.meas_sto
seq_sto['Ep'+str(mc)+'_det'] = env.det_sto
seq_sto['Ep'+str(mc)+'_params'] = [env.intensity,env.bkg_intensity,env.src_coords]
seq_sto['Ep'+str(mc)+'_obs'] = env.obs_coord
seq_sto['Ep'+str(mc)+'_loc'] = loc_est_ls
render_num.append(mc)
repl += 1
mc += 1
#Reset environment without performing an env.reset
env.epoch_end = False
env.done = False; env.oob = False
env.iter_count = 0
env.oob_count = 0
r, d, ep_ret, ep_len = 0, False, 0, 0
o, env = refresh_env(env_set,env,n,num_obs=len(env.obs_coord))
            #Reset running statistics and the hidden state initial condition
ep_ret_ls= []
stat_buff.reset()
stat_buff.update(o[0])
obs_std = o
obs_std[0] = np.clip((o[0]-stat_buff.mu)/stat_buff.sig_obs,-8,8)
hidden = None
#Reset model in action selection fcn.
get_action(0)
#Get initial location prediction
x_est = get_action(np.append((env.meas_sto[ep_len]),env.det_sto[ep_len]),est=True)
if fish_analysis and mc < MC:
est_init_bnd, x_est_init = get_action([x_est,env.det_sto[ep_len],env.bkg_intensity,scale_mat,uni_probs[None,:],obs_std],init_est=True)
est_init_bnd = (est_init_bnd)
loc_est_ls[mc].append(x_est_init)
FIM_bound[mc].append(est_init_bnd)
if render and n==(tot_ep-1):
for i in render_num:
env.render(data=seq_sto['Ep'+str(i)+'_det'],
meas=seq_sto['Ep'+str(i)+'_meas'],
ep_rew=seq_sto['Ep'+str(i)+'_rew'],
params=seq_sto['Ep'+str(i)+'_params'],
obs=seq_sto['Ep'+str(i)+'_obs'],
loc_est=seq_sto['Ep'+str(i)+'_loc'],
save_gif=save_gif,
just_env=False,
path=save_path,epoch_count=i)
time.sleep(1e-3)
mc_stats['dEpLen'] = d_ep_len
mc_stats['ndEpLen'] = nd_ep_len
mc_stats['dEpRet'] = d_ep_ret
mc_stats['ndEpRet'] = nd_ep_ret
mc_stats['dIntDist'] = done_dist_int
mc_stats['ndIntDist'] = not_done_dist_int
mc_stats['dBkgDist'] = done_dist_bkg
mc_stats['ndBkgDist'] = not_done_dist_bkg
mc_stats['DoneCount'] = np.array([done_count])
mc_stats['TotEpLen'] = tot_ep_len
mc_stats['LocEstErr'] = loc_est_err
results = [loc_est_ls, FIM_bound, J_score_ls, det_ls]
print(f'Finished episode {n}!, completed count: {done_count}')
return (results,mc_stats)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--fpath', type=str,default='../models/pre_train/rad_a2c/loc24_hid24_pol32_val32_alpha01_tkl07_val01_lam09_npart40_lr3e-4_proc10_obs-1_iter40_blr5e-3_2_tanh_ep3000_steps4800_s1',
help='Specify model directory, Ex: ../models/train/bpf/model_dir')
parser.add_argument('--episodes', '-n', type=int, default=100,help='Number of episodes to test on, option: [1-1000]')
parser.add_argument('--render', '-r',type=bool, default=False,help='Produce gif of agent in environment, last episode of n episodes. Num_cpu should be 1')
parser.add_argument('--save_gif', type=bool,default=False, help='Save gif of the agent in model folder, render must be true')
parser.add_argument('--control', type=str, default='rad-a2c',help='Control algorithm, options: [rad-a2c,bpf-a2c,gs,rid-fim]')
parser.add_argument('--snr', type=str, default='high',help='SNR of environment, options: [low,med,high]')
parser.add_argument('--num_obs', type=int, default=0,help='Number of obstructions in environment, options:[1,3,5,7]')
parser.add_argument('--mc_runs', type=int, default=100,help='Number of Monte Carlo runs per episode')
parser.add_argument('--num_cpu', '-ncpu', type=int, default=10,help='Number of cpus to run episodes across')
parser.add_argument('--fisher',type=bool, default=False,help='Calculate the posterior Cramer-Rao Bound for BPF based methods')
parser.add_argument('--save_results', type=bool, default=False, help='Save list of results across episodes and runs')
args = parser.parse_args()
plt.rc('font',size=14)
seed = 9389090
#Path for the test environments
env_fpath = 'test_envs/snr/test_env_dict_obs'
robust_seed = _int_list_from_bigint(hash_seed(seed))[0]
rng = np.random.default_rng(robust_seed)
params = np.arange(0,args.episodes,1)
#Load set of test envs
env = make_env(rng,args.num_obs)
env_path = env_fpath + str(args.num_obs) if args.snr is None else env_fpath + str(args.num_obs)+'_'+args.snr+'_v4'
try:
env_set = joblib.load(env_path)
    except FileNotFoundError:
env_set = joblib.load(f'eval/{env_path}')
#Model parameters, must match the model being loaded
ac_kwargs = {'batch_s': 1, 'hidden': [24], 'hidden_sizes_pol': [32], 'hidden_sizes_rec': [24],
'hidden_sizes_val': [32], 'net_type': 'rnn', 'pad_dim': 2, 'seed': robust_seed}
#Bootstrap particle filter parameters for RID-FIM controller and BPF-A2C
bp_kwargs = {'nParticles':int(6e3), 'noise_params':[15.,1.,1],'thresh':1,'s_size':3,
'rng':rng, 'L': 1,'k':0.0, 'alpha':0.6, 'fim_thresh':0.36,'interval':[75,75]}
#Gradient search parameters
grad_kwargs = {'q':0.0042,'env':env}
#Create partial func. for use with multiprocessing
func = partial(run_policy, env, env_set, args.render,args.save_gif,
args.fpath, args.mc_runs, args.control,args.fisher,
ac_kwargs,bp_kwargs,grad_kwargs, args.episodes)
mc_results = []
print(f'Number of cpus available: {os.cpu_count()}')
print('Starting pool')
p = Pool(processes=args.num_cpu)
mc_results.append(p.map(func,params))
stats, len_freq = calc_stats(mc_results,mc=args.mc_runs,plot=False,snr=args.snr,control=args.control,obs=args.num_obs)
if args.save_results:
print('Saving results..')
joblib.dump(stats,'results/raw/n_'+str(args.episodes)+'_mc'+str(args.mc_runs)+'_'+args.control+'_'+'stats_'+args.snr+'_v4.pkl')
joblib.dump(len_freq,'results/raw/n_'+str(args.episodes)+'_mc'+str(args.mc_runs)+'_'+args.control+'_'+'freq_stats_'+args.snr+'_v4.pkl')
joblib.dump(mc_results,'results/raw/n_'+str(args.episodes)+'_mc'+str(args.mc_runs)+'_'+args.control+'_'+'full_dump_'+args.snr+'_v4.pkl')
|
the-stack_0_26143
|
import unittest
from buzz import generator
def test_sample_single_word():
l = ('foo', 'bar', 'foobar')
word = generator.sample(l)
assert word in l
def test_sample_multiple_words():
l = ('foo', 'bar', 'foobar')
words = generator.sample(l, 2)
assert len(words) == 2
assert words[0] in l
assert words[1] in l
assert words[0] is not words[1]
def test_generate_buzz_of_at_least_five_words():
phrase = generator.generate_buzz()
assert len(phrase.split()) >= 5
|
the-stack_0_26145
|
from functools import partial
import pytest
from plenum.test.cli.helper import TestCliCore
from plenum.test.testable import spyable
from indy_client.agent.agent_cli import AgentCli
from indy_client.test.agent.acme import create_acme, bootstrap_acme
from indy_client.test.agent.helper import buildAcmeWallet
from indy_client.test.cli.helper import getCliBuilder, getAgentCliHelpString
from indy_client.test.cli.test_tutorial import acmeWithEndpointAdded,\
connectIfNotAlreadyConnected, faberCli, acmeCli, thriftCli
@spyable(methods=[AgentCli.print, AgentCli.printTokens])
class TestAgentCLI(AgentCli, TestCliCore):
pass
@pytest.fixture(scope='module')
def agentCliBuilder(tdir, tdirWithPoolTxns, tdirWithDomainTxns, tconf,
txnPoolNodesLooper, cliTempLogger, txnPoolNodeSet):
return partial(getCliBuilder, tdir=tdir, tconf=tconf,
tdirWithPoolTxns=tdirWithPoolTxns,
tdirWithDomainTxns=tdirWithDomainTxns,
logFileName=cliTempLogger, cliClass=TestAgentCLI)
@pytest.fixture(scope='module')
def acmeAgentCli(agentCliBuilder, acmeAgentPort, tdirWithClientPoolTxns):
agent = create_acme(port=acmeAgentPort, base_dir_path=tdirWithClientPoolTxns,
wallet=buildAcmeWallet())
cliBuild = agentCliBuilder(name='Acme-Agent', agent=agent)
cli = cliBuild('Acme-Agent')
yield from cli
@pytest.fixture(scope='module')
def acmeAgentCliRunning(acmeWithEndpointAdded, acmeAgentCli, looper):
looper.run(bootstrap_acme(acmeAgentCli.agent))
return acmeAgentCli
def test_acme_cli_started_successfully(be, acmeAgentCliRunning):
be(acmeAgentCliRunning)
assert acmeAgentCliRunning.currPromptText == 'Acme-Agent'
def testAgentCliHelp(be, do, acmeAgentCliRunning):
be(acmeAgentCliRunning)
do('help', expect=[getAgentCliHelpString()])
def testAgentCliForInvalidCommand(be, do, acmeAgentCliRunning):
be(acmeAgentCliRunning)
do('set Attr1 to Value1', expect=[
"Invalid command: 'set Attr1 to Value1'",
getAgentCliHelpString()])
def sendProofRequest(be, do, agentCli, userMap):
be(agentCli)
userMap['pr-name-version'] = '{}-v{}'.format(
userMap['pr-name'], userMap['pr-schema-version'])
do('send proof-request {pr-name-version} to {send-proof-target}',
within=5,
mapper=userMap,
expect=[
'Sent proof request "{pr-name-version}" to {send-proof-target}'
])
def checkProofRequestReceived(be, do, userCli, commandMap):
be(userCli)
do(None, within=3, mapper=commandMap,
expect=['Proof request {pr-name} received from {inviter}.'])
def getProofRequestsCount(userCli, target):
li = userCli.activeWallet.getConnectionBy(remote=target)
return len(li.proofRequests)
@pytest.fixture(scope='module')
def aliceAcceptedAcmeInvitationNoProofReq(
acmeAgentCliRunning, be, do, aliceCLI, acmeMap, loadInviteOut,
unsycedAcceptedInviteWithoutClaimOut,
syncConnectionOutWithEndpoint, newKeyringOut):
def _(invitationFile, wallet):
be(aliceCLI)
connectIfNotAlreadyConnected(do, aliceCLI, acmeMap)
walletMapper = {
'wallet-name': wallet
}
do('new wallet {}'.format(wallet),
expect=newKeyringOut,
mapper=walletMapper)
do('load {}'.format(invitationFile),
mapper=acmeMap,
expect=loadInviteOut)
do('sync {inviter}',
mapper=acmeMap,
expect=syncConnectionOutWithEndpoint,
within=15)
do('accept request from {inviter}',
within=15,
mapper=acmeMap,
expect=unsycedAcceptedInviteWithoutClaimOut)
proofRequestsBefore = getProofRequestsCount(
aliceCLI, acmeMap['remote'])
sendProofRequest(be, do, acmeAgentCliRunning, acmeMap)
checkProofRequestReceived(be, do, aliceCLI, acmeMap)
proofRequestsAfter = getProofRequestsCount(aliceCLI, acmeMap['remote'])
return proofRequestsBefore, proofRequestsAfter
return _
def test_acme_cli_send_proof_request(
be, do, acmeAgentCliRunning, aliceCLI, acmeMap,
aliceAcceptedAcmeInvitationNoProofReq):
proofRequestsBefore, proofRequestsAfter = aliceAcceptedAcmeInvitationNoProofReq(
acmeMap['invite-no-pr'], 'aliceNoPR')
assert proofRequestsBefore + 1 == proofRequestsAfter
|
the-stack_0_26146
|
from django.core.files import File
from rest_framework import status
from rest_framework.test import APITestCase
class UploadedFileTests(APITestCase):
def test_file_upload(self):
with open('file_metadata/test_files/test_file.JPG', 'rb') as f:
my_file = File(f)
data = {'file': my_file}
response = self.client.post('/file-metadata/files/', data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['file_size'], my_file.size)
|
the-stack_0_26147
|
#!/bin/python3
import os
# Complete the repeatedString function below.
def repeatedString(s, n):
    # Count the 'a' characters in one copy of s, scale by the number of complete
    # copies that fit into the first n characters, then add the 'a' characters in
    # the leftover partial copy.
    num = 0
    for ch in s:
        if ch == "a":
            num += 1
    num *= n // len(s)
    for ch in s[:n % len(s)]:
        if ch == "a":
            num += 1
    return num
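# Worked example: s = "aba", n = 10 -> "abaabaabaa".
# Complete copies: 10 // 3 = 3, each with 2 'a's -> 6; remainder s[:1] = "a" -> 1 more.
# Total: 7.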
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
n = int(input())
result = repeatedString(s, n)
fptr.write(str(result) + '\n')
fptr.close()
|
the-stack_0_26149
|
from django.urls import path
from .views import (
ItemDetailView,
CheckoutView,
HomeView,
OrderSummaryView,
add_to_cart,
remove_from_cart,
remove_single_item_from_cart,
PaymentView,
AddCouponView,
RequestRefundView
)
app_name = 'core'
urlpatterns = [
path('', HomeView.as_view(), name='home'),
path('checkout/', CheckoutView.as_view(), name='checkout'),
path('order-summary/', OrderSummaryView.as_view(), name='order-summary'),
path('product/<slug>/', ItemDetailView.as_view(), name='product'),
path('add-to-cart/<slug>/', add_to_cart, name='add-to-cart'),
path('add-coupon/', AddCouponView.as_view(), name='add-coupon'),
path('remove-from-cart/<slug>/', remove_from_cart, name='remove-from-cart'),
path('remove-item-from-cart/<slug>/', remove_single_item_from_cart,
name='remove-single-item-from-cart'),
path('payment/<payment_option>/', PaymentView.as_view(), name='payment'),
path('request-refund/', RequestRefundView.as_view(), name='request-refund')
]
|
the-stack_0_26150
|
import asyncio
import errno
import json
import logging
import os
import tarfile
import tempfile
import typing
import warnings
import zipfile
import glob
from asyncio import AbstractEventLoop
from io import BytesIO as IOReader
from pathlib import Path
from typing import Text, Any, Dict, Union, List, Type, Callable
import ruamel.yaml as yaml
from rasa.constants import ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL
if typing.TYPE_CHECKING:
from prompt_toolkit.validation import Validator
DEFAULT_ENCODING = "utf-8"
def configure_colored_logging(loglevel):
import coloredlogs
loglevel = loglevel or os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {}
coloredlogs.install(
level=loglevel,
use_chroot=False,
fmt="%(asctime)s %(levelname)-8s %(name)s - %(message)s",
level_styles=level_styles,
field_styles=field_styles,
)
def enable_async_loop_debugging(
event_loop: AbstractEventLoop, slow_callback_duration: float = 0.1
) -> AbstractEventLoop:
logging.info(
"Enabling coroutine debugging. Loop id {}.".format(id(asyncio.get_event_loop()))
)
# Enable debugging
event_loop.set_debug(True)
# Make the threshold for "slow" tasks very very small for
# illustration. The default is 0.1 (= 100 milliseconds).
event_loop.slow_callback_duration = slow_callback_duration
# Report all mistakes managing asynchronous resources.
warnings.simplefilter("always", ResourceWarning)
return event_loop
def fix_yaml_loader() -> None:
"""Ensure that any string read by yaml is represented as unicode."""
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
return self.construct_scalar(node)
yaml.Loader.add_constructor("tag:yaml.org,2002:str", construct_yaml_str)
yaml.SafeLoader.add_constructor("tag:yaml.org,2002:str", construct_yaml_str)
def replace_environment_variables():
"""Enable yaml loader to process the environment variables in the yaml."""
import re
import os
# eg. ${USER_NAME}, ${PASSWORD}
env_var_pattern = re.compile(r"^(.*)\$\{(.*)\}(.*)$")
yaml.add_implicit_resolver("!env_var", env_var_pattern)
def env_var_constructor(loader, node):
"""Process environment variables found in the YAML."""
value = loader.construct_scalar(node)
expanded_vars = os.path.expandvars(value)
if "$" in expanded_vars:
not_expanded = [w for w in expanded_vars.split() if "$" in w]
raise ValueError(
"Error when trying to expand the environment variables"
" in '{}'. Please make sure to also set these environment"
" variables: '{}'.".format(value, not_expanded)
)
return expanded_vars
yaml.SafeConstructor.add_constructor("!env_var", env_var_constructor)
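# Worked example for the resolver above (assumption: illustrative values): with
# USER_NAME=alice set in the environment, the YAML line `user: ${USER_NAME}` is
# expanded to {"user": "alice"} by read_yaml(); an unset variable raises the
# ValueError from env_var_constructor instead of being silently left in place.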
def read_yaml(content: Text) -> Union[List[Any], Dict[Text, Any]]:
"""Parses yaml from a text.
Args:
content: A text containing yaml content.
"""
fix_yaml_loader()
replace_environment_variables()
yaml_parser = yaml.YAML(typ="safe")
yaml_parser.version = "1.2"
yaml_parser.unicode_supplementary = True
# noinspection PyUnresolvedReferences
try:
return yaml_parser.load(content) or {}
except yaml.scanner.ScannerError:
# A `ruamel.yaml.scanner.ScannerError` might happen due to escaped
# unicode sequences that form surrogate pairs. Try converting the input
# to a parsable format based on
# https://stackoverflow.com/a/52187065/3429596.
content = (
content.encode("utf-8")
.decode("raw_unicode_escape")
.encode("utf-16", "surrogatepass")
.decode("utf-16")
)
return yaml_parser.load(content) or {}
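# Example (assumption: illustrative input): read_yaml("language: en\npipeline: supervised_embeddings")
# returns {"language": "en", "pipeline": "supervised_embeddings"}.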
def read_file(filename: Text, encoding: Text = DEFAULT_ENCODING) -> Any:
"""Read text from a file."""
try:
with open(filename, encoding=encoding) as f:
return f.read()
except FileNotFoundError:
raise ValueError("File '{}' does not exist.".format(filename))
def read_json_file(filename: Text) -> Any:
"""Read json from a file."""
content = read_file(filename)
try:
return json.loads(content)
except ValueError as e:
raise ValueError(
"Failed to read json from '{}'. Error: "
"{}".format(os.path.abspath(filename), e)
)
def dump_obj_as_json_to_file(filename: Text, obj: Any) -> None:
"""Dump an object as a json string to a file."""
write_text_file(json.dumps(obj, indent=2), filename)
def read_config_file(filename: Text) -> Dict[Text, Any]:
"""Parses a yaml configuration file. Content needs to be a dictionary
Args:
filename: The path to the file which should be read.
"""
content = read_yaml(read_file(filename))
if content is None:
return {}
elif isinstance(content, dict):
return content
else:
raise ValueError(
"Tried to load invalid config file '{}'. "
"Expected a key value mapping but found {}"
".".format(filename, type(content))
)
def read_yaml_file(filename: Text) -> Union[List[Any], Dict[Text, Any]]:
"""Parses a yaml file.
Args:
filename: The path to the file which should be read.
"""
return read_yaml(read_file(filename, DEFAULT_ENCODING))
def unarchive(byte_array: bytes, directory: Text) -> Text:
"""Tries to unpack a byte array interpreting it as an archive.
    Tries to use tar first to unpack; if that fails, zip will be used."""
try:
tar = tarfile.open(fileobj=IOReader(byte_array))
tar.extractall(directory)
tar.close()
return directory
except tarfile.TarError:
zip_ref = zipfile.ZipFile(IOReader(byte_array))
zip_ref.extractall(directory)
zip_ref.close()
return directory
def write_yaml_file(data: Dict, filename: Union[Text, Path]) -> None:
"""Writes a yaml file.
Args:
data: The data to write.
filename: The path to the file which should be written.
"""
with open(str(filename), "w", encoding=DEFAULT_ENCODING) as outfile:
yaml.dump(data, outfile, default_flow_style=False)
def write_text_file(
content: Text,
file_path: Union[Text, Path],
encoding: Text = DEFAULT_ENCODING,
append: bool = False,
) -> None:
"""Writes text to a file.
Args:
content: The content to write.
file_path: The path to which the content should be written.
encoding: The encoding which should be used.
append: Whether to append to the file or to truncate the file.
"""
mode = "a" if append else "w"
with open(file_path, mode, encoding=encoding) as file:
file.write(content)
def is_subdirectory(path: Text, potential_parent_directory: Text) -> bool:
    if path is None or potential_parent_directory is None:
        return False
    path = os.path.abspath(path)
    potential_parent_directory = os.path.abspath(potential_parent_directory)
    # Compare path components rather than raw substrings so that e.g.
    # "/data/models-backup" is not treated as a subdirectory of "/data/models".
    return os.path.commonpath([path, potential_parent_directory]) == potential_parent_directory
def create_temporary_file(data: Any, suffix: Text = "", mode: Text = "w+") -> Text:
"""Creates a tempfile.NamedTemporaryFile object for data.
mode defines NamedTemporaryFile's mode parameter in py3."""
encoding = None if "b" in mode else DEFAULT_ENCODING
f = tempfile.NamedTemporaryFile(
mode=mode, suffix=suffix, delete=False, encoding=encoding
)
f.write(data)
f.close()
return f.name
def create_path(file_path: Text) -> None:
"""Makes sure all directories in the 'file_path' exists."""
parent_dir = os.path.dirname(os.path.abspath(file_path))
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
def create_directory_for_file(file_path: Text) -> None:
"""Creates any missing parent directories of this file path."""
create_directory(os.path.dirname(file_path))
def file_type_validator(
valid_file_types: List[Text], error_message: Text
) -> Type["Validator"]:
"""Creates a `Validator` class which can be used with `questionary` to validate
file paths.
"""
def is_valid(path: Text) -> bool:
return path is not None and any(
[path.endswith(file_type) for file_type in valid_file_types]
)
return create_validator(is_valid, error_message)
def not_empty_validator(error_message: Text) -> Type["Validator"]:
"""Creates a `Validator` class which can be used with `questionary` to validate
that the user entered something other than whitespace.
"""
def is_valid(input: Text) -> bool:
return input is not None and input.strip() != ""
return create_validator(is_valid, error_message)
def create_validator(
function: Callable[[Text], bool], error_message: Text
) -> Type["Validator"]:
"""Helper method to create `Validator` classes from callable functions. Should be
removed when questionary supports `Validator` objects."""
from prompt_toolkit.validation import Validator, ValidationError
from prompt_toolkit.document import Document
class FunctionValidator(Validator):
@staticmethod
def validate(document: Document) -> None:
is_valid = function(document.text)
if not is_valid:
raise ValidationError(message=error_message)
return FunctionValidator
def list_files(path: Text) -> List[Text]:
"""Returns all files excluding hidden files.
If the path points to a file, returns the file."""
return [fn for fn in list_directory(path) if os.path.isfile(fn)]
def list_subdirectories(path: Text) -> List[Text]:
"""Returns all folders excluding hidden files.
If the path points to a file, returns an empty list."""
return [fn for fn in glob.glob(os.path.join(path, "*")) if os.path.isdir(fn)]
def _filename_without_prefix(file: Text) -> Text:
"""Splits of a filenames prefix until after the first ``_``."""
return "_".join(file.split("_")[1:])
def list_directory(path: Text) -> List[Text]:
"""Returns all files and folders excluding hidden files.
If the path points to a file, returns the file. This is a recursive
implementation returning files in any depth of the path."""
if not isinstance(path, str):
raise ValueError(
"`resource_name` must be a string type. "
"Got `{}` instead".format(type(path))
)
if os.path.isfile(path):
return [path]
elif os.path.isdir(path):
results = []
for base, dirs, files in os.walk(path):
# sort files for same order across runs
files = sorted(files, key=_filename_without_prefix)
# add not hidden files
good_files = filter(lambda x: not x.startswith("."), files)
results.extend(os.path.join(base, f) for f in good_files)
# add not hidden directories
good_directories = filter(lambda x: not x.startswith("."), dirs)
results.extend(os.path.join(base, f) for f in good_directories)
return results
else:
raise ValueError(
"Could not locate the resource '{}'.".format(os.path.abspath(path))
)
def create_directory(directory_path: Text) -> None:
"""Creates a directory and its super paths.
Succeeds even if the path already exists."""
try:
os.makedirs(directory_path)
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
def zip_folder(folder: Text) -> Text:
"""Create an archive from a folder."""
import tempfile
import shutil
zipped_path = tempfile.NamedTemporaryFile(delete=False)
zipped_path.close()
# WARN: not thread-safe!
return shutil.make_archive(zipped_path.name, str("zip"), folder)
|
the-stack_0_26151
|
import tensorrt as trt
import numpy as np
import os
import cv2
import torch
from efficientdet.scripts.utils import *
#from utils import *
import re
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
def get_engine(model_path: str):
if os.path.exists(model_path) and model_path.endswith('trt'):
print(f"Reading engine from file {model_path}")
with open(model_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
return runtime.deserialize_cuda_engine(f.read())
else:
print(f"FILE: {model_path} not found or extension not supported.")
def preprocess(img, img_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)):
normalized_img = (img / 255 - mean) / std
framed_img, *framed_meta = aspectaware_resize_padding(normalized_img, img_size, img_size)
framed_img = framed_img.transpose(2, 0, 1)
return np.ascontiguousarray(framed_img[np.newaxis, ...]), framed_meta
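# Worked example of the normalisation above (assumption: illustrative pixel value; the
# mean/std tuples appear to be ImageNet statistics in BGR order, matching OpenCV frames):
#   a first-channel (blue) value of 128 maps to (128 / 255 - 0.406) / 0.225 ~= 0.43.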
def postprocess_outputs(pred, anchors, img_size, image, original_img, regressBoxes, clipBoxes, threshold, iou_threshold, framed_meta):
regression = torch.from_numpy(pred[0].reshape(1, -1, 4))
classification = torch.from_numpy(pred[1].reshape(1, -1, 90))
out = postprocess(image, anchors, regression, classification,
regressBoxes, clipBoxes,
threshold, iou_threshold)[0]
out = scale_coords(framed_meta, out)
vis = plot_bbox(out, original_img)
return vis
class EFFICIENTDET:
def __init__(self, model_path='cfg/efficientdet-d0.trt'):
model_type = int(re.search(r'\d+', model_path).group())
self.img_size = 512
self.threshold = 0.2
self.iou_threshold = 0.2
anchor_scale = [4., 4., 4., 4., 4., 4., 4., 5.]
self.regressBoxes = BBoxTransform()
self.clipBoxes = ClipBoxes()
self.anchors = anchors_def(anchor_scale=anchor_scale[model_type])
engine = get_engine(model_path)
self.context = engine.create_execution_context()
self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(engine)
def predict(self, frame):
#frame = cv2.flip(frame, 0)
image, framed_meta = preprocess(frame, self.img_size)
self.inputs[0].host = image
trt_outputs = do_inference_v2(self.context, self.bindings, self.inputs, self.outputs, self.stream)
vis = postprocess_outputs(trt_outputs, self.anchors, self.img_size, image, frame, self.regressBoxes, self.clipBoxes, self.threshold, self.iou_threshold, framed_meta)
return vis
def main():
model_type = 0
model_path = f'cfg/efficientdet-d{model_type}.trt'
img_size = 512
threshold = 0.2
iou_threshold = 0.2
anchor_scale = [4., 4., 4., 4., 4., 4., 4., 5.]
webcam = WebcamStream()
fps = FPS()
regressBoxes = BBoxTransform()
clipBoxes = ClipBoxes()
anchors = anchors_def(anchor_scale=anchor_scale[model_type])
with get_engine(model_path) as engine, engine.create_execution_context() as context:
inputs, outputs, bindings, stream = allocate_buffers(engine)
while True:
fps.start()
frame = webcam.read()
image, framed_meta = preprocess(frame, img_size)
inputs[0].host = image
trt_outputs = do_inference_v2(context, bindings, inputs, outputs, stream)
vis = postprocess_outputs(trt_outputs, anchors, img_size, image, frame, regressBoxes, clipBoxes, threshold, iou_threshold, framed_meta)
fps.stop()
print(fps.get_fps())
cv2.imshow('frame', vis)
if cv2.waitKey(1) == ord("q"):
webcam.stop()
if __name__ == '__main__':
main()
|
the-stack_0_26153
|
"""Deep Q learning graph
The functions in this file are used to create the following functions:
======= act ========
Function to choose an action given an observation
:param observation: (Any) Observation that can be fed into the output of make_obs_ph
:param stochastic: (bool) if set to False all the actions are always deterministic (default True)
:param update_eps_ph: (float) update epsilon to a new value; if negative, no update happens (default: no update)
:return: (TensorFlow Tensor) tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= act (in case of parameter noise) ========
Function to choose an action given an observation
:param observation: (Any) Observation that can be fed into the output of make_obs_ph
:param stochastic: (bool) if set to False all the actions are always deterministic (default True)
:param update_eps_ph: (float) update epsilon to a new value; if negative, no update happens
    (default: no update)
:param reset_ph: (bool) reset the perturbed policy by sampling a new perturbation
:param update_param_noise_threshold_ph: (float) the desired threshold for the difference between
non-perturbed and perturbed policy
:param update_param_noise_scale_ph: (bool) whether or not to update the scale of the noise for the next time it is
re-perturbed
:return: (TensorFlow Tensor) tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
:param obs_t: (Any) a batch of observations
:param action: (numpy int) actions that were selected upon seeing obs_t. dtype must be int32 and shape must be
(batch_size,)
:param reward: (numpy float) immediate reward attained after executing those actions dtype must be float32 and
shape must be (batch_size,)
:param obs_tp1: (Any) observations that followed obs_t
:param done: (numpy bool) 1 if obs_t was the last observation in the episode and 0 otherwise obs_tp1 gets ignored,
but must be of the valid shape. dtype must be float32 and shape must be (batch_size,)
:param weight: (numpy float) importance weights for every element of the batch (gradient is multiplied by the
importance weight) dtype must be float32 and shape must be (batch_size,)
:return: (numpy float) td_error: a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' lags behind Q to stabilize the learning. For example, for Atari,
Q' is set to Q once every 10000 training steps.
"""
import tensorflow as tf
from gym.spaces import MultiDiscrete
from stable_baselines.common import tf_util
def scope_vars(scope, trainable_only=False):
"""
Get variables inside a scope
The scope can be specified as a string
:param scope: (str or VariableScope) scope in which the variables reside.
:param trainable_only: (bool) whether or not to return only the variables that were marked as trainable.
:return: ([TensorFlow Tensor]) vars: list of variables in `scope`.
"""
return tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope if isinstance(scope, str) else scope.name
)
def scope_name():
"""
Returns the name of current scope as a string, e.g. deepq/q_func
:return: (str) the name of current scope
"""
return tf.get_variable_scope().name
def absolute_scope_name(relative_scope_name):
"""
Appends parent scope name to `relative_scope_name`
:return: (str) the absolute name of the scope
"""
return scope_name() + "/" + relative_scope_name
def default_param_noise_filter(var):
"""
    check whether or not a variable is perturbable
    :param var: (TensorFlow Tensor) the variable
    :return: (bool) whether the variable can be perturbed
"""
if var not in tf.trainable_variables():
# We never perturb non-trainable vars.
return False
if "fully_connected" in var.name:
# We perturb fully-connected layers.
return True
# The remaining layers are likely conv or layer norm layers, which we do not wish to
# perturb (in the former case because they only extract features, in the latter case because
# we use them for normalization purposes). If you change your network, you will likely want
# to re-consider which layers to perturb and which to keep untouched.
return False
def build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess):
"""
Creates the act function:
:param q_func: (DQNPolicy) the policy
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param stochastic_ph: (TensorFlow Tensor) the stochastic placeholder
:param update_eps_ph: (TensorFlow Tensor) the update_eps placeholder
:param sess: (TensorFlow session) The current TensorFlow session
:return: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor, (TensorFlow Tensor, TensorFlow Tensor)
        act function to select an action given an observation (see the top of the file for details),
        and a tuple containing the observation placeholder and the processed observation placeholder respectively.
"""
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
policy = q_func(sess, ob_space, ac_space, 1, 1, None)
obs_phs = (policy.obs_ph, policy.processed_x)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
_act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
def act(obs, stochastic=True, update_eps=-1):
return _act(obs, stochastic, update_eps)
return act, obs_phs
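# Usage sketch for build_act (assumption: illustrative only; placeholder and session setup
# is normally handled by build_train / the DQN model class):
#   act, obs_phs = build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess)
#   action = act(obs[None], update_eps=0.1)[0]   # epsilon-greedy action for a single observation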
def build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess,
param_noise_filter_func=None):
"""
Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
:param q_func: (DQNPolicy) the policy
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param stochastic_ph: (TensorFlow Tensor) the stochastic placeholder
:param update_eps_ph: (TensorFlow Tensor) the update_eps placeholder
:param sess: (TensorFlow session) The current TensorFlow session
:param param_noise_filter_func: (function (TensorFlow Tensor): bool) function that decides whether or not a
variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter
is used by default.
:return: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor, (TensorFlow Tensor, TensorFlow Tensor)
        act function to select an action given an observation (see the top of the file for details),
        and a tuple containing the observation placeholder and the processed observation placeholder respectively.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01),
trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05),
trainable=False)
# Unmodified Q.
policy = q_func(sess, ob_space, ac_space, 1, 1, None)
obs_phs = (policy.obs_ph, policy.processed_x)
# Perturbable Q used for the actual rollout.
with tf.variable_scope("perturbed_model", reuse=False):
perturbable_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
def perturb_vars(original_scope, perturbed_scope):
"""
We have to wrap this code into a function due to the way tf.cond() works.
See https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for a more detailed
discussion.
:param original_scope: (str or VariableScope) the original scope.
:param perturbed_scope: (str or VariableScope) the perturbed scope.
:return: (TensorFlow Operation)
"""
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
operation = tf.assign(perturbed_var,
var + tf.random_normal(shape=tf.shape(var), mean=0.,
stddev=param_noise_scale))
else:
# Do not perturb, just assign.
operation = tf.assign(perturbed_var, var)
perturb_ops.append(operation)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
with tf.variable_scope("adaptive_model", reuse=False):
adaptive_policy = q_func(sess, ob_space, ac_space, 1, 1, None, obs_phs=obs_phs)
perturb_for_adaption = perturb_vars(original_scope="model", perturbed_scope="adaptive_model/model")
kl_loss = tf.reduce_sum(
tf.nn.softmax(policy.q_values) *
(tf.log(tf.nn.softmax(policy.q_values)) - tf.log(tf.nn.softmax(adaptive_policy.q_values))),
axis=-1)
mean_kl = tf.reduce_mean(kl_loss)
def update_scale():
"""
update the scale expression
:return: (TensorFlow Tensor) the updated scale expression
"""
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_thres_expr = param_noise_threshold.assign(
tf.cond(update_param_noise_threshold_ph >= 0, lambda: update_param_noise_threshold_ph,
lambda: param_noise_threshold))
# Put everything together.
perturbed_deterministic_actions = tf.argmax(perturbable_policy.q_values, axis=1)
deterministic_actions = tf.argmax(policy.q_values, axis=1)
batch_size = tf.shape(policy.obs_ph)[0]
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=n_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
perturbed_stochastic_actions = tf.where(chose_random, random_actions, perturbed_deterministic_actions)
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
perturbed_output_actions = tf.cond(stochastic_ph, lambda: perturbed_stochastic_actions,
lambda: deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="model", perturbed_scope="perturbed_model/model"),
lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_thres_expr,
]
_act = tf_util.function(inputs=[policy.obs_ph, stochastic_ph, update_eps_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True},
updates=[update_eps_expr])
_perturbed_act = tf_util.function(
inputs=[policy.obs_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph,
update_param_noise_scale_ph],
outputs=perturbed_output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False,
update_param_noise_scale_ph: False},
updates=updates)
def act(obs, reset=None, update_param_noise_threshold=None, update_param_noise_scale=None, stochastic=True,
update_eps=-1):
"""
get the action from the current observation
        :param obs: (Any) Observation that can be fed into the output of make_obs_ph
:param reset: (bool) reset the perturbed policy by sampling a new perturbation
:param update_param_noise_threshold: (float) the desired threshold for the difference between
non-perturbed and perturbed policy
:param update_param_noise_scale: (bool) whether or not to update the scale of the noise for the next time
it is re-perturbed
        :param stochastic: (bool) if set to False all the actions are always deterministic (default True)
        :param update_eps: (float) update epsilon to a new value; if negative, no update happens
            (default: no update)
:return: (TensorFlow Tensor) tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be
performed for every element of the batch.
"""
if reset is None or update_param_noise_threshold is None or update_param_noise_scale is None:
return _act(obs, stochastic, update_eps)
else:
return _perturbed_act(obs, stochastic, update_eps, reset, update_param_noise_threshold,
update_param_noise_scale)
return act, obs_phs
def build_train(q_func, ob_space, ac_space, optimizer, sess, grad_norm_clipping=None, gamma=1.0, double_q=True,
scope="deepq", reuse=None, param_noise=False, param_noise_filter_func=None):
"""
Creates the train function:
:param q_func: (DQNPolicy) the policy
:param ob_space: (Gym Space) The observation space of the environment
:param ac_space: (Gym Space) The action space of the environment
:param optimizer: (tf.train.Optimizer) optimizer to use for the Q-learning objective.
:param sess: (TensorFlow session) The current TensorFlow session
:param grad_norm_clipping: (float) clip gradient norms to this value. If None no clipping is performed.
:param gamma: (float) discount rate.
:param double_q: (bool) if true will use Double Q Learning (https://arxiv.org/abs/1509.06461). In general it is a
good idea to keep it enabled.
:param scope: (str or VariableScope) optional scope for variable_scope.
:param reuse: (bool) whether or not the variables should be reused. To be able to reuse the scope must be given.
:param param_noise: (bool) whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
:param param_noise_filter_func: (function (TensorFlow Tensor): bool) function that decides whether or not a
variable should be perturbed. Only applicable if param_noise is True. If set to None, default_param_noise_filter
is used by default.
:return: (tuple)
        act: (function (TensorFlow Tensor, bool, float): TensorFlow Tensor) function to select an action given an
observation. See the top of the file for details.
train: (function (Any, numpy float, numpy float, Any, numpy bool, numpy float): numpy float)
optimize the error in Bellman's equation. See the top of the file for details.
update_target: (function) copy the parameters from optimized Q function to the target Q function.
See the top of the file for details.
step_model: (DQNPolicy) Policy for evaluation
"""
n_actions = ac_space.nvec if isinstance(ac_space, MultiDiscrete) else ac_space.n
with tf.variable_scope("input", reuse=reuse):
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
with tf.variable_scope(scope, reuse=reuse):
if param_noise:
act_f, obs_phs = build_act_with_param_noise(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess,
param_noise_filter_func=param_noise_filter_func)
else:
act_f, obs_phs = build_act(q_func, ob_space, ac_space, stochastic_ph, update_eps_ph, sess)
# q network evaluation
with tf.variable_scope("step_model", reuse=True, custom_getter=tf_util.outer_scope_getter("step_model")):
step_model = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True, obs_phs=obs_phs)
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + "/model")
# target q network evaluation
with tf.variable_scope("target_q_func", reuse=False):
target_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=False)
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name + "/target_q_func")
# compute estimate of best possible value starting from state at t + 1
double_q_values = None
double_obs_ph = target_policy.obs_ph
if double_q:
with tf.variable_scope("double_q", reuse=True, custom_getter=tf_util.outer_scope_getter("double_q")):
double_policy = q_func(sess, ob_space, ac_space, 1, 1, None, reuse=True)
double_q_values = double_policy.q_values
double_obs_ph = double_policy.obs_ph
with tf.variable_scope("loss", reuse=reuse):
# set up placeholders
act_t_ph = tf.placeholder(tf.int32, [None], name="action")
rew_t_ph = tf.placeholder(tf.float32, [None], name="reward")
done_mask_ph = tf.placeholder(tf.float32, [None], name="done")
importance_weights_ph = tf.placeholder(tf.float32, [None], name="weight")
# q scores for actions which we know were selected in the given state.
q_t_selected = tf.reduce_sum(step_model.q_values * tf.one_hot(act_t_ph, n_actions), axis=1)
# compute estimate of best possible value starting from state at t + 1
if double_q:
q_tp1_best_using_online_net = tf.argmax(double_q_values, axis=1)
q_tp1_best = tf.reduce_sum(target_policy.q_values * tf.one_hot(q_tp1_best_using_online_net, n_actions), axis=1)
else:
q_tp1_best = tf.reduce_max(target_policy.q_values, axis=1)
q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked
# compute the error (potentially clipped)
td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
errors = tf_util.huber_loss(td_error)
weighted_error = tf.reduce_mean(importance_weights_ph * errors)
tf.summary.scalar("td_error", tf.reduce_mean(td_error))
tf.summary.histogram("td_error", td_error)
tf.summary.scalar("loss", weighted_error)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_expr = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_expr.append(var_target.assign(var))
update_target_expr = tf.group(*update_target_expr)
# compute optimization op (potentially with gradient clipping)
gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)
if grad_norm_clipping is not None:
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(rew_t_ph))
tf.summary.histogram('rewards', rew_t_ph)
tf.summary.scalar('importance_weights', tf.reduce_mean(importance_weights_ph))
tf.summary.histogram('importance_weights', importance_weights_ph)
if len(obs_phs[0].shape) == 3:
tf.summary.image('observation', obs_phs[0])
else:
tf.summary.histogram('observation', obs_phs[0])
optimize_expr = optimizer.apply_gradients(gradients)
summary = tf.summary.merge_all()
# Create callable functions
train = tf_util.function(
inputs=[
obs_phs[0],
act_t_ph,
rew_t_ph,
target_policy.obs_ph,
double_obs_ph,
done_mask_ph,
importance_weights_ph
],
outputs=[summary, td_error],
updates=[optimize_expr]
)
update_target = tf_util.function([], [], updates=[update_target_expr])
return act_f, train, update_target, step_model
|
the-stack_0_26155
|
# coding: utf-8
# In[150]:
import requests
import time
import json
import csv
from datetime import datetime, date, time, timedelta
import random
# In[151]:
def get_id_list_from_file(filename):
with open(filename,'r') as file:
id_list = json.load(file)
return id_list
# In[152]:
sci_id_list = get_id_list_from_file('IDs_SCI.json')
mys_id_list = get_id_list_from_file('IDs_MYS.json')
# In[153]:
token = '988386c4311c0661e1fea9e2d88e3c3d69ba9f777819194dae8e5bb7b50579239850bece14d1a42bd613f&expires_in=86400&user_id=17159132'
link = 'https://oauth.vk.com/authorize?client_id=6306832&display=page&redirect_uri=https://oauth.vk.com/blank.html&scope=friends&response_type=token'
# In[154]:
def get_members_id_list(owner_id):
members_id_list = []
#first iteration for first 25k chunk
try:
r = requests.post('https://api.vk.com/method/execute.aa04201d?group_id='+
str(owner_id)+'&offset='+str(0)+'&count='+str(25000)+'&access_token='+token).json()['response']
except KeyError as e:
print(r)
members_count = r[0] #number of total members
print('Community: ', owner_id, ', members: ',members_count)
members_id_list.extend(r[1]) #extend for a chunk of 25k
if members_count > 25000:
print('Community members > 25000. Starting loop..')
for offset in range(25000, members_count, 25000):
try:
r = requests.post('https://api.vk.com/method/execute.aa04201d?group_id='+
str(owner_id)+'&offset='+str(offset)+'&count='+str(25000)+'&access_token='+token).json()['response']
except KeyError as e:
print(r)
members_id_list.extend(r[1])
#t.sleep(.35)
print('ID collected for community successfully: ')
else:
print('ID collected for community(<25k) successfully: ')
return members_id_list
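# Worked example of the paging above (assumption: illustrative): a community with
# 60,000 members is fetched at offsets 0, 25,000 and 50,000 (25,000 ids per request,
# with the final request returning the remaining 10,000).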
# In[155]:
def dump_all(foldername,id_list):
for item in id_list:
filename = foldername+'/'+item+'/'+'MEM_'+item+'.json'
with open(filename,'w') as file:
data = get_members_id_list(-int(item))
json.dump(data,file,indent=2,ensure_ascii=False)
file.close()
# In[156]:
dump_all('SCI',sci_id_list)
|
the-stack_0_26158
|
import argparse
import math
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.losses import BinaryCrossentropy, CategoricalCrossentropy
from tensorflow_addons.metrics import F1Score
from transformers import TFGPT2Model
from typing import List
class BinaryF1Score(tf.keras.metrics.Metric):
def __init__(self):
super(BinaryF1Score, self).__init__(name='binary_f1_score')
self._precision = tf.keras.metrics.Precision(thresholds=0.5)
self._recall = tf.keras.metrics.Recall(thresholds=0.5)
def update_state(self, y_true, y_pred, sample_weight=None):
self._precision.update_state(y_true, y_pred)
self._recall.update_state(y_true, y_pred)
def result(self):
val_precision = self._precision.result()
val_recall = self._recall.result()
return 2 * tf.math.divide_no_nan((val_recall * val_precision), (val_recall + val_precision))
def reset_states(self):
self._precision.reset_states()
self._recall.reset_states()
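# Worked example of the F1 combination above (assumption: illustrative values):
#   precision = 0.8, recall = 0.6  ->  F1 = 2 * (0.8 * 0.6) / (0.8 + 0.6) = 0.96 / 1.4 ~= 0.686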
def get_dataset_from_recorder_generic(path_local: List[str], batch_size: int, feature_description):
raw_dataset = tf.data.TFRecordDataset(path_local)
def _parse_function(example_proto):
example = tf.io.parse_single_example(example_proto, feature_description)
return example['inputs'], example['labels']
parsed_dataset = raw_dataset.map(_parse_function)
parsed_dataset = parsed_dataset.shuffle(10_000).repeat().batch(batch_size, drop_remainder=True)
return parsed_dataset
def get_dataset_from_recorder_binary(path_local: List[str], batch_size: int, block_size: int):
feature_description = {
'inputs': tf.io.FixedLenFeature([block_size], tf.int64, default_value=[0] * block_size),
'labels': tf.io.FixedLenFeature([1], tf.int64, default_value=0)
}
return get_dataset_from_recorder_generic(path_local, batch_size, feature_description)
def get_dataset_from_recorder_multi_class(path_local: List[str], batch_size: int, block_size: int):
num_category = 5
feature_description = {
'inputs': tf.io.FixedLenFeature([block_size], tf.int64, default_value=[0] * block_size),
'labels': tf.io.FixedLenFeature([num_category], tf.int64, default_value=[0] * num_category)
}
return get_dataset_from_recorder_generic(path_local, batch_size, feature_description)
def get_model(path_pretrained: str, block_size: int, batch_size: int, number_category: int):
gpt2 = TFGPT2Model.from_pretrained(path_pretrained)
input_layer = tf.keras.layers.Input(shape=block_size, batch_size=batch_size, dtype=tf.int32)
embeddings = gpt2.layers[0](input_layer).last_hidden_state
docs_embedding = tf.keras.layers.Lambda(lambda x: tf.math.reduce_mean(x, axis=1))(embeddings)
if number_category == 2:
output_layer = tf.keras.layers.Dense(units=1, activation='sigmoid')(docs_embedding)
else:
output_layer = tf.keras.layers.Dense(units=number_category, activation='softmax')(docs_embedding)
model = tf.keras.models.Model(inputs=[input_layer], outputs=[output_layer])
return model
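# Shape sketch for the classification head above (assumption: illustrative; the hidden size
# depends on the checkpoint, e.g. 768 for a base-sized GPT-2):
#   input_layer (batch, block_size) -> last_hidden_state (batch, block_size, hidden)
#   mean over the sequence axis     -> docs_embedding    (batch, hidden)
#   dense layer                     -> (batch, 1) with sigmoid, or (batch, number_category) with softmax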
def get_callbacks(val_monitor: str):
early_stop = EarlyStopping(monitor=val_monitor, mode='max', patience=4, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor=val_monitor, mode='max', patience=2, min_lr=1e-6, min_delta=5e-3,
factor=1 / math.exp(1))
return [early_stop, reduce_lr]
def log_result(name_log_file: str, history_train, evaluate):
with open(f'log/{name_log_file}.txt', 'w+') as output_file:
for metric, values in history_train.history.items():
output_file.write(f'{metric}: {values} \n')
for msg, value in evaluate.items():
output_file.write(f'{msg}: {value}\n')
def run_task(tpu_strategy, block_size: int, task: str):
# replace with your google cloud storage address
path_ds_train = f'gs://pub-mihai-niculescu-gpt2/eval/laroseda/{task}/train'
path_ds_dev = f'gs://pub-mihai-niculescu-gpt2/eval/laroseda/{task}/dev'
path_ds_test = f'gs://pub-mihai-niculescu-gpt2/eval/laroseda/{task}/test'
total_size_train = 10798
total_size_dev = 1202
total_size_test = 3000
monitor_val = 'val_binary_f1_score' if task == 'binary' else 'val_f1_score'
num_category = 2 if task == 'binary' else 5
for path_model, info in {
'../../../model/models/base': {'batch_size': 144, 'epochs': 30},
'../../../model/models/medium': {'batch_size': 64, 'epochs': 30},
'../../../model/models/large': {'batch_size': 40, 'epochs': 30}
}.items():
batch_size = info['batch_size']
name_model = path_model.split('/')[-1]
get_dataset = get_dataset_from_recorder_binary if task == 'binary' else get_dataset_from_recorder_multi_class
files_train = tf.io.gfile.glob(f'{path_ds_train}/*.tfrecord')
files_dev = tf.io.gfile.glob(f'{path_ds_dev}/*.tfrecord')
files_test = tf.io.gfile.glob(f'{path_ds_test}/*.tfrecord')
ds_train = get_dataset(files_train, batch_size, block_size)
ds_dev = get_dataset(files_dev, batch_size, block_size)
ds_test = get_dataset(files_test, batch_size, block_size)
with tpu_strategy.scope():
model = get_model(path_model, block_size, batch_size, number_category=num_category)
loss = BinaryCrossentropy() if task == 'binary' else CategoricalCrossentropy()
f1_score = BinaryF1Score() if task == 'binary' else F1Score(num_classes=num_category, average='macro')
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-4),
loss=loss,
metrics=[f1_score, 'accuracy']
)
callbacks = get_callbacks(monitor_val)
history = model.fit(
ds_train, epochs=info['epochs'], steps_per_epoch=total_size_train // batch_size,
validation_data=ds_dev, validation_steps=total_size_dev // batch_size,
callbacks=callbacks
)
_, f1_score_dev, accuracy_dev = model.evaluate(ds_dev, steps=total_size_dev // batch_size)
_, f1_score_test, accuracy_test = model.evaluate(ds_test, steps=total_size_test // batch_size)
model.save(f'../../../model/evaluation/laroseda/{name_model}/{name_model}-{task}.h5')
log_result(f'{name_model}-{task}', history, {
'Accuracy Dev': accuracy_dev, 'F1 score Dev': f1_score_dev,
'Accuracy Test': accuracy_test, 'F1 score Test': f1_score_test
})
del model
del ds_train, ds_dev, ds_test
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tpu_name', type=str, required=True, help='Name of tpu for training')
args = parser.parse_args()
block_size = 128
tpu_name = args.tpu_name
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=tpu_name)
tf.config.experimental_connect_to_cluster(cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
tpu_strategy = tf.distribute.TPUStrategy(cluster_resolver)
run_task(tpu_strategy, block_size, 'binary')
run_task(tpu_strategy, block_size, 'multi')
|
the-stack_0_26159
|
#!/usr/bin/python
#
# (c) 2018, Yanis Guenane <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_network_facts
deprecated:
removed_in: '2.13'
why: Deprecated in favour of C(_info) module.
alternative: Use M(vultr_network_info) instead.
short_description: Gather facts about the Vultr networks available.
description:
- Gather facts about networks available in Vultr.
author: "Yanis Guenane (@Spredzy)"
extends_documentation_fragment:
- community.general.vultr
'''
EXAMPLES = r'''
- name: Gather Vultr networks facts
local_action:
module: vultr_network_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_network_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_network_facts:
description: Response from Vultr API
returned: success
type: complex
sample:
"vultr_network_facts": [
{
"date_created": "2018-08-02 11:18:49",
"id": "net5b62e8991adfg",
"name": "mynet",
"region": "Amsterdam",
"v4_subnet": "192.168.42.0",
"v4_subnet_mask": 24
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrNetworkFacts(Vultr):
def __init__(self, module):
super(AnsibleVultrNetworkFacts, self).__init__(module, "vultr_network_facts")
self.returns = {
'DCID': dict(key='region', transform=self._get_region_name),
'NETWORKID': dict(key='id'),
'date_created': dict(),
'description': dict(key='name'),
'v4_subnet': dict(),
'v4_subnet_mask': dict(convert_to='int'),
}
def _get_region_name(self, region):
return self.query_resource_by_key(
key='DCID',
value=region,
resource='regions',
use_cache=True
)['name']
def get_networks(self):
return self.api_query(path="/v1/network/list")
def parse_network_list(network_list):
if isinstance(network_list, list):
return []
return [network for id, network in network_list.items()]
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
network_facts = AnsibleVultrNetworkFacts(module)
result = network_facts.get_result(parse_network_list(network_facts.get_networks()))
ansible_facts = {
'vultr_network_facts': result['vultr_network_facts']
}
module.exit_json(ansible_facts=ansible_facts, **result)
if __name__ == '__main__':
main()
|
the-stack_0_26160
|
from __future__ import print_function, absolute_import, division
import os
import subprocess
import SCons
import glob
import re
import sys
#===========================================================
# The first 4 functions provide for building a library,
# program, multiple-programs, or plugin from all the source
#
# The next section contains useful utility functions.
#
# The functions that follow in the final section add support
# for various packages (e.g. ROOT, Xerces, ...)
#===========================================================
##################################
# library
##################################
def library(env, libname=''):
# Library name comes from directory name
if libname=='':
libname = os.path.split(os.getcwd())[1]
env.PrependUnique(CPPPATH = ['.']) # Relative imports
env.PrependUnique(CPPPATH = ['..']) # Absolute imports prefixed with library name, e.g. #include<JANA/JObject.h>
# Add C/C++, and FORTRAN targets
env.AppendUnique(ALL_SOURCES = env.Glob('*.c'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.cc'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.cpp'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.F'))
sources = env['ALL_SOURCES']
# Build static library from all source
myobjs = env.Object(sources)
mylib = env.Library(target = libname, source = myobjs)
# Cleaning and installation are restricted to the directory
# scons was launched from or its descendents
CurrentDir = env.Dir('.').srcnode().abspath
if not CurrentDir.startswith(env.GetLaunchDir()):
# Not in launch directory. Tell scons not to clean these targets
env.NoClean([myobjs, mylib])
else:
# We're in launch directory (or descendent) schedule installation
# Installation directories for library and headers
installdir = env.subst('$INSTALLDIR')
includedir = "%s/%s" %(env.subst('$INCDIR'), libname)
libdir = env.subst('$LIBDIR')
# Install targets
env.Install(libdir, mylib)
env.Install(includedir, env.Glob('*.h*'))
##################################
# executable
##################################
def executable(env, exename=''):
# Executable name comes from directory name
if exename=='':
exename = os.path.split(os.getcwd())[1]
env.PrependUnique(CPPPATH = ['.']) # Relative imports
env.PrependUnique(CPPPATH = ['#src/lib']) # Absolute imports, e.g. <JANA/JApplication.h>
# Add C/C++, and FORTRAN targets
env.AppendUnique(ALL_SOURCES = env.Glob('*.c'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.cc'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.cpp'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.F'))
# Push commonly used libraries to end of list
ReorderCommonLibraries(env)
sources = env['ALL_SOURCES']
# Build program from all source
myobjs = env.Object(sources)
myexe = env.Program(target = exename, source = myobjs)
# Cleaning and installation are restricted to the directory
# scons was launched from or its descendents
CurrentDir = env.Dir('.').srcnode().abspath
if not CurrentDir.startswith(env.GetLaunchDir()):
# Not in launch directory. Tell scons not to clean these targets
env.NoClean([myobjs, myexe])
else:
# We're in launch directory (or descendent) schedule installation
# Installation directories for executable and headers
installdir = env.subst('$INSTALLDIR')
includedir = env.subst('$INCDIR')
bindir = env.subst('$BINDIR')
# Install targets
env.Install(bindir, myexe)
##################################
# executables
##################################
def executables(env):
# This will generate multiple executables from the
# source in the current directory. It does this
# by identifying source files that define "main()"
# and linking those with all source files that do not
# define "main()". Program names are based on the
# filename of the source file defining "main()"
main_sources = []
common_sources = []
curpath = os.getcwd()
srcpath = env.Dir('.').srcnode().abspath
os.chdir(srcpath)
files = glob.glob('*.c') + glob.glob('*.cc') + glob.glob('*.cpp')
for f in files:
if 'main(' in open(f).read():
main_sources.append(f)
else:
common_sources.append(f)
for f in glob.glob('*.F'):
if ' PROGRAM ' in open(f).read():
main_sources.append(f)
else:
common_sources.append(f)
os.chdir(curpath)
env.PrependUnique(CPPPATH = ['.'])
# Push commonly used libraries to end of list
ReorderCommonLibraries(env)
common_sources.extend(env['ALL_SOURCES'])
# Build program from all source
main_objs = env.Object(main_sources)
common_objs = env.Object(common_sources)
progs = []
for obj in main_objs:
        exename = re.sub(r'\.o$', '', str(obj)) # strip off ".o" from object file name
progs.append(env.Program(target = exename, source = [obj, common_objs]))
# Cleaning and installation are restricted to the directory
# scons was launched from or its descendents
CurrentDir = env.Dir('.').srcnode().abspath
if not CurrentDir.startswith(env.GetLaunchDir()):
# Not in launch directory. Tell scons not to clean these targets
env.NoClean([common_objs, main_objs, progs])
else:
# We're in launch directory (or descendent) schedule installation
bindir = env.subst('$BINDIR')
env.Install(bindir, progs)
##################################
# plugin
##################################
def plugin(env, pluginname=''):
# Library name comes from directory name
if pluginname=='':
pluginname = os.path.split(os.getcwd())[1]
srcdir = str(env.Dir('.').srcnode().path)
env.AppendUnique(CPPPATH = ['.', '#src/lib/JANA', '#src/lib'])
# Add C/C++ targets
env.AppendUnique(ALL_SOURCES = env.Glob('*.c'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.cc'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.cpp'))
env.AppendUnique(ALL_SOURCES = env.Glob('*.F'))
sources = env['ALL_SOURCES']
# Build static library from all source
myobjs = env.SharedObject(sources)
myplugin = env.SharedLibrary(target = pluginname, source = myobjs, SHLIBPREFIX='', SHLIBSUFFIX='.so')
# Cleaning and installation are restricted to the directory
# scons was launched from or its descendents
CurrentDir = env.Dir('.').srcnode().abspath
if not CurrentDir.startswith(env.GetLaunchDir()):
# Not in launch directory. Tell scons not to clean these targets
env.NoClean([myobjs, myplugin])
else:
# We're in launch directory (or descendent) schedule installation
# Installation directories for plugin and headers
installdir = env.subst('$INSTALLDIR')
includedir = "%s/%s" %(env.subst('$INCDIR'), pluginname)
pluginsdir = env.subst('$PLUGINSDIR')
# Install targets
installed = env.Install(pluginsdir, myplugin)
env.Install(includedir, env.Glob('*.h*'))
#===========================================================
# Misc utility routines for the SBMS system
#===========================================================
##################################
# AddCompileFlags
##################################
def AddCompileFlags(env, allflags):
# The allflags parameter should be a string containing all
# of the link flags (e.g. what is returned by root-config --cflags)
# It is split on white space and the parameters sorted into
# the 2 lists: ccflags, cpppath
ccflags = []
cpppath = []
for f in allflags.split():
if f.startswith('-I'):
cpppath.append(f[2:])
else:
ccflags.append(f)
if len(ccflags)>0 :
env.AppendUnique(CCFLAGS=ccflags)
if len(cpppath)>0 :
env.AppendUnique(CPPPATH=cpppath)
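# Worked example (assumption: illustrative flags):
#   AddCompileFlags(env, '-I/usr/include/root -std=c++11')
#   -> CPPPATH += ['/usr/include/root'], CCFLAGS += ['-std=c++11']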
##################################
# AddLinkFlags
##################################
def AddLinkFlags(env, allflags):
# The allflags parameter should be a string containing all
# of the link flags (e.g. what is returned by root-config --glibs)
# It is split on white space and the parameters sorted into
# the 3 lists: linkflags, libpath, and libs
linkflags = []
libpath = []
libs = []
for f in allflags.split():
if f.startswith('-L'):
libpath.append(f[2:])
elif f.startswith('-l'):
libs.append(f[2:])
else:
linkflags.append(f)
if len(linkflags)>0 :
env.AppendUnique(LINKFLAGS=linkflags)
if len(libpath)>0 :
env.AppendUnique(LIBPATH=libpath)
if len(libs)>0 :
env.AppendUnique(LIBS=libs)
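# Worked example (assumption: illustrative flags):
#   AddLinkFlags(env, '-L/usr/local/lib -lxerces-c -rdynamic')
#   -> LIBPATH += ['/usr/local/lib'], LIBS += ['xerces-c'], LINKFLAGS += ['-rdynamic']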
##################################
# ReorderCommonLibraries
##################################
def ReorderCommonLibraries(env):
# Some common libraries are often added by multiple packages
# (e.g. libz is one that many packages use). The gcc4.8.0
# compiler that comes with Ubuntu13.10 seems particularly
# sensitive to the ordering of the libraries. This means if
# one package "AppendUnique"s the "z" library, it may appear
# too early in the link command for another library that needs
# it, even though the second library tries appending it at the
# end. This routine looks for some commonly used libraries
# in the LIBS variable of the given environment and moves them
# to the end of the list.
# If LIBS is not set or is a simple string, return now
if type(env['LIBS']) is not list: return
# If any of the following are in LIBS, they will be moved
# to the back of LIBS maintaining the order in this list
libs = ['ccdb', 'mysql', 'xerces-c','z', 'bz2', 'pthread', 'm', 'dl']
for lib in libs:
if lib in env['LIBS']:
env['LIBS'].remove(lib)
env.Append(LIBS=[lib])
##################################
# ApplyPlatformSpecificSettings
##################################
def ApplyPlatformSpecificSettings(env, platform):
# Look for SBMS file based on this platform and run the InitENV
# function in it to allow for platform-specific settings. Normally,
# the BMS_OSNAME will be passed in which almost certainly contains
# "."s. The Python module loader doesn't like these and we have to
# replace them with "-"s to appease it.
    platform = re.sub(r'\.', '-', str(platform))
modname = "sbms_%s" % platform
if (int(env['SHOWBUILD']) > 0):
print("looking for %s.py" % modname)
try:
InitENV = getattr(__import__(modname), "InitENV")
# Run the InitENV function (if found)
if(InitENV != None):
print("sbms : Applying settings for platform %s" % platform)
InitENV(env)
except ImportError as e:
if (int(env['SHOWBUILD']) > 0): print("%s" % e)
pass
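# Example of the lookup above (assumption: illustrative OS name): a BMS_OSNAME of
# 'Linux_CentOS7-x86_64-gcc4.8.5' is rewritten to 'Linux_CentOS7-x86_64-gcc4-8-5' and
# the settings module 'sbms_Linux_CentOS7-x86_64-gcc4-8-5.py' is imported if present.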
##################################
# OptionallyBuild
##################################
def OptionallyBuild(env, dirs):
# This is used to add directories that are not built as
# part of the standard build, but can still be added
# to the dependency tree so that the user can build them
# by either invoking scons from within the specific source
# directory or by specifying it on the command line.
#
#
subdirs = []
for dir in dirs:
add_dir = False
if env.GetLaunchDir().endswith(dir): add_dir = True
#if dir in env['COMMAND_LINE_TARGETS']: add_dir = True
for target in env['COMMAND_LINE_TARGETS']:
if target.endswith(dir): add_dir = True
if add_dir : subdirs.extend([dir])
if len(subdirs)>0 : env.SConscript(dirs=subdirs, exports='env', duplicate=0)
##################################
# TestCompile
##################################
def TestCompile(env, name, includes, content, options):
# This provides an autoconf-like method to test compilation
# of a C++ program to see which arguments are needed to get it
# to compile and link. The arguments are:
# env - build environment
# name - name of test (used to make unique filenames)
# includes - list of header files to be #included in test program
# content - content of test program (n.b. this is what gets placed
# inside of "main()" and before the return statement)
# options - list of different argument lists that should be tried
# to see which results in a successful compilation/link.
# The first to succeed is returned (as a list, not a single
# string). If none succeed, then a Python "None" value is
# returned. Note that each element of the list is itself a
# string that may contain many arguments, separated by spaces.
#
# n.b. if either the m32 or m64 flags are set by the user
# via the command line then "-m32" or "-m64" are added to the
# compile command. Otherwise, nothing is added and the default
# bitness is used.
ifname = '%s' % env.File('.%s_%s.cc' % (env['OSNAME'], name))
ofname = '%s' % env.File('.%s_%s' % (env['OSNAME'], name))
f = open(ifname, 'w')
for header in includes: f.write('#include<%s>\n' % header)
f.write('int main(int n, char*argv[]){%s;return 0;}\n' % content)
f.close()
args = [env['CXX'], '-o', ofname]
if (env['BITNESS32']!=0) : args.append('-m32')
if (env['BITNESS64']!=0) : args.append('-m64')
args.append(ifname)
ret = None
for opt in options:
myargs = opt.split()
if(env['SHOWBUILD'] >0):
print('Test compiling %s:' % name)
print(args + myargs)
res = subprocess.call(args + myargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res==0:
if(env['SHOWBUILD'] >0): print('---Succeeded')
ret = myargs
break
else:
if(env['SHOWBUILD'] >1):
print('----Failed. Test file content was:------')
print(open(ifname).read())
print('----------------------------------------')
if os.path.exists(ifname): os.unlink(ifname)
if os.path.exists(ofname): os.unlink(ofname)
return ret
#===========================================================
# Package support follows
#===========================================================
##################################
# pthread
##################################
def Add_pthread(env):
includes = ['pthread.h']
content = 'pthread_create(NULL, NULL, NULL, NULL);'
if(TestCompile(env, 'pthread', includes, content, ['']) == None):
if(TestCompile(env, 'pthread', includes, content, ['-pthread']) != None):
env.AppendUnique(LINKFLAGS=['-pthread'])
else:
if(TestCompile(env, 'pthread', includes, content, ['-lpthread']) != None):
env.AppendUnique(LIBS=['pthread'])
##################################
# NUMA
##################################
def CheckForNUMA(env):
includes = ['numa.h']
content = 'numa_available();'
libs = ['-lnuma']
if TestCompile(env, 'numa', includes, content, libs) != None:
env['HAVE_NUMA'] = 1
else:
env['HAVE_NUMA'] = 0
def AddNUMA(env):
if 'HAVE_NUMA' not in env:
CheckForNUMA(env)
if env['HAVE_NUMA'] == 1:
env.AppendUnique(LIBS = ['numa'])
##################################
# JANA
##################################
def AddJANA(env):
AddXERCES(env)
AddCCDB(env)
AddNUMA(env)
env.AppendUnique(LIBS=['JANA','dl'])
##################################
# JANAInstalled (use an already installed jana-config file)
##################################
def AddJANAInstalled(env):
jana_home = os.getenv('JANA_HOME')
if(jana_home != None):
JANA_CFLAGS = subprocess.Popen(["%s/bin/jana-config" % jana_home, "--cflags"], stdout=subprocess.PIPE).communicate()[0]
JANA_LINKFLAGS = subprocess.Popen(["%s/bin/jana-config" % jana_home, "--libs"], stdout=subprocess.PIPE).communicate()[0]
JANA_CFLAGS = JANA_CFLAGS.decode('utf-8')
JANA_LINKFLAGS = JANA_LINKFLAGS.decode('utf-8')
AddCompileFlags(env, JANA_CFLAGS)
AddLinkFlags(env, JANA_LINKFLAGS)
##################################
# HDDS
##################################
def AddHDDS(env):
hdds_home = os.getenv('HDDS_HOME', 'hdds')
env.AppendUnique(CPPPATH = ["%s/src" % hdds_home])
env.AppendUnique(LIBPATH = ["%s/lib/%s" % (hdds_home, env['OSNAME'])])
##################################
# HDDM
##################################
def AddHDDM(env):
env.AppendUnique(LIBS = 'HDDM')
##################################
# EVIO
##################################
def AddEVIO(env):
evioroot = os.getenv('EVIOROOT', 'evio')
env.AppendUnique(CPPPATH = ['%s/include' % evioroot])
env.AppendUnique(LIBPATH = ['%s/lib' % evioroot])
env.AppendUnique(LIBS=['evioxx', 'evio'])
AddET(env)
##################################
# ET
##################################
def AddET(env):
# Only add ET if ETROOT is set
etroot = os.getenv('ETROOT', 'none')
if(etroot != 'none') :
env.AppendUnique(CXXFLAGS = ['-DHAVE_ET'])
env.AppendUnique(CPPPATH = ['%s/include' % etroot])
env.AppendUnique(LIBPATH = ['%s/lib' % etroot])
env.AppendUnique(LIBS=['et_remote', 'et'])
##################################
# CMSG
##################################
def AddCMSG(env):
# Only add cMsg if CMSGROOT is set
cmsgroot = os.getenv('CMSGROOT', 'none')
if(cmsgroot != 'none') :
env.AppendUnique(CXXFLAGS = ['-DHAVE_CMSG'])
env.AppendUnique(CPPPATH = ['%s/include' % cmsgroot])
env.AppendUnique(LIBPATH = ['%s/lib' % cmsgroot])
env.AppendUnique(LIBS=['cmsgxx', 'cmsg', 'cmsgRegex'])
##################################
# xstream
##################################
def Add_xstream(env):
env.AppendUnique(CPPPATH = ['#external/xstream/include'])
env.AppendUnique(CCFLAGS = ['-fPIC'])
env.AppendUnique(LIBS=['xstream', 'bz2', 'z'])
##################################
# CCDB
##################################
def AddCCDB(env):
ccdb_home = os.getenv('CCDB_HOME', 'none')
if(ccdb_home != 'none'):
CCDB_CPPPATH = "%s/include" % (ccdb_home)
CCDB_LIBPATH = "%s/lib" % (ccdb_home)
CCDB_LIBS = "ccdb"
env.AppendUnique(CPPPATH = [CCDB_CPPPATH])
env.AppendUnique(LIBPATH = [CCDB_LIBPATH])
env.AppendUnique(LIBS = [CCDB_LIBS])
##################################
# Xerces
##################################
def AddXERCES(env):
# This relies on sbms_config.py::mk_jana_config_h having been run
try: # (need try block in case HAVE_XERCES is not defined in env)
if env['HAVE_XERCES']==1:
XERCES_LIBS = "xerces-c"
env.AppendUnique(LIBS = [XERCES_LIBS])
xercescroot = os.getenv('XERCESCROOT')
if(xercescroot != None):
XERCES_CPPPATH = "%s/include" % (xercescroot)
XERCES_LIBPATH = "%s/lib" % (xercescroot)
env.AppendUnique(CPPPATH = [XERCES_CPPPATH])
env.AppendUnique(LIBPATH = [XERCES_LIBPATH])
except:
env['HAVE_XERCES']=0 # would usually get here if env['HAVE_XERCES'] is not defined
##################################
# CERNLIB
##################################
def AddCERNLIB(env):
env.PrependUnique(FORTRANFLAGS = ['-ffixed-line-length-0', '-fno-second-underscore'])
env.PrependUnique(FORTRANFLAGS = ['-fno-automatic'])
env.PrependUnique(FORTRANPATH = ['include'])
cern = os.getenv('CERN', '/usr/local/cern/PRO')
cern_level = os.getenv('CERN_LEVEL', '2006')
cern_root = '%s/%s' % (cern, cern_level)
CERN_FORTRANPATH = "%s/include" % cern_root
CERN_LIBPATH = "%s/lib" % cern_root
env.AppendUnique(FORTRANPATH = [CERN_FORTRANPATH])
env.AppendUnique(CPPPATH = CERN_FORTRANPATH)
env.AppendUnique(LIBPATH = CERN_LIBPATH)
env.AppendUnique(LINKFLAGS = ['-rdynamic'])
env.AppendUnique(LIBS = ['gfortran', 'geant321', 'pawlib', 'lapack3', 'blas', 'graflib', 'grafX11', 'packlib', 'mathlib', 'kernlib', 'X11', 'nsl', 'crypt', 'dl'])
env.SetOption('warn', 'no-fortran-cxx-mix') # suppress warnings about linking fortran with c++
##################################
# ROOT
##################################
def AddROOT(env):
#
# Here we use the root-config program to give us the compiler
# and linker options needed for ROOT. We use the AddCompileFlags()
# and AddLinkFlags() routines (defined below) to split the arguments
# into the categories scons wants. E.g. scons wants to know the
# search path and basenames for libraries rather than just giving it
# the full compiler options like "-L/path/to/lib -lmylib".
#
# We also create a builder for ROOT dictionaries and add targets to
# build dictionaries for any headers with "ClassDef" in them.
rootsys = os.getenv('ROOTSYS')
if rootsys != None:
ROOT_CFLAGS = subprocess.Popen(["%s/bin/root-config" % rootsys, "--cflags"], stdout=subprocess.PIPE).communicate()[0]
ROOT_LINKFLAGS = subprocess.Popen(["%s/bin/root-config" % rootsys, "--glibs"], stdout=subprocess.PIPE).communicate()[0]
ROOT_CFLAGS = ROOT_CFLAGS.decode('utf-8')
ROOT_LINKFLAGS = ROOT_LINKFLAGS.decode('utf-8')
AddCompileFlags(env, ROOT_CFLAGS)
AddLinkFlags(env, ROOT_LINKFLAGS)
env.AppendUnique(LIBS = "Geom")
if os.getenv('LD_LIBRARY_PATH' ) != None : env.Append(LD_LIBRARY_PATH = os.environ['LD_LIBRARY_PATH' ])
if os.getenv('DYLD_LIBRARY_PATH') != None : env.Append(DYLD_LIBRARY_PATH = os.environ['DYLD_LIBRARY_PATH'])
# NOTE on (DY)LD_LIBRARY_PATH :
# Linux (and most unixes) use LD_LIBRARY_PATH while Mac OS X uses
# DYLD_LIBRARY_PATH. Unfortunately, the "thisroot.csh" script distributed
# with ROOT sets both of these so we can't use the presence of the
# DYLD_LIBRARY_PATH environment variable to decide which of these to
# work with. Thus, we just append to whichever are set, which may be both.
if os.getenv('LD_LIBRARY_PATH' ) != None : env.AppendENVPath('LD_LIBRARY_PATH' , '%s/lib' % rootsys )
if os.getenv('DYLD_LIBRARY_PATH') != None : env.AppendENVPath('DYLD_LIBRARY_PATH', '%s/lib' % rootsys )
# Create Builder that can convert .h file into _Dict.cc file
rootcintpath = "%s/bin/rootcint" % (rootsys)
rootclingpath = "%s/bin/rootcling" % (rootsys)
if env['SHOWBUILD']==0:
rootcintaction = SCons.Script.Action("%s -f $TARGET -c $SOURCE" % (rootcintpath) , 'ROOTCINT [$SOURCE]')
rootclingaction = SCons.Script.Action("%s -f $TARGET -c $SOURCE" % (rootclingpath), 'ROOTCLING [$SOURCE]')
else:
rootcintaction = SCons.Script.Action("%s -f $TARGET -c $SOURCE" % (rootcintpath) )
rootclingaction = SCons.Script.Action("%s -f $TARGET -c $SOURCE" % (rootclingpath))
if os.path.exists(rootclingpath) :
bld = SCons.Script.Builder(action = rootclingaction, suffix='_Dict.cc', src_suffix='.h')
elif os.path.exists(rootcintpath):
bld = SCons.Script.Builder(action = rootcintaction, suffix='_Dict.cc', src_suffix='.h')
else:
print('Neither rootcint nor rootcling exists. Unable to create ROOT dictionaries for any headers encountered.')
return
env.Append(BUILDERS = {'ROOTDict' : bld})
# Generate ROOT dictionary file targets for each header
# containing "ClassDef"
#
# n.b. It seems if scons is run when the build directory doesn't exist,
# then the cwd is set to the source directory. Otherwise, it is the
# build directory. Since the headers will only exist in the source
# directory, we must temporarily cd into that to look for headers that
# we wish to generate dictionaries for. (This took a long time to figure
# out!)
curpath = os.getcwd()
srcpath = env.Dir('.').srcnode().abspath
if(int(env['SHOWBUILD'])>1):
print("---- Scanning for headers to generate ROOT dictionaries in: %s" % srcpath)
os.chdir(srcpath)
for f in glob.glob('*.h') + glob.glob('*.hh') + glob.glob('*.hpp'):
if 'ClassDef' in open(f).read():
env.AppendUnique(ALL_SOURCES = env.ROOTDict(f))
if(int(env['SHOWBUILD'])>1):
print(" ROOT dictionary for %s" % f)
os.chdir(curpath)
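# Illustrative sketch only: a package SConscript might combine the helpers above
# roughly like this (assumes 'env' is the exported construction environment and
# that ROOTSYS, JANA_HOME, etc. are already set in the shell environment).
def _demo_sconscript_usage(env):
    AddROOT(env)
    AddJANAInstalled(env)
    Add_pthread(env)
    ReorderCommonLibraries(env)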
|
the-stack_0_26161
|
# Copyright (C) 2021-2022, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from math import ceil
from typing import List, Optional, Tuple, Union
import cv2
import numpy as np
from .common_types import BoundingBox, Polygon4P
__all__ = ['bbox_to_polygon', 'polygon_to_bbox', 'resolve_enclosing_bbox', 'resolve_enclosing_rbbox',
'rotate_boxes', 'compute_expanded_shape', 'rotate_image', 'estimate_page_angle',
'convert_to_relative_coords', 'rotate_abs_geoms']
def bbox_to_polygon(bbox: BoundingBox) -> Polygon4P:
return bbox[0], (bbox[1][0], bbox[0][1]), (bbox[0][0], bbox[1][1]), bbox[1]
def polygon_to_bbox(polygon: Polygon4P) -> BoundingBox:
x, y = zip(*polygon)
return (min(x), min(y)), (max(x), max(y))
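def _demo_bbox_polygon_roundtrip():
    # Illustrative round-trip on a made-up relative box (not part of the public API):
    # bbox_to_polygon returns the four corners, polygon_to_bbox recovers the box.
    bbox = ((0.1, 0.2), (0.6, 0.5))
    poly = bbox_to_polygon(bbox)  # ((0.1, 0.2), (0.6, 0.2), (0.1, 0.5), (0.6, 0.5))
    assert polygon_to_bbox(poly) == bbox
    return poly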
def resolve_enclosing_bbox(bboxes: Union[List[BoundingBox], np.ndarray]) -> Union[BoundingBox, np.ndarray]:
"""Compute enclosing bbox either from:
- an array of boxes: (*, 5), where boxes have this shape:
(xmin, ymin, xmax, ymax, score)
- a list of BoundingBox
Return a (5,) array (enclosing box) or a BoundingBox
"""
if isinstance(bboxes, np.ndarray):
xmin, ymin, xmax, ymax, score = np.split(bboxes, 5, axis=1)
return np.array([xmin.min(), ymin.min(), xmax.max(), ymax.max(), score.mean()])
else:
x, y = zip(*[point for box in bboxes for point in box])
return (min(x), min(y)), (max(x), max(y))
def resolve_enclosing_rbbox(rbboxes: List[np.ndarray], intermed_size: int = 1024) -> np.ndarray:
cloud = np.concatenate(rbboxes, axis=0)
# Convert to absolute for minAreaRect
cloud *= intermed_size
rect = cv2.minAreaRect(cloud.astype(np.int32))
return cv2.boxPoints(rect) / intermed_size
def rotate_abs_points(points: np.ndarray, angle: float = 0.) -> np.ndarray:
"""Rotate points counter-clockwise.
Points: array of size (N, 2)
"""
angle_rad = angle * np.pi / 180. # compute radian angle for np functions
rotation_mat = np.array([
[np.cos(angle_rad), -np.sin(angle_rad)],
[np.sin(angle_rad), np.cos(angle_rad)]
], dtype=points.dtype)
return np.matmul(points, rotation_mat.T)
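def _demo_rotate_abs_points():
    # Small illustrative check: a 90 degree counter-clockwise rotation maps the
    # point (1, 0) onto (0, 1), up to floating point error.
    rotated = rotate_abs_points(np.array([[1.0, 0.0]]), 90.0)
    assert np.allclose(rotated, [[0.0, 1.0]])
    return rotated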
def compute_expanded_shape(img_shape: Tuple[int, int], angle: float) -> Tuple[int, int]:
"""Compute the shape of an expanded rotated image
Args:
img_shape: the height and width of the image
angle: angle between -90 and +90 degrees
Returns:
the height and width of the rotated image
"""
points = np.array([
[img_shape[1] / 2, img_shape[0] / 2],
[-img_shape[1] / 2, img_shape[0] / 2],
])
rotated_points = rotate_abs_points(points, angle)
wh_shape = 2 * np.abs(rotated_points).max(axis=0)
return wh_shape[1], wh_shape[0]
def rotate_abs_geoms(
geoms: np.ndarray,
angle: float,
img_shape: Tuple[int, int],
expand: bool = True,
) -> np.ndarray:
"""Rotate a batch of bounding boxes or polygons by an angle around the
image center.
Args:
geoms: (N, 4) or (N, 4, 2) array of ABSOLUTE coordinate boxes
angle: anti-clockwise rotation angle in degrees
img_shape: the height and width of the image
expand: whether the image should be padded to avoid information loss
Returns:
A batch of rotated polygons (N, 4, 2)
"""
# Switch to polygons
polys = np.stack(
[geoms[:, [0, 1]], geoms[:, [2, 1]], geoms[:, [2, 3]], geoms[:, [0, 3]]],
axis=1
) if geoms.ndim == 2 else geoms
polys = polys.astype(np.float32)
# Switch to image center as referential
polys[..., 0] -= img_shape[1] / 2
polys[..., 1] = img_shape[0] / 2 - polys[..., 1]
# Rotated them around image center
rotated_polys = rotate_abs_points(polys.reshape(-1, 2), angle).reshape(-1, 4, 2)
# Switch back to top-left corner as referential
target_shape = compute_expanded_shape(img_shape, angle) if expand else img_shape
# Clip coords to fit inside the (possibly expanded) target shape
rotated_polys[..., 0] = (rotated_polys[..., 0] + target_shape[1] / 2).clip(0, target_shape[1])
rotated_polys[..., 1] = (target_shape[0] / 2 - rotated_polys[..., 1]).clip(0, target_shape[0])
return rotated_polys
def remap_boxes(
loc_preds: np.ndarray,
orig_shape: Tuple[int, int],
dest_shape: Tuple[int, int]
) -> np.ndarray:
""" Remaps a batch of rotated locpred (N, 4, 2) expressed for an origin_shape to a destination_shape.
This does not impact the absolute shape of the boxes, but allows calculating the new relative RotatedBbox
coordinates after a resizing of the image.
Args:
loc_preds: (N, 4, 2) array of RELATIVE loc_preds
orig_shape: shape of the origin image
dest_shape: shape of the destination image
Returns:
A batch of rotated loc_preds (N, 4, 2) expressed in the destination referential
"""
if len(dest_shape) != 2:
raise ValueError(f"dest_shape should have length 2, got {len(dest_shape)}")
if len(orig_shape) != 2:
raise ValueError(f"orig_shape should have length 2, got {len(orig_shape)}")
orig_height, orig_width = orig_shape
dest_height, dest_width = dest_shape
mboxes = loc_preds.copy()
mboxes[:, :, 0] = ((loc_preds[:, :, 0] * orig_width) + (dest_width - orig_width) / 2) / dest_width
mboxes[:, :, 1] = ((loc_preds[:, :, 1] * orig_height) + (dest_height - orig_height) / 2) / dest_height
return mboxes
def rotate_boxes(
loc_preds: np.ndarray,
angle: float,
orig_shape: Tuple[int, int],
min_angle: float = 1.,
target_shape: Optional[Tuple[int, int]] = None,
) -> np.ndarray:
"""Rotate a batch of straight bounding boxes (xmin, ymin, xmax, ymax, c) or rotated bounding boxes
(4, 2) of an angle, if angle > min_angle, around the center of the page.
If target_shape is specified, the boxes are remapped to the target shape after the rotation. This
is done to remove the padding that is created by rotate_page(expand=True)
Args:
loc_preds: (N, 5) or (N, 4, 2) array of RELATIVE boxes
angle: angle between -90 and +90 degrees
orig_shape: shape of the origin image
min_angle: minimum angle to rotate boxes
target_shape: shape of the destination image; if set, boxes are remapped to it after rotation
Returns:
A batch of rotated boxes (N, 4, 2): or a batch of straight bounding boxes
"""
# Change format of the boxes to rotated boxes
_boxes = loc_preds.copy()
if _boxes.ndim == 2:
_boxes = np.stack(
[
_boxes[:, [0, 1]],
_boxes[:, [2, 1]],
_boxes[:, [2, 3]],
_boxes[:, [0, 3]],
],
axis=1
)
# If small angle, return boxes (no rotation)
if abs(angle) < min_angle or abs(angle) > 90 - min_angle:
return _boxes
# Compute rotation matrix
angle_rad = angle * np.pi / 180. # compute radian angle for np functions
rotation_mat = np.array([
[np.cos(angle_rad), -np.sin(angle_rad)],
[np.sin(angle_rad), np.cos(angle_rad)]
], dtype=_boxes.dtype)
# Rotate absolute points
points = np.stack((_boxes[:, :, 0] * orig_shape[1], _boxes[:, :, 1] * orig_shape[0]), axis=-1)
image_center = (orig_shape[1] / 2, orig_shape[0] / 2)
rotated_points = image_center + np.matmul(points - image_center, rotation_mat)
rotated_boxes = np.stack(
(rotated_points[:, :, 0] / orig_shape[1], rotated_points[:, :, 1] / orig_shape[0]), axis=-1
)
# Apply a mask if requested
if target_shape is not None:
rotated_boxes = remap_boxes(rotated_boxes, orig_shape=orig_shape, dest_shape=target_shape)
return rotated_boxes
def rotate_image(
image: np.ndarray,
angle: float,
expand: bool = False,
preserve_origin_shape: bool = False,
) -> np.ndarray:
"""Rotate an image counterclockwise by an given angle.
Args:
image: numpy tensor to rotate
angle: rotation angle in degrees, between -90 and +90
expand: whether the image should be padded before the rotation
preserve_origin_shape: if expand is set to True, resizes the final output to the original image size
Returns:
Rotated array, padded by 0 by default.
"""
# Compute the expanded padding
if expand:
exp_shape = compute_expanded_shape(image.shape[:-1], angle)
h_pad, w_pad = int(max(0, ceil(exp_shape[0] - image.shape[0]))), int(
max(0, ceil(exp_shape[1] - image.shape[1])))
exp_img = np.pad(image, ((h_pad // 2, h_pad - h_pad // 2), (w_pad // 2, w_pad - w_pad // 2), (0, 0)))
else:
exp_img = image
height, width = exp_img.shape[:2]
rot_mat = cv2.getRotationMatrix2D((width / 2, height / 2), angle, 1.0)
rot_img = cv2.warpAffine(exp_img, rot_mat, (width, height))
if expand:
# Pad to get the same aspect ratio
if (image.shape[0] / image.shape[1]) != (rot_img.shape[0] / rot_img.shape[1]):
# Pad width
if (rot_img.shape[0] / rot_img.shape[1]) > (image.shape[0] / image.shape[1]):
h_pad, w_pad = 0, int(rot_img.shape[0] * image.shape[1] / image.shape[0] - rot_img.shape[1])
# Pad height
else:
h_pad, w_pad = int(rot_img.shape[1] * image.shape[0] / image.shape[1] - rot_img.shape[0]), 0
rot_img = np.pad(rot_img, ((h_pad // 2, h_pad - h_pad // 2), (w_pad // 2, w_pad - w_pad // 2), (0, 0)))
if preserve_origin_shape:
# rescale
rot_img = cv2.resize(rot_img, image.shape[:-1][::-1], interpolation=cv2.INTER_LINEAR)
return rot_img
def estimate_page_angle(polys: np.ndarray) -> float:
"""Takes a batch of rotated previously ORIENTED polys (N, 4, 2) (rectified by the classifier) and return the
estimated angle ccw in degrees
"""
# Compute mean left points and mean right point with respect to the reading direction (oriented polygon)
xleft = polys[:, 0, 0] + polys[:, 3, 0]
yleft = polys[:, 0, 1] + polys[:, 3, 1]
xright = polys[:, 1, 0] + polys[:, 2, 0]
yright = polys[:, 1, 1] + polys[:, 2, 1]
return np.median(np.arctan(
(yleft - yright) / (xright - xleft) # Y axis from top to bottom!
)) * 180 / np.pi
def convert_to_relative_coords(geoms: np.ndarray, img_shape: Tuple[int, int]) -> np.ndarray:
"""Convert a geometry to relative coordinates
Args:
geoms: a set of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)
img_shape: the height and width of the image
Returns:
the updated geometry
"""
# Polygon
if geoms.ndim == 3 and geoms.shape[1:] == (4, 2):
polygons = np.empty(geoms.shape, dtype=np.float32)
polygons[..., 0] = geoms[..., 0] / img_shape[1]
polygons[..., 1] = geoms[..., 1] / img_shape[0]
return polygons.clip(0, 1)
if geoms.ndim == 2 and geoms.shape[1] == 4:
boxes = np.empty(geoms.shape, dtype=np.float32)
boxes[:, ::2] = geoms[:, ::2] / img_shape[1]
boxes[:, 1::2] = geoms[:, 1::2] / img_shape[0]
return boxes.clip(0, 1)
raise ValueError(f"invalid format for arg `geoms`: {geoms.shape}")
|
the-stack_0_26162
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftCrossEntropy(nn.Module):
"""
Cross entropy that accepts soft targets
"""
def __init__(self):
super(SoftCrossEntropy, self).__init__()
# Per-class weights: class 0 gets 0.01 and the remaining 13 classes share 0.99
# equally; note that these weights are not applied in forward() below.
self.weights = torch.ones(14)
for i in range(14):
if i == 0:
self.weights[i] = 0.01
else:
self.weights[i] = 0.99 / 13
def forward(self,
raw_pred: torch.Tensor,
ref_labels: torch.Tensor):
log_prob = F.log_softmax(raw_pred, dim=1)
res = - (ref_labels * log_prob)
ret_val = torch.mean(torch.sum(res, dim=1))
return ret_val
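if __name__ == "__main__":
    # Illustrative usage sketch with made-up shapes: soft targets are a probability
    # distribution per sample (14 classes here, matching the weights above).
    criterion = SoftCrossEntropy()
    logits = torch.randn(4, 14)
    soft_targets = F.softmax(torch.randn(4, 14), dim=1)
    print(criterion(logits, soft_targets).item())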
|
the-stack_0_26163
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import versioneer
requires = open('requirements.txt').read().strip().split('\n')
setup(
name='intake_s3_manifests',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='S3 manifests plugin for Intake',
url='https://github.com/informatics-lab/intake-s3-manifests',
maintainer='Jacob Tomlinson',
maintainer_email='[email protected]',
license='BSD',
py_modules=['intake_s3_manifests'],
packages=find_packages(),
package_data={'': ['*.csv', '*.yml', '*.html']},
include_package_data=True,
install_requires=requires,
long_description=open('README.md').read(),
zip_safe=False, )
|
the-stack_0_26164
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
write_path_jsontocsv.py
========================
Reads JSON path result file in accordance with the Yang model for requesting
path computation and writes results to a CSV file.
See: draft-ietf-teas-yang-path-computation-01.txt
"""
from argparse import ArgumentParser
from pathlib import Path
from json import loads
from gnpy.core.equipment import load_equipment
from gnpy.core.request import jsontocsv
parser = ArgumentParser(description='A function that writes json path results to a CSV file.')
parser.add_argument('filename', nargs='?', type = Path)
parser.add_argument('output_filename', nargs='?', type = Path)
parser.add_argument('eqpt_filename', nargs='?', type = Path, default=Path(__file__).parent / 'eqpt_config.json')
if __name__ == '__main__':
args = parser.parse_args()
with open(args.output_filename,"w") as file :
with open(args.filename) as f:
print(f'Reading {args.filename}')
json_data = loads(f.read())
equipment = load_equipment(args.eqpt_filename)
print(f'Writing in {args.output_filename}')
jsontocsv(json_data,equipment,file)
|
the-stack_0_26165
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
"""Function implementation"""
import logging
import time
import json
from datetime import datetime
from threading import current_thread
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from resilient_lib import RequestsCommon
from fn_mcafee_esm.util.helper import check_config, get_authenticated_headers, check_status_code
log = logging.getLogger(__name__)
def query_esm(rc, options, headers, data, type):
log.debug("Calling query_esm()")
base_url = options["esm_url"]
url = base_url + "/rs/esm/v2/qryExecuteDetail?type={}&reverse=False".format(type)
r = rc.execute_call_v2('post', url, headers=headers, data=data, verify=options["trust_cert"],
proxies=rc.get_proxies())
check_status_code(r.status_code)
# return r.json()
result_dict = r.json()
queryID = result_dict.get("resultID")
# Check the query status
qconf = {
"resultID": queryID
}
qconf_json = json.dumps(qconf)
status_dict = get_qry_status(rc, options, headers, qconf_json)
status = status_dict.get("percentComplete")
# If the query is not 100% complete it will wait 10 seconds and then check again. If the query is not complete after
# 10 mins, an Error is raised indicating something went wrong.
for i in range(60):
if status == 100:
break
else:
time.sleep(10)
status_dict = get_qry_status(rc, options, headers, qconf_json)
status = status_dict.get("percentComplete")
if status != 100 and i == 59:
raise FunctionError("Query timed out.")
total_records = status_dict.get("totalRecords")
return qconf_json, total_records
def get_qry_status(rc, options, headers, data):
log.debug("Calling get_qry_status()")
url = options["esm_url"] + '/rs/esm/v2/qryGetStatus'
result = rc.execute_call_v2('post', url, headers=headers, data=data, verify=options["trust_cert"],
proxies=rc.get_proxies())
check_status_code(result.status_code)
return result.json()
def get_results(rc, options, session_header, qconf_json):
log.debug("Calling get_results() with {}".format(qconf_json))
url = options["esm_url"] + '/rs/esm/v2/qryGetResults?startPos=0&numRows=100&reverse=false'
result = rc.execute_call_v2('post', url, headers=session_header, data=qconf_json, verify=options["trust_cert"],
proxies=rc.get_proxies())
check_status_code(result.status_code)
return result.json()
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'mcafee_esm_get_list_of_cases"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.opts = opts
self.options = opts.get("fn_mcafee_esm", {})
# Check config file and change trust_cert to Boolean
self.options = check_config(self.options)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.opts = opts
self.options = opts.get("fn_mcafee_esm", {})
@function("mcafee_esm_query")
def _mcafee_esm_query_logs_function(self, event, *args, **kwargs):
"""Function: Queries McAfee ESM."""
try:
start_time = time.time()
yield StatusMessage("starting...")
options = self.options
# Instantiate RequestsCommon object
rc = RequestsCommon(opts=self.opts, function_opts=self.options)
authenticated_headers = get_authenticated_headers(rc, options["esm_url"], options["esm_username"],
options["esm_password"], options["trust_cert"])
# Get inputs
mcafee_esm_qry_type = self.get_select_param(
kwargs.get("mcafee_esm_qry_type", "EVENT")) # select
mcafee_esm_qry_config = self.get_textarea_param(kwargs.get("mcafee_esm_qry_config")) # textarea
# Log inputs
if mcafee_esm_qry_type:
log.info("mcafee_esm_qry_type: %s", mcafee_esm_qry_type)
else:
raise FunctionError("mcafee_esm_qry_type needs to be set")
if mcafee_esm_qry_config:
log.info("mcafee_esm_qry_config: %s", mcafee_esm_qry_config)
else:
raise FunctionError("mcafee_esm_qry_config needs to be set")
# Query Logs
qconf_json, total_records = query_esm(rc, options, authenticated_headers, mcafee_esm_qry_config,
mcafee_esm_qry_type)
query_result = None
if total_records:
yield StatusMessage("{} records".format(str(total_records)))
query_result = get_results(rc, options, authenticated_headers, qconf_json)
else:
yield StatusMessage("No results returned")
end_time = time.time()
results = {
"inputs": {
"mcafee_esm_qry_type": mcafee_esm_qry_type,
"mcafee_esm_qry_config": mcafee_esm_qry_config
},
"metrics": {
"execution_time": str(end_time - start_time),
"function": "mcafee_esm_query_logs",
"thread": current_thread().name,
"timestamp": datetime.fromtimestamp(end_time).strftime("%Y-%m-%d %H:%M:%S")
},
"result": query_result
}
yield StatusMessage("done...")
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception as e:
yield FunctionError(e)
|
the-stack_0_26166
|
# Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
from finn.core.datatype import DataType
from finn.custom_op.fpgadataflow import HLSCustomOp
from onnx import TensorProto, helper
from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
class GlobalAccPool_Batch(HLSCustomOp):
"""Class that corresponds to finn-hlslib AccPool_Batch function."""
def __init__(self, onnx_node):
super().__init__(onnx_node)
def get_nodeattr_types(self):
my_attrs = {
"NumChannels": ("i", True, 0),
"PE": ("i", True, 0),
# FINN DataTypes for input
"inputDataType": ("s", True, ""),
# number of input vectors, examples:
# [1] is a single vector (like a FC layer with batch=1)
# [4] is four vectors (like a FC layer with batch=4)
# [1, 4, 4] is four * four vectors (like a conv layer with batch=1)
"numInputVectors": ("ints", False, [1]),
}
my_attrs.update(super().get_nodeattr_types())
return my_attrs
def get_normal_input_shape(self):
ch = self.get_nodeattr("NumChannels")
vecs = list(self.get_nodeattr("numInputVectors"))
ishape = tuple(vecs + [ch])
return ishape
def get_folded_input_shape(self):
ch = self.get_nodeattr("NumChannels")
pe = self.get_nodeattr("PE")
vecs = list(self.get_nodeattr("numInputVectors"))
assert ch % pe == 0, "PE must divide NumChannels"
folds = int(ch / pe)
folded_ishape = tuple(vecs + [folds, pe])
return folded_ishape
def get_normal_output_shape(self):
ch = self.get_nodeattr("NumChannels")
vecs = list(self.get_nodeattr("numInputVectors"))
if len(vecs) == 1:
oshape = tuple(vecs + [ch])
elif len(vecs) == 3:
oshape = tuple([vecs[0]] + [1, 1, ch])
return oshape
def get_folded_output_shape(self):
ch = self.get_nodeattr("NumChannels")
pe = self.get_nodeattr("PE")
unfolded_shape = list(self.get_normal_output_shape())
assert ch % pe == 0, "PE must divide NumChannels"
folds = int(ch / pe)
oshape = tuple(unfolded_shape[:-1] + [folds, pe])
return oshape
def make_shape_compatible_op(self, model):
exp_ishape = self.get_normal_input_shape()
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == exp_ishape, "Unexpected input shape."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten(),
),
)
def infer_node_datatype(self, model):
odt = self.get_output_datatype()
model.set_tensor_datatype(self.onnx_node.output[0], odt)
def verify_node(self):
info_messages = []
# verify that "domain" is set to "finn"
domain_value = self.onnx_node.domain
if domain_value == "finn":
info_messages.append("Attribute domain is set correctly")
else:
info_messages.append('Attribute domain should be set to "finn"')
# verify that "backend" is set to "fpgadataflow"
backend_value = self.get_nodeattr("backend")
if backend_value == "fpgadataflow":
info_messages.append("Attribute backend is set correctly")
else:
info_messages.append('Attribute backend should be set to "fpgadataflow"')
# verify that all necessary attributes exist
try:
self.get_nodeattr("code_gen_dir_cppsim")
self.get_nodeattr("executable_path")
self.get_nodeattr("NumChannels")
self.get_nodeattr("PE")
self.get_nodeattr("inputDataType")
info_messages.append("All necessary attributes exist")
except Exception:
info_messages.append(
"""The required GlobalAccPool_Batch attributes do not exist."""
)
# verify that input data is 2D
if len(self.get_nodeattr("numInputVectors")) != 3:
info_messages.append("""GlobalAccPool_Batch requires 2D data input.""")
raise Exception
return info_messages
def get_input_datatype(self):
"""Returns FINN DataType of input."""
return DataType[self.get_nodeattr("inputDataType")]
def get_output_datatype(self):
"""Returns FINN DataType of output."""
# determine data type from image size and input type
idt = DataType[self.get_nodeattr("inputDataType")]
vecs = list(self.get_nodeattr("numInputVectors"))
npixels = vecs[-1] * vecs[-2]
if idt.signed():
extreme_value = npixels * idt.min()
else:
extreme_value = npixels * idt.max()
return DataType.get_smallest_possible(extreme_value)
def get_instream_width(self):
"""Returns input stream width."""
ibits = self.get_input_datatype().bitwidth()
pe = self.get_nodeattr("PE")
in_width = pe * ibits
return in_width
def get_outstream_width(self):
"""Returns output stream width."""
obits = self.get_output_datatype().bitwidth()
pe = self.get_nodeattr("PE")
out_width = pe * obits
return out_width
def get_number_output_values(self):
return np.prod(self.get_folded_output_shape()[1:-1])
def get_exp_cycles(self):
# Channels/PE * batch size * idim * idim + Channels/PE
ch = self.get_nodeattr("NumChannels")
pe = self.get_nodeattr("PE")
folds = int(ch / pe)
return np.prod(self.get_folded_input_shape()[:-1]) + folds
def execute_node(self, context, graph):
mode = self.get_nodeattr("exec_mode")
node = self.onnx_node
exp_ishape = self.get_normal_input_shape()
exp_oshape = self.get_normal_output_shape()
folded_ishape = self.get_folded_input_shape()
folded_oshape = self.get_folded_output_shape()
if mode == "cppsim":
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
elif mode == "rtlsim":
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(
mode
)
)
inp = context[node.input[0]]
assert str(inp.dtype) == "float32", "Input datatype is not float32"
assert inp.shape == exp_ishape, """Input shape doesn't match expected shape."""
export_idt = self.get_input_datatype()
# reshape input into folded form
inp = inp.reshape(folded_ishape)
# make copy before saving array
reshaped_input = inp.copy()
np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
if mode == "cppsim":
# execute the precompiled model
super().exec_precompiled_singlenode_model()
# load output npy file
super().npy_to_dynamic_output(context)
assert (
context[node.output[0]].shape == folded_oshape
), "cppsim \
did not produce expected ofolded utput shape"
context[node.output[0]] = context[node.output[0]].reshape(*exp_oshape)
elif mode == "rtlsim":
sim = self.get_rtlsim()
nbits = self.get_instream_width()
rtlsim_inp = npy_to_rtlsim_input(
"{}/input_0.npy".format(code_gen_dir), export_idt, nbits
)
super().reset_rtlsim(sim)
super().toggle_clk(sim)
rtlsim_output = self.rtlsim(sim, rtlsim_inp)
odt = self.get_output_datatype()
target_bits = odt.bitwidth()
packed_bits = self.get_outstream_width()
out_npy_path = "{}/output.npy".format(code_gen_dir)
out_shape = self.get_folded_output_shape()
rtlsim_output_to_npy(
rtlsim_output, out_npy_path, odt, out_shape, packed_bits, target_bits
)
# load and reshape output
output = np.load(out_npy_path)
output = np.asarray([output], dtype=np.float32).reshape(*exp_oshape)
context[node.output[0]] = output
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(
mode
)
)
assert (
context[node.output[0]].shape == exp_oshape
), """Output shape doesn't match expected shape."""
def global_includes(self):
self.code_gen_dict["$GLOBALS$"] = ['#include "maxpool.h"']
def defines(self, var):
self.code_gen_dict["$DEFINES$"] = []
def read_npy_data(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
dtype = self.get_input_datatype()
elem_bits = dtype.bitwidth()
packed_bits = self.get_instream_width()
packed_hls_type = "ap_uint<%d>" % packed_bits
elem_hls_type = dtype.get_hls_datatype_str()
npy_type = "float"
npy_in = "%s/input_0.npy" % code_gen_dir
self.code_gen_dict["$READNPYDATA$"] = []
self.code_gen_dict["$READNPYDATA$"].append(
'npy2apintstream<%s, %s, %d, %s>("%s", in0);'
% (packed_hls_type, elem_hls_type, elem_bits, npy_type, npy_in)
)
def strm_decl(self):
self.code_gen_dict["$STREAMDECLARATIONS$"] = []
self.code_gen_dict["$STREAMDECLARATIONS$"].append(
'hls::stream<ap_uint<{}>> in0 ("in0");'.format(self.get_instream_width())
)
self.code_gen_dict["$STREAMDECLARATIONS$"].append(
'hls::stream<ap_uint<{}>> out ("out");'.format(self.get_outstream_width())
)
def docompute(self):
self.code_gen_dict["$DOCOMPUTE$"] = [
"""AccPool_Batch<{}, {}, {}, {}, {}> (in0, out, 1);""".format(
self.get_normal_input_shape()[1],
self.get_nodeattr("NumChannels"),
self.get_input_datatype().get_hls_datatype_str(),
self.get_nodeattr("PE"),
self.get_output_datatype().get_hls_datatype_str(),
)
]
def dataoutstrm(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_cppsim")
dtype = self.get_output_datatype()
elem_bits = dtype.bitwidth()
packed_bits = self.get_outstream_width()
packed_hls_type = "ap_uint<%d>" % packed_bits
elem_hls_type = dtype.get_hls_datatype_str()
npy_type = "float"
npy_out = "%s/output.npy" % code_gen_dir
oshape = self.get_folded_output_shape()
oshape_cpp_str = str(oshape).replace("(", "{").replace(")", "}")
self.code_gen_dict["$DATAOUTSTREAM$"] = [
'apintstream2npy<%s, %s, %d, %s>(out, %s, "%s");'
% (
packed_hls_type,
elem_hls_type,
elem_bits,
npy_type,
oshape_cpp_str,
npy_out,
)
]
def save_as_npy(self):
self.code_gen_dict["$SAVEASCNPY$"] = []
def blackboxfunction(self):
self.code_gen_dict["$BLACKBOXFUNCTION$"] = [
"""void {}(hls::stream<ap_uint<{}>> &in0,
hls::stream<ap_uint<{}>> &out)""".format(
self.onnx_node.name,
self.get_instream_width(),
self.get_outstream_width(),
)
]
def pragmas(self):
self.code_gen_dict["$PRAGMAS$"] = ["#pragma HLS INTERFACE axis port=in0"]
self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE axis port=out")
self.code_gen_dict["$PRAGMAS$"].append(
"#pragma HLS INTERFACE ap_ctrl_none port=return"
)
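# Worked example for get_output_datatype (illustrative numbers only): with an
# unsigned inputDataType whose max is 3 (e.g. UINT2) and numInputVectors = [1, 8, 8],
# npixels = 8 * 8 = 64, so the largest possible accumulated value is 64 * 3 = 192 and
# DataType.get_smallest_possible(192) picks an 8-bit unsigned type for the output.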
|
the-stack_0_26168
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
class CombinedLoss(nn.Module):
def __init__(self, params, loss_dict):
super(CombinedLoss, self).__init__()
loss_names = params['loss_type']
self.loss_weight = params['loss_weight']
assert (len(loss_names) == len(self.loss_weight))
self.loss_list = []
for loss_name in loss_names:
if(loss_name in loss_dict):
one_loss = loss_dict[loss_name](params)
self.loss_list.append(one_loss)
else:
raise ValueError("{0:} is not defined, or has not been added to the \
loss dictionary".format(loss_name))
def forward(self, loss_input_dict):
loss_value = 0.0
for i in range(len(self.loss_list)):
loss_value += self.loss_weight[i]*self.loss_list[i](loss_input_dict)
return loss_value
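# Illustrative sketch only: everything below is hypothetical. CombinedLoss expects
# loss classes that take 'params' in their constructor and a dictionary of tensors
# in forward(), which is what the dummy loss below mimics.
class _DummyLoss(nn.Module):
    def __init__(self, params):
        super(_DummyLoss, self).__init__()

    def forward(self, loss_input_dict):
        return (loss_input_dict['prediction'] - loss_input_dict['ground_truth']).abs().mean()


def _demo_combined_loss():
    params = {'loss_type': ['DummyLoss'], 'loss_weight': [1.0]}
    loss_fn = CombinedLoss(params, {'DummyLoss': _DummyLoss})
    batch = {'prediction': torch.zeros(2, 3), 'ground_truth': torch.ones(2, 3)}
    return loss_fn(batch)  # tensor(1.)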
|
the-stack_0_26169
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import pytest
from openvino.tools.accuracy_checker.adapters import SSDAdapter, Adapter
from openvino.tools.accuracy_checker.config import ConfigError
from .common import make_representation
def test_detection_adapter():
raw = {
'detection_out': np.array([[[[0, 3, 0.2, 0, 0, 1, 1], [0, 2, 0.5, 4, 4, 7, 7], [0, 5, 0.7, 3, 3, 9, 8]]]])
}
expected = make_representation('0.2 3 0 0 1 1;0.5 2 4 4 7 7;0.7 5 3 3 9 8')
actual = SSDAdapter({}, output_blob='detection_out').process([raw], ['0'], [{}])
assert np.array_equal(actual, expected)
def test_detection_adapter_partially_filling_output_blob():
raw = {
'detection_out': np.array(
[[[[0, 3, 0.2, 0, 0, 1, 1], [0, 2, 0.5, 4, 4, 7, 7], [0, 5, 0.7, 3, 3, 9, 8], [-1, 0, 0, 0, 0, 0, 0]]]]
)
}
expected = make_representation('0.2 3 0 0 1 1;0.5 2 4 4 7 7;0.7 5 3 3 9 8')
actual = SSDAdapter({}, output_blob='detection_out').process([raw], ['0'], {})
assert np.array_equal(actual, expected)
def test_detection_adapter_partially_filling_output_blob_with_zeros_at_the_end():
raw = {
'detection_out': np.array([[[
[0, 3, 0.2, 0, 0, 1, 1],
[0, 2, 0.5, 4, 4, 7, 7],
[0, 5, 0.7, 3, 3, 9, 8],
[-1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]
]]])
}
expected = make_representation('0.2 3 0 0 1 1;0.5 2 4 4 7 7;0.7 5 3 3 9 8')
actual = SSDAdapter({}, output_blob='detection_out').process([raw], ['0'], {})
assert np.array_equal(actual, expected)
def test_detection_adapter_batch_2():
raw = {
'detection_out': np.array([[[[0, 3, 0.2, 0, 0, 1, 1], [0, 2, 0.5, 4, 4, 7, 7], [1, 5, 0.7, 3, 3, 9, 8]]]])
}
expected = make_representation(['0.2 3 0 0 1 1;0.5 2 4 4 7 7', '0.7 5 3 3 9 8'])
actual = SSDAdapter({}, output_blob='detection_out').process([raw], ['0', '1'], {})
assert np.array_equal(actual, expected)
def test_dictionary_adapter_no_raise_warning_on_specific_args():
adapter_config = {'type': 'age_gender', 'gender_out': 'gender', 'age_out': 'age'}
with pytest.warns(None) as record:
Adapter.provide('age_gender', adapter_config)
assert len(record) == 0
def test_age_gender_adapter_raise_config_error_on_extra_args():
adapter_config = {'type': 'age_gender', 'gender_out': 'gender', 'age_out': 'age', 'something_extra': 'extra'}
with pytest.raises(ConfigError):
Adapter.provide('age_gender', adapter_config)
def test_face_person_detection_adapter_raise_config_error_on_extra_args():
adapter_config = {
'type': 'face_person_detection',
'face_detection_out': 'face',
'person_detection_out': 'person',
'something_extra': 'extra'
}
with pytest.raises(ConfigError):
Adapter.provide('face_person_detection', adapter_config)
def test_head_pose_adapter_raise_config_error_on_extra_args():
adapter_config = {
'type': 'head_pose',
'angle_yaw': 'yaw',
'angle_pitch': 'pitch',
'angle_roll': 'roll',
'something_extra': 'extra'
}
with pytest.raises(ConfigError):
Adapter.provide('head_pose', adapter_config)
def test_vehicle_attributes_adapter_raise_config_error_on_extra_args():
adapter_config = {
'type': 'vehicle_attributes',
'color_out': 'color',
'type_out': 'type',
'something_extra': 'extra'
}
with pytest.raises(ConfigError):
Adapter.provide('vehicle_attributes', adapter_config)
|
the-stack_0_26170
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw PASCAL dataset to TFRecord for object_detection.
Example usage:
python object_detection/dataset_tools/create_pascal_tf_record.py \
--data_dir=/home/user/VOCdevkit \
--year=VOC2012 \
--output_path=/home/user/pascal.record
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import logging
import os
from lxml import etree
import PIL.Image
import tensorflow as tf
from object_detection.utils import dataset_util
from object_detection.utils import label_map_util
# Define your own variables here
# record_output_path = 'pascal_train.record'
record_output_path = 'pascal_val.record'
# mode_set = 'train'
mode_set = 'val'
data_dir = 'G:/develop/PycharmProjects/datasets/VOCdevkit'
flags = tf.app.flags
flags.DEFINE_string('data_dir', data_dir, 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('set', mode_set, 'Convert training set, validation set or '
'merged set.')
flags.DEFINE_string('annotations_dir', 'Annotations',
'(Relative) path to annotations directory.')
flags.DEFINE_string('year', 'VOC2012', 'Desired challenge year.')
flags.DEFINE_string('output_path', record_output_path, 'Path to output TFRecord')
flags.DEFINE_string('label_map_path', '../data/pascal_label_map.pbtxt',
'Path to label map proto')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '
'difficult instances')
FLAGS = flags.FLAGS
SETS = ['train', 'val', 'trainval', 'test']
YEARS = ['VOC2007', 'VOC2012', 'VOC3000', 'merged']
def dict_to_tf_example(year, data,
dataset_directory,
label_map_dict,
ignore_difficult_instances=False,
image_subdirectory='JPEGImages'):
"""Convert XML derived dict to tf.Example proto.
Notice that this function normalizes the bounding box coordinates provided
by the raw data.
Args:
data: dict holding PASCAL XML fields for a single image (obtained by
running dataset_util.recursive_parse_xml_to_dict)
dataset_directory: Path to root directory holding PASCAL dataset
label_map_dict: A map from string label names to integers ids.
ignore_difficult_instances: Whether to skip difficult instances in the
dataset (default: False).
image_subdirectory: String specifying subdirectory within the
PASCAL dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
img_path = os.path.join(year, image_subdirectory, data['filename'])
full_path = os.path.join(dataset_directory, img_path)
with tf.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
if image.format != 'JPEG':
raise ValueError('Image format not JPEG')
key = hashlib.sha256(encoded_jpg).hexdigest()
width = int(data['size']['width'])
height = int(data['size']['height'])
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
truncated = []
poses = []
difficult_obj = []
for obj in data['object']:
difficult = bool(int(obj['difficult']))
if ignore_difficult_instances and difficult:
continue
difficult_obj.append(int(difficult))
xmin.append(float(obj['bndbox']['xmin']) / width)
ymin.append(float(obj['bndbox']['ymin']) / height)
xmax.append(float(obj['bndbox']['xmax']) / width)
ymax.append(float(obj['bndbox']['ymax']) / height)
classes_text.append(obj['name'].encode('utf8'))
classes.append(label_map_dict[obj['name']])
truncated.append(int(obj['truncated']))
poses.append(obj['pose'].encode('utf8'))
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/source_id': dataset_util.bytes_feature(
data['filename'].encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
'image/object/truncated': dataset_util.int64_list_feature(truncated),
'image/object/view': dataset_util.bytes_list_feature(poses),
}))
return example
def main(_):
if FLAGS.set not in SETS:
raise ValueError('set must be in : {}'.format(SETS))
if FLAGS.year not in YEARS:
raise ValueError('year must be in : {}'.format(YEARS))
data_dir = FLAGS.data_dir
years = ['VOC2007', 'VOC2012', 'VOC3000']
if FLAGS.year != 'merged':
years = [FLAGS.year]
writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
label_map_dict = label_map_util.get_label_map_dict(FLAGS.label_map_path)
for year in years:
logging.info('Reading from PASCAL %s dataset.', year)
examples_path = os.path.join(data_dir, year, 'ImageSets', 'Main',
FLAGS.set + '.txt')
annotations_dir = os.path.join(data_dir, year, FLAGS.annotations_dir)
examples_list = dataset_util.read_examples_list(examples_path)
for idx, example in enumerate(examples_list):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(examples_list))
path = os.path.join(annotations_dir, example + '.xml')
with tf.gfile.GFile(path, 'r') as fid:
xml_str = fid.read()
xml = etree.fromstring(xml_str)
data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
tf_example = dict_to_tf_example(year, data, FLAGS.data_dir, label_map_dict,
FLAGS.ignore_difficult_instances)
writer.write(tf_example.SerializeToString())
writer.close()
print('TFRecord data generated successfully!')
if __name__ == '__main__':
tf.app.run()
|
the-stack_0_26171
|
# randomCircles.py
#
# Demonstrates how to draw random circles on a GUI display.
#
from gui import *
from random import *
numberOfCircles = 1000 # how many circles to draw
# create display
d = Display("Random Circles", 600, 400)
# draw various filled circles with random position, radius, color
for i in range(numberOfCircles):
# create a random circle, and place it on the display
# get random position and radius
x = randint(0, d.getWidth()-1) # x may be anywhere on display
y = randint(0, d.getHeight()-1) # y may be anywhere on display
radius = randint(1, 40) # random radius (1-40 pixels)
# get random color (RGB)
red = randint(0, 255) # random R (0-255)
green = randint(0, 255) # random G (0-255)
blue = randint(0, 255) # random B (0-255)
color = Color(red, green, blue) # build color from random RGB
# create a filled circle from random values
c = Circle(x, y, radius, color, True)
# finally, add circle to the display
d.add(c)
# now, all circles have been added
|
the-stack_0_26172
|
import torch
import torch.nn as nn
from torchvision import models
class Vgg19(torch.nn.Module):
def __init__(self, model_path, requires_grad=False):
super().__init__()
if not model_path:
vgg = models.vgg19(pretrained=True)
else:
vgg = models.vgg19()
vgg.load_state_dict(torch.load(model_path))
vgg_pretrained_features = vgg.features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
class BatchNormVgg19(torch.nn.Module):
def __init__(self, model_path):
super(BatchNormVgg19, self).__init__()
if not model_path:
vgg = models.vgg19_bn(pretrained=True)
else:
vgg = models.vgg19_bn()
vgg.load_state_dict(torch.load(model_path))
self.layers = vgg.features
self.slice1 = nn.Sequential(*list(self.layers.children())[0:6])
self.slice2 = nn.Sequential(*list(self.layers.children())[6:13])
self.slice3 = nn.Sequential(*list(self.layers.children())[13:20])
self.slice4 = nn.Sequential(*list(self.layers.children())[20:33])
self.slice5 = nn.Sequential(*list(self.layers.children())[33:46])
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
out_1_2 = self.slice1(x)
out_2_2 = self.slice2(out_1_2)
out_3_2 = self.slice3(out_2_2)
out_4_2 = self.slice4(out_3_2)
out_5_2 = self.slice5(out_4_2)
return [out_1_2, out_2_2, out_3_2, out_4_2, out_5_2]
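if __name__ == "__main__":
    # Illustrative usage sketch: extract multi-scale features from a made-up batch.
    # Passing None as model_path falls back to the torchvision pretrained weights,
    # which triggers a download the first time it is run.
    extractor = Vgg19(model_path=None)
    features = extractor(torch.randn(1, 3, 224, 224))
    print([f.shape for f in features])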
|
the-stack_0_26173
|
"""
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
YOUR DESCRIPTION HERE
"""
import tkinter
import babynames
import babygraphicsgui as gui
FILENAMES = [
'data/full/baby-1900.txt', 'data/full/baby-1910.txt',
'data/full/baby-1920.txt', 'data/full/baby-1930.txt',
'data/full/baby-1940.txt', 'data/full/baby-1950.txt',
'data/full/baby-1960.txt', 'data/full/baby-1970.txt',
'data/full/baby-1980.txt', 'data/full/baby-1990.txt',
'data/full/baby-2000.txt', 'data/full/baby-2010.txt'
]
CANVAS_WIDTH = 1000
CANVAS_HEIGHT = 600
YEARS = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
GRAPH_MARGIN_SIZE = 20
COLORS = ['red', 'purple', 'green', 'blue']
TEXT_DX = 2
LINE_WIDTH = 2
MAX_RANK = 1000
def get_x_coordinate(width, year_index):
"""
Given the width of the canvas and the index of the current year
in the YEARS list, returns the x coordinate of the vertical
line associated with that year.
Input:
width (int): The width of the canvas
year_index (int): The index of the current year in the YEARS list
Returns:
x_coordinate (int): The x coordinate of the vertical line associated
with the specified year.
"""
gap = (width-2*GRAPH_MARGIN_SIZE)//len(YEARS) # calculate x axis gap
x = GRAPH_MARGIN_SIZE+gap*year_index
return x
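# Worked example with the constants above: the usable width is 1000 - 2 * 20 = 960
# pixels, the gap between year lines is 960 // 12 = 80, so get_x_coordinate(1000, 1)
# returns 20 + 80 * 1 = 100.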
def draw_fixed_lines(canvas):
"""
Erases all existing information on the given canvas and then
draws the fixed background lines on it.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
Returns:
This function does not return any value.
"""
canvas.delete('all') # delete all existing lines from the canvas
# Write your code below this line
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, width=LINE_WIDTH, fill='black')
canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, width=LINE_WIDTH, fill='black')
for y_index in range(len(YEARS)): # draw some default line.
x_pos = get_x_coordinate(CANVAS_WIDTH, y_index)
canvas.create_line(x_pos, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE/2, x_pos, GRAPH_MARGIN_SIZE/2, width=LINE_WIDTH, fill='black')
canvas.create_text(x_pos+TEXT_DX, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE+TEXT_DX, text=YEARS[y_index], anchor=tkinter.NW, font='times 15')
#################################
def draw_names(canvas, name_data, lookup_names):
"""
Given a dict of baby name data and a list of name, plots
the historical trend of those names onto the canvas.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
name_data (dict): Dictionary holding baby name data
lookup_names (List[str]): A list of names whose data you want to plot
Returns:
This function does not return any value.
"""
draw_fixed_lines(canvas) # draw the fixed background grid
    rank_distance = (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE*2) / MAX_RANK  # vertical space per rank: the y-axis spans MAX_RANK (1000) ranks
gap = (CANVAS_WIDTH - 2 * GRAPH_MARGIN_SIZE) // len(YEARS) # X axis gap
# Write your code below this line
    for n_index in range(len(lookup_names)):  # for each searched name
        rank = []
        for y_index in range(len(YEARS)):  # for each year from 1900 to 2010
            if str(YEARS[y_index]) in name_data[lookup_names[n_index]]:  # the name has a rank recorded for this year
                rank.append(name_data[lookup_names[n_index]][str(YEARS[y_index])])
            else:
                rank.append('*')  # '*' marks a rank outside the top 1000
        # Pre-initialize the second endpoint's coordinates and label so the last
        # year's text can be drawn after the loop below.
        x_pos_2 = 0
        y_pos_2 = 0
        word_2 = ''
color_index = n_index % len(COLORS) # color_index will be 0,1,2,3
for i in range(len(YEARS)-1): # draw line and text
if rank[i] == '*': # the y position of the line's first point
y_pos = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
else:
y_pos = GRAPH_MARGIN_SIZE + rank_distance * int(rank[i])
if rank[i+1] == '*': # the y position of the line's second point
y_pos_2 = CANVAS_HEIGHT - GRAPH_MARGIN_SIZE
else:
y_pos_2 = GRAPH_MARGIN_SIZE + rank_distance * int(rank[i+1])
x_pos = GRAPH_MARGIN_SIZE + gap*i # the x position of the line's first point
x_pos_2 = GRAPH_MARGIN_SIZE + gap*(i+1) # the x position of the line's second point
canvas.create_line(x_pos, y_pos, x_pos_2, y_pos_2, width=LINE_WIDTH, fill=COLORS[color_index])
word = lookup_names[n_index] + ' ' + rank[i] # text word
word_2 = lookup_names[n_index] + ' ' + rank[i+1]
canvas.create_text(x_pos+TEXT_DX, y_pos-TEXT_DX, text=word, anchor=tkinter.SW, font='times 12', fill=COLORS[color_index])
        # Draw the final year's label separately: the loop above only labels the
        # first endpoint of each segment, so without this the last year's text
        # would be skipped (an off-by-one).
        canvas.create_text(x_pos_2 + TEXT_DX, y_pos_2 - TEXT_DX, text=word_2, anchor=tkinter.SW, font='times 12', fill=COLORS[color_index])
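# Worked example of the rank-to-y mapping above: with CANVAS_HEIGHT = 600 and
# GRAPH_MARGIN_SIZE = 20, rank_distance = 560 / 1000 = 0.56, so a rank-10 name
# is drawn near the top at y = 20 + 5.6 = 25.6, while an unranked name ('*')
# sits on the bottom line at y = 580.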
#################################
# main() code is provided, feel free to read through it but DO NOT MODIFY
def main():
# Load data
name_data = babynames.read_files(FILENAMES)
# Create the window and the canvas
top = tkinter.Tk()
top.wm_title('Baby Names')
canvas = gui.make_gui(top, CANVAS_WIDTH, CANVAS_HEIGHT, name_data, draw_names, babynames.search_names)
# Call draw_fixed_lines() once at startup so we have the lines
# even before the user types anything.
draw_fixed_lines(canvas)
# This line starts the graphical loop that is responsible for
# processing user interactions and plotting data
top.mainloop()
if __name__ == '__main__':
main()
|
the-stack_0_26174
|
# Copyright 2016 Skyscanner Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import unittest
import pytest
from failsafe import FallbackFailsafe, FallbacksExhausted
from failsafe import RetryPolicy
loop = asyncio.get_event_loop()
class TestFallbackFailsafe(unittest.TestCase):
def test_value_is_called(self):
async def call(option):
assert option == "fallback option 1"
return "return value"
fallback_failsafe = FallbackFailsafe(["fallback option 1", "fallback option 2"])
result = loop.run_until_complete(
fallback_failsafe.run(call)
)
assert result == "return value"
def test_fallback_is_called_when_exception_is_raised(self):
async def call(option):
if option == "fallback option 1":
raise Exception()
elif option == "fallback option 2":
return "return value"
fallback_failsafe = FallbackFailsafe(["fallback option 1", "fallback option 2"])
result = loop.run_until_complete(
fallback_failsafe.run(call)
)
assert result == "return value"
def test_exception_is_raised_when_no_fallback_succeeds(self):
async def call(_):
raise Exception()
fallback_failsafe = FallbackFailsafe(["fallback option 1", "fallback option 2"])
with pytest.raises(FallbacksExhausted):
loop.run_until_complete(
fallback_failsafe.run(call)
)
def test_args_are_passed_to_function(self):
async def call(fallback_option, positional_argument, *args, **kwargs):
assert fallback_option == "fallback option"
assert positional_argument == "positional argument"
assert args == ("arg1", "arg2")
assert kwargs == {"key1": "value1", "key2": "value2"}
return "return value"
fallback_failsafe = FallbackFailsafe(["fallback option"])
result = loop.run_until_complete(
fallback_failsafe.run(call, "positional argument", "arg1", "arg2", key1="value1", key2="value2")
)
assert result == "return value"
def test_original_exception_is_raised_and_fallback_is_not_executed_on_abortion(self):
async def call(fallback_option):
assert fallback_option == "fallback option1"
raise ValueError()
policy = RetryPolicy(abortable_exceptions=[ValueError])
fallback_failsafe = FallbackFailsafe(["fallback option1", "fallback option2"],
retry_policy_factory=lambda _: policy)
with pytest.raises(ValueError):
loop.run_until_complete(
fallback_failsafe.run(call))
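# --- Usage sketch (not part of the original tests) ---------------------------
# A hedged illustration of the pattern exercised above: each fallback option
# (hypothetical endpoint URLs, invented for illustration) is passed as the first
# argument to the coroutine, and the next option is tried when the current one
# keeps failing.
if __name__ == '__main__':
    async def fetch(endpoint, path):
        if endpoint == "https://primary.example.com":
            raise Exception("primary unavailable")
        return "fetched " + endpoint + path

    failsafe = FallbackFailsafe(["https://primary.example.com",
                                 "https://backup.example.com"])
    print(loop.run_until_complete(failsafe.run(fetch, "/users")))
    # -> fetched https://backup.example.com/users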
|
the-stack_0_26175
|
"""
Example from the docs.
"""
from rjgtoys.thing import Thing
data = {
"result" : {
"status": "success",
"value": {
"type": "person",
"name": "Bob",
"address": {
"street": "High Street",
"town": "Chigley"
}
}
}
}
def print_items(data):
print(
"The {type} called {name} lives in {town}".format(
type=data['result']['value']['type'],
name=data['result']['value']['name'],
town=data['result']['value']['address']['town']
)
)
def print_attrs(data):
print(
"The {type} called {name} lives in {town}".format(
type=data.result.value.type,
name=data.result.value.name,
town=data.result.value.address.town
)
)
# The raw data can be printed:
print("Print raw data...")
print_items(data)
try:
print_attrs(data)
except AttributeError:
print("The raw data does not support attribute access")
# Convert to a Thing...
print("Now convert...")
data = Thing.from_object(data)
print_items(data)
print_attrs(data)
|
the-stack_0_26178
|
"""
model_avg+attribute_pop
"""
import torch
import torch.nn as nn
from torch.nn.modules.transformer import TransformerEncoder, TransformerEncoderLayer
import torch.nn.utils.rnn as rnn_utils
import torch.nn.functional as F
class _ATTR_NETWORK(nn.Module):
def __init__(self, vocab_obj, args, device):
super(_ATTR_NETWORK, self).__init__()
self.m_device = device
self.m_vocab_size = vocab_obj.vocab_size
self.m_user_num = vocab_obj.user_num
self.m_item_num = vocab_obj.item_num
self.m_attr_embed_size = args.attr_emb_size
self.m_user_embed_size = args.user_emb_size
self.m_item_embed_size = args.item_emb_size
self.m_attn_head_num = args.attn_head_num
self.m_attn_layer_num = args.attn_layer_num
self.m_output_hidden_size = args.output_hidden_size
self.m_attn_linear_size = args.attn_linear_size
self.m_attr_embedding = nn.Embedding(self.m_vocab_size, self.m_attr_embed_size)
self.m_user_embedding = nn.Embedding(self.m_user_num, self.m_user_embed_size)
self.m_item_embedding = nn.Embedding(self.m_item_num, self.m_item_embed_size)
encoder_layers = TransformerEncoderLayer(self.m_attr_embed_size, self.m_attn_head_num, self.m_attn_linear_size)
self.m_attn = TransformerEncoder(encoder_layers, self.m_attn_layer_num)
self.m_gamma = args.gamma
self.m_output_attr_embedding_user = nn.Embedding(self.m_vocab_size, self.m_attr_embed_size)
self.m_output_attr_embedding_item = nn.Embedding(self.m_vocab_size, self.m_attr_embed_size)
# self.m_output_attr_embedding_user = nn.Embedding(self.m_vocab_size, self.m_attr_embed_size*2)
# self.m_output_attr_embedding_item = nn.Embedding(self.m_vocab_size, self.m_attr_embed_size*2)
self.m_beta = 1.0
self.f_init_weight()
self = self.to(self.m_device)
def f_init_weight(self):
initrange = 0.1
torch.nn.init.uniform_(self.m_output_attr_embedding_user.weight, -initrange, initrange)
torch.nn.init.uniform_(self.m_output_attr_embedding_item.weight, -initrange, initrange)
# torch.nn.init.uniform_(self.m_attr_embedding.weight, -initrange, initrange)
# torch.nn.init.normal_(self.m_tag_item_embedding.weight, 0.0, 0.01)
torch.nn.init.uniform_(self.m_user_embedding.weight, -initrange, initrange)
torch.nn.init.uniform_(self.m_item_embedding.weight, -initrange, initrange)
def f_generate_mask(self, length):
max_len = length.max().item()
mask = torch.arange(0, max_len).expand(len(length), max_len).to(length.device)
mask = mask < length.unsqueeze(1)
mask = ~mask
return mask
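    # Example (illustrative): for length = tensor([2, 3]) f_generate_mask returns
    #   tensor([[False, False,  True],
    #           [False, False, False]])
    # i.e. True marks padding positions beyond each sequence's true length.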
def f_get_avg_attr_user(self, attr, attr_lens):
### attr_user_embed: batch_size*seq_len*embed_size
attr_user_embed = self.m_attr_embedding(attr)
attr_user_mask = self.f_generate_mask(attr_lens)
masked_attr_user_embed = attr_user_embed*((~attr_user_mask).unsqueeze(-1))
attr_user = masked_attr_user_embed.sum(1)/((~attr_user_mask).sum(1).unsqueeze(-1))
return attr_user, attr_user_mask
def f_get_avg_attr_item(self, attr, attr_lens):
attr_item_embed = self.m_attr_embedding(attr)
attr_item_mask = self.f_generate_mask(attr_lens)
masked_attr_item_embed = attr_item_embed*((~attr_item_mask).unsqueeze(-1))
attr_item = masked_attr_item_embed.sum(1)/((~attr_item_mask).sum(1).unsqueeze(-1))
return attr_item, attr_item_mask
def f_get_logits(self, embed, attr):
logits = torch.matmul(embed, attr.unsqueeze(-1))
logits = logits.squeeze(-1)
return logits
def forward(self, attr_item, attr_tf_item, attr_lens_item, item_ids, attr_user, attr_tf_user, attr_lens_user, user_ids, pos_targets, pos_lens, neg_targets, neg_lens):
# print("==="*10)
""" item """
# attr_attn_item, attr_item_mask = self.f_get_avg_attr_item(attr_item, attr_lens_item)
attr_item_mask = self.f_generate_mask(attr_lens_item)
# item_x = attr_attn_item
# """ user """
attr_user_mask = self.f_generate_mask(attr_lens_user)
# attr_attn_user, attr_user_mask = self.f_get_avg_attr_user(attr_user, attr_lens_user)
# user_x = attr_attn_user
### user_x: batch_size*user_embed
user_embed = self.m_user_embedding(user_ids)
item_embed = self.m_item_embedding(item_ids)
user_output = user_embed
item_output = item_embed
neg_embed_user = self.m_output_attr_embedding_user(neg_targets)
neg_embed_item = self.m_output_attr_embedding_item(neg_targets)
### user_item_output: batch_size*ouput_size
### neg_logits: batch_size*neg_num
neg_logits_user = self.f_get_logits(neg_embed_user, user_output)
neg_logits_item = self.f_get_logits(neg_embed_item, item_output)
# print("neg_lens", neg_lens)
# exit()
neg_mask = self.f_generate_mask(neg_lens)
neg_mask = ~neg_mask
### targets: batch_size*pos_num
pos_embed_user = self.m_output_attr_embedding_user(pos_targets)
pos_embed_item = self.m_output_attr_embedding_item(pos_targets)
### user_item_output: batch_size*ouput_size
### neg_logits: batch_size*neg_num
pos_logits_user = self.f_get_logits(pos_embed_user, user_output)
pos_logits_item = self.f_get_logits(pos_embed_item, item_output)
pos_logits = pos_logits_user+pos_logits_item
neg_logits = neg_logits_user+neg_logits_item
pos_mask = self.f_generate_mask(pos_lens)
pos_mask = ~pos_mask
logits = torch.cat([pos_logits, neg_logits], dim=-1)
logits += attr_tf_user
# tmp_logits = logits.gather(1, attr_user)+attr_tf_user
# logits.scatter_(1, attr_user, tmp_logits)
# batch_size = user_ids.size(0)
# for i in range(batch_size):
# logits[i] += attr_tf_user[i]
# logits[torch.arange(batch_size), attr_user] += attr_tf_user*(~attr_user_mask)
mask = torch.cat([pos_mask, neg_mask], dim=-1)
new_targets = torch.cat([torch.ones_like(pos_targets), torch.zeros_like(neg_targets)], dim=1)
new_targets = new_targets*mask
return logits, mask, new_targets
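    # A hedged sketch (not from the original code) of how the triple returned by
    # forward() could feed a masked binary cross-entropy loss; BCE-with-logits
    # and mean-over-valid-positions are illustrative assumptions:
    #
    #   logits, mask, targets = model(...)
    #   per_elem = F.binary_cross_entropy_with_logits(
    #       logits, targets.float(), reduction="none")
    #   loss = (per_elem * mask).sum() / mask.sum()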
def f_eval_forward(self, attr_item, attr_tf_item, attr_lens_item, item_ids, attr_user, attr_tf_user, attr_lens_user, user_ids):
# """ item """
# attr_attn_item, attr_item_mask = self.f_get_avg_attr_item(attr_item, attr_lens_item)
# item_x = attr_attn_item
# """ user """
# attr_attn_user, attr_user_mask = self.f_get_avg_attr_user(attr_user, attr_lens_user)
# user_x = attr_attn_user
attr_item_mask = self.f_generate_mask(attr_lens_item)
# item_x = attr_attn_item
# """ user """
attr_user_mask = self.f_generate_mask(attr_lens_user)
scalar_weight = 1
# user_output = user_embed
# item_output = item_embed
### user_x: batch_size*user_embed
user_embed = self.m_user_embedding(user_ids)
item_embed = self.m_item_embedding(item_ids)
# item_attr_user_output = torch.zeros_like(user_embed)
# user_attr_item_output = torch.zeros_like(item_embed)
# user_output = torch.cat([user_embed, scalar_weight*user_x], dim=-1)
# item_output = torch.cat([item_embed, scalar_weight*item_x], dim=-1)
user_output = user_embed
item_output = item_embed
logits_user = torch.matmul(user_output, self.m_output_attr_embedding_user.weight.t())
logits_item = torch.matmul(item_output, self.m_output_attr_embedding_item.weight.t())
logits = logits_user+logits_item
# batch_size = user_ids.size(0)
# for i in range(batch_size):
# logits[i][attr_user[i]] += attr_tf_user[i]*(~attr_user_mask[i])
tmp_logits = logits.gather(1, attr_user)+attr_tf_user*(~attr_user_mask)
logits.scatter_(1, attr_user, tmp_logits)
return logits
|
the-stack_0_26179
|
import datetime
import itertools
import time
from functools import wraps
import os.path
import numpy as np
def timeit(func):
@wraps(func)
def wrap(*args, **kwargs):
print(f"\r{func.__name__} execute at {datetime.datetime.now().strftime('%H:%M:%S %Y-%m-%d')}")
tik = time.time()
res = func(*args, **kwargs)
tok = time.time()
print(
f"\n\r# {func.__name__} executed in {round((tok - tik), 2)}s, "
f"finished {datetime.datetime.now().strftime('%H:%M:%S %Y-%m-%d')}"
)
return res
return wrap
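# Example usage of @timeit (illustrative): the decorator prints a start
# timestamp, runs the wrapped function, then prints the elapsed wall-clock
# time, leaving the return value untouched.
#
#   @timeit
#   def slow_add(a, b):
#       time.sleep(0.5)
#       return a + b
#
#   slow_add(1, 2)  # prints the timing lines, then returns 3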
def iter_count(file_name, encoding='utf-8'): # GB18030
"""计算文件行数"""
from itertools import (takewhile, repeat)
    buffer = 1024 * 1024  # read the file in 1 MB chunks
with open(file_name, encoding=encoding) as f:
buf_gen = takewhile(lambda x: x, (f.read(buffer) for _ in repeat(None)))
return sum(buf.count('\n') for buf in buf_gen)
def batch_reader(fp, encoding='utf-8', batch_size=1000, **open_func_kwargs): # GB18030
"""
支持传入文件路径或list类型变量
return: List[str]
"""
if isinstance(fp, str) and os.path.exists(fp):
lines_num = iter_count(fp)
assert lines_num > 0
range_cycles = range(int(np.ceil(lines_num / batch_size)))
with open(fp, 'r', encoding=encoding, **open_func_kwargs) as f:
for i in range_cycles:
text = []
for j in range(batch_size):
line = f.readline()
text.append(line)
yield text
else:
assert isinstance(fp, list)
lines_num = len(fp)
assert lines_num > 0
range_cycles = range(int(np.ceil(lines_num / batch_size)))
for i in range_cycles:
start_p = batch_size * i
yield fp[start_p: batch_size * (i+1)]
def flatten(list_of_list):
"""将两层或多层iterable数据结构,展平到一层list"""
return list(itertools.chain.from_iterable(list_of_list))
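# --- Usage sketch (not part of the original module) --------------------------
# A small, hedged demo of batch_reader and flatten on an in-memory list; the
# sample data and batch size are illustrative only.
if __name__ == "__main__":
    sample = ["line %d\n" % i for i in range(7)]
    batches = list(batch_reader(sample, batch_size=3))
    print(batches)           # three batches: 3 + 3 + 1 lines
    print(flatten(batches))  # back to the original 7 lines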
|