filename | text |
---|---|
the-stack_0_18913 | # coding: utf-8
# Assign this shadow size even if
# a font bitmap set has no shadow to keep location consistency.
shadow_size = 3
def construct_color(r, g, b) :
rx = hex(r).split('x')[1]
if len(rx) == 1 :
rx = '0' + rx
gx = hex(g).split('x')[1]
if len(gx) == 1 :
gx = '0' + gx
bx = hex(b).split('x')[1]
if len(bx) == 1 :
bx = '0' + bx
return '#' + rx + gx + bx
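# Sanity check (values taken from the calls below): construct_color(214, 244, 255)
# returns '#d6f4ff'. An equivalent, more compact form would be
# '#{:02x}{:02x}{:02x}'.format(r, g, b), which zero-pads each channel in one step;
# the explicit version above is kept as-is.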
blue = construct_color(214, 244, 255)
darkblue = construct_color(118, 200, 241)
green = construct_color(101, 181, 91)
red = construct_color(228, 63, 63)
white = construct_color(255, 255, 255)
grey = construct_color(110, 111, 115)
black = construct_color(0, 0, 0)
shadow_color = construct_color(50, 50, 50)
config = {
'Do not delete this configure file.' : ''
# The base folder of this font map generator.
, 'base folder'
: './'
# The folder that stores all TrueType font (.ttf) files.
# The specified folder is relative to this configuration file.
# The absolute path is base folder + font folder.
, 'font folder'
: 'fonts'
# The Space Engineers (SE) installation path.
, 'space engineer base folder'
: r'C:\Program Files (x86)\Steam\SteamApps\common\SpaceEngineers'
# Font size in SE
, 'font size'
: 33
# The font priority list, from high to low.
# The bitmap of each character
# is taken from the TrueType font (.ttf)
# that has a valid bitmap and the highest priority.
, 'font priority list'
: [ ]
# The width of the resulting .dds image.
, 'image width'
: 1024
# The height of the resulting .dds image.
, 'image height'
: 1024
# output .dds file name prefix
, 'output dds prefix'
: 'FontDataExtra-'
# Original dds file names.
# They are used when
# the user wants to keep the original font bitmaps
# and only construct the characters that
# are not included in the original font bitmaps.
, 'original dds file names'
: [ 'FontData-0.dds' ]
# Predefined colors
, 'predefined colors'
: { 'blue': {
'output' : True,
'color': blue,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'darkblue': {
'output' : True,
'color': darkblue,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'green': {
'output' : True,
'color': green,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'red': {
'output' : True,
'color': red,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'white': {
'output' : True,
'color': white,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'grey': {
'output' : True,
'color': grey,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : False },
'white_shadow': {
'output' : True,
'color': white,
'shadow_color': shadow_color,
'shadow_size': shadow_size,
'shadow' : True }
}
# Left Side Bearing, lsb
#
# illustration:
#
# |< last >| |< this >|
# |< char >| |< char >|
# |< bitmap >||< lsb >||< bitmap >|
#
, 'lsb'
: -1
# font map xml template file
, 'xml template'
: 'xml_template.xml'
# font map xml file name
, 'xml file name'
: 'FontData.xml'
# font place holder north margin
, 'north margin'
: 0
# font place holder west margin
, 'west margin'
: 0
# font place holder south margin
, 'south margin'
: 0
# font place holder east margin
, 'east margin'
: 0
# keep original font map
, 'keep original font map'
: True
, 'text file folder'
: 'text_files'
, 'unsupported folder'
: 'unsupported'
, 'backup folder'
: 'backup'
, 'output folder'
: 'output'
}
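# Illustrative access only (the generator that consumes this file is not shown
# here): config['font size'] evaluates to 33, and a plausible output file name
# (an assumption, based on the prefix and the original 'FontData-0.dds') would
# be config['output dds prefix'] + '0.dds', i.e. 'FontDataExtra-0.dds'.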
|
the-stack_0_18916 | """Build macro that compiles a TensorFlow graph into a cc_library.
To use from your BUILD file, add the following line to load the macro:
load("//tensorflow/compiler/aot:tfcompile.bzl", "tf_library")
Then call the macro like this:
tf_library(
name = "test_graph_tfmatmul",
config = "test_graph_tfmatmul.config.pbtxt",
cpp_class = "MatMulComp",
graph = ":test_graph_tfmatmul.pb",
)
"""
load(
"//tensorflow:tensorflow.bzl",
"if_android",
"tf_cc_test",
"tf_copts",
)
load("//tensorflow:tensorflow.bzl", "tfcompile_target_cpu")
def tf_library(
name,
graph,
config,
debug_info = None,
freeze_checkpoint = None,
freeze_saver = None,
cpp_class = None,
gen_test = True,
gen_benchmark = True,
visibility = None,
testonly = None,
tfcompile_flags = None,
tfcompile_tool = "//tensorflow/compiler/aot:tfcompile",
include_standard_runtime_deps = True,
enable_xla_hlo_profiling = False,
enable_tracemes = False,
mlir_components = "None",
deps = None,
tags = []):
"""Runs tfcompile to compile a TensorFlow graph into executable code with fast
math enabled on cpu.
Given an invocation of tf_library(name="foo", ...), generates the following
build targets:
foo: A cc_library containing the generated header and
computation.
foo_test: A cc_test with simple tests and benchmarks. Only created if
gen_test=True.
foo_benchmark: A cc_binary that runs a minimal-dependency benchmark,
useful for mobile devices or other platforms that can't
compile the full test libraries. Only created if
gen_benchmark=True.
The output header is called <name>.h.
Args:
name: The name of the build rule.
graph: The TensorFlow GraphDef to compile. If the file ends in '.pbtxt'
it is expected to be in the human-readable proto text format, otherwise
it is expected to be in the proto binary format.
config: File containing tensorflow.tf2xla.Config proto. If the file ends
in '.pbtxt' it is expected to be in the human-readable proto text
format, otherwise it is expected to be in the proto binary format.
freeze_checkpoint: If provided, run freeze_graph with this checkpoint to
convert variables into constants.
freeze_saver: If provided, run freeze_graph with this saver, in SaverDef
binary form, to convert variables into constants.
cpp_class: The name of the generated C++ class, wrapping the generated
function. The syntax of this flag is
[[<optional_namespace>::],...]<class_name>. This mirrors the C++ syntax
for referring to a class, where multiple namespaces may precede the
class name, separated by double-colons. The class will be generated in
the given namespace(s), or if no namespaces are given, within the global
namespace.
gen_test: If True, also generate a cc_test rule that builds a simple
test and benchmark.
gen_benchmark: If True, also generate a binary with a simple benchmark.
Unlike the output of gen_test, this benchmark can be run on android.
visibility: Bazel build visibility.
testonly: Bazel testonly attribute.
tfcompile_flags: Extra flags to pass to tfcompile to control compilation.
tfcompile_tool: The tfcompile binary. A non-default can be passed to
use a tfcompile built with extra dependencies.
include_standard_runtime_deps: If True, the standard list of
kernel/runtime deps is added to deps. If False, deps must contain the
full set of deps needed by the generated library.
enable_xla_hlo_profiling: Enable XLA HLO profiling in the generated
program, and emit metadata that lets us pretty-print the gathered
profile counters.
enable_tracemes: Tell tfcompile to generate calls to
TraceMe::Activity{Start|End} around HLO instructions that can be used by
Xprof to construct profiler timelines.
mlir_components: When the value is "None", no components use MLIR. When
the value is "Bridge", use MLIR to translate GraphDef to HLO.
deps: a list of deps to include on the build rules for the generated
library, added to the standard deps if standard_runtime_deps is True.
tags: tags to apply to subsidiary build rules.
"""
if not cpp_class:
fail("cpp_class must be specified")
tfcompile_graph = graph
if freeze_checkpoint or freeze_saver:
if not freeze_checkpoint:
fail("freeze_checkpoint must be specified when freeze_saver is " +
"specified")
freeze_name = "freeze_" + name
freeze_file = freeze_name + ".pb"
# First run tfcompile to generate the list of out_nodes.
#
# Here and below, we set CUDA_VISIBLE_DEVICES='' to prevent the code we
# launch from using any GPUs which might be present. This is important
# because builds may run concurrently with tests, and tests need to be
# able to assume that they have control of the full GPU.
out_nodes_file = "out_nodes_" + freeze_name
native.genrule(
name = ("gen_" + out_nodes_file),
srcs = [config],
outs = [out_nodes_file],
cmd = ("CUDA_VISIBLE_DEVICES='' " +
"$(location " + tfcompile_tool + ")" +
" --config=$(location " + config + ")" +
" --dump_fetch_nodes > $@"),
tools = [tfcompile_tool],
# Run tfcompile on the build host, rather than forge, since it's
# typically way faster on the local machine.
local = 1,
tags = tags,
)
# Now run freeze_graph to convert variables into constants.
freeze_args = (
" --input_graph=$(location " + graph + ")" +
" --checkpoint_version=1" +
" --input_binary=" + str(not graph.endswith(".pbtxt")) +
" --input_checkpoint=$(location " + freeze_checkpoint + ")" +
" --output_graph=$(location " + freeze_file + ")" +
" --output_node_names=$$(<$(location " + out_nodes_file +
"))"
)
freeze_saver_srcs = []
if freeze_saver:
freeze_args += " --input_saver=$(location " + freeze_saver + ")"
freeze_saver_srcs += [freeze_saver]
native.genrule(
name = freeze_name,
srcs = [
graph,
freeze_checkpoint,
out_nodes_file,
] + freeze_saver_srcs,
outs = [freeze_file],
cmd = (
"CUDA_VISIBLE_DEVICES='' " +
"$(location " +
"//tensorflow/python/tools:freeze_graph)" +
freeze_args
),
exec_tools = ["//tensorflow/python/tools:freeze_graph"],
tags = tags,
)
tfcompile_graph = freeze_file
# Rule that runs tfcompile to produce the header and object file.
header_file = name + ".h"
metadata_object_file = name + "_tfcompile_metadata.o"
function_object_file = name + "_tfcompile_function.o"
# The XLA backends morph kernel name prefix __ that is not in the form of
# __xla_.
ep = ("__xla_" + native.package_name() + "__" + name).replace("/", "_")
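# For example (hypothetical target): a tf_library named "test_graph_tfmatmul"
# in package "tensorflow/compiler/aot/tests" gets the entry point
# "__xla_tensorflow_compiler_aot_tests__test_graph_tfmatmul".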
if type(tfcompile_flags) == type(""):
flags = tfcompile_flags
else:
flags = " ".join([
"'" + arg.replace("'", "'\\''") + "'"
for arg in (tfcompile_flags or [])
])
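# For example, tfcompile_flags = ["--xla_cpu_multi_thread_eigen=false"] (a
# hypothetical flag list) is joined into the shell-quoted string
# "'--xla_cpu_multi_thread_eigen=false'"; an embedded single quote in an
# argument would be escaped as '\''.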
# Do this before we append the `select` into `flags`, because doing so
# transforms `flags` into a variable of type `select`, and we can't call
# `find` on such an object.
need_xla_data_proto = flags and flags.find("--gen_program_shape") != -1
target_cpu = tfcompile_target_cpu()
extra_flags = "--target_cpu=" + target_cpu + " " if target_cpu else " "
flags = extra_flags + flags
if enable_xla_hlo_profiling:
profiling_flag = "--xla_hlo_profile"
else:
profiling_flag = ""
if enable_tracemes:
traceme_flag = "--xla_cpu_enable_xprof_traceme=true"
else:
traceme_flag = "--xla_cpu_enable_xprof_traceme=false"
mlir_flag = "--mlir_components=" + mlir_components
srcs = [tfcompile_graph, config]
debug_info_flag = ""
if debug_info:
srcs.append(debug_info)
debug_info_flag = " --debug_info=$(location " + debug_info + ")"
default_fast_math_xla_flags = ("XLA_FLAGS='" +
"--xla_cpu_enable_fast_math=true " +
"--xla_cpu_fast_math_honor_nans=false " +
"--xla_cpu_fast_math_honor_infs=false " +
"--xla_cpu_fast_math_honor_functions=false " +
"--xla_cpu_fast_math_honor_division=false " +
"--xla_cpu_enable_fast_min_max=true " +
"$${XLA_FLAGS:-}' ")
native.genrule(
name = ("gen_" + name),
srcs = srcs,
outs = [
header_file,
metadata_object_file,
function_object_file,
],
cmd = (
default_fast_math_xla_flags +
"CUDA_VISIBLE_DEVICES='' " +
"$(location " + tfcompile_tool + ")" +
" --graph=$(location " + tfcompile_graph + ")" +
debug_info_flag +
" --config=$(location " + config + ")" +
" --entry_point=" + ep +
" --cpp_class=" + cpp_class +
" --target_triple=" + target_llvm_triple() +
" --out_header=$(@D)/" + header_file +
" --out_metadata_object=$(@D)/" + metadata_object_file +
" --out_function_object=$(@D)/" + function_object_file +
" " + flags + " " + profiling_flag + " " + mlir_flag + " " + traceme_flag
),
tools = [tfcompile_tool],
visibility = visibility,
testonly = testonly,
# Run tfcompile on the build host since it's typically faster on the
# local machine.
#
# Note that setting the local=1 attribute on a *test target* causes the
# test infrastructure to skip that test. However this is a genrule, not
# a test target, and runs with --strategy=Genrule=forced_forge, meaning
# the local=1 attribute is ignored, and the genrule is still run.
#
# https://www.bazel.io/versions/master/docs/be/general.html#genrule
local = 1,
tags = tags,
)
# Rule that runs tfcompile to produce the SessionModule proto, useful for
# debugging. TODO(b/64813587): Once the SessionModule proto is
# deterministic, move this into the main rule above.
session_module_pb = name + "_session_module.pb"
native.genrule(
name = (name + "_session_module"),
srcs = srcs,
outs = [
session_module_pb,
],
cmd = (
default_fast_math_xla_flags +
"CUDA_VISIBLE_DEVICES='' " +
"$(location " + tfcompile_tool + ")" +
" --graph=$(location " + tfcompile_graph + ")" +
debug_info_flag +
" --config=$(location " + config + ")" +
" --entry_point=" + ep +
" --cpp_class=" + cpp_class +
" --target_triple=" + target_llvm_triple() +
" --out_session_module=$(@D)/" + session_module_pb +
" " + flags
),
tools = [tfcompile_tool],
visibility = visibility,
testonly = testonly,
local = 1,
tags = tags,
)
# The cc_library rule packaging up the header and object file, and needed
# kernel implementations.
native.cc_library(
name = name,
srcs = [function_object_file, metadata_object_file],
hdrs = [header_file],
visibility = visibility,
testonly = testonly,
deps = [
# These deps are required by all tf_library targets even if
# include_standard_runtime_deps is False. Without them, the
# generated code will fail to compile.
"//tensorflow/compiler/tf2xla:xla_compiled_cpu_function",
"//tensorflow/core:framework_lite",
] + (need_xla_data_proto and [
# If we're generating the program shape, we must depend on the
# proto.
"//tensorflow/compiler/xla:xla_data_proto_cc",
] or []) + (enable_xla_hlo_profiling and [
"//tensorflow/compiler/xla/service:hlo_profile_printer_data_cc",
] or []) + (include_standard_runtime_deps and [
# TODO(cwhipkey): only depend on kernel code that the model actually
# needed.
"//tensorflow/compiler/xla/service/cpu:runtime_conv2d",
"//tensorflow/compiler/xla/service/cpu:runtime_key_value_sort",
"//tensorflow/compiler/xla/service/cpu:runtime_matmul",
"//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_conv2d",
"//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_matmul",
"//third_party/eigen3",
] or []) + (deps or []),
tags = tags,
)
# Variables used for gen_test and gen_benchmark.
cpp_class_split = cpp_class.rsplit("::", 2)
if len(cpp_class_split) == 1:
no_ns_name = cpp_class_split[0]
else:
no_ns_name = cpp_class_split[1]
sed_replace = (
"-e \"s|{{TFCOMPILE_HEADER}}|$(location " + header_file + ")|g\" " +
"-e \"s|{{TFCOMPILE_CPP_CLASS}}|" + cpp_class + "|g\" " +
"-e \"s|{{TFCOMPILE_NAME}}|" + no_ns_name + "|g\" "
)
if gen_test:
test_name = name + "_test"
test_file = test_name + ".cc"
# Rule to rewrite test.cc to produce the test_file.
native.genrule(
name = ("gen_" + test_name),
testonly = 1,
srcs = [
"//tensorflow/compiler/aot:test.cc",
header_file,
],
outs = [test_file],
cmd = (
"sed " + sed_replace +
" $(location //tensorflow/compiler/aot:test.cc) " +
"> $(OUTS)"
),
tags = tags,
)
# The cc_test rule for the generated code. To ensure that this works
# reliably across build configurations, we must use tf_cc_test instead
# of native.cc_test. This is related to how we build
# //tensorflow/core:lib -- see the note in
# tensorflow/core/BUILD for more details.
tf_cc_test(
name = test_name,
srcs = [test_file],
deps = [
":" + name,
"//tensorflow/compiler/aot:tf_library_test_main",
"//tensorflow/compiler/xla:executable_run_options",
"//third_party/eigen3",
"//tensorflow/core:lib",
"//tensorflow/core:test",
],
tags = tags,
)
if gen_benchmark:
benchmark_name = name + "_benchmark"
benchmark_file = benchmark_name + ".cc"
benchmark_main = ("//tensorflow/compiler/aot:" +
"benchmark_main.template")
# Rule to rewrite benchmark.cc to produce the benchmark_file.
native.genrule(
name = ("gen_" + benchmark_name),
srcs = [
benchmark_main,
header_file,
],
testonly = testonly,
outs = [benchmark_file],
cmd = ("sed " + sed_replace +
" $(location " + benchmark_main + ") " +
"> $(OUTS)"),
tags = tags,
)
# The cc_benchmark rule for the generated code. This does not need the
# tf_cc_binary since we (by deliberate design) do not depend on
# //tensorflow/core:lib.
#
# Note: to get smaller size on android for comparison, compile with:
# --copt=-fvisibility=hidden
# --copt=-D_LIBCPP_TYPE_VIS=_LIBCPP_HIDDEN
# --copt=-D_LIBCPP_EXCEPTION_ABI=_LIBCPP_HIDDEN
native.cc_binary(
name = benchmark_name,
srcs = [benchmark_file],
testonly = testonly,
copts = tf_copts(),
linkopts = if_android(["-pie", "-s"]),
deps = [
":" + name,
"//tensorflow/compiler/aot:benchmark",
"//tensorflow/compiler/xla:executable_run_options",
"//third_party/eigen3",
] + if_android([
"//tensorflow/compiler/aot:benchmark_extra_android",
]),
tags = tags,
)
def target_llvm_triple():
"""Returns the target LLVM triple to be used for compiling the target."""
# TODO(toddw): Add target_triple for other targets. For details see:
# http://llvm.org/docs/doxygen/html/Triple_8h_source.html
return select({
"//tensorflow:android_armeabi": "armv5-none-android",
"//tensorflow:android_arm": "armv7-none-android",
"//tensorflow:android_arm64": "aarch64-none-android",
"//tensorflow:android_x86": "i686-none-android",
"//tensorflow:ios": "arm64-none-ios",
"//tensorflow:ios_x86_64": "x86_64-apple-ios",
"//tensorflow:linux_ppc64le": "ppc64le-ibm-linux-gnu",
"//tensorflow:macos": "x86_64-none-darwin",
"//tensorflow:windows": "x86_64-none-windows",
"//tensorflow:linux_s390x": "systemz-none-linux-gnu",
"//conditions:default": "x86_64-pc-linux",
})
|
the-stack_0_18917 | import requests
import json
import sys
import re
import os
from slugify import slugify
class Downloader(object):
def __init__(
self,
cookie,
download_path=os.environ.get('FILE_PATH', './data'),
pk='BCpkADawqM2OOcM6njnM7hf9EaK6lIFlqiXB0iWjqGWUQjU7R8965xUvIQNqdQbnDTLz0IAO7E6Ir2rIbXJtFdzrGtitoee0n1XXRliD-RH9A-svuvNW9qgo3Bh34HEZjXjG4Nml4iyz3KqF',
brightcove_account_id=3695997568001,
):
self.cookie = cookie.strip().strip('"')
self.download_path = download_path
self.pk = pk.strip()
self.brightcove_account_id = brightcove_account_id
self.pythonversion = 3 if sys.version_info >= (3, 0) else 2
def is_unicode_string(self, string):
if (self.pythonversion == 3 and isinstance(string, str)) or (self.pythonversion == 2 and isinstance(string, unicode)):
return True
else:
return False
def download_course_by_url(self, url):
m = re.match(r'https://www.skillshare.com/classes/.*?/(\d+)', url)
if not m:
raise Exception('Failed to parse class ID from URL')
self.download_course_by_class_id(m.group(1))
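# Example (placeholder URL): a class page such as
# https://www.skillshare.com/classes/Some-Class-Title/123456 matches the
# pattern above and yields class_id "123456".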
def download_course_by_class_id(self, class_id):
data = self.fetch_course_data_by_class_id(class_id=class_id)
teacher_name = None
if 'vanity_username' in data['_embedded']['teacher']:
teacher_name = data['_embedded']['teacher']['vanity_username']
if not teacher_name:
teacher_name = data['_embedded']['teacher']['full_name']
if not teacher_name:
raise Exception('Failed to read teacher name from data')
if self.is_unicode_string(teacher_name):
teacher_name = teacher_name.encode('ascii', 'replace')
title = data['title']
if self.is_unicode_string(title):
title = title.encode('ascii', 'replace') # ignore any weird char
base_path = os.path.abspath(
os.path.join(
self.download_path,
slugify(teacher_name),
slugify(title),
)
).rstrip('/')
if not os.path.exists(base_path):
os.makedirs(base_path)
for u in data['_embedded']['units']['_embedded']['units']:
for s in u['_embedded']['sessions']['_embedded']['sessions']:
video_id = None
if 'video_hashed_id' in s and s['video_hashed_id']:
video_id = s['video_hashed_id'].split(':')[1]
if not video_id:
# NOTE: this happens sometimes...
# seems random and temporary but might be some random
# server-side check on user-agent etc?
# ...think it's more stable now with those set to
# emulate an android device
raise Exception('Failed to read video ID from data')
s_title = s['title']
if self.is_unicode_string(s_title):
s_title = s_title.encode('ascii', 'replace') # ignore any weird char
file_name = '{} - {}'.format(
str(s['index'] + 1).zfill(2),
slugify(s_title),
)
self.download_video(
fpath='{base_path}/{session}.mp4'.format(
base_path=base_path,
session=file_name,
),
video_id=video_id,
)
print('')
def fetch_course_data_by_class_id(self, class_id):
res = requests.get(
url='https://api.skillshare.com/classes/{}'.format(class_id),
headers={
'Accept': 'application/vnd.skillshare.class+json;,version=0.8',
'User-Agent': 'Skillshare/4.1.1; Android 5.1.1',
'Host': 'api.skillshare.com',
'cookie': self.cookie,
}
)
if not res.status_code == 200:
raise Exception('Fetch error, code == {}'.format(res.status_code))
return res.json()
def download_video(self, fpath, video_id):
meta_url = 'https://edge.api.brightcove.com/playback/v1/accounts/{account_id}/videos/{video_id}'.format(
account_id=self.brightcove_account_id,
video_id=video_id,
)
meta_res = requests.get(
meta_url,
headers={
'Accept': 'application/json;pk={}'.format(self.pk),
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0',
'Origin': 'https://www.skillshare.com'
}
)
if meta_res.status_code != 200:
raise Exception('Failed to fetch video meta')
dl_url = None
for x in meta_res.json()['sources']:
if 'container' in x and x['container'] == 'MP4' and 'src' in x:
dl_url = x['src']
break
if not dl_url:
raise Exception('Failed to find an MP4 source for this video')
print('Downloading {}...'.format(fpath))
if os.path.exists(fpath):
print('Video already downloaded, skipping...')
return
with open(fpath, 'wb') as f:
response = requests.get(dl_url, allow_redirects=True, stream=True)
total_length = response.headers.get('content-length')
if not total_length:
f.write(response.content)
else:
dl = 0
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
dl += len(data)
f.write(data)
done = int(50 * dl / total_length)
sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
sys.stdout.flush()
print('')
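# Hypothetical usage sketch (the cookie value and URL below are placeholders,
# not real credentials):
#
# downloader = Downloader(cookie='<your skillshare session cookie>')
# downloader.download_course_by_url(
# 'https://www.skillshare.com/classes/Some-Class-Title/123456')
#
# Videos are written under FILE_PATH (default './data') as
# <teacher-slug>/<class-title-slug>/<NN - session-title-slug>.mp4.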
|
the-stack_0_18919 | import os
import numpy as np
from astropy.table import Table
from . import get_data_home
DATA_URL = ("https://github.com/astroML/astroML-data/raw/master/datasets/"
"sgSDSSimagingSample.fit.gz")
def fetch_imaging_sample(data_home=None, download_if_missing=True):
"""Loader for SDSS Imaging sample data
Parameters
----------
data_home : optional, default=None
Specify another download and cache folder for the datasets. By default
all astroML data is stored in '~/astroML_data'.
download_if_missing : optional, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : recarray, shape = (330753,)
record array containing imaging data
Examples
--------
>>> from astroML.datasets import fetch_imaging_sample
>>> data = fetch_imaging_sample() # doctest: +IGNORE_OUTPUT
>>> data.shape # number of objects in dataset
(330753,)
>>> print(data.dtype.names[:5]) # names of the first five columns
('ra', 'dec', 'run', 'rExtSFD', 'uRaw')
>>> print(data['ra'][:2])
[0.358174 0.358382]
>>> print(data['dec'][:2])
[-0.508718 -0.551157]
Notes
-----
This data was selected from the SDSS database using the following SQL
query::
SELECT
round(p.ra,6) as ra, round(p.dec,6) as dec,
p.run, --- comments are preceded by ---
round(p.extinction_r,3) as rExtSFD, --- r band extinction from SFD
round(p.modelMag_u,3) as uRaw, --- ISM-uncorrected model mags
round(p.modelMag_g,3) as gRaw, --- rounding up model magnitudes
round(p.modelMag_r,3) as rRaw,
round(p.modelMag_i,3) as iRaw,
round(p.modelMag_z,3) as zRaw,
round(p.modelMagErr_u,3) as uErr, --- errors are important!
round(p.modelMagErr_g,3) as gErr,
round(p.modelMagErr_r,3) as rErr,
round(p.modelMagErr_i,3) as iErr,
round(p.modelMagErr_z,3) as zErr,
round(p.psfMag_u,3) as psfRaw, --- psf magnitudes
round(p.psfMag_g,3) as psfRaw,
round(p.psfMag_r,3) as psfRaw,
round(p.psfMag_i,3) as psfRaw,
round(p.psfMag_z,3) as psfRaw,
round(p.psfMagErr_u,3) as psfuErr,
round(p.psfMagErr_g,3) as psfgErr,
round(p.psfMagErr_r,3) as psfrErr,
round(p.psfMagErr_i,3) as psfiErr,
round(p.psfMagErr_z,3) as psfzErr,
p.type, --- tells if a source is resolved or not
(case when (p.flags & '16') = 0 then 1 else 0 end) as ISOLATED
INTO mydb.SDSSimagingSample
FROM PhotoTag p
WHERE
--- 10x2 sq.deg.
p.ra > 0.0 and p.ra < 10.0 and p.dec > -1 and p.dec < 1
--- resolved and unresolved sources
and (p.type = 3 OR p.type = 6) and
--- '4295229440' is magic code for no
--- DEBLENDED_AS_MOVING or SATURATED objects
(p.flags & '4295229440') = 0 and
--- PRIMARY objects only, which implies
--- !BRIGHT && (!BLENDED || NODEBLEND || nchild == 0)]
p.mode = 1 and
--- adopted faint limit (same as about SDSS limit)
p.modelMag_r < 22.5
--- the end of query
"""
data_home = get_data_home(data_home)
archive_file = os.path.join(data_home, os.path.basename(DATA_URL))
if not os.path.exists(archive_file):
if not download_if_missing:
raise IOError('data not present on disk. '
'set download_if_missing=True to download')
data = Table.read(DATA_URL)
data.write(archive_file)
else:
data = Table.read(archive_file)
return np.asarray(data)
|
the-stack_0_18920 | from Tuleap.RestClient.Artifacts import Artifacts
from lib.base import BaseTuleapAction
class CreateArtifacts(BaseTuleapAction):
def run(self, tracker_id, values_by_field_by_artifact):
success = self._login()
if success:
# Artifacts
artifacts = Artifacts(self.connection)
for values_by_field in values_by_field_by_artifact.values():
success = artifacts.create_artifact(tracker_id, values_by_field)
if success:
self.response = artifacts.get_data()
else:
return False, self.response
return True, self.response
return False, self.response
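# Illustrative payload shape (field names and values are made up; the exact
# structure expected by Artifacts.create_artifact depends on the Tuleap REST
# client and the tracker configuration):
#
# values_by_field_by_artifact = {
# 'artifact_1': {'title': 'First artifact', 'status': 'Open'},
# 'artifact_2': {'title': 'Second artifact', 'status': 'Closed'},
# }
#
# Only the dict values are used (the outer keys are arbitrary labels); each
# inner dict is passed as-is to create_artifact() for the given tracker_id.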
|
the-stack_0_18921 | from datetime import date
from django.test import TestCase
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite
from models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
# Looks like PostGIS points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# the same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertTrue(isinstance(d['point'], Geometry))
self.assertTrue(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
sql = str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
|
the-stack_0_18922 | import wx
from LaserProject import *
from ThreadConstants import *
from icons import icons8_connected_50, icons8_play_50
class JobSpooler(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: JobSpooler.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE | wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((668, 448))
self.list_job_spool = wx.ListCtrl(self, wx.ID_ANY, style=wx.LC_HRULES | wx.LC_REPORT | wx.LC_VRULES)
self.panel_controller = wx.Panel(self, wx.ID_ANY, style=wx.BORDER_RAISED)
self.gauge_controller = wx.Gauge(self.panel_controller, wx.ID_ANY, 100)
self.checkbox_limit_buffer = wx.CheckBox(self.panel_controller, wx.ID_ANY, "Limit Write Buffer")
self.text_packet_buffer = wx.TextCtrl(self.panel_controller, wx.ID_ANY, "")
self.spin_packet_buffer_max = wx.SpinCtrl(self.panel_controller, wx.ID_ANY, "1500", min=1, max=100000)
self.button_writer_control = wx.Button(self, wx.ID_ANY, "Start Job")
self.button_controller = wx.BitmapButton(self, wx.ID_ANY, icons8_connected_50.GetBitmap())
self.__set_properties()
self.__do_layout()
self.Bind(wx.EVT_LIST_BEGIN_DRAG, self.on_list_drag, self.list_job_spool)
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.on_item_rightclick, self.list_job_spool)
self.Bind(wx.EVT_CHECKBOX, self.on_check_limit_packet_buffer, self.checkbox_limit_buffer)
self.Bind(wx.EVT_SPINCTRL, self.on_spin_packet_buffer_max, self.spin_packet_buffer_max)
self.Bind(wx.EVT_TEXT, self.on_spin_packet_buffer_max, self.spin_packet_buffer_max)
self.Bind(wx.EVT_TEXT_ENTER, self.on_spin_packet_buffer_max, self.spin_packet_buffer_max)
self.Bind(wx.EVT_BUTTON, self.on_button_start_job, self.button_writer_control)
self.Bind(wx.EVT_BUTTON, self.on_button_controller, self.button_controller)
# end wxGlade
self.project = None
self.dirty = False
self.update_buffer_size = False
self.update_writer_state = False
self.update_spooler = False
self.buffer_size = 0
self.elements_progress = 0
self.elements_progress_total = 0
self.command_index = 0
self.listener_list = None
self.list_lookup = {}
self.Bind(wx.EVT_CLOSE, self.on_close, self)
def set_project(self, project):
self.project = project
project["spooler", self.on_spooler_update] = self
project["buffer", self.on_buffer_update] = self
project["writer", self.on_writer_state] = self
self.set_writer_button_by_state()
self.checkbox_limit_buffer.SetValue(self.project.writer.thread.limit_buffer)
self.spin_packet_buffer_max.SetValue(self.project.writer.thread.buffer_max)
self.refresh_spooler_list()
def on_close(self, event):
self.project["spooler", self.on_spooler_update] = None
self.project["buffer", self.on_buffer_update] = None
self.project["writer", self.on_writer_state] = None
try:
del self.project.windows["jobspooler"]
except KeyError:
pass
self.project = None
event.Skip() # Call destroy as regular.
def __set_properties(self):
# begin wxGlade: JobSpooler.__set_properties
self.SetTitle("Spooler")
self.list_job_spool.AppendColumn("#", format=wx.LIST_FORMAT_LEFT, width=29)
self.list_job_spool.AppendColumn("Name", format=wx.LIST_FORMAT_LEFT, width=90)
self.list_job_spool.AppendColumn("Status", format=wx.LIST_FORMAT_LEFT, width=73)
self.list_job_spool.AppendColumn("Device", format=wx.LIST_FORMAT_LEFT, width=53)
self.list_job_spool.AppendColumn("Type", format=wx.LIST_FORMAT_LEFT, width=50)
self.list_job_spool.AppendColumn("Speed", format=wx.LIST_FORMAT_LEFT, width=73)
self.list_job_spool.AppendColumn("Settings", format=wx.LIST_FORMAT_LEFT, width=82)
self.list_job_spool.AppendColumn("Submitted", format=wx.LIST_FORMAT_LEFT, width=70)
self.list_job_spool.AppendColumn("Time Estimate", format=wx.LIST_FORMAT_LEFT, width=92)
self.checkbox_limit_buffer.SetValue(1)
self.panel_controller.SetBackgroundColour(wx.Colour(204, 204, 204))
self.button_writer_control.SetBackgroundColour(wx.Colour(102, 255, 102))
self.button_writer_control.SetFont(
wx.Font(15, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, 0, "Segoe UI"))
self.button_writer_control.SetBitmap(icons8_play_50.GetBitmap())
self.button_controller.SetSize(self.button_controller.GetBestSize())
# end wxGlade
def __do_layout(self):
# begin wxGlade: JobSpooler.__do_layout
sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2 = wx.BoxSizer(wx.VERTICAL)
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_11 = wx.BoxSizer(wx.VERTICAL)
sizer_12 = wx.BoxSizer(wx.HORIZONTAL)
sizer_2.Add(self.list_job_spool, 8, wx.EXPAND, 0)
sizer_11.Add(self.gauge_controller, 0, wx.EXPAND, 0)
sizer_12.Add(self.checkbox_limit_buffer, 1, 0, 0)
sizer_12.Add(self.text_packet_buffer, 5, 0, 0)
label_4 = wx.StaticText(self.panel_controller, wx.ID_ANY, "/")
sizer_12.Add(label_4, 0, 0, 0)
sizer_12.Add(self.spin_packet_buffer_max, 0, 0, 0)
sizer_11.Add(sizer_12, 1, wx.EXPAND, 0)
self.panel_controller.SetSizer(sizer_11)
sizer_2.Add(self.panel_controller, 0, wx.EXPAND, 0)
sizer_3.Add(self.button_writer_control, 1, 0, 0)
sizer_3.Add(self.button_controller, 0, 0, 0)
sizer_2.Add(sizer_3, 1, wx.EXPAND, 0)
sizer_1.Add(sizer_2, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
def refresh_spooler_list(self):
self.list_job_spool.DeleteAllItems()
if len(self.project.writer.queue) > 0:
pass
# This should actually process and update the queue items.
for i, e in enumerate(self.project.writer.queue):
m = self.list_job_spool.InsertItem(i, "#%d" % i)
if m != -1:
self.list_job_spool.SetItem(m, 1, str(e))
settings = ""
if m == 0:
self.list_job_spool.SetItem(m, 2, "Executing")
else:
self.list_job_spool.SetItem(m, 2, "Queued")
self.list_job_spool.SetItem(m, 3, self.project.writer.board)
if isinstance(e, PathElement):
self.list_job_spool.SetItem(m, 4, "Path")
if VARIABLE_NAME_POWER in e.properties:
settings += " power=%.0f" % (e.properties[VARIABLE_NAME_POWER])
elif isinstance(e, ImageElement):
self.list_job_spool.SetItem(m, 4, "Raster")
if VARIABLE_NAME_RASTER_STEP in e.properties:
settings += " step=%d" % (e.properties[VARIABLE_NAME_RASTER_STEP])
elif isinstance(e, RawElement):
self.list_job_spool.SetItem(m, 4, "Raw")
if isinstance(e, LaserElement):
if VARIABLE_NAME_SPEED in e.properties:
self.list_job_spool.SetItem(m, 5, "%.1fmm/s" % (e.properties[VARIABLE_NAME_SPEED]))
self.list_job_spool.SetItem(m, 6, settings)
self.list_job_spool.SetItem(m, 7, "n/a")
self.list_job_spool.SetItem(m, 8, "unknown")
def on_list_drag(self, event): # wxGlade: JobSpooler.<event_handler>
event.Skip()
def on_item_rightclick(self, event): # wxGlade: JobSpooler.<event_handler>
index = event.Index
if index == 0:
event.Skip()
return # We can't delete the running element.
try:
element = self.project.writer.queue[index]
except IndexError:
return
menu = wx.Menu()
convert = menu.Append(wx.ID_ANY, "Remove %s" % str(element)[:16], "", wx.ITEM_NORMAL)
self.Bind(wx.EVT_MENU, self.on_tree_popup_delete(element), convert)
convert = menu.Append(wx.ID_ANY, "Clear All", "", wx.ITEM_NORMAL)
self.Bind(wx.EVT_MENU, self.on_tree_popup_clear(element), convert)
self.PopupMenu(menu)
menu.Destroy()
def on_tree_popup_clear(self, element):
def delete(event):
self.project.writer.queue = []
self.refresh_spooler_list()
return delete
def on_tree_popup_delete(self, element):
def delete(event):
self.project.writer.queue.remove(element)
self.refresh_spooler_list()
return delete
def on_check_limit_packet_buffer(self, event): # wxGlade: JobInfo.<event_handler>
self.project.writer.thread.limit_buffer = not self.project.writer.thread.limit_buffer
def on_spin_packet_buffer_max(self, event): # wxGlade: JobInfo.<event_handler>
if self.project is not None:
self.project.writer.thread.buffer_max = self.spin_packet_buffer_max.GetValue()
def on_check_auto_start_controller(self, event): # wxGlade: JobInfo.<event_handler>
if self.project is not None:
self.project.writer.thread.autostart = not self.project.controller.autostart
def on_check_home_after(self, event): # wxGlade: JobInfo.<event_handler>
if self.project is not None:
self.project.writer.thread.autohome = not self.project.writer.thread.autohome
def on_check_beep_after(self, event): # wxGlade: JobInfo.<event_handler>
if self.project is not None:
self.project.writer.thread.autobeep = not self.project.writer.thread.autobeep
def on_button_controller(self, event): # wxGlade: JobSpooler.<event_handler>
self.project.close_old_window("controller")
from Controller import Controller
window = Controller(None, wx.ID_ANY, "")
window.set_project(self.project)
window.Show()
self.project.windows["controller"] = window
def on_button_start_job(self, event): # wxGlade: JobInfo.<event_handler>
state = self.project.writer.thread.state
if state == THREAD_STATE_STARTED:
self.project.writer.thread.pause()
self.set_writer_button_by_state()
elif state == THREAD_STATE_PAUSED:
self.project.writer.thread.resume()
self.set_writer_button_by_state()
elif state == THREAD_STATE_UNSTARTED or state == THREAD_STATE_FINISHED:
self.project.writer.start_queue_consumer()
self.set_writer_button_by_state()
elif state == THREAD_STATE_ABORT:
self.project("abort", 0)
self.project.writer.reset_thread()
def set_writer_button_by_state(self):
state = self.project.writer.thread.state
if state == THREAD_STATE_FINISHED or state == THREAD_STATE_UNSTARTED:
self.button_writer_control.SetBackgroundColour("#009900")
self.button_writer_control.SetLabel("Start Job")
elif state == THREAD_STATE_PAUSED:
self.button_writer_control.SetBackgroundColour("#00dd00")
self.button_writer_control.SetLabel("Resume Job")
elif state == THREAD_STATE_STARTED:
self.button_writer_control.SetBackgroundColour("#00ff00")
self.button_writer_control.SetLabel("Pause Job")
elif state == THREAD_STATE_ABORT:
self.button_writer_control.SetBackgroundColour("#00ffff")
self.button_writer_control.SetLabel("Manual Reset")
def post_update(self):
if not self.dirty:
self.dirty = True
wx.CallAfter(self.post_update_on_gui_thread)
def post_update_on_gui_thread(self):
if self.project is None:
return # left over update on closed window
if self.update_buffer_size:
self.update_buffer_size = False
self.text_packet_buffer.SetValue(str(self.buffer_size))
self.gauge_controller.SetValue(self.buffer_size)
self.gauge_controller.SetRange(self.spin_packet_buffer_max.GetValue())
if self.update_writer_state:
self.update_writer_state = False
self.set_writer_button_by_state()
if self.update_spooler:
self.update_spooler = False
self.refresh_spooler_list()
self.dirty = False
def on_spooler_update(self, value):
self.update_spooler = True
self.post_update()
def on_buffer_update(self, value):
self.update_buffer_size = True
self.buffer_size = value
self.post_update()
def on_writer_state(self, state):
self.update_writer_state = True
self.post_update()
|
the-stack_0_18925 | BLANK_INIT = True
#================== DATASET
DATA_FILE = 'data/stroke_rehabilitation-train'
DATA_VAL_FILE = 'data/stroke_rehabilitation-val'
META_FILE = 'data/stroke_rehabilitation-meta'
DATA_TEST_FILE = 'data/stroke_rehabilitation-test'
CLASSES = 2
FEATURE_DIM = 66
STATE_DIM = FEATURE_DIM * 2
ACTION_DIM = FEATURE_DIM + CLASSES
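# With FEATURE_DIM = 66 and CLASSES = 2 above, STATE_DIM = 132 and
# ACTION_DIM = 68 (one action per feature plus one per class).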
COLUMN_LABEL = '_label'
COLUMN_DROP = ['_count']
META_COSTS = 'cost'
META_AVG = 'avg'
META_STD = 'std'
#================== RL
FEATURE_FACTOR = 0.001
REWARD_CORRECT = 0
REWARD_INCORRECT = -1
#================== TRAINING
AGENTS = 1000
TRAINING_EPOCHS = 10000
EPOCH_STEPS = 1
EPSILON_START = 0.50
EPSILON_END = 0.05
EPSILON_EPOCHS = 2000 # epsilon will fall to EPSILON_END after EPSILON_EPOCHS
EPSILON_UPDATE_EPOCHS = 10 # update epsilon every x epochs
#================== LOG
from log_states.log_mb import TRACKED_STATES
LOG_TRACKED_STATES = TRACKED_STATES
LOG_EPOCHS = 100 # states prediction will be logged every LOG_EPOCHS
LOG_PERF_EPOCHS = 100
LOG_PERF_VAL_SIZE = 1000
#================== NN
BATCH_SIZE = 100000
POOL_SIZE = 500000
NN_FC_DENSITY = 128
NN_HIDDEN_LAYERS = 3
OPT_LR = 1.0e-4
OPT_ALPHA = 0.95
OPT_MAX_NORM = 1.0
# LR scheduling => lower LR by LR_SC_FACTOR every LR_SC_EPOCHS epochs
LR_SC_FACTOR = 0.1
LR_SC_EPOCHS = 5000
LR_SC_MIN = 1.0e-7
TARGET_RHO = 0.01
#================== AUX
SAVE_EPOCHS = 1000
MAX_MASK_CONST = 1.e6
SEED = 112233
|
the-stack_0_18926 | #
# @lc app=leetcode id=186 lang=python3
#
# [186] Reverse Words in a String II
#
# https://leetcode.com/problems/reverse-words-in-a-string-ii/description/
#
# algorithms
# Medium (42.22%)
# Likes: 417
# Dislikes: 96
# Total Accepted: 83.6K
# Total Submissions: 197.9K
# Testcase Example: '["t","h","e"," ","s","k","y"," ","i","s"," ","b","l","u","e"]'
#
# Given an input string , reverse the string word by word.
#
# Example:
#
#
# Input: ["t","h","e"," ","s","k","y"," ","i","s"," ","b","l","u","e"]
# Output: ["b","l","u","e"," ","i","s"," ","s","k","y"," ","t","h","e"]
#
# Note:
#
#
# A word is defined as a sequence of non-space characters.
# The input string does not contain leading or trailing spaces.
# The words are always separated by a single space.
#
#
# Follow up: Could you do it in-place without allocating extra space?
#
#
# @lc code=start
def reverse(s, start, end):
l = start
r = end - 1
while l < r:
s[l], s[r] = s[r], s[l]
l += 1
r -= 1
class Solution:
def reverseWords(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
s.reverse()
start = 0
end = 0
while end < len(s):
if s[end] == ' ':
reverse(s, start, end)
start = end + 1
end += 1
reverse(s, start, end)
# @lc code=end
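# Worked example (from the problem statement above):
# s = ["t","h","e"," ","s","k","y"," ","i","s"," ","b","l","u","e"]
# s.reverse() gives the characters of "eulb si yks eht"; reversing each
# space-delimited word in place then yields "blue is sky the", i.e.
# ["b","l","u","e"," ","i","s"," ","s","k","y"," ","t","h","e"].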
|
the-stack_0_18927 | # Assignment 1
a = 1
b = 2
a, b = b, a
print(a, b)
######################################## Cut cells here.
# Assignment 2
my_list = [1, 2, 3, 4, 50, 60]
first, *other, last = my_list
print(first)
print(other)
print(last)
######################################## Cut cells here.
#Assignment 3
x = [[31,17],
[40 ,51],
[13 ,12]]
print(list(zip(*x)))
######################################## Cut cells here.
# Assignment 4
import string
caesar = {i:a for i, a in enumerate(string.ascii_lowercase)}
def decrypt(shift, cipher):
deciphered = [caesar[(string.ascii_lowercase.index(k)-shift)%26] for k in cipher]
return "".join(deciphered)
for shift in range(1, 26):
print(f'{shift:2.0f}: {decrypt(shift, "zvsbapvu")}')
# A shift of 7 seems most likely
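# Check: decrypt(7, "zvsbapvu") returns "solution".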
|
the-stack_0_18930 | import pyximport; pyximport.install()
from gryphon.lib.exchange.quadriga_btc_cad import QuadrigaBTCCADExchange
from gryphon.tests.exceptional.exchange_wrappers.live_orders import LiveOrdersTest
class TestQuadrigaLiveOrders(LiveOrdersTest):
def __init__(self):
super(TestQuadrigaLiveOrders, self).__init__()
# Quadriga has a 10 CAD minimum order size.
self.order1_price_amount = '10.01'
self.order2_price_amount = '10.02'
def setUp(self):
self.exchange = QuadrigaBTCCADExchange()
|
the-stack_0_18932 | # generation of mails
import re
from django.utils.html import strip_tags
from django.utils.text import wrap
from django.conf import settings
from django.urls import reverse as urlreverse
from ietf.utils.mail import send_mail, send_mail_text
from ietf.mailtrigger.utils import gather_address_lists
def email_admin_re_charter(request, group, subject, text, mailtrigger):
(to,cc) = gather_address_lists(mailtrigger,group=group)
full_subject = u"Regarding %s %s: %s" % (group.type.name, group.acronym, subject)
text = strip_tags(text)
send_mail(request, to, None, full_subject,
"group/email_iesg_secretary_re_charter.txt",
dict(text=text,
group=group,
group_url=settings.IDTRACKER_BASE_URL + group.about_url(),
charter_url=settings.IDTRACKER_BASE_URL + urlreverse('ietf.doc.views_doc.document_main', kwargs=dict(name=group.charter.name)) if group.charter else "[no charter]",
),
cc=cc,
)
def email_personnel_change(request, group, text, changed_personnel):
(to, cc) = gather_address_lists('group_personnel_change',group=group,changed_personnel=changed_personnel)
full_subject = u"Personnel change for %s %s" % (group.acronym,group.type.name)
send_mail_text(request, to, None, full_subject, text, cc=cc)
def email_milestones_changed(request, group, changes, states):
def wrap_up_email(addrs, text):
subject = u"Milestones changed for %s %s" % (group.acronym, group.type.name)
if re.search("Added .* for review, due",text):
subject = u"Review Required - " + subject
text = wrap(strip_tags(text), 70)
text += "\n\n"
text += u"URL: %s" % (settings.IDTRACKER_BASE_URL + group.about_url())
send_mail_text(request, addrs.to, None, subject, text, cc=addrs.cc)
# first send to those who should see any edits (such as management and chairs)
addrs = gather_address_lists('group_milestones_edited',group=group)
if addrs.to or addrs.cc:
wrap_up_email(addrs, u"\n\n".join(c + "." for c in changes))
# then send only the approved milestones to those who shouldn't be
# bothered with milestones pending approval
addrs = gather_address_lists('group_approved_milestones_edited',group=group)
msg = u"\n\n".join(c + "." for c,s in zip(changes,states) if not s == "review")
if (addrs.to or addrs.cc) and msg:
wrap_up_email(addrs, msg)
|
the-stack_0_18933 | def f(a):
a = []
for i in range(5):
i = i ** 3
a.append(i)
yield a
# return a
def main():
for x in f(5):
print(x)
if __name__ == '__main__':
main()
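# Expected output: f() yields the same list object after each append, so the
# loop prints [0], [0, 1], [0, 1, 8], [0, 1, 8, 27] and [0, 1, 8, 27, 64]
# (the cubes of 0..4 accumulated into one list).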
|
the-stack_0_18934 | """
Device for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import asyncio
from enum import Enum
import logging
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send
)
from .const import (
ATTR_MANUFACTURER, POWER_CONFIGURATION_CHANNEL, SIGNAL_AVAILABLE, IN, OUT,
ATTR_CLUSTER_ID, ATTR_ATTRIBUTE, ATTR_VALUE, ATTR_COMMAND, SERVER,
ATTR_COMMAND_TYPE, ATTR_ARGS, CLIENT_COMMANDS, SERVER_COMMANDS,
ATTR_ENDPOINT_ID, IEEE, MODEL, NAME, UNKNOWN, QUIRK_APPLIED,
QUIRK_CLASS, ZDO_CHANNEL, MANUFACTURER_CODE, POWER_SOURCE
)
from .channels import EventRelayChannel, ZDOChannel
_LOGGER = logging.getLogger(__name__)
class DeviceStatus(Enum):
"""Status of a device."""
CREATED = 1
INITIALIZED = 2
class ZHADevice:
"""ZHA Zigbee device object."""
def __init__(self, hass, zigpy_device, zha_gateway):
"""Initialize the gateway."""
self.hass = hass
self._zigpy_device = zigpy_device
# Get first non ZDO endpoint id to use to get manufacturer and model
endpoint_ids = zigpy_device.endpoints.keys()
self._manufacturer = UNKNOWN
self._model = UNKNOWN
ept_id = next((ept_id for ept_id in endpoint_ids if ept_id != 0), None)
if ept_id is not None:
self._manufacturer = zigpy_device.endpoints[ept_id].manufacturer
self._model = zigpy_device.endpoints[ept_id].model
self._zha_gateway = zha_gateway
self.cluster_channels = {}
self._relay_channels = {}
self._all_channels = []
self._name = "{} {}".format(
self.manufacturer,
self.model
)
self._available = False
self._available_signal = "{}_{}_{}".format(
self.name, self.ieee, SIGNAL_AVAILABLE)
self._unsub = async_dispatcher_connect(
self.hass,
self._available_signal,
self.async_initialize
)
from zigpy.quirks import CustomDevice
self.quirk_applied = isinstance(self._zigpy_device, CustomDevice)
self.quirk_class = "{}.{}".format(
self._zigpy_device.__class__.__module__,
self._zigpy_device.__class__.__name__
)
self._power_source = None
self.status = DeviceStatus.CREATED
@property
def name(self):
"""Return device name."""
return self._name
@property
def ieee(self):
"""Return ieee address for device."""
return self._zigpy_device.ieee
@property
def manufacturer(self):
"""Return manufacturer for device."""
return self._manufacturer
@property
def model(self):
"""Return model for device."""
return self._model
@property
def nwk(self):
"""Return nwk for device."""
return self._zigpy_device.nwk
@property
def lqi(self):
"""Return lqi for device."""
return self._zigpy_device.lqi
@property
def rssi(self):
"""Return rssi for device."""
return self._zigpy_device.rssi
@property
def last_seen(self):
"""Return last_seen for device."""
return self._zigpy_device.last_seen
@property
def manufacturer_code(self):
"""Return manufacturer code for device."""
if ZDO_CHANNEL in self.cluster_channels:
return self.cluster_channels.get(ZDO_CHANNEL).manufacturer_code
return None
@property
def power_source(self):
"""Return the power source for the device."""
if self._power_source is not None:
return self._power_source
if ZDO_CHANNEL in self.cluster_channels:
return self.cluster_channels.get(ZDO_CHANNEL).power_source
return None
@property
def gateway(self):
"""Return the gateway for this device."""
return self._zha_gateway
@property
def all_channels(self):
"""Return cluster channels and relay channels for device."""
return self._all_channels
@property
def available_signal(self):
"""Signal to use to subscribe to device availability changes."""
return self._available_signal
@property
def available(self):
"""Return True if sensor is available."""
return self._available
def set_available(self, available):
"""Set availability from restore and prevent signals."""
self._available = available
def set_power_source(self, power_source):
"""Set the power source."""
self._power_source = power_source
def update_available(self, available):
"""Set sensor availability."""
if self._available != available and available:
# Update the state the first time the device comes online
async_dispatcher_send(
self.hass,
self._available_signal,
False
)
async_dispatcher_send(
self.hass,
"{}_{}".format(self._available_signal, 'entity'),
available
)
self._available = available
@property
def device_info(self):
"""Return a device description for device."""
ieee = str(self.ieee)
return {
IEEE: ieee,
ATTR_MANUFACTURER: self.manufacturer,
MODEL: self.model,
NAME: self.name or ieee,
QUIRK_APPLIED: self.quirk_applied,
QUIRK_CLASS: self.quirk_class,
MANUFACTURER_CODE: self.manufacturer_code,
POWER_SOURCE: ZDOChannel.POWER_SOURCES.get(self.power_source)
}
def add_cluster_channel(self, cluster_channel):
"""Add cluster channel to device."""
# only keep 1 power configuration channel
        # Channel names are plain strings, so compare by equality rather than identity.
        if cluster_channel.name == POWER_CONFIGURATION_CHANNEL and \
POWER_CONFIGURATION_CHANNEL in self.cluster_channels:
return
if isinstance(cluster_channel, EventRelayChannel):
self._relay_channels[cluster_channel.unique_id] = cluster_channel
self._all_channels.append(cluster_channel)
else:
self.cluster_channels[cluster_channel.name] = cluster_channel
self._all_channels.append(cluster_channel)
def get_channels_to_configure(self):
"""Get a deduped list of channels for configuration.
This goes through all channels and gets a unique list of channels to
configure. It first assembles a unique list of channels that are part
of entities while stashing relay channels off to the side. It then
        takes the stashed relay channels and adds them to the list of channels
that will be returned if there isn't a channel in the list for that
cluster already. This is done to ensure each cluster is only configured
once.
"""
channel_keys = []
channels = []
relay_channels = self._relay_channels.values()
def get_key(channel):
channel_key = "ZDO"
if hasattr(channel.cluster, 'cluster_id'):
channel_key = "{}_{}".format(
channel.cluster.endpoint.endpoint_id,
channel.cluster.cluster_id
)
return channel_key
# first we get all unique non event channels
for channel in self.all_channels:
c_key = get_key(channel)
if c_key not in channel_keys and channel not in relay_channels:
channel_keys.append(c_key)
channels.append(channel)
# now we get event channels that still need their cluster configured
for channel in relay_channels:
channel_key = get_key(channel)
if channel_key not in channel_keys:
channel_keys.append(channel_key)
channels.append(channel)
return channels
async def async_configure(self):
"""Configure the device."""
_LOGGER.debug('%s: started configuration', self.name)
await self._execute_channel_tasks(
self.get_channels_to_configure(), 'async_configure')
_LOGGER.debug('%s: completed configuration', self.name)
entry = self.gateway.zha_storage.async_create_or_update(self)
_LOGGER.debug('%s: stored in registry: %s', self.name, entry)
async def async_initialize(self, from_cache=False):
"""Initialize channels."""
_LOGGER.debug('%s: started initialization', self.name)
await self._execute_channel_tasks(
self.all_channels, 'async_initialize', from_cache)
_LOGGER.debug(
'%s: power source: %s',
self.name,
ZDOChannel.POWER_SOURCES.get(self.power_source)
)
self.status = DeviceStatus.INITIALIZED
_LOGGER.debug('%s: completed initialization', self.name)
async def _execute_channel_tasks(self, channels, task_name, *args):
"""Gather and execute a set of CHANNEL tasks."""
channel_tasks = []
semaphore = asyncio.Semaphore(3)
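        # The semaphore caps this device at three channel tasks running
        # concurrently while configuring or initializing.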
zdo_task = None
for channel in channels:
if channel.name == ZDO_CHANNEL:
# pylint: disable=E1111
if zdo_task is None: # We only want to do this once
zdo_task = self._async_create_task(
semaphore, channel, task_name, *args)
else:
channel_tasks.append(
self._async_create_task(
semaphore, channel, task_name, *args))
if zdo_task is not None:
await zdo_task
await asyncio.gather(*channel_tasks)
async def _async_create_task(self, semaphore, channel, func_name, *args):
"""Configure a single channel on this device."""
try:
async with semaphore:
await getattr(channel, func_name)(*args)
_LOGGER.debug('%s: channel: %s %s stage succeeded',
self.name,
"{}-{}".format(
channel.name, channel.unique_id),
func_name)
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning(
'%s channel: %s %s stage failed ex: %s',
self.name,
"{}-{}".format(channel.name, channel.unique_id),
func_name,
ex
)
async def async_unsub_dispatcher(self):
"""Unsubscribe the dispatcher."""
if self._unsub:
self._unsub()
@callback
def async_update_last_seen(self, last_seen):
"""Set last seen on the zigpy device."""
self._zigpy_device.last_seen = last_seen
@callback
def async_get_clusters(self):
"""Get all clusters for this device."""
return {
ep_id: {
IN: endpoint.in_clusters,
OUT: endpoint.out_clusters
} for (ep_id, endpoint) in self._zigpy_device.endpoints.items()
if ep_id != 0
}
@callback
def async_get_std_clusters(self):
"""Get ZHA and ZLL clusters for this device."""
from zigpy.profiles import zha, zll
return {
ep_id: {
IN: endpoint.in_clusters,
OUT: endpoint.out_clusters
} for (ep_id, endpoint) in self._zigpy_device.endpoints.items()
if ep_id != 0 and endpoint.profile_id in (
zha.PROFILE_ID,
zll.PROFILE_ID
)
}
@callback
def async_get_cluster(self, endpoint_id, cluster_id, cluster_type=IN):
"""Get zigbee cluster from this entity."""
clusters = self.async_get_clusters()
return clusters[endpoint_id][cluster_type][cluster_id]
@callback
def async_get_cluster_attributes(self, endpoint_id, cluster_id,
cluster_type=IN):
"""Get zigbee attributes for specified cluster."""
cluster = self.async_get_cluster(endpoint_id, cluster_id,
cluster_type)
if cluster is None:
return None
return cluster.attributes
@callback
def async_get_cluster_commands(self, endpoint_id, cluster_id,
cluster_type=IN):
"""Get zigbee commands for specified cluster."""
cluster = self.async_get_cluster(endpoint_id, cluster_id, cluster_type)
if cluster is None:
return None
return {
CLIENT_COMMANDS: cluster.client_commands,
SERVER_COMMANDS: cluster.server_commands,
}
async def write_zigbee_attribute(self, endpoint_id, cluster_id,
attribute, value, cluster_type=IN,
manufacturer=None):
"""Write a value to a zigbee attribute for a cluster in this entity."""
cluster = self.async_get_cluster(endpoint_id, cluster_id, cluster_type)
if cluster is None:
return None
from zigpy.exceptions import DeliveryError
try:
response = await cluster.write_attributes(
{attribute: value},
manufacturer=manufacturer
)
_LOGGER.debug(
'set: %s for attr: %s to cluster: %s for entity: %s - res: %s',
value,
attribute,
cluster_id,
endpoint_id,
response
)
return response
except DeliveryError as exc:
_LOGGER.debug(
'failed to set attribute: %s %s %s %s %s',
'{}: {}'.format(ATTR_VALUE, value),
'{}: {}'.format(ATTR_ATTRIBUTE, attribute),
'{}: {}'.format(ATTR_CLUSTER_ID, cluster_id),
'{}: {}'.format(ATTR_ENDPOINT_ID, endpoint_id),
exc
)
return None
async def issue_cluster_command(self, endpoint_id, cluster_id, command,
command_type, args, cluster_type=IN,
manufacturer=None):
"""Issue a command against specified zigbee cluster on this entity."""
cluster = self.async_get_cluster(endpoint_id, cluster_id, cluster_type)
if cluster is None:
return None
response = None
if command_type == SERVER:
response = await cluster.command(command, *args,
manufacturer=manufacturer,
expect_reply=True)
else:
response = await cluster.client_command(command, *args)
_LOGGER.debug(
'Issued cluster command: %s %s %s %s %s %s %s',
'{}: {}'.format(ATTR_CLUSTER_ID, cluster_id),
'{}: {}'.format(ATTR_COMMAND, command),
'{}: {}'.format(ATTR_COMMAND_TYPE, command_type),
'{}: {}'.format(ATTR_ARGS, args),
'{}: {}'.format(ATTR_CLUSTER_ID, cluster_type),
'{}: {}'.format(ATTR_MANUFACTURER, manufacturer),
'{}: {}'.format(ATTR_ENDPOINT_ID, endpoint_id)
)
return response
|
the-stack_0_18935 | from datetime import datetime
from tests import PyResTests
import pyres.json_parser as json
class JSONTests(PyResTests):
def test_encode_decode_date(self):
dt = datetime(1972, 1, 22);
encoded = json.dumps({'dt': dt})
decoded = json.loads(encoded)
assert decoded['dt'] == dt
def test_dates_in_lists(self):
dates = [datetime.now() for i in range(50)]
decoded = json.loads(json.dumps(dates))
        for value in decoded:
assert isinstance(value, datetime)
def test_dates_in_dict(self):
dates = dict((i, datetime.now()) for i in range(50))
decoded = json.loads(json.dumps(dates))
for i, value in dates.items():
assert isinstance(i, int)
assert isinstance(value, datetime)
|
the-stack_0_18936 | """
The tsnet.network.discretize contains methods to perform
spatial and temporal discretization by adjusting wave speed
and time step to solve compatibility equations in case of
uneven wave travel time.
"""
from __future__ import print_function
import numpy as np
def discretization(tm, dt):
"""Discretize in temporal and spatial space using wave speed adjustment scheme.
Parameters
----------
tm : tsnet.network.geometry.TransientModel
Network
dt : float
User defined time step
Returns
-------
tm : tsnet.network.geometry.TransientModel
Network with updated parameters
"""
max_dt = max_time_step(tm)
if dt > max_dt:
raise ValueError("time step is too large. Please define \
a time step that is less than %.5f " %max_dt)
else :
Ndis = cal_N(tm, dt)
# add number of segments as a new attribute to each pipe
i = 0
for _, pipe in tm.pipes():
pipe.number_of_segments = int(Ndis[i])
i+=1
# adjust wave speed and calculate time step
tm = adjust_wavev(tm)
return tm
def max_time_step(tm):
"""Determine the maximum time step based on Courant's criteria.
Parameters
----------
tm : tsnet.network.geometry.TransientModel
Network
Returns
-------
max_dt : float
Maximum time step allowed for this network
"""
max_dt = np.inf
for _, pipe in tm.pipes():
dt = pipe.length / (2. * pipe.wavev)
if max_dt > dt :
max_dt = dt #- 0.001 # avoid numerical issue which cause N = 0
return max_dt
def discretization_N(tm, dt):
"""Discretize in temporal and spatial space using wave speed adjustment scheme.
Parameters
----------
tm : tsnet.network.geometry.TransientModel
Network
dt : float
User defined time step
Returns
-------
tm : tsnet.network.geometry.TransientModel
Network with updated parameters
"""
Ndis = cal_N(tm, dt)
# add number of segments as a new attribute to each pipe
i = 0
for _, pipe in tm.pipes():
pipe.number_of_segments = int(Ndis[i])
i+=1
# adjust wave speed and calculate time step
tm = adjust_wavev(tm)
return tm
def max_time_step_N(tm, N):
"""Determine the maximum time step based on Courant's criteria.
Parameters
----------
tm : tsnet.network.geometry.TransientModel
Network
Returns
-------
max_dt : float
Maximum time step allowed for this network
"""
max_dt = np.inf
for _, pipe in tm.pipes():
dt = pipe.length / (N * pipe.wavev)
if max_dt > dt :
max_dt = dt #- 1e-5 # avoid numerical issue which cause N = 0
return max_dt
def cal_N(tm, dt):
"""Determine the number of computation unites ($N_i$) for each pipes.
Parameters
----------
tm : tsnet.network.geometry.TransientModel
Network
dt : float
Time step for transient simulation
"""
N = np.zeros((tm.num_pipes,1))
for _, pipe in tm.pipes() :
# N[int(pipe.id)-1] = int(2*np.int(pipe.length/ (2. * pipe.wavev *dt)))
        N[int(pipe.id)-1] = int(pipe.length / (pipe.wavev * dt))  # plain int keeps the truncating behaviour; np.int is deprecated
return N
def adjust_wavev(tm):
"""Adjust wave speed and time step to solve compatibility equations.
Parameters
----------
tm : tsnet.network.geometry.TransientModel
Network
Returns
-------
tm : tsnet.network.geometry.TransientModel
Network with adjusted wave speed.
dt : float
Adjusted time step
"""
from numpy import transpose as trans
phi = [np.longdouble(pipe.length / pipe.wavev / pipe.number_of_segments)
for _, pipe in tm.pipes()]
phi = np.array(phi).reshape((len(phi), 1))
tm.wavespeed_adj = np.sum(phi**2)
theta = np.longdouble(1/ np.matmul(trans(phi), phi) * \
np.matmul(trans(phi), np.ones((len(phi), 1))))
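    # theta above is the least-squares solution of theta * phi_i ~ 1, i.e. the
    # reciprocal of a common time step fitted to all segment travel times
    # phi_i = L_i / (a_i * N_i); rescaling each wave speed by phi_i * theta below
    # makes every segment's travel time exactly dt = 1 / theta.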
# adjust time step
dt = np.float64(1/theta)
# adjust the wave speed of each links
for _, pipe in tm.pipes():
pipe.wavev = np.float64(pipe.wavev * phi[int(pipe.id)-1] * theta)
# set time step as a new attribute to TransientModel
    tm.time_step = dt
return tm
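# Minimal usage sketch (illustrative only; the input file name and the
# TransientModel constructor are assumptions about the wider tsnet package,
# not something defined in this module):
#
#   import tsnet
#   tm = tsnet.network.TransientModel('Tnet.inp')
#   tm = discretization(tm, dt=0.01)   # adjusts wave speeds and sets tm.time_step
#   print(tm.time_step)
#   print([pipe.number_of_segments for _, pipe in tm.pipes()])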
|
the-stack_0_18939 | #! /usr/bin/env python
"""Script to """
from __future__ import print_function
import os
import sys
import argparse
from bdb import BdbQuit
from pysyte.types.paths import pwd
__version__ = '0.1.0'
class ScriptError(NotImplementedError):
pass
def run_args(args, methods):
"""Run any methods eponymous with args"""
if not args:
return False
valuable_args = {k for k, v in args.__dict__.items() if v}
arg_methods = {methods[a] for a in valuable_args if a in methods}
for method in arg_methods:
method(args)
def version(args):
print('%s %s' % (args, __version__))
raise SystemExit
def parse_args(methods):
"""Parse out command line arguments"""
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
parser.add_argument('items', metavar='items', type=str, nargs='+',
help='some items')
parser.add_argument('-s', '--short', action='store_true',
help='The shorter of absolute or relative path')
parser.add_argument('-v', '--version', action='store_true',
help='Show version')
args = parser.parse_args()
run_args(args, methods)
return args
def script(args):
p = pwd()
method = p.short_relative_path_to if args.short else p.relpathto
print(method(' '.join(args.items)))
return True
def main():
"""Run the script"""
try:
args = parse_args(globals())
return os.EX_OK if script(args) else not os.EX_OK
except BdbQuit:
pass
except SystemExit as e:
return e.code
return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_18940 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperparameters defining different problems.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.models import modalities # pylint: disable=unused-import
from tensor2tensor.utils import registry
import tensorflow as tf
def problem_hparams(problem_name, model_hparams):
"""Generate problem hyperparameters based on problem name.
Args:
problem_name: a string
model_hparams: a tf.contrib.training.HParams
Returns:
a tf.contrib.training.HParams
Raises:
ValueError: if problem_name is unknown.
"""
base_name, was_reversed, was_copy = parse_problem_name(problem_name)
p = _lookup_problem_hparams_fn(base_name)(model_hparams)
if was_reversed:
_reverse_problem_hparams(p)
if "image_cifar10" in base_name:
p.loss_multiplier = 1.
if was_copy:
_copy_problem_hparams(p)
return p
def parse_problem_name(problem_name):
"""Determines if problem_name specifies a copy and/or reversal.
Args:
problem_name: A string containing a single problem name from FLAGS.problems.
Returns:
base_name: A string with the base problem name.
was_reversed: A boolean.
was_copy: A boolean.
"""
# Recursively strip tags until we reach a base name.
if len(problem_name) > 4 and problem_name[-4:] == "_rev":
base, _, was_copy = parse_problem_name(problem_name[:-4])
return base, True, was_copy
elif len(problem_name) > 5 and problem_name[-5:] == "_copy":
base, was_reversed, _ = parse_problem_name(problem_name[:-5])
return base, was_reversed, True
else:
return problem_name, False, False
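# Illustrative examples of the tag handling above:
#   parse_problem_name("wmt_ende_tokens_32k")          -> ("wmt_ende_tokens_32k", False, False)
#   parse_problem_name("wmt_ende_tokens_32k_rev")      -> ("wmt_ende_tokens_32k", True, False)
#   parse_problem_name("wmt_ende_tokens_32k_rev_copy") -> ("wmt_ende_tokens_32k", True, True)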
def _lookup_problem_hparams_fn(name):
if name not in PROBLEM_HPARAMS_MAP:
map_str = "* " + "\n* ".join(sorted(PROBLEM_HPARAMS_MAP.keys()))
error_msg = "%s not in the supported set of problems:\n%s" % (name, map_str)
raise ValueError(error_msg)
return PROBLEM_HPARAMS_MAP.get(name)
def _copy_problem_hparams(p_hparams):
"""Use input modality, vocab, and space id for target."""
p = p_hparams
# Duplicate input modality.
p.target_modality = p.input_modality["inputs"]
# Duplicate input vocabulary.
p.vocabulary["targets"] = p.vocabulary["inputs"]
# Duplicate input space ids.
p.target_space_id = p.input_space_id
# Mark that p was reversed.
p.was_copy = True
def _reverse_problem_hparams(p_hparams):
"""Swap input/output modalities, vocab, and space ids."""
p = p_hparams
# Swap modalities.
input_modality = p.input_modality["inputs"]
target_modality = p.target_modality
p.input_modality["inputs"] = target_modality
p.target_modality = input_modality
# Swap vocabularies.
input_vocabulary = p.vocabulary["inputs"]
target_vocabulary = p.vocabulary["targets"]
p.vocabulary["inputs"] = target_vocabulary
p.vocabulary["targets"] = input_vocabulary
# Swap input/target space ids.
input_space_id = p.input_space_id
target_space_id = p.target_space_id
p.input_space_id = target_space_id
p.target_space_id = input_space_id
# Mark that p was reversed.
p.was_reversed = True
def default_problem_hparams():
"""A set of basic model hyperparameters."""
return tf.contrib.training.HParams(
# Use this parameter to get comparable perplexity numbers with different
# tokenizations. This value should be set to the ratio of the number of
      # tokens in the test set according to the tokenization used to the number
# of tokens in the test set in the "official" tokenization. For example,
# if we are using a word-piece based model and we want to compute
# per-word perplexity, then we set loss_multiplier to the number of
# wordpieces per word in the test set.
loss_multiplier=1.0,
# Use this parameter to allow for larger sequences in the batch. Without
# the use of this parameter, the size of the inner two dimensions will be
# used to judge the sequence length.
batch_size_multiplier=1,
# To make queues of the right capacity, it's good to know the maximal
# expected batch size, as it can vary a lot. It only affects performance
# of input readers and memory use. The defaults should be safe and fast,
# but decrease if your reader uses a lot of memory and increase if slow.
max_expected_batch_size_per_shard=64,
# Modalities used to map from input features to a space compatible with
# chosen model architecture. One modality spec (which is a 2-tuple,
# (modality_full_name, vocab_size)) per feature key. modality_full_name is
# a string type:name, e.g. class_label:class_label_2d. Leaving off the
# name uses the default modality for that type (e.g. class_label ==
# class_label:default).
input_modality={},
# Modality used to map from hidden representation to the target space.
# Specified as a modality spec, a 2-tuple described above.
target_modality=None,
# Identifiers used to tell the model which input/target space will be
# expected. For example, it can tell that we expect French as characters
# as output, or Spanish as sound. An integer with the following semantics:
# 0: Generic / unknown output space (default)
# 1: Image labels
# 2: English characters
# 3: English tokens
# 4: English bpe tokens
# 5: French characters
# 6: French tokens
# 7: German characters
# 8: German tokens
# 9: German bpe tokens
# 10: Digit cipher lexicon 0
# 11: Digit cipher lexicon 1
# 12: Audio waveform domain
# 13: Audio spectral domain
# 14: Parse characters
# 15: Parse tokens
# Add more above if needed.
input_space_id=0,
target_space_id=0,
# Vocabulary per feature key.
# a vocabulary converts to/from human-readable strings.
# E.g. {"inputs": text_encoder.ByteTextEncoder(),
# "targets": text_encoder.SubwordTextEncoder("vocab_filename.txt")}
vocabulary={
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.TextEncoder()
},
# This is a marker to keep track if the problem was reversed or copied.
# Only set automatically, do not override the default.
#
# These tags can be combined in order to perform copies of the input or
# the targets. For instance `problem_copy` will copy the inputs, but
# `problem_rev_copy` will copy the targets.
was_reversed=False,
was_copy=False,)
def test_problem_hparams(unused_model_hparams, input_vocab_size,
target_vocab_size):
"""Problem hparams for testing model bodies."""
p = default_problem_hparams()
p.input_modality = {"inputs": (registry.Modalities.SYMBOL, input_vocab_size)}
p.target_modality = (registry.Modalities.SYMBOL, target_vocab_size)
p.vocabulary = {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.TextEncoder()
}
return p
def algorithmic(vocab_size, unused_model_hparams):
"""Default parameters for algorithmic tasks."""
p = default_problem_hparams()
p.input_modality = {"inputs": (registry.Modalities.SYMBOL, vocab_size)}
p.target_modality = (registry.Modalities.SYMBOL, vocab_size)
p.vocabulary = {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.TextEncoder(),
}
p.input_space_id = 10
p.target_space_id = 11
return p
def audio_timit_characters(unused_model_hparams):
"""English audio transcription benchmark."""
p = default_problem_hparams()
p.input_modality = {
"inputs": (registry.Modalities.AUDIO, None),
}
p.target_modality = (registry.Modalities.SYMBOL, 256)
p.vocabulary = {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.ByteTextEncoder(),
}
p.batch_size_multiplier = 256
p.loss_multiplier = 2.0
p.input_space_id = 12
p.target_space_id = 2
return p
def audio_timit_tokens(model_hparams, wrong_vocab_size):
"""English audio transcription benchmark.
Args:
model_hparams: a tf.contrib.training.HParams
wrong_vocab_size: a number used in the filename indicating the approximate
vocabulary size. This is not to be confused with the actual vocabulary
size.
Returns:
a tf.contrib.training.HParams
"""
p = default_problem_hparams()
# This vocab file must be present within the data directory.
vocab_filename = os.path.join(model_hparams.data_dir,
"tokens.vocab.%d" % wrong_vocab_size)
subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)
p.input_modality = {
"inputs": (registry.Modalities.AUDIO, None),
}
p.target_modality = (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
p.vocabulary = {
"inputs": text_encoder.TextEncoder(),
"targets": subtokenizer,
}
p.batch_size_multiplier = 256
p.loss_multiplier = 2.0
p.input_space_id = 13
p.target_space_id = 3
return p
def audio_wsj_characters(unused_model_hparams):
"""English audio transcription benchmark."""
p = default_problem_hparams()
p.input_modality = {
"inputs": (registry.Modalities.AUDIO, None),
}
p.target_modality = (registry.Modalities.SYMBOL, 256)
p.vocabulary = {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.ByteTextEncoder(),
}
p.batch_size_multiplier = 512
p.loss_multiplier = 2.0
p.input_space_id = 13
p.target_space_id = 2
return p
def audio_wsj_tokens(model_hparams, wrong_vocab_size):
"""English audio transcription benchmark.
Args:
model_hparams: a tf.contrib.training.HParams
wrong_vocab_size: a number used in the filename indicating the approximate
vocabulary size. This is not to be confused with the actual vocabulary
size.
Returns:
a tf.contrib.training.HParams
"""
p = default_problem_hparams()
# This vocab file must be present within the data directory.
vocab_filename = os.path.join(model_hparams.data_dir,
"tokens.vocab.%d" % wrong_vocab_size)
subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)
p.input_modality = {
"inputs": (registry.Modalities.AUDIO, None),
}
p.target_modality = (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
p.vocabulary = {
"inputs": text_encoder.TextEncoder(),
"targets": subtokenizer,
}
p.batch_size_multiplier = 512
p.loss_multiplier = 2.0
p.input_space_id = 12
p.target_space_id = 3
return p
def lm1b_16k(model_hparams):
"""Billion-word language-modeling benchmark, 16k subtoken vocabulary."""
p = default_problem_hparams()
p.perplexity_exponent = 1.184206
p.input_modality = {}
p.target_modality = (registry.Modalities.SYMBOL, 16384)
p.vocabulary = {
"targets":
text_encoder.SubwordTextEncoder(
os.path.join(model_hparams.data_dir,
"lm1b_16k.subword_text_encoder"))
}
p.target_space_id = 3
return p
def lm1b_64k(model_hparams):
"""Billion-word language-modeling benchmark, 64k subtoken vocabulary."""
p = default_problem_hparams()
p.perplexity_exponent = 1.067068
p.input_modality = {}
p.target_modality = (registry.Modalities.SYMBOL, 65536)
p.vocabulary = {
"targets":
text_encoder.SubwordTextEncoder(
os.path.join(model_hparams.data_dir,
"lm1b_64k.subword_text_encoder"))
}
p.target_space_id = 3
return p
def lmptb_10k(model_hparams):
"""Penn Tree Bank language-modeling benchmark, 10k token vocabulary."""
p = default_problem_hparams()
p.input_modality = {}
p.target_modality = (registry.Modalities.SYMBOL, 10000)
vocabulary = text_encoder.TokenTextEncoder(
os.path.join(model_hparams.data_dir, "lmptb_10k.vocab"))
p.vocabulary = {
"targets": vocabulary,
}
p.input_space_id = 3
p.target_space_id = 3
return p
def wmt_enfr_characters(unused_model_hparams):
"""English to French translation benchmark."""
p = default_problem_hparams()
p.input_modality = {"inputs": (registry.Modalities.SYMBOL, 256)}
p.target_modality = (registry.Modalities.SYMBOL, 256)
p.vocabulary = {
"inputs": text_encoder.ByteTextEncoder(),
"targets": text_encoder.ByteTextEncoder(),
}
p.loss_multiplier = 2.0
p.input_space_id = 2
p.target_space_id = 5
return p
def wmt_enfr_tokens(model_hparams, wrong_vocab_size):
"""English to French translation benchmark.
Args:
model_hparams: a tf.contrib.training.HParams
wrong_vocab_size: a number used in the filename indicating the approximate
vocabulary size. This is not to be confused with the actual vocabulary
size.
Returns:
a tf.contrib.training.HParams
"""
p = default_problem_hparams()
# This vocab file must be present within the data directory.
vocab_filename = os.path.join(model_hparams.data_dir,
"tokens.vocab.%d" % wrong_vocab_size)
subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)
p.input_modality = {
"inputs": (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
}
p.target_modality = (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
p.vocabulary = {
"inputs": subtokenizer,
"targets": subtokenizer,
}
p.input_space_id = 3
p.target_space_id = 6
return p
def wmt_ende_bpe32k(model_hparams):
"""English to German translation benchmark."""
p = default_problem_hparams()
vocab_size = 40960
modality_spec = (registry.Modalities.SYMBOL, vocab_size)
p.input_modality = {"inputs": modality_spec}
p.target_modality = modality_spec
# This vocab file must be present within the data directory.
vocab_filename = os.path.join(model_hparams.data_dir, "vocab.bpe.32000")
p.vocabulary = {
"inputs": text_encoder.TokenTextEncoder(vocab_filename=vocab_filename),
"targets": text_encoder.TokenTextEncoder(vocab_filename=vocab_filename),
}
p.loss_multiplier = 1.4
p.input_space_id = 4
p.target_space_id = 9
return p
def wmt_ende_characters(unused_model_hparams):
"""English to German translation benchmark."""
p = default_problem_hparams()
p.input_modality = {"inputs": (registry.Modalities.SYMBOL, 256)}
p.target_modality = (registry.Modalities.SYMBOL, 256)
p.vocabulary = {
"inputs": text_encoder.ByteTextEncoder(),
"targets": text_encoder.ByteTextEncoder(),
}
p.loss_multiplier = 2.0
p.input_space_id = 2
p.target_space_id = 7
return p
def wmt_ende_tokens(model_hparams, wrong_vocab_size):
"""English to German translation benchmark."""
p = default_problem_hparams()
# This vocab file must be present within the data directory.
vocab_filename = os.path.join(model_hparams.data_dir,
"tokens.vocab.%d" % wrong_vocab_size)
subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)
p.input_modality = {
"inputs": (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
}
p.target_modality = (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
p.vocabulary = {
"inputs": subtokenizer,
"targets": subtokenizer,
}
p.input_space_id = 3
p.target_space_id = 8
return p
def wmt_ende_v2(model_hparams, vocab_size):
"""English to German translation benchmark with separate vocabularies."""
p = default_problem_hparams()
# These vocab files must be present within the data directory.
source_vocab_filename = os.path.join(model_hparams.data_dir,
"wmt_ende_v2.en.vocab.%d" % vocab_size)
target_vocab_filename = os.path.join(model_hparams.data_dir,
"wmt_ende_v2.de.vocab.%d" % vocab_size)
p.input_modality = {"inputs": (registry.Modalities.SYMBOL, vocab_size)}
p.target_modality = (registry.Modalities.SYMBOL, vocab_size)
p.vocabulary = {
"inputs": text_encoder.SubwordTextEncoder(source_vocab_filename),
"targets": text_encoder.SubwordTextEncoder(target_vocab_filename),
}
p.input_space_id = 3
p.target_space_id = 8
return p
def wmt_concat(model_hparams, wrong_vocab_size):
"""English to German translation benchmark."""
p = default_problem_hparams()
# This vocab file must be present within the data directory.
vocab_filename = os.path.join(model_hparams.data_dir,
"tokens.vocab.%d" % wrong_vocab_size)
subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)
vocab_size = subtokenizer.vocab_size
p.input_modality = {}
p.target_modality = (registry.Modalities.SYMBOL, vocab_size)
p.vocabulary = {"targets": subtokenizer}
return p
def wmt_parsing_characters(unused_model_hparams):
"""English to parse tree translation benchmark."""
p = default_problem_hparams()
p.input_modality = {"inputs": (registry.Modalities.SYMBOL, 256)}
p.target_modality = (registry.Modalities.SYMBOL, 256)
p.vocabulary = {
"inputs": text_encoder.ByteTextEncoder(),
"targets": text_encoder.ByteTextEncoder(),
}
p.loss_multiplier = 2.0
p.input_space_id = 2
p.target_space_id = 14
return p
def wmt_parsing_tokens(model_hparams, wrong_vocab_size):
"""English to parse tree translation benchmark.
Args:
model_hparams: a tf.contrib.training.HParams
wrong_vocab_size: a number used in the filename indicating the approximate
vocabulary size. This is not to be confused with the actual vocabulary
size.
Returns:
a tf.contrib.training.HParams
"""
p = default_problem_hparams()
# This vocab file must be present within the data directory.
vocab_filename = os.path.join(model_hparams.data_dir,
"tokens.vocab.%d" % wrong_vocab_size)
subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)
p.input_modality = {
"inputs": (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
}
p.target_modality = (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
p.vocabulary = {
"inputs": subtokenizer,
"targets": subtokenizer,
}
p.input_space_id = 3
p.target_space_id = 15
return p
def wsj_parsing_tokens(model_hparams, wrong_source_vocab_size,
wrong_target_vocab_size):
"""English to parse tree translation benchmark.
Args:
model_hparams: a tf.contrib.training.HParams
wrong_source_vocab_size: a number used in the filename indicating the
approximate vocabulary size. This is not to be confused with the actual
vocabulary size.
wrong_target_vocab_size: a number used in the filename indicating the
approximate target vocabulary size. This is not to be confused with the
actual target vocabulary size.
Returns:
a tf.contrib.training.HParams
"""
p = default_problem_hparams()
# This vocab file must be present within the data directory.
source_vocab_filename = os.path.join(
model_hparams.data_dir,
"wsj_source.tokens.vocab.%d" % wrong_source_vocab_size)
target_vocab_filename = os.path.join(
model_hparams.data_dir,
"wsj_target.tokens.vocab.%d" % wrong_target_vocab_size)
source_subtokenizer = text_encoder.SubwordTextEncoder(source_vocab_filename)
target_subtokenizer = text_encoder.SubwordTextEncoder(target_vocab_filename)
p.input_modality = {
"inputs": (registry.Modalities.SYMBOL, source_subtokenizer.vocab_size)
}
p.target_modality = (registry.Modalities.SYMBOL,
target_subtokenizer.vocab_size)
p.vocabulary = {
"inputs": source_subtokenizer,
"targets": target_subtokenizer,
}
p.input_space_id = 3
p.target_space_id = 15
return p
def image_cifar10(unused_model_hparams):
"""CIFAR-10."""
p = default_problem_hparams()
p.input_modality = {
"inputs": ("%s:small_image_modality" % registry.Modalities.IMAGE, None)
}
p.target_modality = (registry.Modalities.CLASS_LABEL, 10)
p.batch_size_multiplier = 4
p.max_expected_batch_size_per_shard = 8
p.loss_multiplier = 3.0
p.input_space_id = 1
p.target_space_id = 1
return p
def image_mnist(unused_model_hparams):
"""MNIST."""
p = default_problem_hparams()
p.input_modality = {"inputs": (registry.Modalities.SYMBOL, 256)}
p.target_modality = (registry.Modalities.CLASS_LABEL, 10)
p.batch_size_multiplier = 4
p.max_expected_batch_size_per_shard = 8
p.loss_multiplier = 3.0
p.input_space_id = 1
p.target_space_id = 1
return p
def image_imagenet(model_hparams):
"""ImageNet."""
p = default_problem_hparams()
p.input_modality = {
"inputs": (registry.Modalities.IMAGE, None),
}
target_modality = ("%s:class_label_2d" % registry.Modalities.CLASS_LABEL
if model_hparams.imagenet_use_2d else
registry.Modalities.CLASS_LABEL)
p.target_modality = (target_modality, 1000)
p.batch_size_multiplier = 256
p.max_expected_batch_size_per_shard = 2
p.loss_multiplier = 0.7
p.input_space_id = 1
p.target_space_id = 1
return p
def image_mscoco_characters(unused_model_hparams):
"""COCO image captioning with captions as characters."""
p = default_problem_hparams()
p.input_modality = {"inputs": (registry.Modalities.IMAGE, None)}
p.target_modality = (registry.Modalities.SYMBOL, 256)
p.vocabulary = {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.ByteTextEncoder(),
}
p.batch_size_multiplier = 128
p.max_expected_batch_size_per_shard = 2
p.loss_multiplier = 2.0
p.input_space_id = 1
p.target_space_id = 2
return p
def image_mscoco_tokens(model_hparams, vocab_count):
"""COCO image captioning with captions as tokens."""
p = default_problem_hparams()
p.input_modality = {"inputs": (registry.Modalities.IMAGE, None)}
# This vocab file must be present within the data directory.
vocab_filename = os.path.join(model_hparams.data_dir,
"tokens.vocab.%d" % vocab_count)
subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename)
p.target_modality = (registry.Modalities.SYMBOL, subtokenizer.vocab_size)
p.vocabulary = {
"inputs": text_encoder.TextEncoder(),
"targets": subtokenizer,
}
p.batch_size_multiplier = 256
p.max_expected_batch_size_per_shard = 2
p.input_space_id = 1
p.target_space_id = 3
return p
# Dictionary of named hyperparameter settings for various problems.
# This is only accessed through the problem_hparams function below.
PROBLEM_HPARAMS_MAP = {
"algorithmic_addition_binary40": lambda p: algorithmic(4, p),
"algorithmic_addition_decimal40": lambda p: algorithmic(12, p),
"algorithmic_identity_binary40": lambda p: algorithmic(4, p),
"algorithmic_identity_decimal40": lambda p: algorithmic(12, p),
"algorithmic_multiplication_binary40": lambda p: algorithmic(4, p),
"algorithmic_multiplication_decimal40": lambda p: algorithmic(12, p),
"algorithmic_reverse_binary40": lambda p: algorithmic(4, p),
"algorithmic_reverse_decimal40": lambda p: algorithmic(12, p),
"algorithmic_reverse_nlplike_decimal8K": lambda p: algorithmic(8002, p),
"algorithmic_reverse_nlplike_decimal32K": lambda p: algorithmic(32002, p),
"algorithmic_shift_decimal40": lambda p: algorithmic(22, p),
"audio_timit_characters_tune": audio_timit_characters,
"audio_timit_characters_test": audio_timit_characters,
"audio_timit_tokens_8k_tune": lambda p: audio_timit_tokens(p, 2**13),
"audio_timit_tokens_8k_test": lambda p: audio_timit_tokens(p, 2**13),
"audio_wsj_characters_tune": audio_wsj_characters,
"audio_wsj_characters_test": audio_wsj_characters,
"audio_wsj_tokens_8k_tune": lambda p: audio_wsj_tokens(p, 2**13),
"audio_wsj_tokens_8k_test": lambda p: audio_wsj_tokens(p, 2**13),
"lm1b_16k": lm1b_16k,
"lm1b_64k": lm1b_64k,
"lmptb_10k": lmptb_10k,
"wmt_parsing_characters": wmt_parsing_characters,
"wmt_parsing_tokens_8k": lambda p: wmt_parsing_tokens(p, 2**13),
"wsj_parsing_tokens_16k": lambda p: wsj_parsing_tokens(p, 2**14, 2**9),
"wsj_parsing_tokens_32k": lambda p: wsj_parsing_tokens(p, 2**15, 2**9),
"wmt_enfr_characters": wmt_enfr_characters,
"wmt_enfr_tokens_8k": lambda p: wmt_enfr_tokens(p, 2**13),
"wmt_enfr_tokens_32k": lambda p: wmt_enfr_tokens(p, 2**15),
"wmt_enfr_tokens_32k_shuffled": lambda p: wmt_enfr_tokens(p, 2**15),
"wmt_enfr_tokens_32k_combined": lambda p: wmt_enfr_tokens(p, 2**15),
"wmt_enfr_tokens_128k": lambda p: wmt_enfr_tokens(p, 2**17),
# bytes per subtoken: 3.267350
"wmt_ende_concat_8k": lambda p: wmt_concat(p, 2**13),
# bytes per subtoken: 4.236272
"wmt_ende_concat_32k": lambda p: wmt_concat(p, 2**15),
"wmt_ende_characters": wmt_ende_characters,
"wmt_ende_tokens_8k": lambda p: wmt_ende_tokens(p, 2**13),
"wmt_ende_tokens_32k": lambda p: wmt_ende_tokens(p, 2**15),
"wmt_ende_tokens_128k": lambda p: wmt_ende_tokens(p, 2**17),
# bytes per subtoken: 4.59291664162
"wmt_ende_bpe32k": wmt_ende_bpe32k,
"wmt_ende_bpe32k_shuffled": wmt_ende_bpe32k,
"wmt_ende_bpe32k_combined": wmt_ende_bpe32k,
"wmt_ende_bpe32k_160": wmt_ende_bpe32k,
"wmt_ende_v2_32k_combined": lambda p: wmt_ende_v2(p, 2**15),
"wmt_ende_v2_16k_combined": lambda p: wmt_ende_v2(p, 2**14),
"image_cifar10_tune": image_cifar10,
"image_cifar10_test": image_cifar10,
"image_mnist_tune": image_mnist,
"image_mnist_test": image_mnist,
"image_mscoco_characters_tune": image_mscoco_characters,
"image_mscoco_characters_test": image_mscoco_characters,
"image_mscoco_tokens_8k_tune": lambda p: image_mscoco_tokens(p, 2**13),
"image_mscoco_tokens_8k_test": lambda p: image_mscoco_tokens(p, 2**13),
"image_mscoco_tokens_32k_tune": lambda p: image_mscoco_tokens(p, 2**15),
"image_mscoco_tokens_32k_test": lambda p: image_mscoco_tokens(p, 2**15),
"image_mscoco_tokens_128k_tune": lambda p: image_mscoco_tokens(p, 2**17),
"image_mscoco_tokens_128k_test": lambda p: image_mscoco_tokens(p, 2**17),
"image_imagenet": image_imagenet,
}
|
the-stack_0_18941 | import csv
from pathlib import Path
from leaguesettings import LeagueSettings
from match import MatchDetails
from match_maker import TicketSystem
from paths import LeagueDir
from ranking_system import RankingSystem
from settings import PersistentSettings
settings = PersistentSettings.load()
ld = LeagueDir(Path(settings.league_dir_raw))
league_settings = LeagueSettings.load(ld)
times = ["00000000000000"] + [path.name[:14] for path in list(ld.rankings.iterdir())]
rankings = RankingSystem.all(ld)
tickets = TicketSystem.all(ld, league_settings)
bots = sorted(rankings[-1].ratings.keys())
matches = MatchDetails.all(ld)
# Tickets
with open(ld.stats / "tickets.csv", 'w', newline="") as csvfile_match:
writer_match = csv.writer(csvfile_match)
# Header
writer_match.writerow(["time"] + bots)
for time, ticket in zip(times, tickets):
writer_match.writerow([time] + [float(ticket.get_ensured(bot)) for bot in bots])
# Rankings
with open(ld.stats / "mmr.csv", 'w', newline="") as csvfile_mmr:
with open(ld.stats / "mmr_mu.csv", 'w', newline="") as csvfile_mu:
with open(ld.stats / "mmr_sigma.csv", 'w', newline="") as csvfile_sigma:
writer_mmr = csv.writer(csvfile_mmr)
writer_mu = csv.writer(csvfile_mu)
writer_sigma = csv.writer(csvfile_sigma)
# Header
writer_mmr.writerow(["time"] + bots)
writer_mu.writerow(["time"] + bots)
writer_sigma.writerow(["time"] + bots)
for time, rank in zip(times, rankings):
writer_mmr.writerow([time] + [rank.get_mmr(bot) for bot in bots])
writer_mu.writerow([time] + [rank.get(bot).mu for bot in bots])
writer_sigma.writerow([time] + [rank.get(bot).sigma for bot in bots])
# Matches
with open(ld.stats / "match.csv", 'w', newline="") as csvfile_match:
with open(ld.stats / "bot_stats.csv", 'w', newline="") as csvfile_bot_stats:
writer_match = csv.writer(csvfile_match)
writer_bot_stats = csv.writer(csvfile_bot_stats)
# Header
writer_match.writerow([
"time",
"blue_bot_1",
"blue_bot_2",
"blue_bot_3",
"orange_bot_1",
"orange_bot_2",
"orange_bot_3",
"map",
"replay_id",
"blue_goals",
"orange_goals"
])
writer_bot_stats.writerow([
"time",
"bot",
"points",
"goals",
"shots",
"saves",
"assists",
"demolitions",
"own_goals",
])
for match in matches:
writer_match.writerow([
match.time_stamp,
match.blue[0],
match.blue[1],
match.blue[2],
match.orange[0],
match.orange[1],
match.orange[2],
match.map,
match.replay_id,
match.result.blue_goals,
match.result.orange_goals,
])
for bot, stats in match.result.player_scores.items():
writer_bot_stats.writerow([
match.time_stamp,
bot,
stats.points,
stats.goals,
stats.shots,
stats.saves,
stats.assists,
stats.demolitions,
stats.own_goals,
])
|
the-stack_0_18942 | from collections import defaultdict
import nltk
import pandas as pd
import pdfplumber
import re
from deep_translator import GoogleTranslator,PonsTranslator
import os
import BLEU_matching
import keyword_identifiers
import translators
import text_preprocessing
def input_language():
language = input("Please enter the language of the questionnaire (without spaces)")
return language
def input_filename():
filename = input("Please enter the name of the questionnaire PDF:\n")
    print(f'You entered {filename}; please copy the PDF file into this folder if it is from later than 2009')
pdf = pdfplumber.open(filename)
return pdf
def input_translator_choice():
translator_choice = input("Would you like to \nA-use the default google translator \nB- use the alternative sPONSTranslator")
return translator_choice
language = input_language()
pdf = input_filename()
translator_choice = input_translator_choice()
def choose_translator(translator_choice,sentence):
target = keyword_identifiers.translator_dict[language.lower()]
if translator_choice.lower() == "a":
return GoogleTranslator(source = 'en', target = target).translate(sentence)
elif translator_choice.lower() == "b":
#return MicrosoftTranslator(source = 'en', target = target).translate(sentence)
return PonsTranslator(source='en', target=target).translate(sentence)
def translate_keywords():
translated_keywords_dict = defaultdict()
for key in keyword_identifiers.questions_to_keywords.values():
for item in key:
translated_keywords_dict[choose_translator(translator_choice,item)] = []
return (translated_keywords_dict)
def tokenize_and_translate_questions(pages):
translated_questions = defaultdict()
for number in pages:
p1 = pdf.pages[number]
text = text_preprocessing.remove_whitespace(p1.extract_text())
sentences = nltk.sent_tokenize(text)
for item in sentences:
if item[-1] == "?":
translated_questions[translators.translator_into_english(item)] = item
return translated_questions
def filter_non_words(input_dictionary):
words = set(nltk.corpus.words.words())
new_dictionary = defaultdict()
for question in input_dictionary.keys():
item = (" ".join(w.lower() for w in nltk.wordpunct_tokenize(str(question)) \
if w.lower() in words or not w.isalpha()))
new_dictionary[item] = input_dictionary[question]
return new_dictionary
def find_and_preprocess_questions():
translated_keywords = translate_keywords().keys()
pages = set()
for word in translated_keywords:
word = word.lower()
for i in range(0,len(pdf.pages)):
page_number = pdf.pages[i]
text = page_number.extract_text()
if re.findall(word,text,re.IGNORECASE):
pages.add(i)
clean_translations = tokenize_and_translate_questions(list(pages))
filtered_dictionary = filter_non_words(clean_translations)
keywords_questions = text_preprocessing.check_keywords(filtered_dictionary)
removed_brackets_list = text_preprocessing.remove_brackets(keywords_questions)
return removed_brackets_list
def group_questions_by_keyword(ungrouped_dictionary):
grouped_questions= defaultdict()
for question in ungrouped_dictionary.keys():
keyword_group = []
keyword_list = ungrouped_dictionary[question]
for translated_question in translated_questions_to_check.keys():
if any(keyword in translated_question for keyword in keyword_list):
keyword_group.append(translated_question)
grouped_questions[question] = keyword_group
return grouped_questions
# returns a dictionary mapping each of David's questions to the matched question in the foreign language
def main():
keyword_to_translations = group_questions_by_keyword(keyword_identifiers.questions_to_keywords)
matched_questions = defaultdict()
for key,value in keyword_to_translations.items():
if value == []:
matched_questions[key] = ["not found"]
else:
matched_questions[key] = BLEU_matching.bleu_implementation(key,value)
final_dataframe_dictionary = {}
for davids_question in matched_questions.keys():
if matched_questions[davids_question] == ["not found"]:
final_dataframe_dictionary[davids_question] = "not found"
else:
final_dataframe_dictionary[davids_question] = translated_questions_to_check[matched_questions[davids_question]]
return final_dataframe_dictionary
translated_questions_to_check = find_and_preprocess_questions()
output_dict = main()
for key,value in output_dict.items():
print(key,value)
|
the-stack_0_18943 | import argparse
from pathlib import Path
from PIL import Image
import numpy as np
try:
from tqdm import tqdm
except ModuleNotFoundError:
    # Fall back to a no-op wrapper so the script still runs without tqdm installed.
    def tqdm(iterable, **kwargs):
        return iterable
def args_parse():
parser = argparse.ArgumentParser("Calculate image mean and std.")
parser.add_argument("--images_dir", help="Directory contains a list of images")
_args = parser.parse_args()
return _args
def images_stats(imgs):
mean = []
std = []
for img in tqdm(imgs):
img_np = np.array(Image.open(img))/255
rgb_mean = np.mean(img_np, axis=(0, 1))
rgb_std = np.std(img_np, axis=(0, 1))
mean.append(rgb_mean)
std.append(rgb_std)
mean = np.array(mean)
std = np.array(std)
return mean, std
if __name__ == "__main__":
imgs = list(sorted(Path(args.images_dir).rglob("*.jpg")))
print("process %s images:" % len(imgs))
mean, std = images_stats(imgs)
u = np.mean(mean, axis=0)
sigma = np.mean(std, axis=0)
print("target mean: ", np.around(u, decimals=3))
print("target std: ", np.around(sigma, decimals=3))
|
the-stack_0_18944 | '''
message utility functions for processing native messages and formatting responses
'''
import json
import sys
import struct
# Read a message from stdin and decode it.
def get_message():
raw_length = sys.stdin.buffer.read(4)
if not raw_length:
send_message("Error: message length missing")
sys.exit(0)
message_length = struct.unpack('=I', raw_length)[0]
message = sys.stdin.buffer.read(message_length).decode("utf-8")
return json.loads(message)
# Encode a message for transmission, given its content.
def _encode_message(message_content):
encoded_content = json.dumps(message_content).encode("utf-8")
encoded_length = struct.pack('=I', len(encoded_content))
# use struct.pack("10s", bytes), to pack a string of the length of 10 characters
return {'length': encoded_length, 'content': struct.pack(str(len(encoded_content))+"s",encoded_content)}
# Send an encoded message to stdout.
def send_message(raw_message):
encoded_message = _encode_message(raw_message)
sys.stdout.buffer.write(encoded_message['length'])
sys.stdout.buffer.write(encoded_message['content'])
sys.stdout.buffer.flush()
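# Minimal host-loop sketch built on the helpers above (illustrative only; it
# assumes stdin/stdout are wired to a browser extension over native messaging):
#
#   while True:
#       received = get_message()
#       send_message({"echo": received})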
|
the-stack_0_18946 | from featmultinomial import FeatMultinomialNB
default_config = {
# Corpus files
'u_corpus_f': 'corpus/unlabeled_new_corpus.pickle',
'test_corpus_f': 'corpus/test_new_corpus.pickle',
'training_corpus_f': 'corpus/training_new_corpus.pickle',
'feature_corpus_f': 'corpus/feature_corpus.pickle',
# Options to be displayed
'number_of_classes': 30,
'number_of_features': 30,
# Classifier
'classifier': FeatMultinomialNB(),
# Features
'feature_boost': 0.5,
# Active learning instance selection function
'get_next_instance': None,
# Active learning feature selection functions
'get_next_features': None,
'handle_feature_prediction': None,
# Active learning class selection function
'get_class_options': None,
# Run expectation maximization algorithm after training
'can_run_em': False
}
|
the-stack_0_18948 | #!/usr/bin/env python
# Script to run tests and benchmarks.
import argparse
import glob
import os
import subprocess
import sys
from typing import Sequence
def parse_arguments():
parser = argparse.ArgumentParser(description="Select tests to run")
parser.add_argument(
"--gpu-integration-tests",
help="Run GPU integration tests - requires a GPU with CUDA installation.",
dest="gpu_integration_tests",
default=False,
action=argparse.BooleanOptionalAction,
)
parser.add_argument(
"--iterators-tests",
help="Run Iterators tests.",
dest="iterators_tests",
default=False,
action=argparse.BooleanOptionalAction,
)
return parser.parse_args()
def _convert_path_to_module(test_script: str) -> str:
"""Convert the path of the test script to its module name."""
test_script = test_script.replace(os.sep, ".")
test_script = test_script.strip(".")
if test_script.endswith(".py"):
return test_script[:-3]
return test_script
def _configure_env():
env = os.environ
build_dir = env["IREE_LLVM_SANDBOX_BUILD_DIR"]
# TODO: just grab from .env.
env["PYTHONPATH"] = (
os.path.join(build_dir, "tools/sandbox/python_packages") +
((":" + env["PYTHONPATH"]) if "PYTHONPATH" in env else ""))
env["MLIR_RUNNER_UTILS_LIB"] = os.path.join(build_dir,
"lib/libmlir_runner_utils.so")
env["MLIR_C_RUNNER_UTILS_LIB"] = os.path.join(
build_dir, "lib/libmlir_c_runner_utils.so")
env["MLIR_RUNNER_EXTRA_LIBS"] = os.path.join(
build_dir, "lib/libmlir_async_runtime_copy.so")
return env
def _run_test(test_script: str, test_args: Sequence[str] = []) -> bool:
"""Run the provided test script an return failure or success.
A test succeeds if:
- it does not time out
- it returns zero
- it does not print FAILURE
"""
print(f"- running {test_script}: ", end="")
module = _convert_path_to_module(test_script)
env = _configure_env()
proc = subprocess.Popen(["python", "-m", module] + test_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
try:
outs, errs = proc.communicate(timeout=20)
except subprocess.TimeoutExpired:
proc.kill()
print("\033[31m" + "FAILED" + "\033[m")
print(" -> test execution timed out")
return False
if proc.returncode != 0:
print("\033[31m" + "FAILED" + "\033[m")
print(f" -> test returned code {proc.returncode}")
print(errs.decode("utf-8"))
return False
# Check the output for numerical failures.
outs = outs.decode("utf-8")
errs = errs.decode("utf-8")
for line in outs.splitlines() + errs.splitlines():
if line.count("FAILURE") != 0:
print("\033[31m" + "FAILED" + "\033[m")
print(f" -> test failure: {line}")
return False
print("\033[32m" + "SUCCESS" + "\033[m")
return True
def run_small_search():
return _run_test(
"./python/examples/tuning/test_nevergrad_small_matmul.py",
      ['--search-budget', '500', '--n_iters', '3000', '--num-parallel-tasks', '10'])
def main(args):
results = []
for f in glob.glob("./python/**/*test.py", recursive=True):
results.append(_run_test(f))
# Tun a small search.
results.append(
_run_test("./python/examples/tuning/test_nevergrad_small_matmul.py", [
'--search-budget', '10', '--n_iters', '10', '--num-parallel-tasks',
'4'
]))
errors = results.count(False)
if errors:
print(f"-> {errors} tests failed!")
# Additionally run the lit tests.
print(f"- running lit tests:")
lit_args = ["lit", "-v"]
if not args.gpu_integration_tests:
lit_args.append("--filter-out=Integration/Dialect/VectorExt/GPU")
test_dirs = ["test"]
if args.iterators_tests:
test_dirs += [
"experimental/iterators/unittests", "experimental/iterators/test"
]
returncode = subprocess.call(lit_args + test_dirs, env=_configure_env())
if returncode != 0:
print(f"-> lit tests failed!")
if returncode != 0 or errors:
exit(1)
if __name__ == '__main__':
main(parse_arguments())
|
the-stack_0_18949 | # #!/usr/bin/python
#
import numpy as np
import pandas as pd
import datetime as dt
import dateutil.relativedelta as dtr
def temp_f(c_month,farm_N):
degs = (tarbert_avetemp[c_month-1]-ardrishaig_avetemp[c_month-1])/(685715-665300)
Ndiff = farm_N - 665300 #farm Northing - tarbert Northing
return round(tarbert_avetemp[c_month-1] - Ndiff*degs, 1)
def d_hatching(c_temp):
return 3*(3.3 - 0.93*np.log(c_temp/3) -0.16*np.log(c_temp/3)**2) #for 3 broods
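# temp_f linearly interpolates between the Tarbert and Ardrishaig monthly average
# sea temperatures according to the farm's northing (farm_N), using the reference
# northings 665300 (Tarbert) and 685715 (Ardrishaig); d_hatching converts that
# temperature into an estimated hatching time in days, scaled by 3 for the three broods.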
nfarms = 10
ncages = np.array([1,6,4,8,12,9,9,8,9,9])
fishf = [40000,40000,40000,40000,40000,40000,40000,40000,40000]
ext_pressure = 150 #planktonic lice per day per cage/farm arriving from wildlife -> seasonal?
start_pop = 150*np.sum(ncages)
prop_influx = 0.33
eggs = 1200#3 broods #[50,50,40,40,50,60,80,80,80,80,70,50]
#d_hatching = [9,10,11,9,8,6,4,4,4,4,5,7]
ardrishaig_avetemp = np.array([8.2,7.55,7.45,8.25,9.65,11.35,13.15,13.75,13.65,12.85,11.75,9.85]) #www.seatemperature.org
tarbert_avetemp = np.array([8.4,7.8,7.7,8.6,9.8,11.65,13.4,13.9,13.9,13.2,12.15,10.2])
xy_array = np.array([[190300,665300],[192500,668200],[191800,669500],[186500,674500],
[190400,676800],[186300,679600],[190800,681000],[195300,692200],[199800,698000]])
#Tarbert South, Rubha Stillaig, Glenan Bay, Meall Mhor, Gob a Bharra, Strondoir Bay, Ardgaddan, Ardcastle, Quarry Point
f_muEMB = 3.5
f_sigEMB = 0.7
dates_list = [[] for i in range(nfarms-1)]
dates_list[0].extend(pd.date_range(dt.datetime(2017, 12, 1), dt.datetime(2018, 1, 1)).tolist())
dates_list[0].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[0].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
dates_list[1].extend(pd.date_range(dt.datetime(2017, 10, 1), dt.datetime(2018, 1, 1)).tolist())
dates_list[1].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[1].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
dates_list[2].extend(pd.date_range(dt.datetime(2017, 12, 1), dt.datetime(2018, 1, 1)).tolist())
dates_list[2].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[2].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
dates_list[3].extend(pd.date_range(dt.datetime(2017, 10, 1), dt.datetime(2017, 11, 1)).tolist())
dates_list[3].extend(pd.date_range(dt.datetime(2017, 12, 1), dt.datetime(2018, 1, 1)).tolist())
dates_list[3].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[3].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
dates_list[4].extend(pd.date_range(dt.datetime(2017, 10, 1), dt.datetime(2018, 1, 1)).tolist())
dates_list[4].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[4].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
dates_list[5].extend(pd.date_range(dt.datetime(2017, 10, 1), dt.datetime(2018, 1, 1)).tolist())
dates_list[5].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[5].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
dates_list[6].extend(pd.date_range(dt.datetime(2017, 11, 1), dt.datetime(2018, 1, 1)).tolist())
dates_list[6].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[6].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
dates_list[7].extend(pd.date_range(dt.datetime(2017, 10, 1), dt.datetime(2017, 12, 1)).tolist())
dates_list[7].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[7].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
dates_list[8].extend(pd.date_range(dt.datetime(2017, 10, 1), dt.datetime(2017, 12, 1)).tolist())
dates_list[8].extend(pd.date_range(dt.datetime(2018, 2, 1), dt.datetime(2018, 3, 1)).tolist())
dates_list[8].extend(pd.date_range(dt.datetime(2018, 5, 1), dt.datetime(2018, 6, 1)).tolist())
bool_treat = '(cur_date - dt.timedelta(days=14)) in inpt.dates_list[farm-1]'
prob_arrive = pd.read_csv('./Data/Fyne_props.csv',header=None)
E_days = pd.read_csv('./Data/Fyne_Edays.csv',header=None)
Enfish_res = [3]*12 #[4,5,6,6,8,15,37,22,10,2,2,3] #"roughly" based on marine scotland fixed engine catch data for west of scotland from 2000-2018
Ewt_res = 2500
start_date = dt.datetime(2017, 8, 1)
end_date = dt.datetime(2019, 8, 1)
cpw = [1, 1, 1, 2, 3, 1, 1, 2, 1, 1]
numwk = [1, 6, 4, 4, 4, 9, 9, 4, 9, 9]
farm_start = [dt.datetime(2017, 8, 1), dt.datetime(2017, 10, 1), dt.datetime(2017, 9, 1), dt.datetime(2017, 10, 1), dt.datetime(2017, 10, 1), dt.datetime(2017, 10, 1), dt.datetime(2017, 8, 1), dt.datetime(2017, 9, 1), dt.datetime(2017, 10, 1), dt.datetime(2017, 9, 1)]
cage_start = [[farm_start[i] + dtr.relativedelta(weeks=j) for j in range(numwk[i])]*cpw[i] for i in range(nfarms)]
NSbool_str = 'cur_date>=inpt.cage_start[farm][cage-1]' |
the-stack_0_18950 | import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
import torch.distributed
from utils.dataset import pc_dataloader,std_collate_fn
from torch.utils.data.dataloader import DataLoader
from models.STD import PGM
import os
import datetime
import logging
from pathlib import Path
import importlib
import shutil
from tqdm import tqdm
import numpy as np
import time
EXP_DIR = '/data/usr/zhengyu/exp'
def main():
def log_string(str):
logger.info(str)
print(str)
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2'
# create dir
timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
experiment_dir = Path(EXP_DIR)
experiment_dir.mkdir(exist_ok=True)
experiment_dir = experiment_dir.joinpath('STD')
experiment_dir.mkdir(exist_ok=True)
exp_dir = experiment_dir
experiment_dir = experiment_dir.joinpath(timestr)
experiment_dir.mkdir(exist_ok=True)
checkpoints_dir = experiment_dir.joinpath('checkpoints')
checkpoints_dir.mkdir(exist_ok=True)
log_dir = experiment_dir.joinpath('logs')
log_dir.mkdir(exist_ok=True)
# LOG
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('{0}/exp_{1}.txt'.format(log_dir,timestr))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# torch.distributed.init_process_group(backend="nccl")
model = PGM(0).cuda()
# model = nn.parallel.DistributedDataParallel(model)
last_exp = "2021-08-18_19-33"
try:
checkpoint = torch.load(str(exp_dir.joinpath(last_exp)) + '/checkpoints/best.pt')
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['model_state_dict'])
log_string('Use pretrain model')
except:
log_string('No existing model, starting training from scratch...')
start_epoch = 0
INITIAL_LR = 0.0001
AFTER_LR = 0.00005
optimizer = torch.optim.Adam(
model.parameters(),
lr=INITIAL_LR,
betas=(0.9, 0.999),
eps=1e-08
)
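# The step change at epoch 80 below could equivalently be expressed with a scheduler
# (sketch only, assuming the same 0.0001 -> 0.00005 halving is intended):
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[80], gamma=0.5)
# ...and then calling scheduler.step() once per epoch instead of editing param_groups by hand.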
EPOCH = 100
best_loss = 0
data = pc_dataloader()
trainDataloader = DataLoader(data, batch_size=1, shuffle=True, collate_fn=std_collate_fn)
for epoch in range(start_epoch, EPOCH):
log_string('**** Epoch %d/%s ****' % (epoch+1,EPOCH))
# adjust lr
if epoch == 80:
for p in optimizer.param_groups:
p['lr'] = AFTER_LR
loss_sum = 0
proposal_num = 0
for i, (points, target) in tqdm(enumerate(trainDataloader), total=len(trainDataloader), smoothing=0.9):
optimizer.zero_grad()
proposals,features,loss = model(points,target)
loss.backward()
optimizer.step()
loss_sum += loss.item()  # accumulate a plain Python float rather than a CUDA tensor
proposal_num += len(proposals)
log_string('Training mean loss: %f' % (loss_sum / len(trainDataloader)))
log_string("Training output proposal: %f"%(proposal_num/len(trainDataloader)))
if epoch % 5 == 0:
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best.pt'
log_string('Saving at %s' % savepath)
state = {
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
log_string('Saving model....')
# save the final model (written to the same best.pt path as the periodic checkpoints)
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best.pt'
log_string('Saving at %s' % savepath)
state = {
'epoch': EPOCH,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
log_string('Saving last model....')
if __name__ == '__main__':
main() |
the-stack_0_18952 | from setuptools import setup, find_packages
import sys, os.path
# Don't import scigym module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'scigym'))
from version import VERSION
# Read the contents of the README file
with open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Environment-specific dependencies.
extras = {
'teleportation' : [],
'entangled_ions': [],
'toricgame': ['matplotlib==3.4.2'] # for rendering only
}
# Meta dependency groups.
all_deps = []
for group_name in extras:
all_deps += extras[group_name]
extras['all'] = all_deps
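# Typical installs against these extras (usage sketch, package name as declared below):
#   pip install scigym                # core only
#   pip install scigym[toricgame]     # adds matplotlib for rendering
#   pip install scigym[all]           # every environment-specific dependency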
setup(name='scigym',
version=VERSION,
description='SciGym -- The OpenAI Gym for Science: A platform for your scientific reinforcement learning problem.',
url='https://github.com/HendrikPN/scigym',
author='HendrikPN',
author_email='[email protected]',
license='MIT',
packages=[package for package in find_packages()
if package.startswith('scigym')],
zip_safe=False,
install_requires=['gym==0.18.0'],
extras_require=extras,
package_data={'scigym': []},
tests_require=['pytest'],
long_description=long_description,
long_description_content_type='text/markdown',
)
|
the-stack_0_18953 | from flask_script import Command, Option
from flask_security.utils import hash_password
from models.models import db
from main import app
import datetime
import random
import os
import json
from pprint import pprint
class CreateDB(Command):
def run(self, **kwargs):
print(db.engine)
print(dir(db.engine))
db.engine.echo = True
r = db.create_all()
print(r)
class ResetDB(Command):
"""Drops all tables and recreates them"""
def run(self, **kwargs):
db.drop_all()
db.create_all()
class AddMazeCourse(Command):
"""Fills in predefined data into DB for Maze Game"""
option_list = (
Option('--owner', '-o', dest='owner_id', default='1'),
)
def run(self, owner_id, **kwargs):
from models.user import User
from models.course import Course
from models.role import Role
from models.assignment import Assignment
from models.assignment_group import AssignmentGroup
from models.assignment_group_membership import AssignmentGroupMembership
owner_id = int(owner_id)
maze_course = Course.new('Maze Course', owner_id, 'public', '', 'maze')
maze_group = AssignmentGroup.new(owner_id, maze_course.id, "Maze Game")
for level in range(10):
maze_level = Assignment.new(owner_id, maze_course.id, 'maze', level=str(1 + level))
db.session.add(maze_level)
db.session.flush()
membership = AssignmentGroupMembership.move_assignment(maze_level.id, maze_group.id)
db.session.add(membership)
db.session.commit()
class AddTestUsersDB(Command):
"""Fills in predefined data into DB"""
def run(self, **kwargs):
from models.user import User
from models.course import Course
from models.role import Role
from models.assignment_group import AssignmentGroup
from models.assignment_group_membership import AssignmentGroupMembership
from models.assignment import Assignment
default_course = Course.query.first()
print("Adding Teacher")
teacher = User(first_name="Klaus",
last_name="Bart",
password=hash_password("password"),
confirmed_at=datetime.datetime.now(),
active=True,
email="[email protected]")
db.session.add(teacher)
db.session.flush()
db.session.add(Role(name='instructor', course_id=default_course.id, user_id=teacher.id))
print("Adding Student")
student = User(first_name="Ada",
last_name="Bart",
password=hash_password("password"),
confirmed_at=datetime.datetime.now(),
active=True,
email="[email protected]")
db.session.add(student)
db.session.flush()
db.session.add(Role(name='student', course_id=default_course.id, user_id=student.id))
print("Adding basic assignments")
basic_group = AssignmentGroup(name="First Group", course_id=default_course.id, owner_id=teacher.id)
db.session.add(basic_group)
db.session.flush()
for i in range(5):
assignment = Assignment(name="Problem {}".format(i), instructions="Complete this problem",
owner_id=teacher.id, course_id=default_course.id)
db.session.add(assignment)
db.session.flush()
db.session.add(AssignmentGroupMembership(assignment_group_id=basic_group.id,
assignment_id=assignment.id))
db.session.commit()
print("Complete")
class PopulateDB(Command):
"""Fills in predefined data into DB"""
def run(self, **kwargs):
from models.user import User
from models.course import Course
from models.role import Role
with open('settings/secrets.json', 'r') as secret_file:
secrets = json.load(secret_file).get("ADMIN", {})
print("Adding Admin")
admin = User(first_name=secrets.get("first_name", "Admin"),
last_name=secrets.get("last_name", "User"),
password=hash_password(secrets.get("password", "password")),
confirmed_at=datetime.datetime.now(),
active=True,
email=secrets.get("email", "[email protected]"))
db.session.add(admin)
db.session.flush()
db.session.add(Role(name='instructor', user_id=admin.id))
db.session.add(Role(name='admin', user_id=admin.id))
print("Adding default course")
default_course = Course(name="Default Course", owner_id=admin.id, service="native",
url="default", visibility='public')
db.session.add(default_course)
db.session.flush()
db.session.add(Role(name='instructor', course_id=default_course.id, user_id=admin.id))
db.session.commit()
print("Complete")
class DisplayDB(Command):
def run(self, **kwargs):
from sqlalchemy import MetaData
from scripts.sqlalchemy_schemadisplay3 import create_schema_graph
connection = app.config['SQLALCHEMY_DATABASE_URI']
filename = 'docs/dbschema.png'
graph = create_schema_graph(metadata=MetaData(connection),
show_datatypes=True, # The image would get nasty big if we'd show the datatypes
show_indexes=False, # ditto for indexes
rankdir='LR', # From left to right (instead of top to bottom)
font='Helvetica',
concentrate=False # Don't try to join the relation lines together
)
graph.write_png(filename) # write out the file
class ExportCourse(Command):
option_list = (
Option('--file', '-f', dest='course_data_path', default='backups/current_course_data.json'),
Option('--course', '-c', dest='course_id', default='1'),
)
def run(self, course_id, course_data_path, **kwargs):
from models.models import AssignmentGroupMembership
from models.course import Course
from models.assignment import Assignment
from models.assignment_group import AssignmentGroup
exported_data = Course.export(int(course_id))
with open(course_data_path, 'w') as output_file:
json.dump(exported_data, output_file, indent=2, sort_keys=True)
pprint(exported_data)
class ImportCourse(Command):
option_list = (
Option('--file', '-f', dest='course_data_path', default='backups/current_course_data.json'),
Option('--owner', '-o', dest='owner_id', default='1'),
)
def run(self, owner_id, course_data_path, **kwargs):
from models.models import AssignmentGroupMembership
from models.course import Course
from models.assignment import Assignment
from models.assignment_group import AssignmentGroup
with open(course_data_path, 'r') as input_file:
imported_data = json.load(input_file)
Course.import_json(imported_data, int(owner_id))
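# Hypothetical invocation sketch -- the command names depend on how these classes are
# registered with the Flask-Script Manager (not shown here); only the -c/-f/-o flags
# come from the option_list definitions above:
#   python manage.py export_course -c 1 -f backups/current_course_data.json
#   python manage.py import_course -o 1 -f backups/current_course_data.json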
class RemoveCourse(Command):
option_list = (
Option('--course', '-c', dest='course_id'),
)
def run(self, course_id, **kwargs):
from models.course import Course
Course.remove(int(course_id), True)
class DumpDB(Command):
option_list = (
Option('--output', '-o', dest='output', default='backups/db/'),
Option('--log_for_course', '-l', dest='log_for_course', default=None),
)
def dump_rows(self, rows, output, table_name):
data = [{c.name: str(getattr(row, c.name))
for c in row.__table__.columns}
for row in rows]
full_path = os.path.join(output, table_name + '.json')
with open(full_path, 'w') as output_file:
json.dump(data, output_file)
def _log_for_course(self, course, output):
from models.log import Log
logs = Log.get_logs_for_course(course)
self.dump_rows(logs, output, 'log')
def run(self, output, log_for_course, **kwargs):
if log_for_course:
return self._log_for_course(log_for_course, output)
from models.models import (db, AssignmentGroupMembership)
from models.user import User
from models.course import Course
from models.role import Role
from models.authentication import Authentication
from models.log import Log
from models.submission import Submission
from models.assignment import Assignment
from models.assignment_group import AssignmentGroup
tables = {
'user': User,
'course': Course,
'submission': Submission,
'assignment': Assignment,
'group': AssignmentGroup,
'membership': AssignmentGroupMembership,
'authentication': Authentication,
'log': Log,
'role': Role
}
for table_name, table_class in tables.items():
self.dump_rows(table_class.query.all(), output, table_name)
class ExportProgSnap(Command):
option_list = (
Option('--output', '-o', dest='output', default='backups/progsnap2_{}'),
Option('--log_for_course', '-l', dest='log_for_course', default=1),
Option('--groups', '-g', dest='groups', default=None),
)
def run(self, output, log_for_course, groups, **kwargs):
from models.portation import export_progsnap2
if groups is not None:
output = output + "_{}".format(groups.replace(",", "_"))
groups = [int(g) for g in groups.split(",")]
export_progsnap2(output.format(log_for_course), log_for_course, groups)
|
the-stack_0_18954 | import copy
import json
import os
from ast import literal_eval
from typing import Iterable
from slugify import slugify
from prefect import config
from prefect.agent import Agent
from prefect.environments.storage import Docker
from prefect.serialization.storage import StorageSchema
from prefect.utilities.graphql import GraphQLResult
class FargateAgent(Agent):
"""
Agent which deploys flow runs as tasks using Fargate. This agent can run anywhere as
long as the proper access configuration variables are set. Information on using the
Fargate Agent can be found at https://docs.prefect.io/cloud/agents/fargate.html
All `kwargs` are accepted that one would normally pass to boto3 for `register_task_definition`
and `run_task`. For information on the kwargs supported visit the following links:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.register_task_definition
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
**Note**: if AWS authentication kwargs such as `aws_access_key_id` and `aws_session_token`
are not provided they will be read from the environment.
Environment variables may be set on the agent to be provided to each flow run's Fargate task:
```
prefect agent start fargate --env MY_SECRET_KEY=secret --env OTHER_VAR=$OTHER_VAR
```
boto3 kwargs being provided to the Fargate Agent:
```
prefect agent start fargate networkConfiguration="{'awsvpcConfiguration': {'assignPublicIp': 'ENABLED', 'subnets': ['my_subnet_id'], 'securityGroups': []}}"
```
Args:
- name (str, optional): An optional name to give this agent. Can also be set through
the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to "agent"
- labels (List[str], optional): a list of labels, which are arbitrary string identifiers used by Prefect
Agents when polling for work
- env_vars (dict, optional): a dictionary of environment variables and values that will be set
on each flow run that this agent submits for execution
- max_polls (int, optional): maximum number of times the agent will poll Prefect Cloud for flow runs;
defaults to infinite
- aws_access_key_id (str, optional): AWS access key id for connecting the boto3
client. Defaults to the value set in the environment variable
`AWS_ACCESS_KEY_ID` or `None`
- aws_secret_access_key (str, optional): AWS secret access key for connecting
the boto3 client. Defaults to the value set in the environment variable
`AWS_SECRET_ACCESS_KEY` or `None`
- aws_session_token (str, optional): AWS session key for connecting the boto3
client. Defaults to the value set in the environment variable
`AWS_SESSION_TOKEN` or `None`
- region_name (str, optional): AWS region name for connecting the boto3 client.
Defaults to the value set in the environment variable `REGION_NAME` or `None`
- enable_task_revisions (bool, optional): Enable registration of task definitions using revisions.
When enabled, task definitions will use flow name as opposed to flow id and each new version will be a
task definition revision. Each revision will be registered with a tag called 'PrefectFlowId'
and 'PrefectFlowVersion' to enable proper lookup for existing revisions. Flow name is reformatted
to support task definition naming rules by converting all non-alphanumeric characters to '_'.
Defaults to False.
- use_external_kwargs (bool, optional): When enabled, the agent will check for the existence of an
external json file containing kwargs to pass into the run_flow process.
Defaults to False.
- external_kwargs_s3_bucket (str, optional): S3 bucket containing external kwargs.
- external_kwargs_s3_key (str, optional): S3 key prefix for the location of <slugified_flow_name>/<flow_id[:8]>.json.
- **kwargs (dict, optional): additional keyword arguments to pass to boto3 for
`register_task_definition` and `run_task`
"""
def __init__( # type: ignore
self,
name: str = None,
labels: Iterable[str] = None,
env_vars: dict = None,
max_polls: int = None,
aws_access_key_id: str = None,
aws_secret_access_key: str = None,
aws_session_token: str = None,
region_name: str = None,
enable_task_revisions: bool = False,
use_external_kwargs: bool = False,
external_kwargs_s3_bucket: str = None,
external_kwargs_s3_key: str = None,
**kwargs
) -> None:
super().__init__(
name=name, labels=labels, env_vars=env_vars, max_polls=max_polls
)
from boto3 import client as boto3_client
from boto3 import resource as boto3_resource
# Config used for boto3 client initialization
aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
aws_secret_access_key = aws_secret_access_key or os.getenv(
"AWS_SECRET_ACCESS_KEY"
)
aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
region_name = region_name or os.getenv("REGION_NAME")
# revisions and kwargs configurations
self.enable_task_revisions = enable_task_revisions
self.use_external_kwargs = use_external_kwargs
self.external_kwargs_s3_bucket = external_kwargs_s3_bucket
self.external_kwargs_s3_key = external_kwargs_s3_key
# Parse accepted kwargs for definition and run
self.task_definition_kwargs, self.task_run_kwargs = self._parse_kwargs(
kwargs, True
)
# Client initialization
self.boto3_client = boto3_client(
"ecs",
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
)
# fetch external kwargs from s3 if needed
if self.use_external_kwargs:
self.logger.info("Use of external S3 kwargs enabled.")
self.s3_resource = boto3_resource(
"s3",
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
)
# get boto3 client for resource groups tagging api
if self.enable_task_revisions:
self.logger.info("Native ECS task revisions enabled.")
self.boto3_client_tags = boto3_client(
"resourcegroupstaggingapi",
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
)
def _override_kwargs(
self,
flow_run: GraphQLResult,
flow_task_definition_kwargs: dict,
flow_task_run_kwargs: dict,
) -> None:
"""
Return new kwargs updated from external kwargs file.
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
- flow_task_definition_kwargs (dict): task_definition_kwargs to update
- flow_task_run_kwargs (dict): task_run_kwargs to update
"""
from botocore.exceptions import ClientError
# get external kwargs from S3
try:
self.logger.info("Fetching external kwargs from S3")
obj = self.s3_resource.Object(
self.external_kwargs_s3_bucket,
os.path.join( # type: ignore
self.external_kwargs_s3_key, # type: ignore
slugify(flow_run.flow.name), # type: ignore
"{}.json".format(flow_run.flow.id[:8]), # type: ignore
), # type: ignore
)
body = obj.get()["Body"].read().decode("utf-8")
except ClientError:
self.logger.info(
"Flow id {} does not have external kwargs.".format(flow_run.flow.id[:8])
)
body = "{}"
self.logger.debug("External kwargs:\n{}".format(body))
# update kwargs from with external kwargs
self.logger.info("Updating default kwargs with external")
external_kwargs = json.loads(body)
# parse external kwargs
ext_task_definition_kwargs, ext_task_run_kwargs = self._parse_kwargs(
external_kwargs
)
self.logger.debug(
"External task definition kwargs:\n{}".format(ext_task_definition_kwargs)
)
self.logger.debug("External task run kwargs:\n{}".format(ext_task_run_kwargs))
# update flow_task_definition_kwargs and flow_task_run_kwargs
flow_task_definition_kwargs.update(ext_task_definition_kwargs)
flow_task_run_kwargs.update(ext_task_run_kwargs)
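# Expected S3 layout for external kwargs, following the key construction above
# (bucket/prefix/flow-name/id values here are illustrative only): with
# external_kwargs_s3_key="prefect/kwargs", a flow named "My Flow" whose id starts
# with "abcd1234" would be looked up at
#   s3://<external_kwargs_s3_bucket>/prefect/kwargs/my-flow/abcd1234.json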
def _add_flow_tags(
self, flow_run: GraphQLResult, flow_task_definition_kwargs: dict
) -> None:
"""
Add the PrefectFlowId and PrefectFlowVersion tags to the task definition kwargs.
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
- flow_task_definition_kwargs (dict): task_definition_kwargs to add tags to
"""
# add flow id and version to definition tags
self.logger.info("Adding tags for flow_id and version.")
if not flow_task_definition_kwargs.get("tags"):
flow_task_definition_kwargs["tags"] = []
else:
flow_task_definition_kwargs["tags"] = copy.deepcopy(
flow_task_definition_kwargs["tags"]
)
append_tag = True
for i in flow_task_definition_kwargs["tags"]:
if i["key"] == "PrefectFlowId":
i["value"] = flow_run.flow.id[:8]
append_tag = False
if append_tag:
flow_task_definition_kwargs["tags"].append(
{"key": "PrefectFlowId", "value": flow_run.flow.id[:8]}
)
append_tag = True
for i in flow_task_definition_kwargs["tags"]:
if i["key"] == "PrefectFlowVersion":
i["value"] = str(flow_run.flow.version)
append_tag = False
if append_tag:
flow_task_definition_kwargs["tags"].append(
{"key": "PrefectFlowVersion", "value": str(flow_run.flow.version)}
)
def _parse_kwargs(self, user_kwargs: dict, check_envars: bool = False) -> tuple:
"""
Parse the kwargs passed in and separate them out for `register_task_definition`
and `run_task`. This is required because boto3 does not allow extra kwargs
and if they are provided it will raise botocore.exceptions.ParamValidationError.
Args:
- user_kwargs (dict): The kwargs passed to the initialization of the environment
- check_envars (bool): Whether to check envars for kwargs
Returns:
tuple: a tuple of two dictionaries (task_definition_kwargs, task_run_kwargs)
"""
definition_kwarg_list = [
"taskRoleArn",
"executionRoleArn",
"volumes",
"placementConstraints",
"cpu",
"memory",
"tags",
"pidMode",
"ipcMode",
"proxyConfiguration",
"inferenceAccelerators",
]
definition_kwarg_list_no_eval = ["cpu", "memory"]
run_kwarg_list = [
"cluster",
"count",
"startedBy",
"group",
"placementConstraints",
"placementStrategy",
"platformVersion",
"networkConfiguration",
"tags",
"enableECSManagedTags",
"propagateTags",
]
task_definition_kwargs = {}
definition_kwarg_list_eval = {
i: (i not in definition_kwarg_list_no_eval) for i in definition_kwarg_list
}
for key, item in user_kwargs.items():
if key in definition_kwarg_list:
if definition_kwarg_list_eval.get(key):
try:
# Parse kwarg if needed
item = literal_eval(item)
except (ValueError, SyntaxError):
pass
task_definition_kwargs.update({key: item})
self.logger.debug("{} = {}".format(key, item))
task_run_kwargs = {}
for key, item in user_kwargs.items():
if key in run_kwarg_list:
try:
# Parse kwarg if needed
item = literal_eval(item)
except (ValueError, SyntaxError):
pass
task_run_kwargs.update({key: item})
self.logger.debug("{} = {}".format(key, item))
# Check environment if keys were not provided
if check_envars:
for key in definition_kwarg_list:
if not task_definition_kwargs.get(key) and os.getenv(key):
self.logger.debug("{} from environment variable".format(key))
def_env_value = os.getenv(key)
if definition_kwarg_list_eval.get(key):
try:
# Parse env var if needed
def_env_value = literal_eval(def_env_value) # type: ignore
except (ValueError, SyntaxError):
pass
task_definition_kwargs.update({key: def_env_value})
for key in run_kwarg_list:
if not task_run_kwargs.get(key) and os.getenv(key):
self.logger.debug("{} from environment variable".format(key))
run_env_value = os.getenv(key)
try:
# Parse env var if needed
run_env_value = literal_eval(run_env_value) # type: ignore
except (ValueError, SyntaxError):
pass
task_run_kwargs.update({key: run_env_value})
return task_definition_kwargs, task_run_kwargs
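# Illustrative split, derived from the kwarg lists above (example values are made up):
#   _parse_kwargs({"cpu": "256", "memory": "512", "cluster": "default", "count": "1"})
# returns ({"cpu": "256", "memory": "512"}, {"cluster": "default", "count": 1});
# "cpu"/"memory" are never literal_eval'd, "count" parses to an int, and "cluster"
# keeps the raw string because literal_eval("default") raises ValueError.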
def deploy_flow(self, flow_run: GraphQLResult) -> str:
"""
Deploy flow runs to Fargate
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
Returns:
- str: Information about the deployment
Raises:
- ValueError: if deployment attempted on unsupported Storage type
"""
self.logger.info(
"Deploying flow run {}".format(flow_run.id) # type: ignore
)
# create copies of kwargs to apply overrides as needed
flow_task_definition_kwargs = copy.deepcopy(self.task_definition_kwargs)
flow_task_run_kwargs = copy.deepcopy(self.task_run_kwargs)
# create task_definition_name dict for passing into verify method
task_definition_dict = {}
if self.use_external_kwargs:
# override from external kwargs
self._override_kwargs(
flow_run, flow_task_definition_kwargs, flow_task_run_kwargs
)
# set proper task_definition_name and tags based on enable_task_revisions flag
if self.enable_task_revisions:
# set task definition name
task_definition_dict["task_definition_name"] = slugify(flow_run.flow.name)
self._add_flow_tags(flow_run, flow_task_definition_kwargs)
else:
task_definition_dict["task_definition_name"] = "prefect-task-{}".format( # type: ignore
flow_run.flow.id[:8] # type: ignore
) # type: ignore
# Require Docker storage
if not isinstance(StorageSchema().load(flow_run.flow.storage), Docker):
self.logger.error(
"Storage for flow run {} is not of type Docker.".format(flow_run.id)
)
raise ValueError("Unsupported Storage type")
# check if task definition exists
self.logger.debug("Checking for task definition")
if not self._verify_task_definition_exists(flow_run, task_definition_dict):
self.logger.debug("No task definition found")
self._create_task_definition(
flow_run,
flow_task_definition_kwargs,
task_definition_dict["task_definition_name"],
)
# run task
task_arn = self._run_task(
flow_run, flow_task_run_kwargs, task_definition_dict["task_definition_name"]
)
self.logger.debug("Run created for task {}".format(task_arn))
return "Task ARN: {}".format(task_arn)
def _verify_task_definition_exists(
self, flow_run: GraphQLResult, task_definition_dict: dict
) -> bool:
"""
Check if a task definition already exists for the flow
Args:
- flow_run (GraphQLResult): A GraphQLResult representing a flow run object
- task_definition_dict(dict): Dictionary containing task definition name to update if needed.
Returns:
- bool: whether or not a preexisting task definition is found for this flow
"""
from botocore.exceptions import ClientError
try:
definition_exists = True
task_definition_name = task_definition_dict["task_definition_name"]
definition_response = self.boto3_client.describe_task_definition(
taskDefinition=task_definition_name, include=["TAGS"]
)
# if current active task definition has current flow id, then exists
if self.enable_task_revisions:
definition_exists = False
tag_dict = {x["key"]: x["value"] for x in definition_response["tags"]}
current_flow_id = tag_dict.get("PrefectFlowId")
current_flow_version = int(tag_dict.get("PrefectFlowVersion", 0))
if current_flow_id == flow_run.flow.id[:8]:
self.logger.debug(
"Active task definition for {} already exists".format(
flow_run.flow.id[:8]
) # type: ignore
)
definition_exists = True
elif flow_run.flow.version < current_flow_version:
tag_search = self.boto3_client_tags.get_resources(
TagFilters=[
{"Key": "PrefectFlowId", "Values": [flow_run.flow.id[:8]]}
],
ResourceTypeFilters=["ecs:task-definition"],
)
if tag_search["ResourceTagMappingList"]:
task_definition_dict["task_definition_name"] = [
x.get("ResourceARN")
for x in tag_search["ResourceTagMappingList"]
][-1]
self.logger.debug(
"Active task definition for {} already exists".format(
flow_run.flow.id[:8]
) # type: ignore
)
definition_exists = True
else:
self.logger.debug(
"Task definition {} found".format(
task_definition_name
) # type: ignore
)
except ClientError:
return False
return definition_exists
def _create_task_definition(
self,
flow_run: GraphQLResult,
flow_task_definition_kwargs: dict,
task_definition_name: str,
) -> None:
"""
Create a task definition for the flow that each flow run will use. This function
is only called when a flow is run for the first time.
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
- flow_task_definition_kwargs (dict): kwargs to use for registration
- task_definition_name (str): task definition name to use
"""
self.logger.debug(
"Using image {} for task definition".format(
StorageSchema().load(flow_run.flow.storage).name # type: ignore
)
)
container_definitions = [
{
"name": "flow",
"image": StorageSchema()
.load(flow_run.flow.storage) # type: ignore
.name,
"command": ["/bin/sh", "-c", "prefect execute cloud-flow"],
"environment": [
{
"name": "PREFECT__CLOUD__API",
"value": config.cloud.api or "https://api.prefect.io",
},
{
"name": "PREFECT__CLOUD__AGENT__LABELS",
"value": str(self.labels),
},
{"name": "PREFECT__CLOUD__USE_LOCAL_SECRETS", "value": "false"},
{
"name": "PREFECT__LOGGING__LOG_TO_CLOUD",
"value": str(self.log_to_cloud).lower(),
},
{"name": "PREFECT__LOGGING__LEVEL", "value": "DEBUG"},
{
"name": "PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS",
"value": "prefect.engine.cloud.CloudFlowRunner",
},
{
"name": "PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS",
"value": "prefect.engine.cloud.CloudTaskRunner",
},
],
"essential": True,
}
]
for key, value in self.env_vars.items():
container_definitions[0]["environment"].append(dict(name=key, value=value))
# Register task definition
self.logger.debug(
"Registering task definition {}".format(
task_definition_name # type: ignore
)
)
self.boto3_client.register_task_definition(
family=task_definition_name, # type: ignore
containerDefinitions=container_definitions,
requiresCompatibilities=["FARGATE"],
networkMode="awsvpc",
**flow_task_definition_kwargs
)
def _run_task(
self,
flow_run: GraphQLResult,
flow_task_run_kwargs: dict,
task_definition_name: str,
) -> str:
"""
Run a task using the flow run.
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
- flow_task_run_kwargs (dict): kwargs to use for task run
- task_definition_name (str): task definition name to use
"""
container_overrides = [
{
"name": "flow",
"environment": [
{
"name": "PREFECT__CLOUD__AUTH_TOKEN",
"value": config.cloud.agent.auth_token,
},
{
"name": "PREFECT__CONTEXT__FLOW_RUN_ID",
"value": flow_run.id, # type: ignore
},
],
}
]
# Run task
self.logger.debug(
"Running task using task definition {}".format(
task_definition_name # type: ignore
)
)
task = self.boto3_client.run_task(
taskDefinition=task_definition_name,
overrides={"containerOverrides": container_overrides},
launchType="FARGATE",
**flow_task_run_kwargs
)
return task["tasks"][0].get("taskArn")
if __name__ == "__main__":
FargateAgent().start()
|
the-stack_0_18956 | """
Game data manager
Use None for unfilled, False for black, and True for white.
The manager provides related access interfaces.
"""
from error import InvalidGridError, SettedGridError
from collections import defaultdict
from typing import List, Tuple, Union, Iterator, Set, Dict, Optional
class Manager:
"""
Game data manager
Use a two-dimensional array matrix to represent the game board.
"""
def __init__(self, size: int) -> None:
"""
Initialize a new game data manager.
Parameters:
size: int - length and width of the game board (same)
"""
self._ended = False
self._size = size
self._records: List[Tuple[int, int]] = list()
self._board: List[List[Union[None, bool]]] = [
[None for _index in range(size)]
for _index in range(size)
]
@property
def size(self) -> int:
"""Return size of game board"""
return self._size
def undo(self) -> Tuple[int, int]:
"""Undo the last step"""
row, column = self._records.pop()
self._board[row][column] = None  # clear the grid so the board reflects the undone step
return row, column
@property
def steps(self) -> int:
"""Return steps count"""
return len(self._records)
@property
def turn(self) -> bool:
"""Return which turn"""
return len(self._records) % 2 == 0
@property
def ended(self) -> bool:
"""Return if game ended"""
return self._ended
def end(self) -> None:
"""Set end flag"""
self._ended = True
def reset(self) -> None:
"""Reset game status"""
self._records.clear()
self._ended = False
self._board: List[List[Union[None, bool]]] = [
[None for _index in range(self._size)]
for _index in range(self._size)
]
def _around(self, _x: int, _y: int) -> Iterator[Tuple[int, int]]:
"""Return all grids's indexs around specific grid"""
if _x >= self._size or _y >= self._size:
raise InvalidGridError(
"Invalid index for ({x}, {y})".format(x=_x, y=_y))
for i in (_x - 1, _x, _x + 1):
for j in (_y - 1, _y, _y + 1):
if (i, j) == (_x, _y):
continue
if i < 0 or j < 0:
continue
if i >= self._size or j >= self._size:
continue
yield (i, j)
def find(self, row: int, column: int,
paths: Optional[Dict[int, Set[Tuple[int, int]]]] = None,
direction: Optional[int] = None) -> Dict[int, Set[Tuple[int, int]]]:
"""
Collect, for each of the four line directions, the consecutive
grids around (row, column) that hold the same colour
Parameters:
row, column: position or grid
paths: path for all directions
directions:
1 2 3
↖ ↑ ↗
4 ← · → 4
↙ ↓ ↘
3 2 1
"""
target = self[row, column]
if paths is None:
paths = {1: set(), 2: set(), 3: set(), 4: set()}
# Find all grids aorund current one
around = self._around(row, column)
classified = defaultdict(list)
for nrow, ncolumn in around:
# Filter all invalid grid
if not self[nrow, ncolumn] == target:
continue
# Filter non-set grid
if self[nrow, ncolumn] == None:
continue
# Define direction
if (nrow - row) * (ncolumn - column) == 1:
classified[1].append((nrow, ncolumn))
if nrow - row == 0:
classified[2].append((nrow, ncolumn))
if (nrow - row) * (ncolumn - column) == -1:
classified[3].append((nrow, ncolumn))
if ncolumn - column == 0:
classified[4].append((nrow, ncolumn))
# If direction has not been specified
if direction is None:
for ndirection, grids in classified.items():
for nrow, ncolumn in grids:
paths[ndirection].add((row, column))
paths[ndirection].add((nrow, ncolumn))
self.find(nrow, ncolumn, paths, ndirection)
# If direction has been sprcified
else:
grids = classified[direction]
for nrow, ncolumn in grids:
if (nrow, ncolumn) in paths[direction]:
continue
paths[direction].add((nrow, ncolumn))
self.find(nrow, ncolumn, paths, direction)
# If all directional recursions break before condition satisfied
return paths
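# One way a caller might consume the result (sketch, not part of this class's API):
# after placing a stone at (row, column), a five-in-a-row win can be detected with
#   any(len(grids) >= 5 for grids in manager.find(row, column).values())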
def __setitem__(self, index: Tuple[int, int], value: Union[None, bool]) -> None:
"""Set status for specific index of grid"""
_x, _y = index
if _x > self._size or _x < 0 or _y > self._size or _y < 0:
raise InvalidGridError(
"Invalid index for ({x}, {y})".format(x=_x, y=_y))
# Check for grid if grid has been set
if isinstance(self._board[_x][_y], bool) and not value is None:
raise SettedGridError("Cannot set grid which has already been set")
self._board[_x][_y] = value
if value is None:
self._records.remove(index)
else:
self._records.append(index)
def __getitem__(self, index: Tuple[int, int]) -> Union[None, bool]:
"""Return status for specific index of grid"""
_x, _y = index
if _x > self._size or _x < 0 or _y > self._size or _y < 0:
raise IndexError("Invalid index for ({x}, {y})".format(x=_x, y=_y))
return self._board[_x][_y]
def show(self) -> None:
"""Show all grids status"""
status = list()
for row in self._board:
for column in row:
if column is None:
status.append('x ')
if column is True:
status.append('Y ')
if column is False:
status.append('N ')
status.append('\n')
print(''.join(status))
# Test case
if __name__ == "__main__":
size = 10
# Test for size property
manager = Manager(size)
assert(manager.size == size)
# Boundary conditions testing of private function _around
assert(set(manager._around(0, 0)) == {(0, 1), (1, 0), (1, 1)})
assert(set(manager._around(0, size - 1)) == {
(0, size - 2), (1, size - 1), (1, size - 2)})
assert(set(manager._around(1, 1)) == {(0, 0), (0, 1), (0, 2),
(1, 0), (1, 2), (2, 0), (2, 1), (2, 2)})
assert(set(manager._around(size - 1, 0)) == {
(size - 2, 0), (size - 2, 1), (size - 1, 1)})
assert(set(manager._around(size - 1, size - 1)) == {
(size - 1, size - 2), (size - 2, size - 2), (size - 2, size - 1)})
# Test setitem and getitem function
manager[0, 0] = True
assert(manager[0, 0] == True)
assert(manager[0, 1] == None)
try:
manager[0, 0] = False
except SettedGridError as _error:
pass
else:
raise AssertionError("Setitem function test failed!")
# Test step count function
assert(manager.steps == 1)
# Test find function
manager[0, 1] = True
manager[1, 0] = False
manager[1, 1] = True
manager[3, 0] = True
manager[2, 2] = False
assert(manager.find(0, 0) == {
1: {(1, 1), (0, 0)}, 2: {(0, 1), (0, 0)},
3: set(), 4: set()
})
# Test show function
manager.show()
|
the-stack_0_18957 | import glm
import draw
import math
import copy
# This class handles the orientation, movement and various associated tasks for a given object.
#
# lots of stuff here shamelessly stolen from
# https://github.com/SebLague/Boids under the MIT license
# Portions Copyright (c) 2019 Sebastian Lague
#
# Check out his sweet video:
# https://www.youtube.com/watch?v=bqtqltqcQhw&fbclid=IwAR3MYG37B7dI3EV4YiNySQVSvtj-MP_0xcgWdh7-T18aUoAEg2BNohNWzk0
#
class posture:
def __init__(self, _cx, _cy, _cz, _hx, _hy, _hz, _v):# center, heading, velocity
self.center_pos=glm.vec3(_cx,_cy,_cz) # (x,y,z) center pos of self (on field)
self.target=glm.vec3(_hx,_hy,_hz) # (x,y,z) vector - current target of element
self.heading=glm.vec3(0,-1,0) # (x,y,z) normalized vector - current direction of element (forward)
self.velocity=_v # current velocity of element along the heading
self.velocity2=glm.vec3(0,0,0) # (x,y,z) vector - velocity2
self.momentum=glm.vec3(0,0,0) # (x,y,z) vector - current momentum of element
self.accel2=glm.vec3(0,0,0) # (x,y,z) vector - accelleration (2)
self.avgAvoidanceHeading=glm.vec3(0,0,0) # (x,y,z) vector - avg avoidance heading
self.accelleration=0.0 # current accelleration of self
self.drag=0.0 # current drag of self
self.friction=0.0 # current friction of surface
self.minSpeed=0.0 # minimum speed
self.maxSpeed=5.0 # maximum speed
self.perceptionRadius = 2.5 # radius for perception
self.avoidanceRadius = 1 # radius for avoidance
self.maxSteerForce=3.0 # maximum steerage force
self.alignWeight=1 # how much to weigh aligning with the flock
self.cohesionWeight=1 # how much to weigh cohesion with the flock
self.separateWeight=1 # how much to weigh separation from the flock
self.targetWeight=1 # how much to weigh the target destination
self.boundsRadius = 0.27 # radius of the boundary
self.avoidCollisionsWeight = 10 # how much to weight avoiding collisions
self.collisionAvoidDst = 5 # the distance to use when avoiding
self.collide_rays=self.calc_collide_rays(100, 100)
def tick_update(self):
acceleration = glm.vec3(0,0,0)
if (self.target != glm.vec3(0,0,0)):
offsetToTarget = glm.vec3(self.target - self.center_pos)
acceleration = self.steer_towards(offsetToTarget) * self.targetWeight  # steering force towards the current target
# no flocking behavior here
# if (numPerceivedFlockmates != 0) {
# centreOfFlockmates /= numPerceivedFlockmates;
#
# Vector3 offsetToFlockmatesCentre = (centreOfFlockmates - position);
#
# var alignmentForce = SteerTowards (avgFlockHeading) * settings.alignWeight;
# var cohesionForce = SteerTowards (offsetToFlockmatesCentre) * settings.cohesionWeight;
# var seperationForce = SteerTowards (avgAvoidanceHeading) * settings.seperateWeight;
#
# acceleration += alignmentForce;
# acceleration += cohesionForce;
# acceleration += seperationForce;
# }
if (self.is_heading_for_collision()):
collisionAvoidDir = glm.vec3(self.obstacle_rays())
collisionAvoidForce = glm.vec3(self.steer_towards(collisionAvoidDir) * self.avoidCollisionsWeight)
acceleration += collisionAvoidForce
self.velocity2 += acceleration * 50/1000
speed = glm.length(self.velocity2)  # magnitude of the velocity vector
direction = glm.vec3(self.velocity2 / speed)
speed = glm.clamp(speed, self.minSpeed, self.maxSpeed);
self.velocity2 = direction * speed;
# cachedTransform.position += velocity * 50/1000;
# cachedTransform.forward = direction;
# position = cachedTransform.position;
self.heading = direction;
# calculate the new position based on velocity
new_pos = draw.do_translate_point(self.center_pos, (glm.normalize(self.heading) * self.velocity))
if ((math.isnan(new_pos.x) == False) and (math.isnan(new_pos.y) == False) and (math.isnan(new_pos.z)== False)):
if ((self.center_pos.x > 0) and (self.center_pos.y > 0)):
self.center_pos.x = int(new_pos.x)
self.center_pos.y = int(new_pos.y)
self.center_pos.z = int(new_pos.z)
# new_vector = glm.cross(self.velocity, self.accelleration)
# reduce momentum based on friction (2πMgD)?
# TODO - fix this calculation
# self.velocity = self.velocity - (self.velocity*(self.drag*.1))
def set_drag(self, new_drag):
self.drag = new_drag
# takes a glm.vec3
def set_accel(self, new_accel):
self.accelleration = new_accel
def __str__(self):
return "posture: %dx%dx%d %dx%dx%d %dx%dx%d %d" %(self.center_pos[0], self.center_pos[1], self.center_pos[2], self.heading[0], self.heading[1], self.heading[2], self.target[1], self.target[2], self.velocity)
def calc_collide_rays(self, numViewDirections, length):
directions=[]
goldenRatio = (1 + glm.sqrt(5)) / 2;
angleIncrement = glm.pi() * 2 * goldenRatio;
i=0
while i < (numViewDirections):
t = i / numViewDirections;
inclination = glm.acos(1 - 2 * t);
azimuth = angleIncrement * i;
x = glm.sin(inclination) * glm.cos(azimuth);
y = glm.sin (inclination) * glm.sin(azimuth);
z = glm.cos (inclination);
directions.append(glm.vec3(x, y, z)*length)
i+=1
return directions
def get_collide_rays(self):
local_rays = copy.deepcopy(self.collide_rays)
return (local_rays)
def steer_towards(self, vector):
v = glm.vec3(glm.normalize(vector) * self.maxSpeed - self.velocity2)
return glm.clamp(v, 0, self.maxSteerForce);
def is_heading_for_collision(self):
# RaycastHit hit;
# if (Physics.SphereCast (position, settings.boundsRadius, forward, out hit, settings.collisionAvoidDst, settings.obstacleMask)) {
# return true;
# } else { }
return False
def obstacle_rays(self):
rayDirections = self.get_collide_rays()
length = len(rayDirections)
i=0
while i < (length):
# Vector3 dir = cachedTransform.TransformDirection (rayDirections[i]);
#Ray ray = new Ray (position, dir);
#if (!Physics.SphereCast (ray, settings.boundsRadius, settings.collisionAvoidDst, settings.obstacleMask)) {
# return dir;
#}
i += 1
return self.heading;
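# Minimal usage sketch (comments only; the draw module and field bounds are assumed
# to be provided elsewhere in this project):
#   p = posture(10, 10, 0, 50, 50, 0, 1)  # start at (10,10,0), target (50,50,0), velocity 1
#   p.set_drag(0.1)
#   p.tick_update()                       # steer towards the target and advance one tick
#   print(p)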
|
the-stack_0_18958 | """
https://leetcode.com/problems/surrounded-regions/
Given a 2D board containing 'X' and 'O' (the letter O), capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's into 'X's in that surrounded region.
Example:
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
Explanation:
Surrounded regions shouldn’t be on the border, which means that any 'O' on the border of the board are not flipped to 'X'. Any 'O' that is not on the border and it is not connected to an 'O' on the border will be flipped to 'X'. Two cells are connected if they are adjacent cells connected horizontally or vertically.
"""
from typing import List

# time complexity: O(m*n); space complexity: O(m*n) in the worst case for the DFS recursion stack,
# where m and n are the height and width of the board
class Solution:
def solve(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
if not(board) or not(board[0]):
return
def convert(row, col):
if 0 <= row < len(board) and 0 <= col < len(board[0]) and board[row][col] == 'O':
board[row][col] = 'A'
convert(row, col-1)
convert(row, col+1)
convert(row-1, col)
convert(row+1, col)
for i in range(len(board)):
convert(i, 0)
convert(i, len(board[0])-1)
for j in range(len(board[0])):
convert(0, j)
convert(len(board)-1, j)
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == 'O':
board[i][j] = 'X'
if board[i][j] == 'A':
board[i][j] = 'O'
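# Quick local check using the example board from the problem statement above
# (my own driver, not part of a LeetCode submission):
if __name__ == "__main__":
    board = [
        ["X", "X", "X", "X"],
        ["X", "O", "O", "X"],
        ["X", "X", "O", "X"],
        ["X", "O", "X", "X"],
    ]
    Solution().solve(board)
    # The surrounded region is captured; the border-connected 'O' at (3, 1) survives.
    for row in board:
        print(" ".join(row))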
|
the-stack_0_18959 | from typing import Type, Union
import numpy as np
import pytest
from pandas._libs import OutOfBoundsDatetime
from pandas.compat.numpy import _np_version_under1p18
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
# TODO: more freq variants
@pytest.fixture(params=["D", "B", "W", "M", "Q", "Y"])
def period_index(request):
"""
A fixture to provide PeriodIndex objects with different frequencies.
Most PeriodArray behavior is already tested in PeriodIndex tests,
so here we just test that the PeriodArray behavior matches
the PeriodIndex behavior.
"""
freqstr = request.param
# TODO: non-monotone indexes; NaTs, different start dates
pi = pd.period_range(start=pd.Timestamp("2000-01-01"), periods=100, freq=freqstr)
return pi
@pytest.fixture(params=["D", "B", "W", "M", "Q", "Y"])
def datetime_index(request):
"""
A fixture to provide DatetimeIndex objects with different frequencies.
Most DatetimeArray behavior is already tested in DatetimeIndex tests,
so here we just test that the DatetimeArray behavior matches
the DatetimeIndex behavior.
"""
freqstr = request.param
# TODO: non-monotone indexes; NaTs, different start dates, timezones
dti = pd.date_range(start=pd.Timestamp("2000-01-01"), periods=100, freq=freqstr)
return dti
@pytest.fixture
def timedelta_index(request):
"""
A fixture to provide TimedeltaIndex objects with different frequencies.
Most TimedeltaArray behavior is already tested in TimedeltaIndex tests,
so here we just test that the TimedeltaArray behavior matches
the TimedeltaIndex behavior.
"""
# TODO: flesh this out
return pd.TimedeltaIndex(["1 Day", "3 Hours", "NaT"])
class SharedTests:
index_cls: Type[Union[DatetimeIndex, PeriodIndex, TimedeltaIndex]]
@pytest.fixture
def arr1d(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
return arr
def test_compare_len1_raises(self):
# make sure we raise when comparing with different lengths, specific
# to the case where one has length-1, which numpy would broadcast
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls._simple_new(data, freq="D")
idx = self.index_cls(arr)
with pytest.raises(ValueError, match="Lengths must match"):
arr == arr[:1]
# test the index classes while we're at it, GH#23078
with pytest.raises(ValueError, match="Lengths must match"):
idx <= idx[[0]]
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("as_index", [True, False])
def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered):
other = pd.Categorical(arr1d, ordered=ordered)
if as_index:
other = pd.CategoricalIndex(other)
left, right = arr1d, other
if reverse:
left, right = right, left
ones = np.ones(arr1d.shape, dtype=bool)
zeros = ~ones
result = left == right
tm.assert_numpy_array_equal(result, ones)
result = left != right
tm.assert_numpy_array_equal(result, zeros)
if not reverse and not as_index:
# Otherwise Categorical raises TypeError bc it is not ordered
# TODO: we should probably get the same behavior regardless?
result = left < right
tm.assert_numpy_array_equal(result, zeros)
result = left <= right
tm.assert_numpy_array_equal(result, ones)
result = left > right
tm.assert_numpy_array_equal(result, zeros)
result = left >= right
tm.assert_numpy_array_equal(result, ones)
def test_take(self):
data = np.arange(100, dtype="i8") * 24 * 3600 * 10 ** 9
np.random.shuffle(data)
arr = self.array_cls._simple_new(data, freq="D")
idx = self.index_cls._simple_new(arr)
takers = [1, 4, 94]
result = arr.take(takers)
expected = idx.take(takers)
tm.assert_index_equal(self.index_cls(result), expected)
takers = np.array([1, 4, 94])
result = arr.take(takers)
expected = idx.take(takers)
tm.assert_index_equal(self.index_cls(result), expected)
@pytest.mark.parametrize("fill_value", [2, 2.0, pd.Timestamp.now().time])
def test_take_fill_raises(self, fill_value):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls._simple_new(data, freq="D")
msg = f"'fill_value' should be a {self.dtype}. Got '{fill_value}'"
with pytest.raises(ValueError, match=msg):
arr.take([0, 1], allow_fill=True, fill_value=fill_value)
def test_take_fill(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls._simple_new(data, freq="D")
result = arr.take([-1, 1], allow_fill=True, fill_value=None)
assert result[0] is pd.NaT
result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan)
assert result[0] is pd.NaT
result = arr.take([-1, 1], allow_fill=True, fill_value=pd.NaT)
assert result[0] is pd.NaT
def test_concat_same_type(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls._simple_new(data, freq="D")
idx = self.index_cls(arr)
idx = idx.insert(0, pd.NaT)
arr = self.array_cls(idx)
result = arr._concat_same_type([arr[:-1], arr[1:], arr])
arr2 = arr.astype(object)
expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2]), None)
tm.assert_index_equal(self.index_cls(result), expected)
def test_unbox_scalar(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
result = arr._unbox_scalar(arr[0])
assert isinstance(result, int)
result = arr._unbox_scalar(pd.NaT)
assert isinstance(result, int)
msg = f"'value' should be a {self.dtype.__name__}."
with pytest.raises(ValueError, match=msg):
arr._unbox_scalar("foo")
def test_check_compatible_with(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
arr._check_compatible_with(arr[0])
arr._check_compatible_with(arr[:1])
arr._check_compatible_with(pd.NaT)
def test_scalar_from_string(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
result = arr._scalar_from_string(str(arr[0]))
assert result == arr[0]
def test_reduce_invalid(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
with pytest.raises(TypeError, match="cannot perform"):
arr._reduce("not a method")
@pytest.mark.parametrize("method", ["pad", "backfill"])
def test_fillna_method_doesnt_change_orig(self, method):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
arr[4] = pd.NaT
fill_value = arr[3] if method == "pad" else arr[5]
result = arr.fillna(method=method)
assert result[4] == fill_value
# check that the original was not changed
assert arr[4] is pd.NaT
def test_searchsorted(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
# scalar
result = arr.searchsorted(arr[1])
assert result == 1
result = arr.searchsorted(arr[2], side="right")
assert result == 3
# own-type
result = arr.searchsorted(arr[1:3])
expected = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
result = arr.searchsorted(arr[1:3], side="right")
expected = np.array([2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
# Following numpy convention, NaT goes at the beginning
# (unlike NaN which goes at the end)
result = arr.searchsorted(pd.NaT)
assert result == 0
def test_getitem_2d(self, arr1d):
# 2d slicing on a 1D array
expected = type(arr1d)(arr1d._data[:, np.newaxis], dtype=arr1d.dtype)
result = arr1d[:, np.newaxis]
tm.assert_equal(result, expected)
# Lookup on a 2D array
arr2d = expected
expected = type(arr2d)(arr2d._data[:3, 0], dtype=arr2d.dtype)
result = arr2d[:3, 0]
tm.assert_equal(result, expected)
# Scalar lookup
result = arr2d[-1, 0]
expected = arr1d[-1]
assert result == expected
def test_setitem(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
arr[0] = arr[1]
expected = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
expected[0] = expected[1]
tm.assert_numpy_array_equal(arr.asi8, expected)
arr[:2] = arr[-2:]
expected[:2] = expected[-2:]
tm.assert_numpy_array_equal(arr.asi8, expected)
def test_setitem_str_array(self, arr1d):
if isinstance(arr1d, DatetimeArray) and arr1d.tz is not None:
pytest.xfail(reason="timezone comparisons inconsistent")
expected = arr1d.copy()
expected[[0, 1]] = arr1d[-2:]
arr1d[:2] = [str(x) for x in arr1d[-2:]]
tm.assert_equal(arr1d, expected)
@pytest.mark.parametrize("as_index", [True, False])
def test_setitem_categorical(self, arr1d, as_index):
expected = arr1d.copy()[::-1]
if not isinstance(expected, PeriodArray):
expected = expected._with_freq(None)
cat = pd.Categorical(arr1d)
if as_index:
cat = pd.CategoricalIndex(cat)
arr1d[:] = cat[::-1]
tm.assert_equal(arr1d, expected)
def test_setitem_raises(self):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
val = arr[0]
with pytest.raises(IndexError, match="index 12 is out of bounds"):
arr[12] = val
with pytest.raises(TypeError, match="'value' should be a.* 'object'"):
arr[0] = object()
@pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series])
def test_setitem_numeric_raises(self, arr1d, box):
# We dont case e.g. int64 to our own dtype for setitem
msg = "requires compatible dtype"
with pytest.raises(TypeError, match=msg):
arr1d[:2] = box([0, 1])
with pytest.raises(TypeError, match=msg):
arr1d[:2] = box([0.0, 1.0])
def test_inplace_arithmetic(self):
# GH#24115 check that iadd and isub are actually in-place
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
expected = arr + pd.Timedelta(days=1)
arr += pd.Timedelta(days=1)
tm.assert_equal(arr, expected)
expected = arr - pd.Timedelta(days=1)
arr -= pd.Timedelta(days=1)
tm.assert_equal(arr, expected)
def test_shift_fill_int_deprecated(self):
# GH#31971
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = self.array_cls(data, freq="D")
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = arr.shift(1, fill_value=1)
expected = arr.copy()
if self.array_cls is PeriodArray:
fill_val = PeriodArray._scalar_type._from_ordinal(1, freq=arr.freq)
else:
fill_val = arr._scalar_type(1)
expected[0] = fill_val
expected[1:] = arr[:-1]
tm.assert_equal(result, expected)
class TestDatetimeArray(SharedTests):
index_cls = pd.DatetimeIndex
array_cls = DatetimeArray
dtype = pd.Timestamp
@pytest.fixture
def arr1d(self, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01 01:01:00", periods=3, freq="H", tz=tz)
dta = dti._data
return dta
def test_round(self, tz_naive_fixture):
# GH#24064
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01 01:01:00", periods=3, freq="H", tz=tz)
result = dti.round(freq="2T")
expected = dti - pd.Timedelta(minutes=1)
expected = expected._with_freq(None)
tm.assert_index_equal(result, expected)
dta = dti._data
result = dta.round(freq="2T")
expected = expected._data._with_freq(None)
tm.assert_datetime_array_equal(result, expected)
def test_array_interface(self, datetime_index):
arr = DatetimeArray(datetime_index)
# default asarray gives the same underlying data (for tz naive)
result = np.asarray(arr)
expected = arr._data
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, copy=False)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
# specifying M8[ns] gives the same result as default
result = np.asarray(arr, dtype="datetime64[ns]")
expected = arr._data
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="datetime64[ns]", copy=False)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="datetime64[ns]")
assert result is not expected
tm.assert_numpy_array_equal(result, expected)
# to object dtype
result = np.asarray(arr, dtype=object)
expected = np.array(list(arr), dtype=object)
tm.assert_numpy_array_equal(result, expected)
# to other dtype always copies
result = np.asarray(arr, dtype="int64")
assert result is not arr.asi8
assert not np.may_share_memory(arr, result)
expected = arr.asi8.copy()
tm.assert_numpy_array_equal(result, expected)
# other dtypes handled by numpy
for dtype in ["float64", str]:
result = np.asarray(arr, dtype=dtype)
expected = np.asarray(arr).astype(dtype)
tm.assert_numpy_array_equal(result, expected)
def test_array_object_dtype(self, tz_naive_fixture):
# GH#23524
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
arr = DatetimeArray(dti)
expected = np.array(list(dti))
result = np.array(arr, dtype=object)
tm.assert_numpy_array_equal(result, expected)
# also test the DatetimeIndex method while we're at it
result = np.array(dti, dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_array_tz(self, tz_naive_fixture):
# GH#23524
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
arr = DatetimeArray(dti)
expected = dti.asi8.view("M8[ns]")
result = np.array(arr, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="datetime64[ns]")
tm.assert_numpy_array_equal(result, expected)
# check that we are not making copies when setting copy=False
result = np.array(arr, dtype="M8[ns]", copy=False)
assert result.base is expected.base
assert result.base is not None
result = np.array(arr, dtype="datetime64[ns]", copy=False)
assert result.base is expected.base
assert result.base is not None
def test_array_i8_dtype(self, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
arr = DatetimeArray(dti)
expected = dti.asi8
result = np.array(arr, dtype="i8")
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
# check that we are still making copies when setting copy=False
result = np.array(arr, dtype="i8", copy=False)
assert result.base is not expected.base
assert result.base is None
def test_from_array_keeps_base(self):
# Ensure that DatetimeArray._data.base isn't lost.
arr = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
dta = DatetimeArray(arr)
assert dta._data is arr
dta = DatetimeArray(arr[:0])
assert dta._data.base is arr
def test_from_dti(self, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
arr = DatetimeArray(dti)
assert list(dti) == list(arr)
# Check that Index.__new__ knows what to do with DatetimeArray
dti2 = pd.Index(arr)
assert isinstance(dti2, pd.DatetimeIndex)
assert list(dti2) == list(arr)
def test_astype_object(self, tz_naive_fixture):
tz = tz_naive_fixture
dti = pd.date_range("2016-01-01", periods=3, tz=tz)
arr = DatetimeArray(dti)
asobj = arr.astype("O")
assert isinstance(asobj, np.ndarray)
assert asobj.dtype == "O"
assert list(asobj) == list(dti)
@pytest.mark.parametrize("freqstr", ["D", "B", "W", "M", "Q", "Y"])
def test_to_perioddelta(self, datetime_index, freqstr):
# GH#23113
dti = datetime_index
arr = DatetimeArray(dti)
expected = dti.to_perioddelta(freq=freqstr)
result = arr.to_perioddelta(freq=freqstr)
assert isinstance(result, TimedeltaArray)
# placeholder until these become actual EA subclasses and we can use
# an EA-specific tm.assert_ function
tm.assert_index_equal(pd.Index(result), pd.Index(expected))
@pytest.mark.parametrize("freqstr", ["D", "B", "W", "M", "Q", "Y"])
def test_to_period(self, datetime_index, freqstr):
dti = datetime_index
arr = DatetimeArray(dti)
expected = dti.to_period(freq=freqstr)
result = arr.to_period(freq=freqstr)
assert isinstance(result, PeriodArray)
# placeholder until these become actual EA subclasses and we can use
# an EA-specific tm.assert_ function
tm.assert_index_equal(pd.Index(result), pd.Index(expected))
@pytest.mark.parametrize("propname", pd.DatetimeIndex._bool_ops)
def test_bool_properties(self, datetime_index, propname):
# in this case _bool_ops is just `is_leap_year`
dti = datetime_index
arr = DatetimeArray(dti)
assert dti.freq == arr.freq
result = getattr(arr, propname)
expected = np.array(getattr(dti, propname), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("propname", pd.DatetimeIndex._field_ops)
def test_int_properties(self, datetime_index, propname):
if propname in ["week", "weekofyear"]:
# GH#33595 Deprecate week and weekofyear
return
dti = datetime_index
arr = DatetimeArray(dti)
result = getattr(arr, propname)
expected = np.array(getattr(dti, propname), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_take_fill_valid(self, datetime_index, tz_naive_fixture):
dti = datetime_index.tz_localize(tz_naive_fixture)
arr = DatetimeArray(dti)
now = pd.Timestamp.now().tz_localize(dti.tz)
result = arr.take([-1, 1], allow_fill=True, fill_value=now)
assert result[0] == now
msg = f"'fill_value' should be a {self.dtype}. Got '0 days 00:00:00'."
with pytest.raises(ValueError, match=msg):
# fill_value Timedelta invalid
arr.take([-1, 1], allow_fill=True, fill_value=now - now)
msg = f"'fill_value' should be a {self.dtype}. Got '2014Q1'."
with pytest.raises(ValueError, match=msg):
# fill_value Period invalid
arr.take([-1, 1], allow_fill=True, fill_value=pd.Period("2014Q1"))
tz = None if dti.tz is not None else "US/Eastern"
now = pd.Timestamp.now().tz_localize(tz)
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
with pytest.raises(TypeError, match=msg):
# Timestamp with mismatched tz-awareness
arr.take([-1, 1], allow_fill=True, fill_value=now)
value = pd.NaT.value
msg = f"'fill_value' should be a {self.dtype}. Got '{value}'."
with pytest.raises(ValueError, match=msg):
# require NaT, not iNaT, as it could be confused with an integer
arr.take([-1, 1], allow_fill=True, fill_value=value)
value = np.timedelta64("NaT", "ns")
msg = f"'fill_value' should be a {self.dtype}. Got '{str(value)}'."
with pytest.raises(ValueError, match=msg):
# require appropriate-dtype if we have a NA value
arr.take([-1, 1], allow_fill=True, fill_value=value)
def test_concat_same_type_invalid(self, datetime_index):
# different timezones
dti = datetime_index
arr = DatetimeArray(dti)
if arr.tz is None:
other = arr.tz_localize("UTC")
else:
other = arr.tz_localize(None)
with pytest.raises(ValueError, match="to_concat must have the same"):
arr._concat_same_type([arr, other])
def test_concat_same_type_different_freq(self):
# we *can* concatenate DTI with different freqs.
a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central"))
b = DatetimeArray(pd.date_range("2000", periods=2, freq="H", tz="US/Central"))
result = DatetimeArray._concat_same_type([a, b])
expected = DatetimeArray(
pd.to_datetime(
[
"2000-01-01 00:00:00",
"2000-01-02 00:00:00",
"2000-01-01 00:00:00",
"2000-01-01 01:00:00",
]
).tz_localize("US/Central")
)
tm.assert_datetime_array_equal(result, expected)
def test_strftime(self, datetime_index):
arr = DatetimeArray(datetime_index)
result = arr.strftime("%Y %b")
expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_strftime_nat(self):
# GH 29578
arr = DatetimeArray(DatetimeIndex(["2019-01-01", pd.NaT]))
result = arr.strftime("%Y-%m-%d")
expected = np.array(["2019-01-01", np.nan], dtype=object)
tm.assert_numpy_array_equal(result, expected)
class TestTimedeltaArray(SharedTests):
index_cls = pd.TimedeltaIndex
array_cls = TimedeltaArray
dtype = pd.Timedelta
def test_from_tdi(self):
tdi = pd.TimedeltaIndex(["1 Day", "3 Hours"])
arr = TimedeltaArray(tdi)
assert list(arr) == list(tdi)
# Check that Index.__new__ knows what to do with TimedeltaArray
tdi2 = pd.Index(arr)
assert isinstance(tdi2, pd.TimedeltaIndex)
assert list(tdi2) == list(arr)
def test_astype_object(self):
tdi = pd.TimedeltaIndex(["1 Day", "3 Hours"])
arr = TimedeltaArray(tdi)
asobj = arr.astype("O")
assert isinstance(asobj, np.ndarray)
assert asobj.dtype == "O"
assert list(asobj) == list(tdi)
def test_to_pytimedelta(self, timedelta_index):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
expected = tdi.to_pytimedelta()
result = arr.to_pytimedelta()
tm.assert_numpy_array_equal(result, expected)
def test_total_seconds(self, timedelta_index):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
expected = tdi.total_seconds()
result = arr.total_seconds()
tm.assert_numpy_array_equal(result, expected.values)
@pytest.mark.parametrize("propname", pd.TimedeltaIndex._field_ops)
def test_int_properties(self, timedelta_index, propname):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
result = getattr(arr, propname)
expected = np.array(getattr(tdi, propname), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_array_interface(self, timedelta_index):
arr = TimedeltaArray(timedelta_index)
# default asarray gives the same underlying data
result = np.asarray(arr)
expected = arr._data
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, copy=False)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
# specifying m8[ns] gives the same result as default
result = np.asarray(arr, dtype="timedelta64[ns]")
expected = arr._data
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="timedelta64[ns]", copy=False)
assert result is expected
tm.assert_numpy_array_equal(result, expected)
result = np.array(arr, dtype="timedelta64[ns]")
assert result is not expected
tm.assert_numpy_array_equal(result, expected)
# to object dtype
result = np.asarray(arr, dtype=object)
expected = np.array(list(arr), dtype=object)
tm.assert_numpy_array_equal(result, expected)
# to other dtype always copies
result = np.asarray(arr, dtype="int64")
assert result is not arr.asi8
assert not np.may_share_memory(arr, result)
expected = arr.asi8.copy()
tm.assert_numpy_array_equal(result, expected)
# other dtypes handled by numpy
for dtype in ["float64", str]:
result = np.asarray(arr, dtype=dtype)
expected = np.asarray(arr).astype(dtype)
tm.assert_numpy_array_equal(result, expected)
def test_take_fill_valid(self, timedelta_index):
tdi = timedelta_index
arr = TimedeltaArray(tdi)
td1 = pd.Timedelta(days=1)
result = arr.take([-1, 1], allow_fill=True, fill_value=td1)
assert result[0] == td1
now = pd.Timestamp.now()
value = now
msg = f"'fill_value' should be a {self.dtype}. Got '{value}'."
with pytest.raises(ValueError, match=msg):
# fill_value Timestamp invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
value = now.to_period("D")
msg = f"'fill_value' should be a {self.dtype}. Got '{value}'."
with pytest.raises(ValueError, match=msg):
# fill_value Period invalid
arr.take([0, 1], allow_fill=True, fill_value=value)
value = np.datetime64("NaT", "ns")
msg = f"'fill_value' should be a {self.dtype}. Got '{str(value)}'."
with pytest.raises(ValueError, match=msg):
# require appropriate-dtype if we have a NA value
arr.take([-1, 1], allow_fill=True, fill_value=value)
class TestPeriodArray(SharedTests):
index_cls = pd.PeriodIndex
array_cls = PeriodArray
dtype = pd.Period
@pytest.fixture
def arr1d(self, period_index):
return period_index._data
def test_from_pi(self, period_index):
pi = period_index
arr = PeriodArray(pi)
assert list(arr) == list(pi)
# Check that Index.__new__ knows what to do with PeriodArray
pi2 = pd.Index(arr)
assert isinstance(pi2, pd.PeriodIndex)
assert list(pi2) == list(arr)
def test_astype_object(self, period_index):
pi = period_index
arr = PeriodArray(pi)
asobj = arr.astype("O")
assert isinstance(asobj, np.ndarray)
assert asobj.dtype == "O"
assert list(asobj) == list(pi)
def test_take_fill_valid(self, period_index):
pi = period_index
arr = PeriodArray(pi)
value = pd.NaT.value
msg = f"'fill_value' should be a {self.dtype}. Got '{value}'."
with pytest.raises(ValueError, match=msg):
# require NaT, not iNaT, as it could be confused with an integer
arr.take([-1, 1], allow_fill=True, fill_value=value)
value = np.timedelta64("NaT", "ns")
msg = f"'fill_value' should be a {self.dtype}. Got '{str(value)}'."
with pytest.raises(ValueError, match=msg):
# require appropriate-dtype if we have a NA value
arr.take([-1, 1], allow_fill=True, fill_value=value)
@pytest.mark.parametrize("how", ["S", "E"])
def test_to_timestamp(self, how, period_index):
pi = period_index
arr = PeriodArray(pi)
expected = DatetimeArray(pi.to_timestamp(how=how))
result = arr.to_timestamp(how=how)
assert isinstance(result, DatetimeArray)
# placeholder until these become actual EA subclasses and we can use
# an EA-specific tm.assert_ function
tm.assert_index_equal(pd.Index(result), pd.Index(expected))
def test_to_timestamp_out_of_bounds(self):
# GH#19643 previously overflowed silently
pi = pd.period_range("1500", freq="Y", periods=3)
msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
pi.to_timestamp()
with pytest.raises(OutOfBoundsDatetime, match=msg):
pi._data.to_timestamp()
@pytest.mark.parametrize("propname", PeriodArray._bool_ops)
def test_bool_properties(self, period_index, propname):
# in this case _bool_ops is just `is_leap_year`
pi = period_index
arr = PeriodArray(pi)
result = getattr(arr, propname)
expected = np.array(getattr(pi, propname))
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("propname", PeriodArray._field_ops)
def test_int_properties(self, period_index, propname):
pi = period_index
arr = PeriodArray(pi)
result = getattr(arr, propname)
expected = np.array(getattr(pi, propname))
tm.assert_numpy_array_equal(result, expected)
def test_array_interface(self, period_index):
arr = PeriodArray(period_index)
# default asarray gives objects
result = np.asarray(arr)
expected = np.array(list(arr), dtype=object)
tm.assert_numpy_array_equal(result, expected)
# to object dtype (same as default)
result = np.asarray(arr, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(arr, dtype="int64")
tm.assert_numpy_array_equal(result, arr.asi8)
# to other dtypes
msg = r"float\(\) argument must be a string or a number, not 'Period'"
with pytest.raises(TypeError, match=msg):
np.asarray(arr, dtype="float64")
result = np.asarray(arr, dtype="S20")
expected = np.asarray(arr).astype("S20")
tm.assert_numpy_array_equal(result, expected)
def test_strftime(self, period_index):
arr = PeriodArray(period_index)
result = arr.strftime("%Y")
expected = np.array([per.strftime("%Y") for per in arr], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_strftime_nat(self):
# GH 29578
arr = PeriodArray(PeriodIndex(["2019-01-01", pd.NaT], dtype="period[D]"))
result = arr.strftime("%Y-%m-%d")
expected = np.array(["2019-01-01", np.nan], dtype=object)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"array,casting_nats",
[
(
pd.TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
(pd.NaT, np.timedelta64("NaT", "ns")),
),
(
pd.date_range("2000-01-01", periods=3, freq="D")._data,
(pd.NaT, np.datetime64("NaT", "ns")),
),
(pd.period_range("2000-01-01", periods=3, freq="D")._data, (pd.NaT,)),
],
ids=lambda x: type(x).__name__,
)
def test_casting_nat_setitem_array(array, casting_nats):
expected = type(array)._from_sequence([pd.NaT, array[1], array[2]])
for nat in casting_nats:
arr = array.copy()
arr[0] = nat
tm.assert_equal(arr, expected)
@pytest.mark.parametrize(
"array,non_casting_nats",
[
(
pd.TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data,
(np.datetime64("NaT", "ns"), pd.NaT.value),
),
(
pd.date_range("2000-01-01", periods=3, freq="D")._data,
(np.timedelta64("NaT", "ns"), pd.NaT.value),
),
(
pd.period_range("2000-01-01", periods=3, freq="D")._data,
(np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), pd.NaT.value),
),
],
ids=lambda x: type(x).__name__,
)
def test_invalid_nat_setitem_array(array, non_casting_nats):
msg = (
"'value' should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. "
"Got '(timedelta64|datetime64|int)' instead."
)
for nat in non_casting_nats:
with pytest.raises(TypeError, match=msg):
array[0] = nat
@pytest.mark.parametrize(
"array",
[
pd.date_range("2000", periods=4).array,
pd.timedelta_range("2000", periods=4).array,
],
)
def test_to_numpy_extra(array):
if _np_version_under1p18:
# np.isnan(NaT) raises, so use pandas'
isnan = pd.isna
else:
isnan = np.isnan
array[0] = pd.NaT
original = array.copy()
result = array.to_numpy()
assert isnan(result[0])
result = array.to_numpy(dtype="int64")
assert result[0] == -9223372036854775808
result = array.to_numpy(dtype="int64", na_value=0)
assert result[0] == 0
result = array.to_numpy(na_value=array[1].to_numpy())
assert result[0] == result[1]
result = array.to_numpy(na_value=array[1].to_numpy(copy=False))
assert result[0] == result[1]
tm.assert_equal(array, original)
@pytest.mark.parametrize("as_index", [True, False])
@pytest.mark.parametrize(
"values",
[
pd.to_datetime(["2020-01-01", "2020-02-01"]),
pd.TimedeltaIndex([1, 2], unit="D"),
pd.PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
],
)
@pytest.mark.parametrize(
"klass",
[
list,
np.array,
pd.array,
pd.Series,
pd.Index,
pd.Categorical,
pd.CategoricalIndex,
],
)
def test_searchsorted_datetimelike_with_listlike(values, klass, as_index):
# https://github.com/pandas-dev/pandas/issues/32762
if not as_index:
values = values._data
result = values.searchsorted(klass(values))
expected = np.array([0, 1], dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
pd.to_datetime(["2020-01-01", "2020-02-01"]),
pd.TimedeltaIndex([1, 2], unit="D"),
pd.PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"),
],
)
@pytest.mark.parametrize(
"arg", [[1, 2], ["a", "b"], [pd.Timestamp("2020-01-01", tz="Europe/London")] * 2]
)
def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg):
# https://github.com/pandas-dev/pandas/issues/32762
msg = "[Unexpected type|Cannot compare]"
with pytest.raises(TypeError, match=msg):
values.searchsorted(arg)
@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
def test_period_index_construction_from_strings(klass):
# https://github.com/pandas-dev/pandas/issues/26109
strings = ["2020Q1", "2020Q2"] * 2
data = klass(strings)
result = PeriodIndex(data, freq="Q")
expected = PeriodIndex([Period(s) for s in strings])
tm.assert_index_equal(result, expected)
|
the-stack_0_18961 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
@dace.program
def tlarray(A: dace.int32[128]):
tmp = dace.ndarray([128], dace.int32, storage=dace.StorageType.CPU_ThreadLocal)
for i in dace.map[0:128]:
with dace.tasklet:
# Assuming OpenMP is used
t = omp_get_thread_num()
t >> tmp[i]
for i in dace.map[0:128]:
with dace.tasklet:
t << tmp[i]
o >> A[i]
# If tmp is thread-local, will be distributed across thread IDs
o = t
def test_threadlocal():
A = np.ndarray([128], dtype=np.int32)
A[:] = -1
# Add OpenMP include
sdfg = tlarray.to_sdfg()
sdfg.set_global_code('#include <omp.h>')
sdfg(A=A)
assert np.all(A >= 0)
print('OK. Detected threads:', np.max(A) + 1)
if __name__ == '__main__':
test_threadlocal()
|
the-stack_0_18962 | # -*- coding: utf-8 -*-
import httplib as http
import pkgutil
import mock
from nose import SkipTest
from nose.tools import * # flake8: noqa
from tests.base import ApiTestCase
from osf_tests import factories
from framework.auth.oauth_scopes import CoreScopes
from api.base.settings.defaults import API_BASE
from rest_framework.permissions import IsAuthenticatedOrReadOnly, IsAuthenticated
from api.base.permissions import TokenHasScope
from website.settings import DEBUG_MODE
from django.contrib.auth.models import User
import importlib
URLS_MODULES = []
for loader, name, _ in pkgutil.iter_modules(['api']):
if name != 'base' and name != 'test':
try:
URLS_MODULES.append(importlib.import_module('api.{}.urls'.format(name)))
except ImportError:
pass
VIEW_CLASSES = []
for mod in URLS_MODULES:
urlpatterns = mod.urlpatterns
for patt in urlpatterns:
VIEW_CLASSES.append(patt.callback.cls)
class TestApiBaseViews(ApiTestCase):
def test_root_returns_200(self):
res = self.app.get('/{}'.format(API_BASE))
assert_equal(res.status_code, 200)
def test_does_not_exist_returns_404(self):
res = self.app.get('/{}{}'.format(API_BASE,"notapage"), expect_errors=True)
assert_equal(res.status_code, 404)
def test_does_not_exist_formatting(self):
if DEBUG_MODE:
raise SkipTest
else:
url = '/{}{}/'.format(API_BASE, 'notapage')
res = self.app.get(url, expect_errors=True)
errors = res.json['errors']
assert(isinstance(errors, list))
assert_equal(errors[0], {'detail': 'Not found.'})
def test_view_classes_have_minimal_set_of_permissions_classes(self):
base_permissions = [
TokenHasScope,
(IsAuthenticated, IsAuthenticatedOrReadOnly)
]
for view in VIEW_CLASSES:
for cls in base_permissions:
if isinstance(cls, tuple):
has_cls = any([c in view.permission_classes for c in cls])
assert_true(has_cls, "{0} lacks the appropriate permission classes".format(view))
else:
assert_in(cls, view.permission_classes, "{0} lacks the appropriate permission classes".format(view))
for key in ['read', 'write']:
scopes = getattr(view, 'required_{}_scopes'.format(key), None)
assert_true(bool(scopes))
for scope in scopes:
assert_is_not_none(scope)
if key == 'write':
assert_not_in(CoreScopes.ALWAYS_PUBLIC, scopes)
def test_view_classes_support_embeds(self):
for view in VIEW_CLASSES:
assert_true(hasattr(view, '_get_embed_partial'), "{0} lacks embed support".format(view))
def test_view_classes_define_or_override_serializer_class(self):
for view in VIEW_CLASSES:
has_serializer_class = getattr(view, 'serializer_class', None) or getattr(view, 'get_serializer_class', None)
assert_true(has_serializer_class, "{0} should include serializer class or override get_serializer_class()".format(view))
@mock.patch('framework.auth.core.User.is_confirmed', mock.PropertyMock(return_value=False))
def test_unconfirmed_user_gets_error(self):
user = factories.AuthUserFactory()
res = self.app.get('/{}nodes/'.format(API_BASE), auth=user.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
@mock.patch('framework.auth.core.User.is_disabled', mock.PropertyMock(return_value=True))
def test_disabled_user_gets_error(self):
user = factories.AuthUserFactory()
res = self.app.get('/{}nodes/'.format(API_BASE), auth=user.auth, expect_errors=True)
assert_equal(res.status_code, http.BAD_REQUEST)
class TestJSONAPIBaseView(ApiTestCase):
def setUp(self):
super(TestJSONAPIBaseView, self).setUp()
self.user = factories.AuthUserFactory()
self.node = factories.ProjectFactory(creator=self.user)
self.url = '/{0}nodes/{1}/'.format(API_BASE, self.node._id)
for i in range(5):
factories.ProjectFactory(parent=self.node, creator=self.user)
for i in range(5):
factories.ProjectFactory(parent=self.node)
@mock.patch('api.base.serializers.JSONAPISerializer.to_representation', autospec=True)
def test_request_added_to_serializer_context(self, mock_to_representation):
self.app.get(self.url, auth=self.user.auth)
assert_in('request', mock_to_representation.call_args[0][0].context)
def test_reverse_sort_possible(self):
response = self.app.get('http://localhost:8000/v2/users/me/nodes/?sort=-title', auth=self.user.auth)
assert_equal(response.status_code, 200)
class TestSwaggerDocs(ApiTestCase):
def test_swagger_docs_redirect_to_root(self):
res = self.app.get('/v2/docs/')
assert_equal(res.status_code, 302)
assert_equal(res.location, '/v2/')
|
the-stack_0_18963 | # Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
A set of utility methods.
"""
import binascii
import gzip
import itertools
import logging
import math
import multiprocessing
import os
import pprint
import random
import sys
from collections import defaultdict
from contextlib import contextmanager
from itertools import starmap
from typing import Any, List, Iterator, Iterable, Tuple, Dict, Optional, Union, TypeVar
import numpy as np
import torch as pt
import torch.distributed
from . import __version__, constants as C
from .log import log_sockeye_version, log_torch_version
logger = logging.getLogger(__name__)
class SockeyeError(Exception):
pass
def check_version(version: str):
"""
Checks given version against code version and determines compatibility.
Throws if versions are incompatible.
:param version: Given version.
"""
code_version = parse_version(__version__)
given_version = parse_version(version)
if given_version[0] == '3' and given_version[1] == '0':
logger.info(f"Code version: {__version__}")
logger.warning(f"Given release version ({version}) does not match code version ({__version__}). "
f"Models with version {version} should be compatible though.")
return
check_condition(code_version[0] == given_version[0],
"Given release version (%s) does not match release code version (%s)" % (version, __version__))
check_condition(code_version[1] == given_version[1],
"Given major version (%s) does not match major code version (%s)" % (version, __version__))
def load_version(fname: str) -> str:
"""
Loads version from file.
:param fname: Name of file to load version from.
:return: Version string.
"""
if not os.path.exists(fname):
logger.warning("No version file found. Defaulting to 1.0.3")
return "1.0.3"
with open(fname) as inp:
return inp.read().strip()
def parse_version(version_string: str) -> Tuple[str, str, str]:
"""
Parse version string into release, major, minor version.
:param version_string: Version string.
:return: Tuple of strings.
"""
release, major, minor = version_string.split(".", 2)
return release, major, minor
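# Illustrative usage (comment only, not part of the original module): parse_version splits
# on the first two dots and keeps the pieces as strings, e.g.
#   parse_version("3.1.16")      -> ("3", "1", "16")
#   parse_version("3.1.16.dev0") -> ("3", "1", "16.dev0")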
def log_basic_info(args) -> None:
"""
Log basic information like version number, arguments, etc.
:param args: Arguments as returned by argparse.
"""
log_sockeye_version(logger)
log_torch_version(logger)
logger.info("Command: %s", " ".join(sys.argv))
logger.info("Arguments: %s", args)
def seed_rngs(seed: int) -> None: # type: ignore
"""
Seed the random number generators (Python, NumPy and PyTorch).
:param seed: The random seed.
"""
logger.info(f"Random seed: {seed}")
np.random.seed(seed)
random.seed(seed)
try:
import torch
torch.manual_seed(seed)
logger.info(f"PyTorch seed: {seed}")
except ImportError:
pass
def check_condition(condition: bool, error_message: str):
"""
Check the condition and, if it is not met, raise a SockeyeError with the given
error message, similar to assertions.
:param condition: Condition to check.
:param error_message: Error message to show to the user.
"""
if not condition:
raise SockeyeError(error_message)
class OnlineMeanAndVariance:
def __init__(self) -> None:
self._count = 0
self._mean = 0.
self._M2 = 0.
def update(self, value: Union[float, int]) -> None:
self._count += 1
delta = value - self._mean
self._mean += delta / self._count
delta2 = value - self._mean
self._M2 += delta * delta2
@property
def count(self) -> int:
return self._count
@property
def mean(self) -> float:
return self._mean
@property
def variance(self) -> float:
if self._count < 2:
return float('nan')
else:
return self._M2 / self._count
@property
def std(self) -> float:
variance = self.variance
return math.sqrt(variance) if not math.isnan(variance) else 0.0
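# Illustrative usage (comment only, not part of the original module): the class keeps running
# statistics via Welford's online algorithm, so values can be fed one at a time:
#   stats = OnlineMeanAndVariance()
#   for v in (2, 4, 6):
#       stats.update(v)
#   stats.mean      -> 4.0
#   stats.variance  -> 8 / 3 (population variance: the running M2 divided by the count)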
def chunks(some_list: List, n: int) -> Iterable[List]:
"""Yield successive n-sized chunks from l."""
for i in range(0, len(some_list), n):
yield some_list[i:i + n]
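# Illustrative usage (comment only, not part of the original module): the last chunk may be
# shorter than n, e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]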
def get_tokens(line: str) -> Iterator[str]:
"""
Yields tokens from input string.
:param line: Input string.
:return: Iterator over tokens.
"""
for token in line.rstrip().split():
if len(token) > 0:
yield token
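# Illustrative usage (comment only, not part of the original module): tokens are whitespace
# separated and empty tokens are skipped, e.g. list(get_tokens(" a  b c \n")) -> ["a", "b", "c"]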
def is_gzip_file(filename: str) -> bool:
# check for magic gzip number
with open(filename, 'rb') as test_f:
return binascii.hexlify(test_f.read(2)) == b'1f8b'
def smart_open(filename: str, mode: str = "rt", ftype: str = "auto", errors: str = 'replace'):
"""
Returns a file descriptor for filename with UTF-8 encoding.
If mode is "rt", file is opened read-only.
If ftype is "auto", uses gzip iff filename endswith .gz.
If ftype is {"gzip","gz"}, uses gzip.
If ftype is "auto" and read mode requested, uses gzip iff is_gzip_file(filename).
Note: encoding error handling defaults to "replace"
:param filename: The filename to open.
:param mode: Reader mode.
:param ftype: File type. If 'auto' checks filename suffix for gz to try gzip.open.
:param errors: Encoding error handling during reading. Defaults to 'replace'.
:return: File descriptor.
"""
if ftype in ('gzip', 'gz') \
or (ftype == 'auto' and filename.endswith(".gz")) \
or (ftype == 'auto' and 'r' in mode and is_gzip_file(filename)):
if mode == "rb" or mode == "wb":
return gzip.open(filename, mode=mode)
else:
return gzip.open(filename, mode=mode, encoding='utf-8', errors=errors)
else:
if mode == "rb" or mode == "wb":
return open(filename, mode=mode)
else:
return open(filename, mode=mode, encoding='utf-8', errors=errors)
def combine_means(means: List[Optional[float]], num_sents: List[int]) -> float:
"""
Takes a list of means and number of sentences of the same length and computes the combined mean.
:param means: A list of mean values.
:param num_sents: A list with the number of sentences used to compute each mean value.
:return: The combined mean of the list of means.
"""
if not means or not num_sents:
raise ValueError("Invalid input list.")
check_condition(len(means) == len(num_sents), "List lengths do not match")
return sum(num_sent * mean for num_sent, mean in zip(num_sents, means) if mean is not None) / sum(num_sents)
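# Illustrative usage (comment only, not part of the original module): the result is the
# sentence-count-weighted mean of the shard means, e.g.
#   combine_means([1.0, 3.0], [2, 2]) -> 2.0
#   combine_means([1.0, 3.0], [1, 3]) -> (1 * 1.0 + 3 * 3.0) / 4 = 2.5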
def combine_stds(stds: List[Optional[float]], means: List[Optional[float]], num_sents: List[int]) -> float:
"""
Takes a list of standard deviations, means and number of sentences of the same length and computes
the combined standard deviation.
:param stds: A list of standard deviations.
:param means: A list of mean values.
:param num_sents: A list with number of sentences used to compute each mean value.
:return: The combined standard deviation.
"""
if not stds or not means or not num_sents:
raise ValueError("Invalid input list.")
check_condition(all(len(stds) == len(l) for l in [means, num_sents]), "List lengths do not match") # type: ignore
total_mean = combine_means(means, num_sents)
return math.sqrt(sum(num_sent * (std**2 + (mean-total_mean)**2) for num_sent, std, mean in zip(num_sents, stds, means)
if std is not None and mean is not None) / sum(num_sents))
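# Illustrative usage (comment only, not part of the original module): with shard means 1.0 and
# 3.0, shard stds 0.0 and 0.0, and 2 sentences per shard, the combined std reflects only the
# spread of the means around the combined mean of 2.0:
#   combine_stds([0.0, 0.0], [1.0, 3.0], [2, 2]) -> sqrt((2 * 1 + 2 * 1) / 4) = 1.0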
def average_tensors(tensors: List[pt.Tensor]) -> pt.Tensor:
"""
Compute the element-wise average of a list of tensors of the same shape.
:param tensors: A list of input tensors with the same shape.
:return: The average of the tensors on the same device as tensors[0].
"""
if not tensors:
raise ValueError("tensors is empty.")
if len(tensors) == 1:
return tensors[0]
check_condition(all(tensors[0].shape == t.shape for t in tensors), "tensor shapes do not match")
return sum(tensors) / len(tensors) # type: ignore
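# Illustrative usage (comment only, not part of the original module):
#   average_tensors([pt.tensor([1.0, 2.0]), pt.tensor([3.0, 4.0])]) -> tensor([2.0, 3.0])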
def gen_prefix_masking(prefix: pt.Tensor, vocab_size: int, dtype: pt.dtype) -> Tuple[pt.Tensor, int]:
"""
Generate prefix masks from prefix ids; the masks are inf everywhere except at the prefix id positions, where they are zero.
:param prefix: Target prefix tokens or factors as ids. Shape (batch size, max length of prefix).
:param vocab_size: Vocabulary size.
:param dtype: dtype of the returned output.
:return: prefix_masks of shape (batch size, max length of prefix, vocab_size), with type dtype.
"""
prefix_masks_sizes = list(prefix.size()) # type: List[int]
max_length = prefix_masks_sizes[1]
prefix_masks_sizes.append(vocab_size)
# prefix_masks are inf everywhere except zero for indices of prefix ids.
prefix_masks = pt.full(prefix_masks_sizes, fill_value=np.inf, device=prefix.device, dtype=dtype)
prefix_masks.scatter_(-1, prefix.to(pt.int64).unsqueeze(-1), 0.)
# Note: The use of prefix_masks.scatter_() function is equivalent (but much faster) to
# prefix_masks[prefix_one_hot != 0] = 0., where
# prefix_one_hot = pt.nn.functional.one_hot(prefix.to(pt.int64), num_classes=vocab_size).to(prefix.device)
# In the same batch during inference, it is possible that some translations have target prefix
# while others do not have. It is also possible that translation may have a target prefix with
# different length to others. Thus prefix ids may include a full zero vector if a translation
# in the batch does not have prefix, or include a vector padding with zeros on the right if some
# translations are with shorter prefix. An example of prefix ids reflecting length differences \
# is as follows:
#
# [1, 2, 3]
# [1, 2, 0]
# [0, 0, 0]
#
# Here, the first sentence has a prefix of length 3, the second one has a prefix of length 1 \
# and the last one does not have prefix.
#
# At any timestep, some target prefix ids could be 0 (i.e. 0 in the target_prefix means 'no constraint'). \
# If a prefix id is 0 for a translation at a timestep, all hots in the vocab are assigned to 0 (instead \
# of only one hot is assigned to 0 and other hots are inf). This makes sure there is no constraint on \
# selecting any specific target token for the translation in that case.
prefix_masks.masked_fill_(prefix.unsqueeze(-1) == 0, 0)
return prefix_masks, max_length
def shift_prefix_factors(prefix_factors: pt.Tensor) -> pt.Tensor:
"""
Shift prefix factors one step to the right
:param prefix_factors: tensor ids. Shape (batch size, length, num of factors).
:return new prefix_factors_shift (batch size, length + 1, num of factors)
"""
prefix_factors_sizes = prefix_factors.size()
prefix_factors_shift = pt.zeros(prefix_factors_sizes[0], prefix_factors_sizes[1] + 1, prefix_factors_sizes[2], dtype=prefix_factors.dtype, device=prefix_factors.device)
prefix_factors_shift[:, 1:] = prefix_factors
return prefix_factors_shift
def adjust_first_step_masking(target_prefix: pt.Tensor, first_step_mask: pt.Tensor) -> pt.Tensor:
"""
Adjust first_step_masking based on the target prefix
(Target prefix for each input in the same batch may have a different length. \
Thus first_step_mask needs to be adjusted accordingly.)
:param target_prefix: Shape (batch size, max target prefix length).
:param first_step_mask: Shape (batch_size * beam_size, 1)
:return (adjusted) first_steps_masking (batch_size * beam_size, max target prefix length + 1).
An illustrative example of how first_step_masking is adjusted
Inputs:
target_prefix (batch_size = 2, max target prefix length = 2)
tensor([1 2]
[1 0])
Note: Two target prefix tokens in the first sentence, \
one target prefix token in the second sentence.
first_step_mask (batch_size = 2 * beam_size = 5, 1)
tensor([[0],
[inf],
[inf],
[inf],
[inf],
[0],
[inf],
[inf],
[inf],
[inf])
Output:
Adjusted first_step_mask (batch_size * beam_size, max target prefix length + 1):
tensor([[0 0 0],
[inf inf inf],
[inf inf inf],
[inf inf inf],
[inf inf, inf],
[0 0 0],
[inf inf 0],
[inf inf 0],
[inf inf 0],
[inf inf 0]])
The concrete steps of what this function does are as follows:
Step 1: Create a zero masking matrix with shape (batch size, max target prefix length + 1)
Fill 1 into this masking matrix based on the target prefix
target prefix initialize masking masking roll one step to the right
from target prefix is not 0 and assign 1 at index 0
[1 2] -> [1 2 0] -> [1 1 0] -> [1 1 1]
[1 0] [1 0 0] [1 0 0] [1 1 0]
Step 2: Adjust first_step_mask based on masking
masking expand masking with expand first_step_mask with max target
beam size prefix length, fill 0 where masking is 0
[1 1 1] -> [1 1 1] -> [0 0 0]
[1 1 0] [1 1 1] [inf inf inf]
[1 1 1] [inf inf inf]
[1 1 1] [inf inf inf]
[1 1 1] [inf inf inf]
[1 1 0] [0 0 0]
[1 1 0] [inf inf 0]
[1 1 0] [inf inf 0]
[1 1 0] [inf inf 0]
[1 1 0] [inf inf 0]
"""
batch_beam, _ = first_step_mask.size()
batch, max_prefix_len = target_prefix.size()
beam_size = batch_beam // batch
# Step 1
masking = pt.zeros((batch, max_prefix_len + 1), device=target_prefix.device)
masking[:, :max_prefix_len] = target_prefix
masking = pt.clamp(masking, 0., 1.) # force all non zero ids to 1
masking = pt.roll(masking, 1, -1)
masking[:, 0] = 1.
# Step 2
masking = masking.unsqueeze(1).expand(-1, beam_size, -1).reshape(batch_beam, -1)
first_step_mask = first_step_mask.expand(-1, masking.size(-1)).clone()
first_step_mask.masked_fill_(masking == 0., 0.)
return first_step_mask
def parse_metrics_line(line_number: int, line: str) -> Dict[str, Any]:
"""
Parse a line of metrics into a mapping of keys and values.
:param line_number: Line number, used to check that the checkpoint in the line is aligned with it.
:param line: A line from the Sockeye metrics file.
:return: Dictionary of metric names (e.g. perplexity-train) mapped to their values.
"""
fields = line.split('\t')
checkpoint = int(fields[0])
check_condition(line_number == checkpoint,
"Line (%d) and loaded checkpoint (%d) do not align." % (line_number, checkpoint))
metric = dict() # type: Dict[str, Any]
for field in fields[1:]:
key, value = field.split("=", 1)
if value == 'True' or value == 'False':
metric[key] = (value == 'True')
elif value == 'None':
metric[key] = None
else:
metric[key] = float(value)
return metric
def read_metrics_file(path: str) -> List[Dict[str, Any]]:
"""
Reads a metrics file and returns a list of key-value mappings, one per line.
:param path: File to read metric values from.
:return: List of dictionaries mapping metric names (e.g. perplexity-train) to their values.
"""
with open(path) as fin:
metrics = [parse_metrics_line(i, line.strip()) for i, line in enumerate(fin, 1)]
return metrics
def write_metrics_file(metrics: List[Dict[str, Any]], path: str):
"""
Write metrics data to tab-separated file.
:param metrics: metrics data.
:param path: Path to write to.
"""
with open(path, 'w') as metrics_out:
for checkpoint, metric_dict in enumerate(metrics, 1):
metrics_str = "\t".join(["{}={}".format(name, value) for name, value in sorted(metric_dict.items())])
metrics_out.write("{}\t{}\n".format(checkpoint, metrics_str))
def get_validation_metric_points(model_path: str, metric: str):
"""
Returns tuples of value and checkpoint for given metric from metrics file at model_path.
:param model_path: Model path containing .metrics file.
:param metric: Metric values to extract.
:return: List of tuples (value, checkpoint).
"""
metrics_path = os.path.join(model_path, C.METRICS_NAME)
data = read_metrics_file(metrics_path)
return [(d['%s-val' % metric], cp) for cp, d in enumerate(data, 1)]
def grouper(iterable: Iterable, size: int) -> Iterable:
"""
Collect data into fixed-length chunks or blocks without discarding underfilled chunks or padding them.
:param iterable: A sequence of inputs.
:param size: Chunk size.
:return: Sequence of chunks.
"""
it = iter(iterable)
while True:
chunk = list(itertools.islice(it, size))
if not chunk:
return
yield chunk
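# Illustrative usage (comment only, not part of the original module): unlike the classic
# itertools grouper recipe, the final underfilled chunk is kept without padding, e.g.
#   list(grouper(range(5), 2)) -> [[0, 1], [2, 3], [4]]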
def repeat_interleave_with_expand(state: pt.Tensor, repeats: int, dim: int) -> pt.Tensor:
"""
Use expand instead of repeat_interleave for lower latency (i.e. equivalent to state.repeat_interleave(repeats=repeats, dim=dim))
:param state: a pt.Tensor
:param repeats: int
:param dim: int
:return repeat_state
"""
state_size = state.size()
return state.unsqueeze(dim + 1).expand(*state_size[:dim + 1], repeats, *state_size[dim + 1:])\
.clone().view(*state_size[:dim], state.size(dim) * repeats, *state_size[dim + 1:])
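# Illustrative usage (comment only, not part of the original module): the result matches
# state.repeat_interleave(repeats=repeats, dim=dim), e.g. for x = pt.tensor([[1, 2], [3, 4]]):
#   repeat_interleave_with_expand(x, repeats=2, dim=0)
#   -> tensor([[1, 2], [1, 2], [3, 4], [3, 4]])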
def metric_value_is_better(new: float, old: float, metric: str) -> bool:
"""
Returns true if new value is strictly better than old for given metric.
"""
if C.METRIC_MAXIMIZE[metric]:
return new > old
else:
return new < old
_DTYPE_TO_STRING = {
np.float32: 'float32',
np.float16: 'float16',
np.int8: 'int8',
np.int32: 'int32',
pt.float32: 'float32',
pt.float16: 'float16',
pt.int32: 'int32',
pt.int8: 'int8',
}
def _print_dtype(dtype):
return _DTYPE_TO_STRING.get(dtype, str(dtype))
def log_parameters(model: pt.nn.Module):
"""
Logs information about model parameters.
"""
fixed_parameter_names = []
learned_parameter_names = []
total_learned = 0
total_fixed = 0
visited = defaultdict(list)
for name, module in model.named_modules(remove_duplicate=False):
for param_name, param in module.named_parameters(prefix=name, recurse=False):
repr = "%s [%s, %s]" % (name, tuple(param.shape), _print_dtype(param.dtype))
size = param.shape.numel()
if not param.requires_grad:
fixed_parameter_names.append(repr)
total_fixed += size if param not in visited else 0
else:
total_learned += size if param not in visited else 0
learned_parameter_names.append(repr)
visited[param].append(param_name)
shared_parameter_names = [] # type: List[str]
total_shared = 0
for param, names in visited.items():
if len(names) > 1:
total_shared += param.shape.numel()
shared_parameter_names.append(" = ".join(names))
total_parameters = total_learned + total_fixed
logger.info("# of parameters: %d | trainable: %d (%.2f%%) | shared: %d (%.2f%%) | fixed: %d (%.2f%%)",
total_parameters,
total_learned, total_learned / total_parameters * 100,
total_shared, total_shared / total_parameters * 100,
total_fixed, total_fixed / total_parameters * 100)
logger.info("Trainable parameters: \n%s", pprint.pformat(learned_parameter_names))
logger.info("Shared parameters: \n%s", pprint.pformat(shared_parameter_names, width=120))
logger.info("Fixed parameters:\n%s", pprint.pformat(fixed_parameter_names))
@contextmanager
def no_context():
"""
No-op context manager that can be used in "with" statements
"""
yield None
class SingleProcessPool:
def map(self, func, iterable):
return list(map(func, iterable))
def starmap(self, func, iterable):
return list(starmap(func, iterable))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def create_pool(max_processes):
if max_processes == 1:
return SingleProcessPool()
else:
return multiprocessing.pool.Pool(processes=max_processes)
def is_distributed() -> bool:
return torch.distributed.is_initialized()
def is_primary_worker() -> bool:
"""
True when current process is the primary worker (rank 0) or the only worker
(not running in distributed mode)
"""
return not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
def get_local_rank() -> int:
return int(os.environ[C.DIST_ENV_LOCAL_RANK])
T = TypeVar('T')
def broadcast_object(obj: T, src: int = 0) -> T:
"""
Broadcast a single Python object across workers (default source is primary
worker with rank 0)
"""
obj_list = [obj]
torch.distributed.broadcast_object_list(obj_list, src=src)
return obj_list[0]
def all_gather_object(obj: T) -> List[T]:
"""Gather each worker's instance of an object, returned as a list"""
obj_list = [None] * torch.distributed.get_world_size() # type: List[T]
torch.distributed.all_gather_object(obj_list, obj)
return obj_list
|
the-stack_0_18964 | import json
import os
import re
import numpy as np
from django.conf import settings
from django.views.decorators.csrf import csrf_protect
from django.core.files.storage import FileSystemStorage
from django.shortcuts import redirect
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from PySAM import Windpower
from PySAM.ResourceTools import FetchResourceFiles
from api.models.calliope import Abstract_Tech, Run_Parameter
from api.models.configuration import Location, Technology, Tech_Param, \
Loc_Tech, Loc_Tech_Param, ParamsManager, Scenario, Scenario_Param, \
Scenario_Loc_Tech, Timeseries_Meta, Model, Model_Comment, \
Model_Favorite, User_File, Model_User
from api.tasks import task_status, upload_ts, copy_model
from taskmeta.models import CeleryTask
def validate_model_name(value):
if len(value) < 3:
raise ValidationError("Error: Invalid model name, too short.")
regex = re.compile(r"(<(.*)>.*?|<(.*) />|[^\w\s\(\)-])")
matched = regex.search(value)
if matched is None:
return
diff = set(value).difference(set(["(", ")", " ", "-", "_"]))
if len(diff) == 0:
raise ValidationError("Error: Invalid model name, should not contain only symbols")
result = matched.group(0)
raise ValidationError(f"Error: Invalid model name, should not contain '{result}'")
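# Illustrative behaviour (comment only, not part of the original view code), following the
# checks above: "My Model (v2)" passes, while "My <script> Model" or "model!" raises a
# ValidationError that names the offending substring.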
@csrf_protect
def add_model(request):
"""
Create a new model. Option to provide an existing model to copy as a new
instance. User must already have view access to the template model.
Parameters:
template_model_uuid (uuid): optional
model_name (str): required
Returns (json): Action Confirmation
Example:
POST: /api/add_model/
"""
user = request.user
model_name = request.POST["model_name"].strip()
template_model_uuid = request.POST["template_model_uuid"]
payload = {}
try:
validate_model_name(model_name)
except ValidationError as e:
payload["status"] = "Failed"
payload["message"] = str(e)
return HttpResponse(json.dumps(payload), content_type="application/json")
try:
template_model = Model.objects.get(uuid=template_model_uuid)
template_model.handle_view_access(user)
except Exception as e:
template_model = None
print("User building from blank model: {}".format(e))
# Create Model
model_name = Model.find_unique_name(model_name)
model = Model.objects.create(name=model_name)
Model_User.objects.create(user=user, model=model, can_edit=True)
comment = "{} initiated this model.".format(user.get_full_name())
Model_Comment.objects.create(model=model, comment=comment, type="version")
payload['model_uuid'] = str(model.uuid)
payload["status"] = "Added"
if template_model is not None:
try:
model.is_uploading = True
model.save()
copy_model.apply_async(
kwargs={"src_model_id": template_model.id,
"dst_model_id": model.id,
"user_id": user.id})
payload["status"] = "Submitted"
except Exception as e:
payload["status"] = "Failed"
payload["message"] = str(e)
model.delete()
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def remove_model(request):
"""
Removes a user's access to a model. The model will still exist and
may be seen by other collaborators.
Parameters:
model_uuid (uuid): required
Returns (json): Action Confirmation
Example:
POST: /api/remove_model/
"""
user = request.user
model_uuid = request.POST["model_uuid"]
model = Model.by_uuid(model_uuid)
model.handle_view_access(request.user)
Model_User.objects.filter(model=model, user=user).hard_delete()
payload = {"message": "Dropped as collaborator."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def duplicate_model(request):
"""
Duplicate a model as a view-only snapshot. Users may choose to take a
snapshot of a model to provide a retrieval checkpoint and/or begin a
forked version of their original model. A snapshot will replicate all of
its underlying data as new instances.
Parameters:
model_uuid (uuid): required
Returns (json): Action Confirmation
Example:
POST: /api/duplicate_model/
"""
user = request.user
model_uuid = request.POST["model_uuid"]
payload = {}
old_model = Model.by_uuid(model_uuid)
old_model.handle_edit_access(user)
# Create Model
model = Model.objects.create(name=old_model.name)
latest = Model.objects.filter(name=model.name).exclude(
snapshot_version=None).values_list('snapshot_version',
flat=True)
model.snapshot_version = np.max(list(latest) + [0]) + 1
model.snapshot_base = old_model
payload['model_uuid'] = str(model.uuid)
model.save()
try:
model.is_uploading = True
model.save()
copy_model.apply_async(
kwargs={"src_model_id": old_model.id,
"dst_model_id": model.id,
"user_id": user.id})
payload["status"] = "Submitted"
except Exception as e:
payload["status"] = "Failed"
payload["message"] = str(e)
model.delete()
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def add_collaborator(request):
"""
Add a collaborator to a model. A collaborator may be:
granted edit permissions (value=1),
granted view-only permissions (value=0),
or removed of all permissions (value=null)
Parameters:
model_uuid (uuid): required
collaborator_id (str): required
collaborator_can_edit (int): optional (0 or 1)
Returns (json): Action Confirmation
Example:
POST: /api/add_collaborator/
"""
model_uuid = request.POST["model_uuid"]
user_id = request.POST["collaborator_id"]
user = User.objects.filter(id=user_id).first()
try:
can_edit = bool(int(request.POST["collaborator_can_edit"]))
except ValueError:
can_edit = None
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if user:
message = Model_User.update(model, user, can_edit)
else:
message = "No user registered by that email."
payload = {"message": message}
return HttpResponse(json.dumps(payload), content_type="application/json")
def validate_model_comment(value):
value = value.strip()
if len(value) == 0:
raise ValidationError("Please write your comment.")
regex = re.compile(r"(<(.*)>.*?|<(.*) />)")
matched = regex.search(value)
if matched is None:
return
result = matched.group(0)
raise ValidationError(f"Invalid comment string, please remove '{result}'")
@csrf_protect
def add_model_comment(request):
"""
Add a user comment to a model's activity page
Parameters:
model_uuid (uuid): required
comment (str): required
Returns (json): Action Confirmation
Example:
POST: /api/add_model_comment/
"""
model_uuid = request.POST["model_uuid"]
comment = request.POST["comment"]
try:
validate_model_comment(comment)
except ValidationError as e:
payload = {"message": str(e)}
return HttpResponse(json.dumps(payload), content_type="application/json")
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
Model_Comment.objects.create(
model=model, user=request.user, comment=comment, type="comment"
)
model.notify_collaborators(request.user)
payload = {"message": "Added comment."}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Locations
@csrf_protect
def update_location(request):
"""
Add or Update a location.
To update a location, must provide a location_id
Parameters:
model_uuid (uuid): required
location_id (int): optional
location_name (str): required
location_lat (float): required
location_long (float): required
location_area (float): required
location_description (str): required
Returns (json): Action Confirmation
Example:
POST: /api/update_location/
"""
model_uuid = request.POST["model_uuid"]
location_id = int(request.POST.get("location_id", 0))
location_name = request.POST["location_name"].strip()
location_lat = float(request.POST["location_lat"])
location_long = float(request.POST["location_long"])
location_area = request.POST["location_area"]
location_description = request.POST["location_description"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if location_area == "":
location_area = None
elif float(location_area) < 0:
location_area = None
if ((location_lat < -90) or (location_lat > 90)):
location_lat = 0
if ((location_long < -180) or (location_long > 180)):
location_long = 0
non_unique_name = True
while non_unique_name:
existing = model.locations.filter(pretty_name__iexact=location_name)
if existing:
if location_id == existing.first().id:
non_unique_name = False
else:
location_name += " COPY"
else:
non_unique_name = False
if location_id:
model.locations.filter(id=location_id).update(
pretty_name=location_name,
name=ParamsManager.simplify_name(location_name),
latitude=location_lat,
longitude=location_long,
available_area=location_area,
description=location_description,
)
# Log Activity
comment = "{} updated the location: {}.".format(
request.user.get_full_name(), location_name
)
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.notify_collaborators(request.user)
model.deprecate_runs(location_id=location_id)
payload = {
"message": "edited location",
"location_id": location_id,
"location_name": location_name,
"location_lat": location_lat,
"location_long": location_long,
"location_area": location_area,
"location_description": location_description,
}
else:
location = Location.objects.create(
model_id=model.id,
pretty_name=location_name,
name=ParamsManager.simplify_name(location_name),
latitude=location_lat,
longitude=location_long,
available_area=location_area,
description=location_description,
)
# Log Activity
comment = "{} added a location: {}.".format(
request.user.get_full_name(), location_name
)
Model_Comment.objects.create(model=model, comment=comment, type="add")
model.notify_collaborators(request.user)
payload = {
"message": "added location",
"location_id": location.id,
"location_name": location_name,
"location_lat": location_lat,
"location_long": location_long,
"location_area": location_area,
"location_description": location_description,
}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_location(request):
"""
Delete a location. This action will cascade "delete" all instances that
refer to it.
Parameters:
model_uuid (uuid): required
location_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_location/
"""
model_uuid = request.POST["model_uuid"]
location_id = request.POST["location_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
locations = model.locations.filter(id=location_id)
if len(locations) > 0:
pretty_name = locations.first().pretty_name
model.deprecate_runs(location_id=location_id)
locations.delete()
# Log Activity
comment = "{} deleted the location: {}.".format(
request.user.get_full_name(), pretty_name
)
Model_Comment.objects.create(model=model,
comment=comment, type="delete")
model.notify_collaborators(request.user)
payload = {"message": "deleted location"}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Technologies
@csrf_protect
def add_technology(request):
"""
Add a new technology. Optionally create the technology from an existing
technology to inherit its technology-level parameters. Any override
parameters set at the node level will not be transferred.
Parameters:
model_uuid (uuid): required
technology_pretty_name (str): required
technology_type (str): required
technology_id (int): optional
Returns (json): Action Confirmation
Example:
POST: /api/add_technology/
"""
model_uuid = request.POST["model_uuid"]
technology_pretty_name = request.POST["technology_name"]
technology_id = request.POST.get("technology_id", None)
technology_type = request.POST["technology_type"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
abstract_tech = Abstract_Tech.objects.filter(name=technology_type).first()
technology_name = ParamsManager.simplify_name(technology_pretty_name)
if technology_id is not None:
existing = Technology.objects.filter(id=technology_id).first()
existing.model.handle_view_access(request.user)
technology = existing.duplicate(model.id, technology_pretty_name)
else:
technology = Technology.objects.create(
model_id=model.id,
abstract_tech_id=abstract_tech.id,
name=technology_name,
pretty_name=technology_pretty_name,
)
Tech_Param.objects.create(
model_id=model.id,
technology_id=technology.id,
parameter_id=1,
value=technology_type,
)
Tech_Param.objects.create(
model_id=model.id,
technology_id=technology.id,
parameter_id=2,
value=technology_pretty_name,
)
# Log Activity
comment = "{} added a technology: {}.".format(
request.user.get_full_name(), technology_pretty_name
)
Model_Comment.objects.create(model=model, comment=comment, type="add")
model.notify_collaborators(request.user)
request.session["technology_id"] = technology.id
payload = {"message": "added technology", "technology_id": technology.id}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_technology(request):
"""
Delete a technology. This action will cascade "delete" all instances that
refer to it.
Parameters:
model_uuid (uuid): required
technology_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_technology/
"""
model_uuid = request.POST["model_uuid"]
technology_id = request.POST["technology_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
technologies = model.technologies.filter(id=technology_id)
if len(technologies) > 0:
technology_pretty_name = technologies.first().pretty_name
model.deprecate_runs(technology_id=technology_id)
technologies.delete()
# Log Activity
comment = "{} deleted the technology: {}.".format(
request.user.get_full_name(), technology_pretty_name
)
Model_Comment.objects.create(model=model,
comment=comment, type="delete")
model.notify_collaborators(request.user)
payload = {"message": "deleted technology"}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def update_tech_params(request):
"""
Update the parameters for a technology. Parameter data is provided in a
form_data object which stores updates under the following keys:
'essentials', 'add', 'edit', 'delete'
Parameters:
model_uuid (uuid): required
technology_id (int): required
form_data (json): required
Returns (json): Action Confirmation
Example:
POST: /api/update_tech_params/
"""
model_uuid = request.POST["model_uuid"]
technology_id = request.POST["technology_id"]
form_data = json.loads(request.POST["form_data"])
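# Illustrative shape of form_data (comment only; the exact inner structure is whatever the
# front-end sends and Technology.update() expects, which is defined elsewhere in this app):
#   {"essentials": {...}, "add": {...}, "edit": {...}, "delete": {...}}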
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
technology = model.technologies.filter(id=technology_id)
if len(technology) > 0:
technology.first().update(form_data)
# Log Activity
comment = "{} updated the technology: {}.".format(
request.user.get_full_name(),
technology.first().pretty_name,
)
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.notify_collaborators(request.user)
model.deprecate_runs(technology_id=technology_id)
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
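# Illustrative note (added): the structure of `form_data` is defined by the
# front-end form, not by this module. A hypothetical payload, using only the
# keys named in the docstring above, might look like:
#
#   form_data = {
#       "essentials": {...},   # essential tech-level parameters
#       "add": {...},          # parameter values to create
#       "edit": {...},         # parameter values to overwrite
#       "delete": [...],       # parameter entries to remove
#   }
#
# The nested contents are placeholders; Technology.update() (called above)
# defines the authoritative handling.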
@csrf_protect
def update_favorite(request):
"""
Add a parameter as a favorite. Favorites are persisted on a model by model
basis. Therefore, if one user adds or removes a favorite parameter,
all collaborators on that model will experience those changes.
Parameters:
model_uuid (uuid): required
param_id (int): required
add_favorite (int): required
Returns (json): Action Confirmation
Example:
GET: /api/update_favorite/
"""
model_uuid = request.GET["model_uuid"]
add_favorite = int(request.GET["add_favorite"])
param_id = int(request.GET["param_id"])
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if add_favorite:
Model_Favorite.objects.create(model_id=model.id, parameter_id=param_id)
payload = {"message": "added favorite"}
else:
Model_Favorite.objects.filter(model_id=model.id,
parameter_id=param_id).hard_delete()
payload = {"message": "removed favorite"}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def convert_to_timeseries(request):
"""
Convert a static parameter into a timeseries. Note that this does not yet
assign a timeseries meta instance to the parameter instance. Any previous
data that has been configured for this parameter will be lost.
Parameters:
model_uuid (uuid): required
technology_id (int): required
        param_id (int): required
        loc_tech_id (int): optional
Returns (json): Action Confirmation
Example:
GET: /api/convert_to_timeseries/
"""
model_uuid = request.GET["model_uuid"]
param_id = int(request.GET["param_id"])
technology_id = request.GET["technology_id"]
try:
loc_tech_id = int(request.GET["loc_tech_id"])
except Exception as e:
loc_tech_id = None
print("Technology only: {}".format(e))
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if loc_tech_id:
Loc_Tech_Param.objects.filter(
model_id=model.id, parameter_id=param_id, loc_tech_id=loc_tech_id
).hard_delete()
Loc_Tech_Param.objects.create(
model_id=model.id,
parameter_id=param_id,
loc_tech_id=loc_tech_id,
value=0,
timeseries=True,
)
payload = {"message": "added timeseries to node"}
else:
Tech_Param.objects.filter(model_id=model.id,
parameter_id=param_id,
technology_id=technology_id).hard_delete()
Tech_Param.objects.create(
model_id=model.id,
parameter_id=param_id,
technology_id=technology_id,
value=0,
timeseries=True,
)
payload = {"message": "added timeseries to technology"}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Location-Technologies (Nodes)
@csrf_protect
def add_loc_tech(request):
"""
Add a new node (location + technology). An argument for location_2_id is
only required for nodes with a transmission technology.
Parameters:
model_uuid (uuid): required
technology_id (int): required
location_1_id (int): required
location_2_id (int): optional
loc_tech_description (str): optional
Returns (json): Action Confirmation
Example:
POST: /api/add_loc_tech/
"""
model_uuid = request.POST["model_uuid"]
technology_id = request.POST["technology_id"]
location_1_id = request.POST["location_1_id"]
location_2_id = request.POST.get("location_2_id", None)
loc_tech_description = request.POST.get("loc_tech_description", None)
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
technology = model.technologies.filter(id=technology_id).first()
location_1 = model.locations.filter(id=location_1_id).first()
location_2 = model.locations.filter(id=location_2_id).first()
if technology.abstract_tech.name != "transmission":
location_2_id = None
existing = model.loc_techs.filter(
technology=technology,
location_1=location_1,
location_2=location_2,
)
if existing.first():
loc_tech = existing.first()
else:
loc_tech = Loc_Tech.objects.create(
model=model,
technology=technology,
location_1=location_1,
location_2=location_2,
description=loc_tech_description,
)
# Log Activity
comment = "{} added a node: {} ({}) @ {}.".format(
request.user.get_full_name(),
technology.pretty_name,
technology.tag,
location_1.pretty_name,
)
Model_Comment.objects.create(model=model, comment=comment, type="add")
model.notify_collaborators(request.user)
request.session["loc_tech_id"] = loc_tech.id
payload = {"message": "added location technology",
"loc_tech_id": loc_tech.id}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_loc_tech(request):
"""
Delete a node (location + technology). This action will cascade "delete"
all instances that refer to it.
Parameters:
model_uuid (uuid): required
loc_tech_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_loc_tech/
"""
model_uuid = request.POST["model_uuid"]
loc_tech_id = request.POST["loc_tech_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
loc_techs = model.loc_techs.filter(id=loc_tech_id)
# Log Activity
comment = "{} deleted the node: {} ({}) @ {}.".format(
request.user.get_full_name(),
loc_techs.first().technology.pretty_name,
loc_techs.first().technology.tag,
loc_techs.first().location_1.pretty_name,
)
Model_Comment.objects.create(model=model, comment=comment, type="delete")
model.notify_collaborators(request.user)
model.deprecate_runs(technology_id=loc_techs.first().technology_id)
loc_techs.delete()
payload = {"message": "deleted location technology"}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def update_loc_tech_params(request):
"""
Update the parameters for a node. Parameter data is provided in a
form_data object which stores updates under the following keys:
'add', 'edit', 'delete'
Parameters:
model_uuid (uuid): required
loc_tech_id (int): required
form_data (json): required
Returns (json): Action Confirmation
Example:
POST: /api/update_loc_tech_params/
"""
model_uuid = request.POST["model_uuid"]
loc_tech_id = request.POST["loc_tech_id"]
form_data = json.loads(request.POST["form_data"])
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
loc_tech = model.loc_techs.filter(id=loc_tech_id)
if len(loc_tech) > 0:
loc_tech.first().update(form_data)
# Log Activity
comment = "{} updated the node: {} ({}) @ {}.".format(
request.user.get_full_name(),
loc_tech.first().technology.pretty_name,
loc_tech.first().technology.tag,
loc_tech.first().location_1.pretty_name,
)
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.notify_collaborators(request.user)
model.deprecate_runs(technology_id=loc_tech.first().technology_id)
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Scenarios
@csrf_protect
def add_scenario(request):
"""
Create a new scenario. Option to create a new scenario from an existing one
by providing an existing scenario_id. Configuration and settings will be
copied as new instances.
Parameters:
model_uuid (uuid): required
scenario_name (str): required
scenario_id (str): optional
Returns (json): Action Confirmation
Example:
POST: /api/add_scenario/
"""
model_uuid = request.POST["model_uuid"]
scenario_id = request.POST.get("scenario_id", None)
scenario_name = request.POST["scenario_name"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if scenario_id not in [None, '']:
existing = model.scenarios.filter(id=scenario_id).first()
scenario = existing.duplicate(scenario_name)
else:
scenario = Scenario.objects.create(model_id=model.id,
name=scenario_name)
parameters = Run_Parameter.objects.all()
for param in parameters:
if param.name == "name":
value = "{}: {}".format(model.name, scenario_name)
else:
value = param.default_value
Scenario_Param.objects.create(
scenario=scenario, run_parameter=param,
value=value, model=model
)
# Log Activity
comment = "{} added a scenario: {}.".format(
request.user.get_full_name(), scenario_name
)
Model_Comment.objects.create(model=model, comment=comment, type="add")
model.notify_collaborators(request.user)
request.session["scenario_id"] = scenario.id
payload = {"message": "added scenario", "scenario_id": scenario.id}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def toggle_scenario_loc_tech(request):
"""
Add/remove a node (loc_tech) to/from a scenario.
Parameters:
model_uuid (uuid): required
scenario_id (int): required
loc_tech_ids (int, comma delimited): required
add (int): required: 1-True, 0-False
Returns (json): Action Confirmation
Example:
POST: /api/toggle_scenario_loc_tech/
"""
model_uuid = request.POST["model_uuid"]
scenario_id = request.POST["scenario_id"]
loc_tech_ids = request.POST["loc_tech_ids"]
loc_tech_ids = [int(i) for i in str(loc_tech_ids).split(',')]
add = bool(int(request.POST["add"]))
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
scenario = model.scenarios.filter(id=scenario_id).first()
scenario_loc_techs = Scenario_Loc_Tech.objects.filter(
model_id=model.id, scenario_id=scenario_id,
loc_tech_id__in=loc_tech_ids
)
scenario_loc_techs.delete()
if add:
slts = [Scenario_Loc_Tech(model_id=model.id, scenario_id=scenario_id,
loc_tech_id=lt) for lt in loc_tech_ids]
Scenario_Loc_Tech.objects.bulk_create(slts)
# Log Activity
comment = "{} updated the scenario: {}.".format(
request.user.get_full_name(), scenario.name
)
Model_Comment.objects.filter(model=model,
comment=comment, type="edit").hard_delete()
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.deprecate_runs(scenario_id=scenario_id)
# Return new list of active loc tech IDs
active_lts = Scenario_Loc_Tech.objects.filter(scenario_id=scenario_id)
active_lt_ids = list(active_lts.values_list("loc_tech_id", flat=True))
payload = {"active_lt_ids": active_lt_ids,
"message": "Updated scenario's location technologies"}
return HttpResponse(json.dumps(payload), content_type="application/json")
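# Illustrative client-side call (added sketch; the host, ids and CSRF/session
# handling are assumptions for demonstration only, not part of this module):
#
#   import requests
#   requests.post(
#       "http://localhost:8000/api/toggle_scenario_loc_tech/",
#       data={"model_uuid": model_uuid, "scenario_id": 3,
#             "loc_tech_ids": "12,15", "add": 1},
#   )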
@csrf_protect
def update_scenario_params(request):
"""
Update the parameters on a scenario. Parameter data is provided in a
form_data object which stores updates under the following keys:
'add', 'edit', 'delete'
Parameters:
model_uuid (uuid): required
scenario_id (int): required
form_data (json): required
Returns (json): Action Confirmation
Example:
POST: /api/update_scenario_params/
"""
model_uuid = request.POST["model_uuid"]
scenario_id = request.POST["scenario_id"]
form_data = json.loads(request.POST["form_data"])
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
scenario = model.scenarios.filter(id=scenario_id).first()
Scenario_Param.update(scenario, form_data)
# Log Activity
comment = "{} updated the scenario: {}.".format(
request.user.get_full_name(), scenario.name
)
Model_Comment.objects.create(model=model, comment=comment, type="edit")
model.notify_collaborators(request.user)
model.deprecate_runs(scenario_id=scenario_id)
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_scenario(request):
"""
Delete a scenario. This action will cascade "delete" all instances that
refer to it.
Parameters:
model_uuid (uuid): required
scenario_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_scenario/
"""
model_uuid = request.POST["model_uuid"]
scenario_id = request.POST["scenario_id"]
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
scenarios = model.scenarios.filter(id=scenario_id)
if len(scenarios) > 0:
name = scenarios.first().name
scenarios.delete()
# Log Activity
comment = "{} deleted the scenario: {}.".format(
request.user.get_full_name(), name
)
Model_Comment.objects.create(model=model,
comment=comment, type="delete")
model.notify_collaborators(request.user)
payload = {"message": "deleted scenario"}
return HttpResponse(json.dumps(payload), content_type="application/json")
# ------ Timeseries
@csrf_protect
def upload_file(request):
"""
Upload a timeseries file.
Parameters:
model_uuid (uuid): required
        file-description (str): optional
myfile (file): required
Returns: Redirect to the timeseries page for the given model
Example:
POST: /api/upload_file/
"""
model_uuid = request.POST["model_uuid"]
description = request.POST.get("file-description", None)
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
if (request.method == "POST") and ("myfile" in request.FILES):
myfile = request.FILES["myfile"]
fs = FileSystemStorage()
filename = fs.save("user_files/" + myfile.name, myfile)
User_File.objects.create(filename=filename,
description=description, model=model)
return redirect("/%s/timeseries/" % model_uuid)
return redirect("/{}/timeseries/".format(model_uuid))
@csrf_protect
def delete_timeseries(request):
"""
Delete a timeseries
Parameters:
model_uuid (uuid): required
timeseries_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_timeseries/
"""
model_uuid = request.POST.get("model_uuid", None)
timeseries_id = request.POST.get("timeseries_id", None)
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
timeseries_meta = Timeseries_Meta.objects.filter(
model=model, id=timeseries_id
)
timeseries_meta.delete()
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def delete_file(request):
"""
Delete a user timeseries file
Parameters:
model_uuid (uuid): required
file_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/delete_file/
"""
model_uuid = request.POST.get("model_uuid", None)
file_id = request.POST.get("file_id", None)
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
file_record = User_File.objects.filter(model=model, id=file_id)
file_record.delete()
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def import_timeseries(request):
"""
Import a timeseries
Parameters:
model_uuid (uuid): required
timeseries_id (int): required
Returns (json): Action Confirmation
Example:
POST: /api/import_timeseries/
"""
model_uuid = request.POST["model_uuid"]
name = request.POST["name"]
values = request.POST["timeseries"].split(',')
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
Timeseries_Meta.create_ts_8760(model, name, values)
payload = {"message": "Success."}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def upload_timeseries(request):
"""
Build and save a clean timeseries csv from a user uploaded file.
Parameters:
model_uuid (uuid): required
file_id (int): required
timeseries_name (str): required
timestamp_col (int): required
value_col (int): required
has_header (bool): required
Returns (json): Action Confirmation
Example:
GET: /api/upload_timeseries/
"""
model_uuid = request.GET.get("model_uuid", None)
file_id = request.GET.get("file_id", None)
timeseries_name = request.GET.get("timeseries_name", None)
timestamp_col = request.GET.get("timestamp_col", None)
value_col = request.GET.get("value_col", None)
has_header = request.GET.get("has_header", None)
if has_header == "true":
has_header = True
else:
has_header = False
model = Model.by_uuid(model_uuid)
model.handle_edit_access(request.user)
file_record = User_File.objects.filter(model=model, id=file_id)
simple_filename = file_record.first().simple_filename()
payload = {}
existing = Timeseries_Meta.objects.filter(model=model,
name=timeseries_name).first()
if existing:
payload["status"] = task_status.FAILURE
payload["message"] = "Timeseries name already exists"
return HttpResponse(json.dumps(payload),
content_type="application/json")
new_meta = Timeseries_Meta.objects.create(
model=model,
name=timeseries_name,
original_filename=simple_filename,
original_timestamp_col=timestamp_col,
original_value_col=value_col,
)
try:
async_result = upload_ts.apply_async(
kwargs={
"model_uuid": model_uuid,
"timeseries_meta_id": new_meta.id,
"file_id": file_id,
"timestamp_col": timestamp_col,
"value_col": value_col,
"has_header": has_header,
}
)
upload_task = CeleryTask.objects.get(task_id=async_result.id)
new_meta.upload_task = upload_task
new_meta.is_uploading = True
new_meta.save()
payload["status"] = "Success"
# Only means that the submission of the celery task was successful.
except Exception as e:
print(e)
payload["status"] = "Failed"
payload["message"] = str(e)
if not has_header:
payload["message"] += (
" Please try checking the box, "
'"The first row of the selected CSV file is a header row."'
)
new_meta.delete()
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def wtk_timeseries(request):
"""
Pull timeseries from WTK (PySAM)
Parameters:
lat (float): required e.g. 32.22
lon (float): required e.g. -97.83
Returns (json): Data
Example:
POST: /api/wtk_timeseries/
"""
latitude = request.POST["lat"]
longitude = request.POST["lon"]
coordinate = (longitude, latitude)
# Fetch wind resource data
wtk_fp = wtk_fetch_resource_files(coordinate)
# --- Initialize generator ---
if wtk_fp is not None:
generator = Windpower.default('WindPowerNone')
generator.Resource.assign({'wind_resource_filename': wtk_fp})
generator.execute()
generation = np.array(generator.Outputs.gen)
cf_profile = generation / generator.Farm.system_capacity
payload = {"cf_profile": list(cf_profile)}
else:
payload = {"message": "Not Found"}
return HttpResponse(json.dumps(payload), content_type="application/json")
def wtk_fetch_resource_files(coordinate):
"""Fetch wind resource data"""
wr = FetchResourceFiles(
tech='wind',
resource_year='tmy',
nrel_api_email=settings.NREL_API_EMAIL,
nrel_api_key=settings.NREL_API_KEY,
resource_dir=os.path.join(settings.DATA_STORAGE, 'wind-data')
)
wr.fetch([coordinate])
# --- Get resource data file path ---
wtk_path_dict = wr.resource_file_paths_dict
wtk_fp = wtk_path_dict[coordinate]
return wtk_fp
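# Illustrative stand-alone use of the two WTK helpers above (added sketch).
# The coordinate is arbitrary and valid NREL API credentials are assumed to be
# configured in Django settings; the PySAM calls simply mirror wtk_timeseries().
#
#   coord = ("-97.83", "32.22")              # (longitude, latitude)
#   wtk_fp = wtk_fetch_resource_files(coord)
#   gen = Windpower.default("WindPowerNone")
#   gen.Resource.assign({"wind_resource_filename": wtk_fp})
#   gen.execute()
#   cf_profile = np.array(gen.Outputs.gen) / gen.Farm.system_capacity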
|
the-stack_0_18965 | #!/usr/bin/python2.7
#
# Adapted from:
# http://stackoverflow.com/questions/14557944/downsizing-an-otf-font-by-removing-glyphs
import sys
try:
import fontforge
except ImportError:
print('ImportError, try: sudo apt install python-fontforge')
exit(2)
if len(sys.argv) >= 4:
font = fontforge.open(sys.argv[1])
for path in sys.argv[3:]:
with open(path, "rb") as f:
data = f.read()
for i in data.decode("UTF-8"):
font.selection[ord(i)] = True
f.close()
font.selection.invert()
for i in font.selection.byGlyphs:
font.removeGlyph(i)
font.generate(sys.argv[2])
else:
print("Usage: {} source.woff output.woff source1.txt ...".format(sys.argv[0]))
print("Example: {} /path/to/ukai.ttc chineseTranslation.txt ukaiStripped.ttf".format(sys.argv[0]))
exit(2)
|
the-stack_0_18966 | import datetime
import pytest
from redisolar.dao.redis import FeedDaoRedis
from redisolar.models import MeterReading
TESTING_SITE_ID = 1
NOW = datetime.datetime.utcnow()
@pytest.fixture
def feed_dao(redis, key_schema):
yield FeedDaoRedis(redis, key_schema)
def generate_meter_reading(site_id: int, datetime: datetime.datetime):
return MeterReading(site_id=site_id,
timestamp=datetime,
temp_c=15.0,
wh_generated=0.025,
wh_used=0.015)
def test_basic_insert_returns_recent(feed_dao):
now = datetime.datetime.now()
reading0 = generate_meter_reading(1, now)
reading1 = generate_meter_reading(1, now - datetime.timedelta(minutes=1))
feed_dao.insert(reading0)
feed_dao.insert(reading1)
global_list = feed_dao.get_recent_global(100)
assert len(global_list) == 2
assert global_list[0] == reading1
assert global_list[1] == reading0
site_list = feed_dao.get_recent_for_site(1, 100)
assert len(site_list) == 2
assert site_list[0] == reading1
assert site_list[1] == reading0
|
the-stack_0_18969 | import json
import random
from time import time
import torch
import weaviate
import math
from uuid import uuid4
client = weaviate.Client("http://localhost:8080",
timeout_config = (5, 120)
) # or another location where your Weaviate instance is running
schema = {
"classes": [{
"class": "SemanticUnit",
"description": "A written text, for example a news article or blog post",
"vectorIndexType": "hnsw",
"vectorIndexConfig": {
"efConstruction": 128,
"maxConnections": 64,
},
# "shardingConfig": {
# "desiredCount":4,
# },
"vectorizer": "none",
"properties": [
{
"dataType": [
"string"
],
"description": "ID",
"name": "reference"
},
{
"dataType": [
"text"
],
"description": "titles of the unit",
"name": "title",
},
{
"dataType": [
"text"
],
"description": "semantic unit flat text",
"name": "text"
},
{
"dataType": [
"string"
],
"description": "document type",
"name": "docType"
},
{
"dataType": [
"int"
],
"description": "so we can do some int queries",
"name": "itemId"
},
{
"dataType": [
"int"
],
"description": "so we can do some int queries",
"name": "itemIdHundred"
},
{
"dataType": [
"int"
],
"description": "so we can do some int queries",
"name": "itemIdTen"
},
{
"dataType": [
"int"
],
"description": "so we can do some int queries",
"name": "dummy"
}
]
}]
}
# cleanup from previous runs
client.schema.delete_all()
client.schema.create(schema)
batch = weaviate.ObjectsBatchRequest()
batchSize = 256
data=[]
with open("data.json", "r") as f:
data = json.load(f)
update_ratio = 0.0
# ids=[]
# if update_ratio != 0:
# id_ratio = 1-update_ratio
# id_count = len(data) * id_ratio
# for i in range(int(id_count)):
# ids+=[str(uuid4())]
# def get_uuid():
# if update_ratio == 0:
# return None
# return random.choice(ids)
def normalize(v):
norm=0
for x in v:
norm+= x*x
norm=math.sqrt(norm)
for i, x in enumerate(v):
v[i] = x/norm
return v
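# Illustrative example (added): normalize() rescales a vector to unit L2 norm
# in place and returns it, e.g.
#   normalize([3.0, 4.0]) -> [0.6, 0.8]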
start = time()
for i, doc in enumerate(data):
props = {
"title": doc['properties']['title'],
"text": doc['properties']['text'],
"docType": doc['properties']['token'],
"itemId": doc['properties']['itemId'],
"itemIdHundred": doc['properties']['itemIdHundred'],
"itemIdTen": doc['properties']['itemIdTen'],
"dummy": 7,
}
batch.add(props, "SemanticUnit", vector=normalize(doc['vector']), uuid=doc['id'])
# when either batch size is reached or we are at the last object
if (i != 0 and i % batchSize == 0) or i == len(data) - 1:
print(f'send! {i/len(data)*100:.2f}% - index time: {(time() -start)/60:.2f}mn')
# send off the batch
res = client.batch.create(batch)
for single in res:
if "errors" in single["result"]:
print(single["result"]["errors"])
# and reset for the next batch
batch = weaviate.ObjectsBatchRequest()
|
the-stack_0_18970 | """
Top Fields
==========
This class returns top fields based on document frequency
:Author: Faegheh Hasibi
"""
from nordlys.core.retrieval.elastic import Elastic
class TopFields(object):
DEBUG = 0
def __init__(self, elastic):
self.elastic = elastic
self.__fields = None
self.__fsdm_fields = {"names", "categories", "attributes", "similar_entity_names", "related_entity_names"}
@property
def fields(self):
if self.__fields is None:
self.__fields = set(self.elastic.get_fields())
return self.__fields
def get_top_term(self, en, n):
"""Returns top-n fields with highest document frequency for the given entity ID."""
doc_freq = {}
if self.DEBUG:
print("Entity:[" + en + "]")
for field in self.fields:
df = self.elastic.doc_freq(en, field)
if df > 0:
doc_freq[field] = df
top_fields = self.__get_top_n(doc_freq, n)
return top_fields
def __get_top_n(self, fields_freq, n):
"""Sorts fields and returns top-n."""
sorted_fields = sorted(fields_freq.items(), key=lambda item: (item[1], item[0]), reverse=True)
top_fields = dict()
i = 0
for field, freq in sorted_fields:
if i >= n:
break
if field in self.__fsdm_fields:
continue
i += 1
top_fields[field] = freq
if self.DEBUG:
print("(" + field + ", " + str(freq) + ")")
if self.DEBUG:
print("\nNumber of fields:", len(top_fields), "\n")
return top_fields
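# Illustrative usage (added sketch): given an Elastic instance `es` constructed
# elsewhere (its constructor arguments are outside the scope of this module),
# the five most frequent fields for an entity could be retrieved with:
#
#   top = TopFields(es).get_top_term("<dbpedia:Audi>", 5)
#   # -> {"field_name": doc_freq, ...}
#
# The entity identifier shown is only a placeholder.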
|
the-stack_0_18972 | def _impl(ctx):
input_file = ctx.files.enum_file[0]
enum_name = input_file.basename[:-len(input_file.extension)]
ctx.action(
inputs=[input_file],
#outputs=[enum_name + '.cpp', enum_name + '.h', enum_name + 't.cpp'],
outputs=[ctx.outputs.header, ctx.outputs.cpp, ctx.outputs.test],
arguments=[
"--config", input_file.path,
"--output_dir", ctx.outputs.cpp.dirname
],
progress_message="Generating from %s" % input_file.short_path,
executable=ctx.executable._gen_tool)
_gen_cppenum = rule(
implementation=_impl,
attrs={
"enum_name": attr.string(),
"enum_file": attr.label(
mandatory=True,
allow_files=True,
single_file=True
),
"_gen_tool": attr.label(
executable=True,
cfg="host",
allow_files=True,
default=Label("//external:gen_cppenum")
)
},
output_to_genfiles=True,
outputs={
"header": "%{enum_name}.h",
"cpp": "%{enum_name}.cpp",
"test": "%{enum_name}.t.cpp",
},
)
def gen_cppenum(enum_file, visibility=None):
name = enum_file[:enum_file.rindex('.')]
_gen_cppenum(
name=name + '_genfiles',
enum_name=name,
enum_file=enum_file
)
native.cc_library(
name=name,
hdrs=[
name + ".h",
],
srcs=[
name + ".cpp",
],
visibility=visibility,
)
native.cc_test(
name=name + '_test',
srcs=[
name + ".t.cpp",
],
deps=[
":" + name,
"//external:boost-test",
],
)
|
the-stack_0_18973 | import xarray as xr
import numpy as np
import glob
# PARAMETER SPECIFICATION
def _get_universal():
universal={'rootdir': '/local/projects/so_decadal_variability/',
'localdir': '',
'prefix': 'SO_',
'gridsuffix':'.nc'}
return universal
#################
# NOTE : nameposition is a super hacky solution to extract
# the name of the variable from the filename. It changes
# dependent on whether we are using the ocean grid or not.
# Need to overhaul procedure.
# Think I can combine these functions (to _get_specifics)
# but leaving separate for now
# def _get_specifics_flux(fluxname):
# specific={}
# specific['erai'] = {'suffix':'_1979-2018.nc',
# 'nameposition':slice(-24,-22)}
# specific['era5'] = {'suffix':'_1979-2019.nc',
# 'nameposition':slice(-20,-18)}
# specific['jra55'] = {'suffix':'_1979-2019.nc',
# 'nameposition':slice(-25,-23)}
# specific['merra2'] = {'suffix':'_1980-2019.nc',
# 'nameposition':slice(-26,-24)}
# return specific[fluxname]
# def _get_specifics_ocean(oceanname):
# specific={}
# specific['en4'] = {'suffix':'_197901-201812.nc',
# 'depthname':'depth'}
# specific['iap'] = {'suffix':'_197901-201812.nc',
# 'depthname':'depth_std'}
# return specific[oceanname]
##################
def _get_specifics(name):
specific={}
specific['erai'] = {'suffix':'_1979-2018.nc',
'nameposition':slice(-24,-22),
'gridlon':'longitude',
'gridlat':'latitude',
'tempoffset':-273.15}
specific['era5'] = {'suffix':'_1979-2019.nc',
'nameposition':slice(-24,-22),
'gridlon':'longitude',
'gridlat':'latitude',
'tempoffset':0}
specific['jra55'] = {'suffix':'_1979-2020.nc',
'nameposition':slice(-25,-23),
'gridlon':'lon',
'gridlat':'lat',
'tempoffset':-273.15}
specific['merra2'] = {'suffix':'_1980-2019.nc',
'nameposition':slice(-26,-24),
'gridlon':'lon',
'gridlat':'lat',
'tempoffset':-273.15}
specific['en4'] = {'suffix':'_197901-201812.nc',
'depthname':'depth'}
specific['iap'] = {'suffix':'_197901-201812.nc',
'depthname':'depth_std'}
return specific[name]
## PATHS
def _get_oceanpath(oceanname, fluxname=None, varname=None):
universal=_get_universal()
specific=_get_specifics(oceanname)
if fluxname is None:
filename = universal['prefix']+'ocean_*'+oceanname+specific['suffix']
else:
filename = universal['prefix']+'ocean_*'+oceanname+'_'+fluxname+specific['suffix']
if varname is not None:
filename = universal['prefix']+'ocean_'+varname+'_'+oceanname+specific['suffix']
path = universal['rootdir']+universal['localdir']+'ocean/'+filename
return path
def _get_gridpath(name, varname=None):
universal=_get_universal()
specific=_get_specifics(name)
filename = universal['prefix']+'grid_*'+name+universal['gridsuffix']
if varname is not None:
        filename = universal['prefix']+'grid_'+varname+'_'+name+universal['gridsuffix']
path = universal['rootdir']+universal['localdir']+'grid/'+filename
return path
def _get_fluxpath(fluxname, oceanname=None, varname='*[!e]'):
universal=_get_universal()
specific=_get_specifics(fluxname)
if oceanname is None:
ocean = ''
else:
ocean = '_'+oceanname
filename = universal['prefix']+'flux_'+varname+'_'+fluxname+ocean+specific['suffix']
path = universal['rootdir']+universal['localdir']+'flux/'+filename
return path
# LOADING
def _get_oceands(oceanname,fluxname=None):
path = _get_oceanpath(oceanname,fluxname)
return xr.open_mfdataset(path)
def _get_gridds(name):
path = _get_gridpath(name)
return xr.open_mfdataset(path)
def _get_fluxds(fluxname,oceanname=None):
path = _get_fluxpath(fluxname,oceanname)
return xr.open_mfdataset(path)
# Getting flux data (more complicated loading because of muddled time coordinate)
### This was a hack that was in here before to select particular flux data
### I'm not entirely sure of its purpose, since the appropriate data seems to be
### collected irrespective. Will remove for now but may need to return to this.
# universal=_get_universal()
# specific=_get_specifics_flux(fluxname)
# # Getting flux data (more complicated loading because of muddled time coordinate)
# fluxfiles=[]
# if oceanname is None:
# filename = universal['prefix']+'flux_*'+fluxname+specific['suffix']
# else:
# filename = universal['prefix']+'flux_*'+fluxname+'_'+oceanname+specific['suffix']
# for file in glob.glob(universal['rootdir']+'flux/'+filename):
# print(file)
# f = file[specific['nameposition']]
# if f in ['sr','fw','ht']:
# fluxfiles.append(file)
# print('Retrieving data from :')
# print(fluxfiles)
# return xr.open_mfdataset(fluxfiles)
# PROCESSING
def _preprocess(fluxds,oceands,gridds,timeslice,onoceangrid,specifics):
# HACK : current hack to avoid time selection for gridfile
# when on flux grid (which is static), whereas ocean grid
# has time dimension
# Revisit grid data to rectify
timeselect = {'time':timeslice}
if onoceangrid:
fluxds = fluxds.sel(timeselect).assign_coords({'time':oceands['time'].sel(timeselect)})
gridds = gridds.sel(timeselect)
oceands = oceands.sel(timeselect)
else:
fluxds = fluxds.sel(timeselect)
# gridds = gridds.sel(timeselect)
oceands = oceands.sel(timeselect).assign_coords({'time':fluxds['time'].sel(timeselect)})
# Check for consistency of longitude coordinates
### This is a temporary patch for merra2 and jra55 (which have a very small
### error in the longitude/latitude array), but could be instituted properly
if ~np.array_equal(oceands[specifics['gridlon']],fluxds[specifics['gridlon']]):
oceands = oceands.assign_coords(
{specifics['gridlon']:fluxds[specifics['gridlon']]})
if ~np.array_equal(oceands[specifics['gridlat']],fluxds[specifics['gridlat']]):
oceands = oceands.assign_coords(
{specifics['gridlat']:fluxds[specifics['gridlat']]})
# Merge
ds = xr.merge([fluxds,oceands,gridds])
# Change names of lon and lat
ds = ds.rename({specifics['gridlat']:'lat',specifics['gridlon']:'lon'})
# Roll longitude to it goes from 0 to 360
# ds = ds.roll(lon=180,roll_coords=False).assign_coords({'lon':np.arange(0,360)})
# Make heat flux positive into the ocean
ds['ht'] *= -1
ds['sr'] *= -1
# Turn gamman to a proper density
if 'gamman' in ds.data_vars:
ds['gamman']+=1000
# Apply offset to temperature
if 'sst' in ds.data_vars:
ds['sst']+=specifics['tempoffset']
return ds
def _preprocess_oceanonly(oceands,gridds,timeslice, roll):
timeselect = {'time':timeslice}
gridds = gridds.sel(timeselect)
oceands = oceands.sel(timeselect)
# Merge
ds = xr.merge([oceands,gridds])
# Roll longitude to it goes from 0 to 360
if roll:
ds = ds.roll(lon=180,roll_coords=False).assign_coords({'lon':np.arange(0,360)})
# Turn gamman to a proper density
ds['gamman']+=1000
return ds
# LOADING WRAPPERS
def loaddata(fluxname, oceanname, timeslice, onoceangrid, debug=False):
if onoceangrid:
# ocean
oceands = _get_oceands(oceanname)
# grid
gridname = oceanname
gridds = _get_gridds(gridname)
# flux
fluxds = _get_fluxds(fluxname,oceanname)
else:
oceands = _get_oceands(oceanname,fluxname)
gridname = fluxname
gridds = _get_gridds(gridname)
fluxds = _get_fluxds(fluxname)
if debug:
return oceands,gridds,fluxds
# Some renaming conventions
if _get_specifics(oceanname)['depthname']!='depth':
        oceands = oceands.rename({_get_specifics(oceanname)['depthname']:'depth'})
        gridds = gridds.rename({_get_specifics(oceanname)['depthname']:'depth'})
return _preprocess(fluxds,oceands,gridds,
timeslice,onoceangrid,_get_specifics(gridname))
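# Illustrative call (added sketch; assumes the corresponding flux/ocean/grid
# NetCDF files exist under the rootdir defined in _get_universal()):
#
#   ds = loaddata(fluxname="era5", oceanname="en4",
#                 timeslice=slice("1979-01", "2018-12"), onoceangrid=True)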
def loaddata_oceanonly(oceanname, timeslice, roll=True):
    specific = _get_specifics(oceanname)
oceands = _get_oceands(oceanname)
gridds = _get_gridds(oceanname)
# Some renaming conventions
if specific['depthname']!='depth':
oceands = oceands.rename({specific['depthname']:'depth'})
gridds = gridds.rename({specific['depthname']:'depth'})
return _preprocess_oceanonly(oceands,gridds,timeslice, roll)
# POST-PROCESSING AND SAVING
def save_ocean(da,oceanname):
universal = _get_universal()
    specific = _get_specifics(oceanname)
# Change naming conventions back
if specific['depthname']!='depth':
da = da.rename({'depth':specific['depthname']})
varname = da.name
filename = universal['prefix']+'ocean_'+varname+'_'+oceanname+specific['suffix']
path = universal['rootdir']+universal['localdir']+'ocean/'+filename
print('Saving to '+path)
da.to_netcdf(path) |
the-stack_0_18974 | from __future__ import print_function
import os
import sys
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
from scipy.stats import linregress
from collections import defaultdict
from sklearn.metrics import mean_squared_error
def print_json(data):
print(json.dumps(data, indent=2))
def bin_markers(df, diff=0, missing_value='-'):
"""
    merge consecutive markers with the same genotypes and
    return the selected (representative) row indices
"""
df = df.replace(missing_value, np.nan)
first_row = df.iloc[0,:]
temp = [df.index[0]] # save temp index
pre_row = first_row
df_rest = df.iloc[1:,:]
result_ids = []
for idx, row in df_rest.iterrows():
df_tmp = pd.concat([pre_row, row], axis=1).dropna()
diff_num = (df_tmp.iloc[:,0] != df_tmp.iloc[:,1]).sum()
if diff_num <= diff:
temp.append(idx)
else:
if len(temp) > 1:
result_ids.append(temp)
else:
result_ids.append([idx])
temp = [idx]
pre_row = row
if result_ids[0][0] != df.index[0]:
result_ids.insert(0, [df.index[0]])
results = []
represent_idx, block_idx = [], []
for index in result_ids:
if len(index) > 1:
df_tmp = df.loc[index, :]
good_idx = df_tmp.isnull().sum(axis=1).idxmin()
results.append(good_idx)
represent_idx.append(good_idx)
block_idx.append(index)
else:
results.append(index[0])
return represent_idx, block_idx, results
def sort_merge_sort(arrays):
"""
    merge lists whose value ranges overlap into sorted, de-duplicated arrays.
    Example:
    >>> a = [[1,3], [3, 5], [6,10], [7, 9], [11,15], [11,12],[16,30]]
    >>> sort_merge_sort(a)
    [array([1, 3, 5]), array([ 6, 7, 9, 10]), array([11, 12, 15]), [16, 30]]
"""
val_start = [i[0] for i in arrays]
val_end = [i[-1] for i in arrays]
df = pd.DataFrame(dict(zip(['array', 'val_start', 'val_end'], [arrays, val_start, val_end]))).sort_values(['val_start', 'val_end']).reset_index(drop=True)
first_arr = df.loc[0, 'array']
temp = first_arr
pre_arr = first_arr
results = []
for arr in df.loc[1:, 'array']:
if arr[0] <= pre_arr[-1]:
temp.extend(arr)
else:
if len(temp) == len(pre_arr):
results.append(pre_arr)
else:
temp_sorted_unique = pd.Series(temp).sort_values().unique()
results.append(temp_sorted_unique)
temp = arr
pre_arr = arr
if len(temp) == len(pre_arr):
results.append(pre_arr)
else:
temp_sorted_unique = pd.Series(temp).sort_values().unique()
results.append(temp_sorted_unique)
return results
def get_blocks(np_1d_array, dist=150, block_size=2):
"""
group values to a block with specified distance
Examples:
>>> a = np.array([1,2,4,10,12,13,15])
    >>> get_blocks(a, dist=1)
    [[1, 2], [12, 13]]
    >>> get_blocks(a, dist=2)
[[1, 2, 4], [10, 12, 13, 15]]
"""
first_val = np_1d_array[0]
temp = [first_val] # save temp blocks
pre_val = first_val
results = []
for val in np_1d_array[1:]:
if (val - pre_val) <= dist:
temp.append(val)
else:
if len(temp) >= block_size:
results.append(temp)
temp = [val]
pre_val = val
if len(temp) >= block_size:
results.append(temp)
return results
def random_alternative(lens, values=[0,2]):
"""
return a numpy array with alternating interger values
"""
v1, v2 = values
st_value = np.random.choice(values)
alternative_value = v1 if st_value == v2 else v2
a = np.empty((lens,))
a[::2] = st_value
a[1::2] = alternative_value
return a.astype('int')
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def GenDataFrameFromPath(path, pattern='*.png', fs=False):
"""
generate a dataframe for all file in a dir with the specific pattern of file name.
use: GenDataFrameFromPath(path, pattern='*.png')
"""
fnpaths = list(path.glob(pattern))
df = pd.DataFrame(dict(zip(['fnpath'], [fnpaths])))
df['dir'] = df['fnpath'].apply(lambda x: x.parent)
df['fn'] = df['fnpath'].apply(lambda x: x.name)
if fs:
df['size'] = df['fnpath'].apply(lambda x: os.path.getsize(x))
return df
def ConciseVcf(fn):
"""
concise the vcf file by remove the header, useless columns and simplfied genotype
ConciseVcf(fn)
"""
n = 0
f = open(fn)
for i in f:
if i.startswith('##'):
n += 1
else:
break
df = pd.read_csv(fn, header=n, delim_whitespace=True)
df = df.drop(['INFO', 'FORMAT', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER'], axis=1)
for idx in df.columns[2:]:
df[idx] = df[idx].map(lambda x: x.split(':')[0])
df = df.replace(['0/0', '0/1', '1/0', '1/1', './.'], [0, 1, 1, 2, 9])
return df
def getChunk(fn, ignore=1):
    '''ignore: number of leading rows (e.g. header lines starting with a pound sign) to skip'''
df0_chr = defaultdict(int)
chr_order = []
with open(fn) as f:
for dash_line in range(ignore):
f.readline()
for i in f:
j = i.split()[0].split('-')[0]
df0_chr[j] += 1
if j in chr_order:
pass
else:
chr_order.append(j)
if len(chr_order) != len(set(chr_order)):
sys.exit('Please check your marker name and sort them by chr name.')
return chr_order, df0_chr
class SimpleStats(object):
"""
This class will do the simple statistics on two series objecjts.
a) linear regressoin: slope, intercept, r^2, p_value
b) mean, std of the difference and absolute differnece
c) MSE (mean squared error) and RMSE (root mean squared error)
d) agreement
e) plot the regreesion figure and the difference distribution figure
"""
def __init__(self, series1, series2):
self.s1 = series1
self.s2 = series2
self.length = series1.shape[0]
self.diff = series1 - series2
self.absdiff = (series1 - series2).abs()
def regression(self):
slope, intercept, r_value, p_value, __ = linregress(self.s1, self.s2)
return slope, intercept, r_value**2, p_value
def mean_std_diff(self):
mean, std = self.diff.mean(), self.diff.std()
return mean, std
def mean_std_absdiff(self):
abs_mean, abs_std = self.absdiff.mean(), self.absdiff.std()
return abs_mean, abs_std
def mse(self):
mse = mean_squared_error(self.s1, self.s2)
return mse
def rmse(self):
rmse = mean_squared_error(self.s1, self.s2)**0.5
return rmse
def agreement(self, cutoff):
return (self.absdiff<=float(cutoff)).sum()/self.length
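# Illustrative usage (added sketch; the two pandas Series are made up and only
# need to share the same length):
#
#   s1 = pd.Series([1.0, 2.0, 3.0])
#   s2 = pd.Series([1.1, 1.9, 3.2])
#   stats = SimpleStats(s1, s2)
#   slope, intercept, r2, p_value = stats.regression()
#   stats.rmse()            # root mean squared error
#   stats.agreement(0.25)   # fraction of |s1 - s2| <= 0.25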
|
the-stack_0_18975 | # Copyright 2002 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# This module attempts to detect whether the internet is available.
# To use it, import requires_internet into your Python code, and call
# requires_internet.check(). If the internet is available, then the
# import statement succeeds. If it is not, then the statement will
# result in a MissingExternalDependencyError exception.
"""Common code to check if the internet is available."""
from Bio import MissingExternalDependencyError
def check():
try:
check.available
except AttributeError:
# I'm going to check for internet availability
RELIABLE_DOMAIN = "biopython.org"
import socket
try:
socket.getaddrinfo(
RELIABLE_DOMAIN, 80, socket.AF_UNSPEC, socket.SOCK_STREAM
)
except socket.gaierror as x:
check.available = False
else:
check.available = True
if not check.available:
raise MissingExternalDependencyError("internet not available")
|
the-stack_0_18976 |
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
from transformers import RobertaTokenizer
import pandas as pd
from ast import literal_eval
from torch.nn import CrossEntropyLoss
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
id2tag = ['O', 'B-toxic', 'I-toxic']
tag2id = {v:k for k, v in enumerate(id2tag)}
tag_pad_id = CrossEntropyLoss().ignore_index
def encode_roberta(sentence):
sentence_tokens = [tokenizer.tokenize(sentence[0])] + \
[tokenizer.tokenize(f' {t}') for t in sentence[1:]]
sentence_ids = [tokenizer.convert_tokens_to_ids(t) for t in sentence_tokens]
start_idx_mask = []
all_ids = []
for subwords in sentence_ids:
curr_mask = [1]
if len(subwords) > 1:
curr_mask += [0] * (len(subwords) - 1)
start_idx_mask.extend(curr_mask)
all_ids.extend(subwords)
special_token_mask = tokenizer.get_special_tokens_mask(all_ids)
prefix_offset = 0
while prefix_offset < len(special_token_mask) and special_token_mask[prefix_offset] == 1:
prefix_offset += 1
suffix_offset = len(special_token_mask) - len(start_idx_mask) - prefix_offset
start_idx_mask = [0] * prefix_offset + start_idx_mask + [0] * suffix_offset
sentence_inputs = tokenizer.prepare_for_model(all_ids, add_special_tokens=True)
input_ids = sentence_inputs["input_ids"]
attention_mask = sentence_inputs["attention_mask"]
#######
inputs = tokenizer(
text=' '.join(sentence),
add_special_tokens=True
)
assert inputs["input_ids"] == input_ids
assert inputs["attention_mask"] == attention_mask
#######
return input_ids, attention_mask, start_idx_mask
def get_labels_tokens(orig_sentence, chunks):
curr = 0
labels = []
tokens = []
for s, e in chunks:
other_txt = orig_sentence[curr:s].split()
label_txt = orig_sentence[s:e + 1].split()
curr = e + 1
tokens.extend(other_txt)
labels.extend(['O'] * len(other_txt))
tokens.append(label_txt[0])
labels.append('B-toxic')
for k in range(1, len(label_txt)):
tokens.append(label_txt[k])
labels.append('I-toxic')
if curr < len(orig_sentence):
other_txt = orig_sentence[curr:].split()
tokens.extend(other_txt)
labels.extend(['O'] * len(other_txt))
return tokens, labels
def get_chunks(span):
chunks = []
curr_start = None
for span_i, t in enumerate(span):
if span_i == 0 or curr_start is None:
curr_start = t
elif t > span[span_i - 1] + 1:
chunks.append((curr_start, span[span_i - 1]))
curr_start = t
if curr_start is not None:
chunks.append((curr_start, span[-1]))
return chunks
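# Illustrative example (added): get_chunks() collapses a sorted list of toxic
# character offsets into inclusive (start, end) ranges, e.g.
#   get_chunks([3, 4, 5, 9, 10]) -> [(3, 5), (9, 10)]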
def get_text_from_ids(input_ids):
return tokenizer.convert_tokens_to_string(
[tokenizer._convert_id_to_token(input_id) for input_id in input_ids])
class SpanDataset(Dataset):
def __getitem__(self, n):
return self._features[n]
def __len__(self):
return len(self._features)
def __init__(self, phase):
self._phase = phase
self.init_dataset()
def init_dataset(self):
train = pd.read_csv("tsd_train.csv")
sentences = train['text']
if self._phase in {'train', 'dev'}:
spans = train.spans.apply(literal_eval)
max_seq_len = -1
max_token_len = -1
features = []
for i, orig_sentence in enumerate(sentences):
chunks = []
if self._phase in {'train', 'dev'}:
chunks = get_chunks(spans[i])
tokens, labels = get_labels_tokens(orig_sentence, chunks)
# roberta tokenization
input_ids, attention_mask, start_idx_mask = encode_roberta(tokens)
max_seq_len = max(max_seq_len, len(input_ids))
max_token_len = max(max_token_len, len(labels))
labels_ids = [tag2id[k] for k in labels]
padded_labels_ids = labels_ids + [tag_pad_id]*(200 - len(labels_ids))
datum = {
'input_ids': torch.LongTensor(input_ids),
'attention_mask': torch.LongTensor(attention_mask),
'start_idx_mask': torch.BoolTensor(start_idx_mask),
'labels': torch.LongTensor(labels_ids),
'padded_labels': torch.LongTensor(padded_labels_ids)
}
features.append(datum)
print(f'max_seq_len {max_seq_len} max_token_len {max_token_len}')
self._features = features
def variable_collate_fn(batch):
batch_features = {}
batch_features['input_ids'] = pad_sequence([x['input_ids'] for x in batch],
batch_first=True,
padding_value=tokenizer.pad_token_id)
batch_features['attention_mask'] = pad_sequence([x['attention_mask'] for x in batch],
batch_first=True,
padding_value=0)
batch_features['start_idx_mask'] = pad_sequence([x['start_idx_mask'] for x in batch],
batch_first=True,
padding_value=0)
if 'labels' in batch[0]:
batch_features['labels'] = pad_sequence([x['labels'] for x in batch],
batch_first=True,
padding_value=tag_pad_id)
batch_features['padded_labels'] = pad_sequence([x['padded_labels'] for x in batch],
batch_first=True,
padding_value=tag_pad_id)
return batch_features
if __name__ == '__main__':
data_iter = SpanDataset('dev')
for d in data_iter:
print(d)
break
|
the-stack_0_18981 | import board
import busio
import adafruit_sht31d
# Create library object using our Bus I2C port
i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_sht31d.SHT31D(i2c)
print("\033[1mSensor\033[0m = SHT31-D")
print("\033[1mSerial Number\033[0m = ", sensor.serial_number, "\n")
for i in range(3):
if i == 0:
sensor.repeatability = adafruit_sht31d.REP_LOW
print("\033[1m\033[36mLow Repeatability:\033[0m\n")
if i == 1:
sensor.repeatability = adafruit_sht31d.REP_MED
print("\n\033[1m\033[36mMedium Repeatability:\033[0m\n")
if i == 2:
sensor.repeatability = adafruit_sht31d.REP_HIGH
sensor.clock_stretching = True
print("\n\033[1m\033[36mHigh Repeatability:\033[0m")
print("\033[1m\033[95mClock Stretching:\033[0m \033[92mEnabled\033[0m\n")
for itr in range(3):
print("\033[1mTemperature:\033[0m %0.3f ºC" % sensor.temperature)
print("\033[1mHumidity:\033[0m %0.2f %%" % sensor.relative_humidity, "\n")
|
the-stack_0_18983 | from typing import Any, Dict
from lpc import lpc
def format_and_escape_string(lexer: lpc.Lexer, value: str) -> str:
result = value[1:-1]
result = result.replace("\\n", "\n")
result = result.replace("\\t", "\t")
result = result.replace("\\r", "\r")
result = result.replace("\\f", "\f")
result = result.replace("\\b", "\b")
result = result.replace("\\\"", "\"")
result = result.replace("\\\\", "\\")
return result
class JSONParser(lpc.LPC[Dict[Any, Any]]):
def __init__(self) -> None:
lexer = lpc.Lexer()
lexer.AddPattern("\\s+", None, "WS")
lexer.AddPattern("{|}|\\[|\\]|,|:", None, "SYMBOL")
lexer.AddPattern("(true)|(false)|(null)", None, "KEYWORD")
lexer.AddPattern("-?(?:0|[1-9]\\d*)(?:\\.\\d+)(?:[eE][+-]?\\d+)?", None, "decimal")
lexer.AddPattern("-?(?:0|[1-9]\\d*)", None, "integer")
lexer.AddPattern("\"([^\\\"\\\\]|\\\\.)*\"", format_and_escape_string, "string")
parsers : Dict[str, lpc.Parser] = {}
parsers["string"] = lpc.Terminal("string", "string")
parsers["value"] = lpc.Choice("Value", [
parsers["string"],
lpc.Terminal("number", "integer", transformer=lambda value: int(value)),
lpc.Terminal("number", "decimal", transformer=lambda value: float(value)),
lpc.Terminal("true", "KEYWORD", "true", transformer=lambda _: True),
lpc.Terminal("false", "KEYWORD", "false", transformer=lambda _: False),
lpc.Terminal("null", "KEYWORD", "null", transformer=lambda _: None),
lpc.Lazy("object", lambda: parsers["object"]),
lpc.Lazy("array", lambda: parsers["array"]),
], tag=True)
parsers["pair"] = lpc.Sequence("pair", [parsers["string"], lpc.Terminal(":", "SYMBOL", ":"), parsers["value"]])
parsers["object"] = lpc.Sequence("object", [
lpc.Terminal("{", "SYMBOL", "{"),
lpc.Separated("pairs", parsers["pair"], lpc.Terminal(",", "SYMBOL", ",")),
lpc.Terminal("}", "SYMBOL", "}"),
], transformer=lambda value: {res.value[0].value:res.value[2].value[1] for res in value[1].value})
parsers["array"] = lpc.Sequence("array", [
lpc.Terminal("[", "SYMBOL", "["),
lpc.Separated("values", parsers["value"], lpc.Terminal(",", "SYMBOL", ",")),
lpc.Terminal("]", "SYMBOL", "]"),
], transformer=lambda value: [res.value[1] for res in value[1].value])
super().__init__(lexer, lpc.Choice("json", [parsers["array"], parsers["object"]], tag=False), ["WS"])
|
the-stack_0_18984 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add model for task log filename template.
Revision ID: f9da662e7089
Revises: 786e3737b18f
Create Date: 2021-12-09 06:11:21.044940
"""
from alembic import op
from sqlalchemy import Column, ForeignKey, Integer, Text
from airflow.utils.sqlalchemy import UtcDateTime
# Revision identifiers, used by Alembic.
revision = "f9da662e7089"
down_revision = "786e3737b18f"
branch_labels = None
depends_on = None
def upgrade():
"""Add model for task log template and establish fk on task instance."""
op.create_table(
"log_template",
Column("id", Integer, primary_key=True, autoincrement=True),
Column("filename", Text, nullable=False),
Column("elasticsearch_id", Text, nullable=False),
Column("created_at", UtcDateTime, nullable=False),
)
dag_run_log_filename_id = Column(
"log_template_id",
Integer,
ForeignKey("log_template.id", name="task_instance_log_template_id_fkey", ondelete="NO ACTION"),
)
with op.batch_alter_table("dag_run") as batch_op:
batch_op.add_column(dag_run_log_filename_id)
def downgrade():
"""Remove fk on task instance and model for task log filename template."""
with op.batch_alter_table("dag_run") as batch_op:
batch_op.drop_column("log_template_id")
op.drop_table("log_template")
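# Note (added): like the other Airflow schema revisions, this migration is not
# run directly; it is applied/reverted through Airflow's normal database
# commands (e.g. `airflow db upgrade` / `airflow db downgrade`), which invoke
# Alembic over this revision chain.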
|
the-stack_0_18985 | import collections
import json
import os
import re
import string
import sys
import csv
import rouge_stats as rs
import glob
def ropen(f) :
return open(f,'r',encoding='utf8')
def jload(infile) :
''' loads .json file, preprocessed from a .txt file'''
with ropen(infile) as f:
res = json.load(f)
return res
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
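# Illustrative example (added):
#   normalize_answer("The Cat sat on a mat.") -> "cat sat on mat"
# (lower-cases, strips punctuation and the articles a/an/the, collapses spaces)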
def score2txt(files, p, r, f) :
zipped = zip(files, p, r, f)
txt = ''
for item in zipped :
file,pre,recall,fm=item
txt += file + ', '
txt += str(pre) + ', '
txt += str(recall) + ', '
txt += str(fm) + '\n'
return txt
def avg(xs) :
s=sum(xs)
l=len(xs)
if 0==l : return None
return s/l
def eval_with_rouge(type) :
files=[]
f1=[]
p1=[]
r1=[]
f2=[]
p2=[]
r2=[]
fl=[]
pl=[]
rl=[]
ref_dir = type + "/answer/"
pred_dir = type + "/output/predictions/"
print('ref_dir:', ref_dir)
print('pred_dir:', pred_dir)
doc_files = sorted(glob.glob(type + "/answer/*.txt"))
print(len(doc_files))
j = 0
for doc_file in doc_files :
fname=doc_file.split('/')[-1]
ref_name=ref_dir+fname
pred_name=pred_dir+fname
'''
print('fname:',fname)
print('ref_name:', ref_name)
print('rouge', i, ', pred_name:', pred_name)
'''
with open(ref_name,'r',encoding='utf8') as fgold:
gold=fgold.read()
        with open(pred_name,'r',encoding='utf8') as fsilver:
            silver=fsilver.read()
if not gold:
print('gold file missing:', ref_name)
continue
if not silver:
print('silver file missing:', pred_name)
continue
files.append(fname)
'''
print('gold:\n', gold )
print('silver:\n', silver )
'''
res = rs.rstat('rouge', silver,gold)
#print('res:\n', *res)
#print('res[0]:\n',res[0])
p1.append(res[0]['rouge-1']['p'])
r1.append(res[0]['rouge-1']['r'])
f1.append(res[0]['rouge-1']['f'])
p2.append(res[0]['rouge-2']['p'])
r2.append(res[0]['rouge-2']['r'])
f2.append(res[0]['rouge-2']['f'])
pl.append(res[0]['rouge-l']['p'])
rl.append(res[0]['rouge-l']['r'])
fl.append(res[0]['rouge-l']['f'])
'''
print('p1:\n', p1)
print('r1:\n', r1)
print('f1:\n', f1)
print('p2:\n', p2)
print('r2:\n', r2)
print('f2:\n', f2)
print('pl:\n', pl)
print('rl:\n', rl)
print('fl:\n', fl)
'''
j = j+1
#if j==2: break
p = []
p.append(p1)
p.append(p2)
p.append(pl)
r = []
r.append(r1)
r.append(r2)
r.append(rl)
f = []
f.append(f1)
f.append(f2)
f.append(fl)
'''
print('p:/n', p)
print('r:/n', r)
print('f:/n', f)
'''
rouge_names=('ROUGE_1','ROUGE_2','ROUGE_l')
for i, rouge_name in enumerate(rouge_names) :
#print (rouge_name,':', ', Precision=', avg(p[i]), ', Recall=', avg(r[i]), ', F-Measure=', avg(f[i]))
#save ABS ROUGE scores into file
content = 'fileName, Precision, Recall, F-Measure' + '\n'
content += score2txt(files, p[i], r[i], f[i])
#print('content:\n',content )
toFile = "Abs" + rouge_name + ".csv"
with open(pred_dir + toFile,'w',encoding='utf8') as frouge:
frouge.write(content + "\n")
return ((avg(p1),avg(r1),avg(f1)), (avg(p2),avg(r2),avg(f2)), (avg(pl),avg(rl),avg(fl)))
def main():
if len(sys.argv) != 2 or sys.argv[1] not in ['test', 'test2', 'val', 'val2' ]:
print('Run one of the commands as below:')
print(' python evaluate.py test')
print(' python evaluate.py test2')
print(' python evaluate.py val ')
print(' python evaluate.py val2 ')
sys.exit(0)
type = sys.argv[1]
content = ''
r1, r2, rl = eval_with_rouge(type )
'''
print('r1:', r1)
print('r2:', r2)
print('rl:', rl)
'''
content += 'ROUGE_1 F-Measure= '+ str(round(r1[2], 5))
content += ', ROUGE_2 F-Measure= '+ str(round(r2[2],5))
content += ', ROUGE_L F-Measure= ' + str(round(rl[2], 5)) + '\n'
doc_files = glob.glob(type + "/answer/*.txt")
totalQ = len(doc_files)
print('totalQ=', totalQ)
outDir = type + '/output/'
totalSentsList = jload( outDir + 'Total_Sents.json')
avgSents = round(sum(totalSentsList)/len(totalSentsList), 2)
totalWordsList = jload( outDir + 'Total_Words.json')
avgWords = round(sum(totalWordsList)/len(totalWordsList), 2)
nlpParseDurList = jload( outDir + 'nlpParse_duration.json')
avgNlpParsrDur = round(sum(nlpParseDurList)/len(totalWordsList), 5)
doctalkSummDurList = jload( outDir + 'DoctalkSumm_duration.json')
avgDoctalkSummDur = round(sum(doctalkSummDurList)/len(totalWordsList), 5)
qaDur_doctalk_self_list = jload( outDir + 'QA_doctalk_self_duration.json')
avgDoctalkQaSelf = round(sum(qaDur_doctalk_self_list)/totalQ, 5)
stats = 'average Sentences: ' + str(avgSents) + '\n'
stats += 'average words: ' + str(avgWords) + '\n'
stats += 'Total articles: ' + str(len(totalWordsList)) + '\n'
stats += 'average nlpParse duration per article (seconds): ' + str(avgNlpParsrDur) + '\n'
stats += 'average Doctak summarization duration per article (seconds): ' + str(avgDoctalkSummDur) + '\n'
stats += 'Total questions: ' + str(totalQ) + '\n'
stats += 'average doctalk self duration per question (seconds): ' + str(avgDoctalkQaSelf) + '\n'
print(stats )
print("score:\n", content)
toFile = outDir + "score_textrank.txt"
print('save score to file:', toFile)
with open(toFile,'w',encoding='utf8') as fscore:
fscore.write(stats + "\n")
fscore.write(content + "\n")
if __name__ == '__main__':
main()
|
the-stack_0_18992 | """
Copyright (c) NREL. All rights reserved.
"""
from __future__ import print_function
import numpy as np
import openmdao.api as om
from wisdem.nrelcsm.nrel_csm_cost_2015 import Turbine_CostsSE_2015
# --------------------------------------------------------------------
class BladeMass(om.ExplicitComponent):
"""
Compute blade mass of the form :math:`mass = k*diameter^b`.
Value of :math:`k` was updated in 2015 to be 0.5.
Value of :math:`b` was updated to be 2.47/2.54 for turbine class I blades with/without carbon or
2.44/2.5 for other turbine classes with/without carbon.
Values of k and b can be overridden by the user with use of `blade_mass_coeff` (k) and/or `blade_user_exp` (b).
To use `blade_user_exp`, the value of `turbine_class` must be less than 1.
Parameters
----------
rotor_diameter : float, [m]
rotor diameter of the machine
turbine_class : float
turbine class. Set to 1 for Class I, 2 for Class II+, or 0 for user overrides of blade_user_exp
blade_has_carbon : boolean
does the blade have carbon?
blade_mass_coeff : float
k in the blade mass equation: k*(rotor_diameter/2)^b
blade_user_exp : float
optional user-entered exp for the blade mass equation
Returns
-------
blade_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("rotor_diameter", 0.0, units="m")
self.add_discrete_input("turbine_class", 1)
self.add_discrete_input("blade_has_carbon", False)
self.add_input("blade_mass_coeff", 0.5)
self.add_input("blade_user_exp", 2.5)
self.add_output("blade_mass", 0.0, units="kg")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
rotor_diameter = inputs["rotor_diameter"]
turbine_class = discrete_inputs["turbine_class"]
blade_has_carbon = discrete_inputs["blade_has_carbon"]
blade_mass_coeff = inputs["blade_mass_coeff"]
blade_user_exp = inputs["blade_user_exp"]
# select the exp for the blade mass equation
exp = 0.0
if turbine_class == 1:
if blade_has_carbon:
exp = 2.47
else:
exp = 2.54
elif turbine_class > 1:
if blade_has_carbon:
exp = 2.44
else:
exp = 2.50
else:
exp = blade_user_exp
# calculate the blade mass
outputs["blade_mass"] = blade_mass_coeff * (rotor_diameter / 2) ** exp
# --------------------------------------------------------------------
class HubMass(om.ExplicitComponent):
"""
Compute hub mass in the form of :math:`mass = k*m_{blade} + b`.
Value of :math:`k` was updated in 2015 to be 2.3.
Value of :math:`b` was updated in 2015 to be 1320.
Parameters
----------
blade_mass : float, [kg]
component mass
hub_mass_coeff : float
        k in the hub mass equation: k*blade_mass + b
hub_mass_intercept : float
b in the hub mass equation: k*blade_mass + b
Returns
-------
hub_mass : float, [kg]
component mass
"""
def setup(self):
# Variables
self.add_input("blade_mass", 0.0, units="kg")
self.add_input("hub_mass_coeff", 2.3)
self.add_input("hub_mass_intercept", 1320.0)
self.add_output("hub_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
blade_mass = inputs["blade_mass"]
hub_mass_coeff = inputs["hub_mass_coeff"]
hub_mass_intercept = inputs["hub_mass_intercept"]
# calculate the hub mass
outputs["hub_mass"] = hub_mass_coeff * blade_mass + hub_mass_intercept
# --------------------------------------------------------------------
class PitchSystemMass(om.ExplicitComponent):
"""
Compute pitch bearing mass in the form of :math:`m_{bearing} = k*m_{blade}*nblade + b1`.
Then compute pitch system mass, with bearing housing in the form of :math:`mass = (1+h)*m_{bearing} + b2`.
The values of the constants were NOT updated in 2015 and are the same as the original CSM.
Value of :math:`k` is 0.1295.
Value of :math:`h` is 0.328.
Value of :math:`b1` is 491.31.
Value of :math:`b2` is 555.0.
Parameters
----------
blade_mass : float, [kg]
component mass
blade_number : float
number of rotor blades
pitch_bearing_mass_coeff : float
k in the pitch bearing mass equation: k*blade_mass*blade_number + b
pitch_bearing_mass_intercept : float
b in the pitch bearing mass equation: k*blade_mass*blade_number + b
bearing_housing_fraction : float
bearing housing fraction
mass_sys_offset : float
mass system offset
Returns
-------
pitch_system_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("blade_mass", 0.0, units="kg")
self.add_discrete_input("blade_number", 3)
self.add_input("pitch_bearing_mass_coeff", 0.1295)
self.add_input("pitch_bearing_mass_intercept", 491.31)
self.add_input("bearing_housing_fraction", 0.3280)
self.add_input("mass_sys_offset", 555.0)
self.add_output("pitch_system_mass", 0.0, units="kg")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
blade_mass = inputs["blade_mass"]
blade_number = discrete_inputs["blade_number"]
pitch_bearing_mass_coeff = inputs["pitch_bearing_mass_coeff"]
pitch_bearing_mass_intercept = inputs["pitch_bearing_mass_intercept"]
bearing_housing_fraction = inputs["bearing_housing_fraction"]
mass_sys_offset = inputs["mass_sys_offset"]
        # calculate the pitch system mass
pitchBearingMass = pitch_bearing_mass_coeff * blade_mass * blade_number + pitch_bearing_mass_intercept
outputs["pitch_system_mass"] = pitchBearingMass * (1 + bearing_housing_fraction) + mass_sys_offset
# --------------------------------------------------------------------
class SpinnerMass(om.ExplicitComponent):
"""
Compute spinner (nose cone) mass in the form of :math:`mass = k*diameter + b`.
Value of :math:`k` was updated in 2015 to be 15.5.
Value of :math:`b` was updated in 2015 to be -980.
Parameters
----------
rotor_diameter : float, [m]
rotor diameter of the machine
spinner_mass_coeff : float
        k in the spinner mass equation: k*rotor_diameter + b
spinner_mass_intercept : float
b in the spinner mass equation: k*rotor_diameter + b
Returns
-------
spinner_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("rotor_diameter", 0.0, units="m")
self.add_input("spinner_mass_coeff", 15.5)
self.add_input("spinner_mass_intercept", -980.0)
self.add_output("spinner_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
rotor_diameter = inputs["rotor_diameter"]
spinner_mass_coeff = inputs["spinner_mass_coeff"]
spinner_mass_intercept = inputs["spinner_mass_intercept"]
# calculate the spinner mass
outputs["spinner_mass"] = spinner_mass_coeff * rotor_diameter + spinner_mass_intercept
# --------------------------------------------------------------------
class LowSpeedShaftMass(om.ExplicitComponent):
"""
Compute low speed shaft mass in the form of :math:`mass = k*(m_{blade}*power)^b1 + b2`.
Value of :math:`k` was updated in 2015 to be 13.
Value of :math:`b1` was updated in 2015 to be 0.65.
Value of :math:`b2` was updated in 2015 to be 775.
Parameters
----------
blade_mass : float, [kg]
mass for a single wind turbine blade
machine_rating : float, [kW]
machine rating
lss_mass_coeff : float
        k in the lss mass equation: k*(blade_mass*rated_power)^b1 + b2
lss_mass_exp : float
b1 in the lss mass equation: k*(blade_mass*rated_power)^b1 + b2
lss_mass_intercept : float
b2 in the lss mass equation: k*(blade_mass*rated_power)^b1 + b2
Returns
-------
lss_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("blade_mass", 0.0, units="kg")
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("lss_mass_coeff", 13.0)
self.add_input("lss_mass_exp", 0.65)
self.add_input("lss_mass_intercept", 775.0)
self.add_output("lss_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
blade_mass = inputs["blade_mass"]
machine_rating = inputs["machine_rating"]
lss_mass_coeff = inputs["lss_mass_coeff"]
lss_mass_exp = inputs["lss_mass_exp"]
lss_mass_intercept = inputs["lss_mass_intercept"]
# calculate the lss mass
outputs["lss_mass"] = (
lss_mass_coeff * (blade_mass * machine_rating / 1000.0) ** lss_mass_exp + lss_mass_intercept
)
# --------------------------------------------------------------------
class BearingMass(om.ExplicitComponent):
"""
Compute main bearing mass (single bearing) in the form of :math:`mass = k*diameter^b`.
Value of :math:`k` was updated in 2015 to be 1e-4.
Value of :math:`b` was updated in 2015 to be 3.5.
Parameters
----------
rotor_diameter : float, [m]
rotor diameter of the machine
bearing_mass_coeff : float
        k in the bearing mass equation: k*rotor_diameter^b
bearing_mass_exp : float
exp in the bearing mass equation: k*rotor_diameter^b
Returns
-------
main_bearing_mass : float, [kg]
component mass
"""
def setup(self):
# Variables
self.add_input("rotor_diameter", 0.0, units="m")
self.add_input("bearing_mass_coeff", 0.0001)
self.add_input("bearing_mass_exp", 3.5)
self.add_output("main_bearing_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
rotor_diameter = inputs["rotor_diameter"]
bearing_mass_coeff = inputs["bearing_mass_coeff"]
bearing_mass_exp = inputs["bearing_mass_exp"]
# calculates the mass of a SINGLE bearing
outputs["main_bearing_mass"] = bearing_mass_coeff * rotor_diameter ** bearing_mass_exp
# --------------------------------------------------------------------
class RotorTorque(om.ExplicitComponent):
"""
Computed rated rpm and rotor torque from rated power, rotor diameter, max tip speed, and drivetrain efficiency.
Rotor torque will be used to size other drivetrain components, such as the generator.
Parameters
----------
rotor_diameter : float, [m]
rotor diameter of the machine
machine_rating : float, [kW]
machine rating
max_tip_speed : float, [m/s]
Maximum allowable blade tip speed
max_efficiency : float
Maximum possible drivetrain efficiency
Returns
-------
rated_rpm : float, [rpm]
rpm of rotor at rated power
rotor_torque : float, [N*m]
torque from rotor at rated power
"""
def setup(self):
self.add_input("rotor_diameter", 0.0, units="m")
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("max_tip_speed", 0.0, units="m/s")
self.add_input("max_efficiency", 0.0)
self.add_output("rated_rpm", 0.0, units="rpm")
self.add_output("rotor_torque", 0.0, units="N*m")
def compute(self, inputs, outputs):
# Rotor force calculations for nacelle inputs
maxTipSpd = inputs["max_tip_speed"]
maxEfficiency = inputs["max_efficiency"]
ratedHubPower_W = inputs["machine_rating"] * 1000.0 / maxEfficiency
rotorSpeed = maxTipSpd / (0.5 * inputs["rotor_diameter"])
outputs["rated_rpm"] = rotorSpeed / (2 * np.pi) * 60.0
outputs["rotor_torque"] = ratedHubPower_W / rotorSpeed
# --------------------------------------------------------------------
class GearboxMass(om.ExplicitComponent):
"""
Compute gearbox mass in the form of :math:`mass = k*torque^b`.
Value of :math:`k` was updated in 2015 to be 113.
Value of :math:`b` was updated in 2015 to be 0.71.
Parameters
----------
rotor_torque : float, [N*m]
torque from rotor at rated power
gearbox_mass_coeff : float
        k in the gearbox mass equation: k*rotor_torque^b
gearbox_mass_exp : float
exp in the gearbox mass equation: k*rotor_torque^b
Returns
-------
gearbox_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("rotor_torque", 0.0, units="N*m")
self.add_input("gearbox_mass_coeff", 113.0)
self.add_input("gearbox_mass_exp", 0.71)
self.add_output("gearbox_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
rotor_torque = inputs["rotor_torque"]
gearbox_mass_coeff = inputs["gearbox_mass_coeff"]
gearbox_mass_exp = inputs["gearbox_mass_exp"]
# calculate the gearbox mass
outputs["gearbox_mass"] = gearbox_mass_coeff * (rotor_torque / 1000.0) ** gearbox_mass_exp
# --------------------------------------------------------------------
class BrakeMass(om.ExplicitComponent):
"""
Compute brake mass in the form of :math:`mass = k*torque`.
Value of :math:`k` was updated in 2020 to be 0.00122.
Parameters
----------
rotor_torque : float, [N*m]
rotor torque at rated power
brake_mass_coeff : float
Mass scaling coefficient
Returns
-------
brake_mass : float, [kg]
overall component mass
"""
def setup(self):
self.add_input("rotor_torque", 0.0, units="N*m")
self.add_input("brake_mass_coeff", 0.00122)
self.add_output("brake_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
# Unpack inputs
rotor_torque = float(inputs["rotor_torque"])
coeff = float(inputs["brake_mass_coeff"])
# Regression based sizing derived by J.Keller under FOA 1981 support project
outputs["brake_mass"] = coeff * rotor_torque
# --------------------------------------------------------------------
class HighSpeedShaftMass(om.ExplicitComponent):
"""
Compute high speed shaft mass in the form of :math:`mass = k*power`.
Value of :math:`k` was updated in 2015 to be 0.19894.
Parameters
----------
machine_rating : float, [kW]
machine rating
hss_mass_coeff : float
NREL CSM hss equation; removing intercept since it is negligible
Returns
-------
hss_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("hss_mass_coeff", 0.19894)
self.add_output("hss_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
machine_rating = inputs["machine_rating"]
hss_mass_coeff = inputs["hss_mass_coeff"]
outputs["hss_mass"] = hss_mass_coeff * machine_rating
# --------------------------------------------------------------------
class GeneratorMass(om.ExplicitComponent):
"""
Compute generator mass in the form of :math:`mass = k*power + b`.
Value of :math:`k` was updated in 2015 to be 2300.
Value of :math:`b` was updated in 2015 to be 3400.
Parameters
----------
machine_rating : float, [kW]
machine rating
generator_mass_coeff : float
        k in the generator mass equation: k*rated_power + b
generator_mass_intercept : float
b in the generator mass equation: k*rated_power + b
Returns
-------
generator_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("generator_mass_coeff", 2300.0)
self.add_input("generator_mass_intercept", 3400.0)
self.add_output("generator_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
machine_rating = inputs["machine_rating"]
generator_mass_coeff = inputs["generator_mass_coeff"]
generator_mass_intercept = inputs["generator_mass_intercept"]
# calculate the generator mass
outputs["generator_mass"] = generator_mass_coeff * machine_rating / 1000.0 + generator_mass_intercept
# --------------------------------------------------------------------
class BedplateMass(om.ExplicitComponent):
"""
Compute bedplate mass in the form of :math:`mass = diameter^b`.
Value of :math:`b` was updated in 2015 to be 2.2.
Parameters
----------
rotor_diameter : float, [m]
rotor diameter of the machine
bedplate_mass_exp : float
exp in the bedplate mass equation: rotor_diameter^b
Returns
-------
bedplate_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("rotor_diameter", 0.0, units="m")
self.add_input("bedplate_mass_exp", 2.2)
self.add_output("bedplate_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
rotor_diameter = inputs["rotor_diameter"]
bedplate_mass_exp = inputs["bedplate_mass_exp"]
# calculate the bedplate mass
outputs["bedplate_mass"] = rotor_diameter ** bedplate_mass_exp
# --------------------------------------------------------------------
class YawSystemMass(om.ExplicitComponent):
"""
Compute yaw system mass in the form of :math:`mass = k*diameter^b`.
The values of the constants were NOT updated in 2015 and are the same as the original CSM.
Value of :math:`k` is 9e-4.
Value of :math:`b` is 3.314.
Parameters
----------
rotor_diameter : float, [m]
rotor diameter of the machine
yaw_mass_coeff : float
        k in the yaw mass equation: k*rotor_diameter^b
yaw_mass_exp : float
exp in the yaw mass equation: k*rotor_diameter^b
Returns
-------
yaw_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("rotor_diameter", 0.0, units="m")
self.add_input("yaw_mass_coeff", 0.0009)
self.add_input("yaw_mass_exp", 3.314)
self.add_output("yaw_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
rotor_diameter = inputs["rotor_diameter"]
yaw_mass_coeff = inputs["yaw_mass_coeff"]
yaw_mass_exp = inputs["yaw_mass_exp"]
# calculate yaw system mass #TODO - 50% adder for non-bearing mass
outputs["yaw_mass"] = 1.5 * (
yaw_mass_coeff * rotor_diameter ** yaw_mass_exp
) # JMF do we really want to expose all these?
# TODO: no variable speed mass; ignore for now
# --------------------------------------------------------------------
class HydraulicCoolingMass(om.ExplicitComponent):
"""
Compute hydraulic cooling mass in the form of :math:`mass = k*power`.
The values of the constants were NOT updated in 2015 and are the same as the original CSM.
Value of :math:`k` is 0.08.
Parameters
----------
machine_rating : float, [kW]
machine rating
hvac_mass_coeff : float
hvac linear coeff
Returns
-------
hvac_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("hvac_mass_coeff", 0.08)
self.add_output("hvac_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
machine_rating = inputs["machine_rating"]
hvac_mass_coeff = inputs["hvac_mass_coeff"]
# calculate hvac system mass
outputs["hvac_mass"] = hvac_mass_coeff * machine_rating
# --------------------------------------------------------------------
class NacelleCoverMass(om.ExplicitComponent):
"""
Compute nacelle cover mass in the form of :math:`mass = k*power + b`.
The values of the constants were NOT updated in 2015 and are the same as the original CSM.
Value of :math:`k` is 1.2817.
Value of :math:`b` is 428.19.
Parameters
----------
machine_rating : float, [kW]
machine rating
    cover_mass_coeff : float
        k in the cover mass equation: k*machine_rating + b
    cover_mass_intercept : float
        b in the cover mass equation: k*machine_rating + b
Returns
-------
cover_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("cover_mass_coeff", 1.2817)
self.add_input("cover_mass_intercept", 428.19)
self.add_output("cover_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
machine_rating = inputs["machine_rating"]
cover_mass_coeff = inputs["cover_mass_coeff"]
cover_mass_intercept = inputs["cover_mass_intercept"]
# calculate nacelle cover mass
outputs["cover_mass"] = cover_mass_coeff * machine_rating + cover_mass_intercept
# TODO: ignoring controls and electronics mass for now
# --------------------------------------------------------------------
class PlatformsMainframeMass(om.ExplicitComponent):
"""
Compute platforms mass in the form of :math:`mass = k*m_{bedplate}` and
crane mass as 3000kg, if flagged by the user.
The values of the constants were NOT updated in 2015 and are the same as the original CSM.
Value of :math:`k` is 0.125.
Parameters
----------
bedplate_mass : float, [kg]
component mass
platforms_mass_coeff : float
nacelle platforms mass coeff as a function of bedplate mass [kg/kg]
crane : boolean
flag for presence of onboard crane
crane_weight : float, [kg]
weight of onboard crane
Returns
-------
platforms_mass : float, [kg]
component mass
"""
# nacelle platforms, service crane, base hardware
def setup(self):
self.add_input("bedplate_mass", 0.0, units="kg")
self.add_input("platforms_mass_coeff", 0.125)
self.add_discrete_input("crane", False)
self.add_input("crane_weight", 3000.0, units="kg")
self.add_output("platforms_mass", 0.0, units="kg")
# TODO: there is no base hardware mass model in the old model. Cost is not dependent on mass.
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
bedplate_mass = inputs["bedplate_mass"]
platforms_mass_coeff = inputs["platforms_mass_coeff"]
crane = discrete_inputs["crane"]
crane_weight = inputs["crane_weight"]
        # calculate nacelle platforms mass
platforms_mass = platforms_mass_coeff * bedplate_mass
# --- crane ---
if crane:
crane_mass = crane_weight
else:
crane_mass = 0.0
outputs["platforms_mass"] = platforms_mass + crane_mass
# --------------------------------------------------------------------
class TransformerMass(om.ExplicitComponent):
"""
Compute transformer mass in the form of :math:`mass = k*power + b`.
Value of :math:`k` was updated in 2015 to be 1915.
Value of :math:`b` was updated in 2015 to be 1910.
Parameters
----------
machine_rating : float, [kW]
machine rating
transformer_mass_coeff : float
        k in the transformer mass equation: k*rated_power + b
transformer_mass_intercept : float
b in the transformer mass equation: k*rated_power + b
Returns
-------
transformer_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("machine_rating", 0.0, units="kW")
self.add_input("transformer_mass_coeff", 1915.0)
self.add_input("transformer_mass_intercept", 1910.0)
self.add_output("transformer_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
machine_rating = inputs["machine_rating"]
transformer_mass_coeff = inputs["transformer_mass_coeff"]
transformer_mass_intercept = inputs["transformer_mass_intercept"]
# calculate the transformer mass
outputs["transformer_mass"] = transformer_mass_coeff * machine_rating / 1000.0 + transformer_mass_intercept
# --------------------------------------------------------------------
class TowerMass(om.ExplicitComponent):
"""
Compute tower mass in the form of :math:`mass = k*H_{hub}^b`.
Value of :math:`k` was updated in 2015 to be 19.828.
Value of :math:`b` was updated in 2015 to be 2.0282.
Parameters
----------
hub_height : float, [m]
hub height of wind turbine above ground / sea level
tower_mass_coeff : float
        k in the tower mass equation: k*hub_height^b
tower_mass_exp : float
b in the tower mass equation: k*hub_height^b
Returns
-------
tower_mass : float, [kg]
component mass
"""
def setup(self):
self.add_input("hub_height", 0.0, units="m")
self.add_input("tower_mass_coeff", 19.828)
self.add_input("tower_mass_exp", 2.0282)
self.add_output("tower_mass", 0.0, units="kg")
def compute(self, inputs, outputs):
hub_height = inputs["hub_height"]
tower_mass_coeff = inputs["tower_mass_coeff"]
tower_mass_exp = inputs["tower_mass_exp"]
# calculate the tower mass
outputs["tower_mass"] = tower_mass_coeff * hub_height ** tower_mass_exp
# Turbine mass adder
class TurbineMassAdder(om.ExplicitComponent):
"""
    Aggregates all component masses into category labels of hub system, rotor, nacelle, and tower.
Parameters
----------
blade_mass : float, [kg]
component mass
hub_mass : float, [kg]
component mass
pitch_system_mass : float, [kg]
component mass
spinner_mass : float, [kg]
component mass
lss_mass : float, [kg]
component mass
main_bearing_mass : float, [kg]
component mass
gearbox_mass : float, [kg]
component mass
hss_mass : float, [kg]
component mass
brake_mass : float, [kg]
component mass
generator_mass : float, [kg]
component mass
bedplate_mass : float, [kg]
component mass
yaw_mass : float, [kg]
component mass
hvac_mass : float, [kg]
component mass
cover_mass : float, [kg]
component mass
platforms_mass : float, [kg]
component mass
transformer_mass : float, [kg]
component mass
tower_mass : float, [kg]
component mass
blade_number : float
number of rotor blades
main_bearing_number : float
number of main bearings
Returns
-------
hub_system_mass : float, [kg]
hub system mass
rotor_mass : float, [kg]
        rotor mass
nacelle_mass : float, [kg]
nacelle mass
turbine_mass : float, [kg]
turbine mass
"""
def setup(self):
# rotor
self.add_input("blade_mass", 0.0, units="kg")
self.add_input("hub_mass", 0.0, units="kg")
self.add_input("pitch_system_mass", 0.0, units="kg")
self.add_input("spinner_mass", 0.0, units="kg")
# nacelle
self.add_input("lss_mass", 0.0, units="kg")
self.add_input("main_bearing_mass", 0.0, units="kg")
self.add_input("gearbox_mass", 0.0, units="kg")
self.add_input("hss_mass", 0.0, units="kg")
self.add_input("brake_mass", 0.0, units="kg")
self.add_input("generator_mass", 0.0, units="kg")
self.add_input("bedplate_mass", 0.0, units="kg")
self.add_input("yaw_mass", 0.0, units="kg")
self.add_input("hvac_mass", 0.0, units="kg")
self.add_input("cover_mass", 0.0, units="kg")
self.add_input("platforms_mass", 0.0, units="kg")
self.add_input("transformer_mass", 0.0, units="kg")
# tower
self.add_input("tower_mass", 0.0, units="kg")
self.add_discrete_input("blade_number", 3)
self.add_discrete_input("main_bearing_number", 2)
self.add_output("hub_system_mass", 0.0, units="kg")
self.add_output("rotor_mass", 0.0, units="kg")
self.add_output("nacelle_mass", 0.0, units="kg")
self.add_output("turbine_mass", 0.0, units="kg")
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
blade_mass = inputs["blade_mass"]
hub_mass = inputs["hub_mass"]
pitch_system_mass = inputs["pitch_system_mass"]
spinner_mass = inputs["spinner_mass"]
lss_mass = inputs["lss_mass"]
main_bearing_mass = inputs["main_bearing_mass"]
gearbox_mass = inputs["gearbox_mass"]
hss_mass = inputs["hss_mass"]
brake_mass = inputs["brake_mass"]
generator_mass = inputs["generator_mass"]
bedplate_mass = inputs["bedplate_mass"]
yaw_mass = inputs["yaw_mass"]
hvac_mass = inputs["hvac_mass"]
cover_mass = inputs["cover_mass"]
platforms_mass = inputs["platforms_mass"]
transformer_mass = inputs["transformer_mass"]
tower_mass = inputs["tower_mass"]
blade_number = discrete_inputs["blade_number"]
bearing_number = discrete_inputs["main_bearing_number"]
outputs["hub_system_mass"] = hub_mass + pitch_system_mass + spinner_mass
outputs["rotor_mass"] = blade_mass * blade_number + outputs["hub_system_mass"]
outputs["nacelle_mass"] = (
lss_mass
+ bearing_number * main_bearing_mass
+ gearbox_mass
+ hss_mass
+ brake_mass
+ generator_mass
+ bedplate_mass
+ yaw_mass
+ hvac_mass
+ cover_mass
+ platforms_mass
+ transformer_mass
)
outputs["turbine_mass"] = outputs["rotor_mass"] + outputs["nacelle_mass"] + tower_mass
# --------------------------------------------------------------------
class nrel_csm_mass_2015(om.Group):
def setup(self):
self.add_subsystem("blade", BladeMass(), promotes=["*"])
self.add_subsystem("hub", HubMass(), promotes=["*"])
self.add_subsystem("pitch", PitchSystemMass(), promotes=["*"])
self.add_subsystem("spinner", SpinnerMass(), promotes=["*"])
self.add_subsystem("lss", LowSpeedShaftMass(), promotes=["*"])
self.add_subsystem("bearing", BearingMass(), promotes=["*"])
self.add_subsystem("torque", RotorTorque(), promotes=["*"])
self.add_subsystem("gearbox", GearboxMass(), promotes=["*"])
self.add_subsystem("hss", HighSpeedShaftMass(), promotes=["*"])
self.add_subsystem("brake", BrakeMass(), promotes=["*"])
self.add_subsystem("generator", GeneratorMass(), promotes=["*"])
self.add_subsystem("bedplate", BedplateMass(), promotes=["*"])
self.add_subsystem("yaw", YawSystemMass(), promotes=["*"])
self.add_subsystem("hvac", HydraulicCoolingMass(), promotes=["*"])
self.add_subsystem("cover", NacelleCoverMass(), promotes=["*"])
self.add_subsystem("platforms", PlatformsMainframeMass(), promotes=["*"])
self.add_subsystem("transformer", TransformerMass(), promotes=["*"])
self.add_subsystem("tower", TowerMass(), promotes=["*"])
self.add_subsystem("turbine", TurbineMassAdder(), promotes=["*"])
class nrel_csm_2015(om.Group):
def setup(self):
self.add_subsystem("nrel_csm_mass", nrel_csm_mass_2015(), promotes=["*"])
self.add_subsystem("turbine_costs", Turbine_CostsSE_2015(verbosity=False), promotes=["*"])
|
the-stack_0_18993 | # -*- coding: utf-8 -*-
#
# ramstk.models.fmea.view.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Failure Mode and Effects Analysis (FMEA) Package View Model."""
# Standard Library Imports
from typing import Any, Dict
# Third Party Imports
from pubsub import pub
from treelib import Tree
# RAMSTK Package Imports
from ramstk.models import RAMSTKBaseView
class RAMSTKFMEAView(RAMSTKBaseView):
"""Contain the attributes and methods of the FMEA view.
    This class manages the FMEA data from the RAMSTKMode, RAMSTKMechanism,
    RAMSTKCause, RAMSTKControl, and RAMSTKAction table models.
"""
# Define private dictionary class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_root = 0
_tag = "fmea"
# Define public dictionary class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self, **kwargs: Dict[Any, Any]) -> None:
"""Initialize a FMEA view model instance."""
super().__init__(**kwargs)
# Initialize private dictionary attributes.
self._dic_load_functions = {
"mode": self._do_load_modes,
"mechanism": self._do_load_mechanisms,
"cause": self._do_load_causes,
"control": self._do_load_controls,
"action": self._do_load_actions,
}
self._dic_trees = {
"mode": Tree(),
"mechanism": Tree(),
"cause": Tree(),
"control": Tree(),
"action": Tree(),
}
# Initialize private list attributes.
self._lst_modules = [
"mode",
"mechanism",
"cause",
"control",
"action",
]
# Initialize private scalar attributes.
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
# Subscribe to PyPubSub messages.
pub.subscribe(super().on_insert, "succeed_insert_mode")
pub.subscribe(super().on_insert, "succeed_insert_mechanism")
pub.subscribe(super().on_insert, "succeed_insert_cause")
pub.subscribe(super().on_insert, "succeed_insert_control")
pub.subscribe(super().on_insert, "succeed_insert_action")
pub.subscribe(super().do_set_tree, "succeed_retrieve_modes")
pub.subscribe(super().do_set_tree, "succeed_retrieve_mechanisms")
pub.subscribe(super().do_set_tree, "succeed_retrieve_causes")
pub.subscribe(super().do_set_tree, "succeed_retrieve_controls")
pub.subscribe(super().do_set_tree, "succeed_retrieve_actions")
pub.subscribe(super().do_set_tree, "succeed_delete_mode")
pub.subscribe(super().do_set_tree, "succeed_delete_mechanism")
pub.subscribe(super().do_set_tree, "succeed_delete_cause")
pub.subscribe(super().do_set_tree, "succeed_delete_control")
pub.subscribe(super().do_set_tree, "succeed_delete_action")
def _do_load_modes(self) -> None:
"""Load the failure modes into the tree.
:return: None
:rtype: None
"""
for _node in self._dic_trees["mode"].all_nodes()[1:]:
_mode = _node.data["mode"]
_node_id = "{}".format(_mode.mode_id)
self.tree.create_node(
tag="mode",
identifier=_node_id,
parent=self._root,
data={self._tag: _mode},
)
if self._dic_trees["mechanism"].depth() > 0:
self._dic_load_functions["mechanism"]( # type: ignore
_mode.mode_id,
)
def _do_load_mechanisms(self, mode_id: int) -> None:
"""Load the failure mechanisms into the tree.
:param mode_id: the ID of the parent failure mode.
:return: None
:rtype: None
"""
for _node in self._dic_trees["mechanism"].all_nodes()[1:]:
_mechanism = _node.data["mechanism"]
_node_id = "{}.{}".format(mode_id, _mechanism.mechanism_id)
if _mechanism.mode_id == mode_id:
self.tree.create_node(
tag="mechanism",
identifier=_node_id,
parent="{}".format(mode_id),
data={self._tag: _mechanism},
)
if self._dic_trees["cause"].depth() > 0:
self._dic_load_functions["cause"]( # type: ignore
_mechanism.mechanism_id,
_node_id,
)
def _do_load_causes(self, mechanism_id: int, parent_id: str) -> None:
"""Load the failure causes into the tree for the passed mechanism ID.
        :param mechanism_id: the ID of the parent failure mechanism.
:param parent_id: the parent node ID.
:return: None
:rtype: None
"""
for _node in self._dic_trees["cause"].all_nodes()[1:]:
_cause = _node.data["cause"]
_node_id = "{}.{}".format(parent_id, _cause.cause_id)
if _cause.mechanism_id == mechanism_id:
self.tree.create_node(
tag="cause",
identifier=_node_id,
parent=parent_id,
data={self._tag: _cause},
)
if self._dic_trees["control"].depth() > 0:
self._dic_load_functions["control"]( # type: ignore
_cause.cause_id,
_node_id,
)
if self._dic_trees["action"].depth() > 0:
self._dic_load_functions["action"]( # type: ignore
_cause.cause_id,
_node_id,
)
def _do_load_controls(self, cause_id: int, parent_id: str) -> None:
"""Load the FNEA controls into the tree.
:param cause_id: the ID of the parent failure cause.
:param parent_id: the parent node ID.
:return: None
:rtype: None
"""
for _node in self._dic_trees["control"].all_nodes()[1:]:
_control = _node.data["control"]
_node_id = "{}.{}c".format(parent_id, _control.control_id)
if _control.cause_id == cause_id:
self.tree.create_node(
tag="control",
identifier=_node_id,
parent=parent_id,
data={self._tag: _control},
)
def _do_load_actions(self, cause_id: int, parent_id: str) -> None:
"""Load the FMEA actions into the tree.
:param cause_id: the ID of the parent failure cause.
:param parent_id: the parent node ID.
:return: None
:rtype: None
"""
for _node in self._dic_trees["action"].all_nodes()[1:]:
_action = _node.data["action"]
_node_id = "{}.{}a".format(parent_id, _action.action_id)
if _action.cause_id == cause_id:
self.tree.create_node(
tag="action",
identifier=_node_id,
parent=parent_id,
data={self._tag: _action},
)
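# Illustrative note (not part of the original module): the composite node IDs
# built by the loaders above follow the pattern "<mode>.<mechanism>.<cause>",
# with a trailing "c" or "a" appended for controls and actions respectively;
# e.g. mode 4, mechanism 1, cause 3, control 2 yields the tree node "4.1.3.2c".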
|
the-stack_0_18994 | import locale
import shlex
from typing import List, Dict, Any, Union, Tuple
import yaml
from func_timeout import func_timeout, FunctionTimedOut
from sparclur._parser import VALID, VALID_WARNINGS, REJECTED, REJECTED_AMBIG, RENDER, TRACER, TEXT, TIMED_OUT
from sparclur._hybrid import Hybrid
from sparclur._reforge import Reforger
from sparclur._renderer import _SUCCESSFUL_RENDER_MESSAGE as SUCCESS
from sparclur._renderer import _SUCCESS_WITH_WARNINGS as SUCCESS_WITH_WARNINGS
from sparclur._renderer import _ocr_text
from sparclur._tracer import Tracer
from sparclur.utils import fix_splits, hash_file
import os
import sys
import re
import subprocess
from subprocess import TimeoutExpired, DEVNULL
import tempfile
import time
import fitz
from PIL import Image
from PIL.PngImagePlugin import PngImageFile
from sparclur.utils._config import _get_config_param, _load_config
class MuPDF(Tracer, Hybrid, Reforger):
"""MuPDF parser"""
def __init__(self, doc: Union[str, bytes],
skip_check: Union[bool, None] = None,
hash_exclude: Union[str, List[str], None] = None,
page_hashes: Union[int, Tuple[Any], None] = None,
validate_hash: bool = False,
parse_streams: Union[bool, None] = None,
binary_path: Union[str, None] = None,
temp_folders_dir: Union[str, None] = None,
dpi: Union[int, None] = None,
cache_renders: Union[bool, None] = None,
timeout: Union[int, None] = None,
ocr: Union[bool, None] = None
):
"""
Parameters
----------
parse_streams : bool
Indicates whether mutool clean should be called with -s or not. -s parses into the content streams of the
PDF.
binary_path : str
If the mutool binary is not in the system PATH, add the path to the binary here. Can also be used to trace
specific versions of the binary.
"""
config = _load_config()
skip_check = _get_config_param(MuPDF, config, 'skip_check', skip_check, False)
hash_exclude = _get_config_param(MuPDF, config, 'hash_exclude', hash_exclude, None)
parse_streams = _get_config_param(MuPDF, config, 'parse_streams', parse_streams, True)
binary_path = _get_config_param(MuPDF, config, 'binary_path', binary_path, None)
temp_folders_dir = _get_config_param(MuPDF, config, 'temp_folders_dir', temp_folders_dir, None)
dpi = _get_config_param(MuPDF, config, 'dpi', dpi, 200)
cache_renders = _get_config_param(MuPDF, config, 'cache_renders', cache_renders, False)
timeout = _get_config_param(MuPDF, config, 'timeout', timeout, None)
ocr = _get_config_param(MuPDF, config, 'ocr', ocr, False)
super().__init__(doc=doc,
temp_folders_dir=temp_folders_dir,
skip_check=skip_check,
hash_exclude=hash_exclude,
page_hashes=page_hashes,
validate_hash=validate_hash,
dpi=dpi,
cache_renders=cache_renders,
timeout=timeout,
ocr=ocr)
self._parse_streams = parse_streams
self._cmd_path = 'mutool clean' if binary_path is None else binary_path.strip() + ' clean'
self._trace_exit_code = None
def _check_for_renderer(self) -> bool:
if self._can_render is None:
self._can_render = 'fitz' in sys.modules.keys()
return self._can_render
@property
def validate_renderer(self):
if RENDER in self._validity:
return self._validity[RENDER]
else:
validity_results = dict()
if len(self._logs) == 0:
if self._validate_hash:
_ = self.get_renders(self._parse_page_hashes)
else:
_ = self.get_renders()
results = [(page, value['result']) for (page, value) in self._logs.items()]
not_successful = [result for (_, result) in results if result != SUCCESS]
if self._file_timed_out[RENDER]:
validity_results['valid'] = False
validity_results['status'] = TIMED_OUT
validity_results['info'] = 'Timed Out: %i' % self._timeout
elif len(results) == 0:
validity_results['valid'] = False
validity_results['status'] = REJECTED
validity_results['info'] = 'No info returned'
elif len(not_successful) == 0:
validity_results['valid'] = True
validity_results['status'] = VALID
elif len([result for result in not_successful if result != SUCCESS_WITH_WARNINGS]) == 0:
validity_results['valid'] = True
validity_results['status'] = VALID_WARNINGS
else:
validity_results['valid'] = False
validity_results['status'] = REJECTED
validity_results['info'] = ';'.join(
['%i: %s' % (page, result) for (page, result) in results if result != SUCCESS and result != SUCCESS_WITH_WARNINGS])
self._validity[RENDER] = validity_results
return validity_results
# @staticmethod
# def get_name():
# return 'MuDraw'
def _get_num_pages(self):
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
else:
doc_path = self._doc
try:
doc = fitz.open(doc_path)
self._num_pages = doc.pageCount
except Exception as e:
print(e)
self._num_pages = 0
finally:
try:
doc.close()
except:
pass
def _mudraw(self, page, mat):
pix = page.get_pixmap(matrix=mat)
width = pix.width
height = pix.height
return Image.frombytes("RGB", [width, height], pix.samples)
def _render_page(self, page):
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
else:
doc_path = self._doc
start_time = time.perf_counter()
try:
mat = fitz.Matrix(self._dpi / 72, self._dpi / 72)
fitz.TOOLS.reset_mupdf_warnings()
doc = fitz.open(doc_path)
if self._timeout is None:
mu_pil: PngImageFile = self._mudraw(doc[page], mat)
else:
mu_pil: PngImageFile = func_timeout(
self._timeout,
self._mudraw,
kwargs={
'page': doc[page],
'mat': mat
}
)
doc.close()
if self._caching:
self._renders[page] = mu_pil
timing = time.perf_counter() - start_time
warnings = fitz.TOOLS.mupdf_warnings()
result = SUCCESS if warnings == '' else SUCCESS_WITH_WARNINGS
self._logs[page] = {'result': result, 'timing': timing}
self._file_timed_out[RENDER] = False
except FunctionTimedOut:
mu_pil: PngImageFile = None
self._logs[page] = {'result': 'Timed out', 'timing': self._timeout}
self._file_timed_out[RENDER] = True
except Exception as e:
mu_pil: PngImageFile = None
timing = time.perf_counter() - start_time
self._logs[page] = {'result': str(e), 'timing': timing}
self._file_timed_out[RENDER] = False
return mu_pil
def _render_doc(self, pages=None):
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
else:
doc_path = self._doc
start_time = time.perf_counter()
try:
mat = fitz.Matrix(self._dpi / 72, self._dpi / 72)
doc = fitz.open(doc_path)
num_pages = doc.pageCount
if num_pages == 0 and pages is not None:
num_pages = max(pages) + 1
if pages is None:
page_range = range(num_pages)
else:
page_range = [page for page in pages if -1 < page < num_pages]
if len(doc) == 0:
doc.close()
raise Exception('Document failed to load')
pils: Dict[int, PngImageFile] = dict()
for page in page_range:
fitz.TOOLS.reset_mupdf_warnings()
page_start = time.perf_counter()
try:
if self._timeout is None:
pils[page] = self._mudraw(doc[page], mat)
else:
pils[page] = func_timeout(
self._timeout,
self._mudraw,
kwargs={
'page': doc[page],
'mat': mat
}
)
timing = time.perf_counter() - page_start
warnings = fitz.TOOLS.mupdf_warnings()
result = SUCCESS if warnings == '' else SUCCESS_WITH_WARNINGS
self._logs[page] = {'result': result, 'timing': timing}
self._file_timed_out[RENDER] = False
except FunctionTimedOut:
self._logs[page] = {'result': 'Timed out', 'timing': self._timeout}
self._file_timed_out[RENDER] = True
except Exception as e:
self._logs[page] = {'result': str(e), 'timing': time.perf_counter() - page_start}
self._file_timed_out[RENDER] = False
doc.close()
if self._caching:
if pages is None:
self._full_doc_rendered = True
self._renders.update(pils)
# timing = time.perf_counter() - start_time
# num_pages = len(pils)
# for page in pils.keys():
# self._logs[page] = {'result': SUCCESS, 'timing': timing / num_pages}
except Exception as e:
pils: Dict[int, PngImageFile] = dict()
timing = time.perf_counter() - start_time
self._logs[0] = {'result': str(e), 'timing': timing}
self._file_timed_out[RENDER] = False
return pils
def _render_pages(self, pages: List[int]):
return self._render_doc(pages)
# class MuPDF(Tracer, TextCompare):
# """MuPDF tracer and renderer """
# def __init__(self, doc_path: str,
# parse_streams: bool = True,
# binary_path: str = None,
# temp_folders_dir: str = None
# ):
# """
# Parameters
# ----------
# doc_path : str
# Full path to the document to be traced.
# parse_streams : bool
# Indicates whether mutool clean should be called with -s or not. -s parses into the content streams of the
# PDF.
# binary_path : str
# If the mutool binary is not in the system PATH, add the path to the binary here. Can also be used to trace
# specific versions of the binary.
# temp_folders_dir : str
# Path to create the temporary directories used for temporary files.
# """
# super().__init__(doc_path=doc_path)
# self._parse_streams = parse_streams
# self._temp_folders_dir = temp_folders_dir
# self._cmd_path = 'mutool clean' if binary_path is None else binary_path
def _check_for_text_extraction(self) -> bool:
if self._ocr:
if self._can_extract is None:
if self._can_render is None:
_ = self._check_for_renderer()
self._can_extract = 'pytesseract' in sys.modules.keys() and self._can_render
else:
if self._can_extract is None:
self._can_extract = 'fitz' in sys.modules.keys()
return self._can_extract
@property
def validate_text(self) -> Dict[str, Any]:
if TEXT not in self._validity:
fitz.TOOLS.reset_mupdf_warnings()
validity_results = dict()
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
else:
doc_path = self._doc
try:
doc = fitz.open(doc_path)
for page in doc:
text = page.getText()
if not self._ocr and page.number not in self._text:
self._text[page.number] = text
if not self._ocr:
self._full_text_extracted = True
warnings = fitz.TOOLS.mupdf_warnings()
error = None
except Exception as e:
error = str(e)
warnings = None
finally:
try:
doc.close()
except:
pass
if error is not None:
validity_results['valid'] = False
validity_results['status'] = REJECTED
validity_results['info'] = error
else:
validity_results['valid'] = True
if warnings == '':
validity_results['status'] = VALID
else:
validity_results['status'] = VALID_WARNINGS
validity_results['info'] = warnings
self._validity[TEXT] = validity_results
return self._validity[TEXT]
def _check_for_tracer(self) -> bool:
if self._can_trace is None:
try:
subprocess.check_output(shlex.split("mutool -v"), shell=False)
mutool_present = True
except subprocess.CalledProcessError as e:
mutool_present = False
self._can_trace = mutool_present
return self._can_trace
def _check_for_reforger(self) -> bool:
if self._can_reforge is None:
self._can_reforge = self._check_for_tracer()
return self._can_reforge
def _reforge(self):
stream_flag = ' -s' if self._parse_streams else ''
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
else:
doc_path = self._doc
try:
out_path = os.path.join(temp_path, 'out.pdf')
sp = subprocess.Popen(shlex.split('mutool clean%s %s %s' % (stream_flag, doc_path, out_path)),
stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)
(_, err) = sp.communicate(timeout=self._timeout or 600)
with open(out_path, 'rb') as file_in:
raw = file_in.read()
self._reforged = raw
self._successfully_reforged = True
self._reforge_result = 'Successfully reforged'
decoder = locale.getpreferredencoding()
err = fix_splits(err.decode(decoder))
error_arr = [message for message in err.split('\n') if len(message) > 0]
except TimeoutExpired:
sp.kill()
(_, err) = sp.communicate()
decoder = locale.getpreferredencoding()
err = fix_splits(err.decode(decoder))
error_arr = [message for message in err.split('\n') if len(message) > 0]
error_arr.insert(0, 'Error: Subprocess timed out: %i' % (self._timeout or 600))
self._successfully_reforged = False
self._reforge_result = '[' + ', '.join(error_arr) + ']'
except Exception as e:
sp.kill()
decoder = locale.getpreferredencoding()
err = fix_splits(err.decode(decoder))
error_arr = str(e).split('\n')
error_arr.extend([message for message in err.split('\n') if len(message) > 0])
self._successfully_reforged = False
self._reforge_result = '[' + ', '.join(error_arr) + ']'
self._trace_exit_code = sp.returncode
self._messages = ['No warnings'] if len(error_arr) == 0 else error_arr
@property
def validate_tracer(self) -> Dict[str, Any]:
if TRACER not in self._validity:
validity_results = dict()
if self._messages is None:
self._parse_document()
if self._cleaned is None:
self._scrub_messages()
observed_messages = list(self._cleaned.keys())
if self._file_timed_out[TRACER]:
validity_results['valid'] = False
validity_results['status'] = TIMED_OUT
validity_results['info'] = 'Timed Out: %i' % self._timeout
elif self._trace_exit_code > 0:
validity_results['valid'] = False
validity_results['status'] = REJECTED
validity_results['info'] = 'Exit code: %i' % self._trace_exit_code
elif observed_messages == ['No warnings']:
validity_results['valid'] = True
validity_results['status'] = VALID
elif len([message for message in observed_messages if 'error' in message]) > 0:
validity_results['valid'] = False
validity_results['status'] = REJECTED
validity_results['info'] = 'Errors returned'
elif len([message for message in observed_messages if 'warning' in message]) == len(observed_messages):
validity_results['valid'] = True
validity_results['status'] = VALID_WARNINGS
validity_results['info'] = 'Warnings only'
else:
validity_results['valid'] = False
validity_results['status'] = REJECTED_AMBIG
validity_results['info'] = 'Unknown message type returned'
self._validity[TRACER] = validity_results
return self._validity[TRACER]
@staticmethod
def get_name():
return 'MuPDF'
@property
def streams_parsed(self):
return self._parse_streams
def _parse_document(self):
stream_flag = ' -s' if self._parse_streams else ''
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
else:
doc_path = self._doc
try:
out_path = os.path.join(temp_path, 'out.pdf')
sp = subprocess.Popen(shlex.split('mutool clean%s %s %s' % (stream_flag, doc_path, out_path)),
stderr=subprocess.PIPE, stdout=DEVNULL, shell=False)
(_, err) = sp.communicate(timeout=self._timeout or 600)
decoder = locale.getpreferredencoding()
err = fix_splits(err.decode(decoder))
error_arr = [message for message in err.split('\n') if len(message) > 0]
self._file_timed_out[TRACER] = False
except TimeoutExpired:
sp.kill()
(_, err) = sp.communicate()
decoder = locale.getpreferredencoding()
err = fix_splits(err.decode(decoder))
error_arr = [message for message in err.split('\n') if len(message) > 0]
error_arr.insert(0, 'Error: Subprocess timed out: %i' % (self._timeout or 600))
self._file_timed_out[TRACER] = True
except Exception as e:
sp.kill()
decoder = locale.getpreferredencoding()
err = fix_splits(err.decode(decoder))
error_arr = str(e).split('\n')
error_arr.extend([message for message in err.split('\n') if len(message) > 0])
self._file_timed_out[TRACER] = False
self._trace_exit_code = sp.returncode
self._messages = ['No warnings'] if len(error_arr) == 0 else error_arr
def _clean_message(self, err):
cleaned = re.sub(r'\([\d]+ [\d]+ R\)', '', err)
cleaned = re.sub(r'[\d]+ [\d]+ R', '', cleaned)
cleaned = re.sub(r"\'[^']+\'", '', cleaned)
cleaned = 'error: expected generation number' if cleaned.startswith(
'error: expected generation number ') else cleaned
cleaned = 'error: unknown colorspace' if cleaned.startswith('error: unknown colorspace: ') else cleaned
cleaned = re.sub(r'non-embedded font using identity encoding: [.]*',
'non-embedded font using identity encoding: <font>', cleaned)
cleaned = re.sub(r'\(gid [\d]+\)', '', cleaned)
cleaned = 'error: expected keyword' if cleaned.startswith('error: expected keyword ') else cleaned
cleaned = 'warning: unknown filter name' if cleaned.startswith('warning: unknown filter name ') else cleaned
cleaned = 'error: aes padding out of range' if cleaned.startswith(
'error: aes padding out of range:') else cleaned
cleaned = 'error: cannot authenticate password' if cleaned.startswith(
'error: cannot authenticate password:') else cleaned
cleaned = re.sub(r'\[\d+\] prec\(\d+\) sgnd\(\d+\) \[\d+\] prec\(\d+\) sgnd\(\d+\)', 'Out of Memory Error',
cleaned)
cleaned = 'warning: cannot load content stream part' if cleaned.startswith(
'warning: cannot load content stream part') else cleaned
cleaned = 'error: object out of range' if cleaned.startswith('error: object out of range') else cleaned
cleaned = 'warning: object out of range' if cleaned.startswith('warning: object out of range') else cleaned
cleaned = 'error: object id out of range' if cleaned.startswith('error: object id out of range') else cleaned
cleaned = re.sub(r"\'\'", '', cleaned)
cleaned = 'error: invalid reference to non-object-stream' if cleaned.startswith(
'error: invalid reference to non-object-stream:') else cleaned
cleaned = 'error: object offset out of range' if cleaned.startswith(
'error: object offset out of range:') else cleaned
cleaned = 'error: unexpected xref type' if cleaned.startswith('error: unexpected xref type:') else cleaned
cleaned = 'error: unknown keyword' if cleaned.startswith('error: unknown keyword:') else cleaned
cleaned = re.sub(r'warning: Encountered new definition for object \d+ - keeping the original one',
'warning: Encountered new definition for object - keeping the original one', cleaned)
cleaned = 'warning: bf_range limits out of range in cmap' if cleaned.startswith(
'warning: bf_range limits out of range in cmap') else cleaned
cleaned = re.sub(r'ignoring one to many mapping in cmap [.]*',
'ignoring one to many mapping in cmap', cleaned)
cleaned = re.sub(r'\(segment [\-]?\d+\)', '', cleaned)
cleaned = re.sub(r'\([\-]?\d+\)', '', cleaned)
cleaned = re.sub(r'\(\d+\/\d+\)', '', cleaned)
cleaned = 'warning: jbig2dec error: Invalid SYMWIDTH value' if cleaned.startswith(
'warning: jbig2dec error: Invalid SYMWIDTH value') else cleaned
cleaned = 'warning: jbig2dec error: No OOB signalling end of height class' if cleaned.startswith(
'warning: jbig2dec error: No OOB signalling end of height class') else cleaned
cleaned = 'warning: openjpeg error: Failed to decode tile' if cleaned.startswith(
'warning: openjpeg error: Failed to decode tile') else cleaned
cleaned = 'warning: openjpeg error: Invalid component index' if cleaned.startswith(
'warning: openjpeg error: Invalid component index') else cleaned
cleaned = 'warning: openjpeg error: Invalid tile part index for tile number' if cleaned.startswith(
'warning: openjpeg error: Invalid tile part index for tile number') else cleaned
cleaned = re.sub(
r'warning: openjpeg error: Invalid values for comp = \d+ : prec=\d+ (should be between 1 and 38 according to the JPEG2000 norm. OpenJpeg only supports up to 31)',
'warning: openjpeg error: Invalid values for comp = x : prec=y (should be between 1 and 38 according to the JPEG2000 norm. OpenJpeg only supports up to 31)',
cleaned)
cleaned = 'warning: openjpeg error: read: segment too long with max for codeblock' if cleaned.startswith(
'warning: openjpeg error: read: segment too long with max for codeblock') else cleaned
cleaned = re.sub(r'comp\[\d+\]', 'comp', cleaned)
cleaned = re.sub(r'ignoring CMap range \(\d+-\d+\)', 'ignoring CMap range (a-b)', cleaned)
cleaned = re.sub(r'FT_New_Memory_Face\([^)]+\)', 'FT_New_Memory_Face(x)', cleaned)
cleaned = re.sub(r'FT_Load_Glyph\([^)]+\)', 'FT_Load_Glyph(x)', cleaned)
cleaned = re.sub(r'FT_Set_Char_Size\([^)]+\)', 'FT_Set_Char_Size(x)', cleaned)
cleaned = re.sub(r'Subprocess timed out: [\d]+', 'Subprocess timed out: <t>', cleaned)
cleaned = re.sub(r'error: cannot find page [\d]+ in page tree',
'error: cannot find page <p> in page tree', cleaned)
cleaned = re.sub(r'unknown cid collection: [.]+', 'unknown cid collection', cleaned)
cleaned = re.sub(r'content stream is not a stream \([^)]*\)',
'content stream is not a stream (<x>)', cleaned)
cleaned = re.sub(r'expected endobj or stream keyword \([^)]*\)',
'expected endobj or stream keyword (<x>)', cleaned)
cleaned = re.sub(r'ignoring object with invalid object number \([^)]*\)',
'ignoring object with invalid object number (<x>)', cleaned)
cleaned = re.sub(r'invalid indirect reference \([^)]*\)',
'invalid indirect reference (<x>)', cleaned)
cleaned: str = re.sub(r'\[\d+\] prec\(\d+\) sgnd\(\d+\) \[\d+\] prec\(\d+\) sgnd\(\d+\)', 'Out of Memory Error',
cleaned)
return cleaned
def _mupdf_scrub(self, messages):
scrubbed_messages = [self._clean_message(err) for err in messages]
error_dict: Dict[str, int] = dict()
for (index, error) in enumerate(scrubbed_messages):
if '... repeated ' in error:
repeated = re.sub(r'[^\d]', '', error)
error_dict[self._messages[index - 1]] = error_dict.get(error, 0) + int(repeated)
else:
error_dict[error] = error_dict.get(error, 0) + 1
return error_dict
def _scrub_messages(self):
if self._messages is None:
self._parse_document()
error_dict = self._mupdf_scrub(self._messages)
self._cleaned = error_dict
def _extract_page(self, page: int):
if self._ocr:
self._text[page] = _ocr_text(self.get_renders(page=page))
else:
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
else:
doc_path = self._doc
doc = fitz.open(doc_path)
text = doc[page].get_text()
doc.close()
self._text[page] = text
def _extract_doc(self):
if self._ocr:
for (page, pil) in self.get_renders().items():
self._text[page] = _ocr_text(pil)
else:
with tempfile.TemporaryDirectory(dir=self._temp_folders_dir) as temp_path:
if isinstance(self._doc, bytes):
file_hash = hash_file(self._doc)
doc_path = os.path.join(temp_path, file_hash)
with open(doc_path, 'wb') as doc_out:
doc_out.write(self._doc)
else:
doc_path = self._doc
doc = fitz.open(doc_path)
for page in doc:
self._text[page.number] = page.get_text()
doc.close()
self._full_text_extracted = True
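# Illustrative usage sketch (not part of the original module).  It assumes the
# mutool binary is on the PATH and PyMuPDF (fitz) is installed; the PDF path is
# a placeholder.  Only attributes and methods defined or referenced in the
# class above are used here.
if __name__ == "__main__":
    parser = MuPDF("/path/to/some.pdf", dpi=100, cache_renders=True, timeout=120)
    print("Tracer verdict:  ", parser.validate_tracer)
    print("Renderer verdict:", parser.validate_renderer)
    print("Text verdict:    ", parser.validate_text)
    renders = parser.get_renders()  # dict mapping page index -> PIL image
    print("Rendered %d page(s)" % len(renders))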
|
the-stack_0_18997 | """
baggingclassifier.py
Builds a bagging classifier
~66 pct accuracy, 1m57.020s execution time with n_estimators=10
"""
from classifier import Classifier
from matrixdatabase import MatrixDatabase
from sklearn.ensemble import BaggingClassifier as BC
class BaggingClassifier(Classifier):
def __init__(self, matrixdatabase):
self._matrix_database = matrixdatabase
self._has_fit = False
self._bc = BC(n_estimators=10)
def learn(self, ingredients, cuisine):
return
def classify(self, ingredients):
if not self._has_fit:
matrix, classes = self._matrix_database.make_train_matrix()
self._bc = self._bc.fit(matrix, classes)
print('Fitting complete...')
self._has_fit = True
output = self._bc.predict(self._matrix_database.make_row_from_recipe(ingredients))
return output[0]
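# Illustrative, self-contained sketch (not part of the original file): it shows
# the underlying scikit-learn fit/predict calls that classify() above delegates
# to, using random stand-in data instead of the real recipe/ingredient matrix.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 20)            # stand-in for the ingredient feature matrix
    y = rng.randint(0, 3, size=100)  # stand-in for cuisine class labels
    demo = BC(n_estimators=10).fit(X, y)
    print(demo.predict(X[:1]))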
|
the-stack_0_18998 | """Support for playing sounds."""
import os
import re
import subprocess
import uuid
from typing import Any, Dict, List, Optional, Type
from rhasspy.actor import RhasspyActor
from rhasspy.mqtt import MqttPublish
# -----------------------------------------------------------------------------
# Events
# -----------------------------------------------------------------------------
class PlayWavFile:
"""Play a WAV file."""
def __init__(self, wav_path: str, receiver: Optional[RhasspyActor] = None) -> None:
self.wav_path = wav_path
self.receiver = receiver
class PlayWavData:
"""Play a WAV buffer."""
def __init__(
self, wav_data: bytes, receiver: Optional[RhasspyActor] = None
) -> None:
self.wav_data = wav_data
self.receiver = receiver
class WavPlayed:
"""Response to PlayWavFile or PlayWavData."""
pass
# -----------------------------------------------------------------------------
def get_sound_class(system: str) -> Type[RhasspyActor]:
"""Get class type for profile audio player."""
assert system in ["aplay", "hermes", "dummy"], "Unknown sound system: %s" % system
if system == "aplay":
return APlayAudioPlayer
if system == "hermes":
return HermesAudioPlayer
return DummyAudioPlayer
# -----------------------------------------------------------------------------
# Dummy audio player
# -----------------------------------------------------------------------------
class DummyAudioPlayer(RhasspyActor):
"""Does not play sound. Responds immediately with WavPlayed."""
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, (PlayWavFile, PlayWavData)):
self.send(message.receiver or sender, WavPlayed())
@classmethod
def get_speakers(cls) -> Dict[Any, Any]:
"""Get list of possible audio output devices."""
return {}
# -----------------------------------------------------------------------------
# APlay based audio player
# -----------------------------------------------------------------------------
class APlayAudioPlayer(RhasspyActor):
"""Plays WAV files using aplay command."""
def __init__(self):
super().__init__()
self.device: Optional[str] = None
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.device = self.config.get("device") or self.profile.get(
"sounds.aplay.device"
)
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, PlayWavFile):
self.play_file(message.wav_path)
self.send(message.receiver or sender, WavPlayed())
elif isinstance(message, PlayWavData):
self.play_data(message.wav_data)
self.send(message.receiver or sender, WavPlayed())
# -------------------------------------------------------------------------
def play_file(self, path: str) -> None:
"""Play a WAV file using aplay."""
if not os.path.exists(path):
self._logger.warning("Path does not exist: %s", path)
return
aplay_cmd = ["aplay", "-q"]
if self.device is not None:
aplay_cmd.extend(["-D", str(self.device)])
# Play file
aplay_cmd.append(path)
self._logger.debug(aplay_cmd)
subprocess.run(aplay_cmd)
def play_data(self, wav_data: bytes) -> None:
"""Play a WAV buffer using aplay."""
aplay_cmd = ["aplay", "-q"]
if self.device is not None:
aplay_cmd.extend(["-D", str(self.device)])
self._logger.debug(aplay_cmd)
# Play data
subprocess.run(aplay_cmd, input=wav_data)
# -------------------------------------------------------------------------
@classmethod
def get_speakers(cls) -> Dict[Any, Any]:
"""Get list of possible audio output devices."""
output = subprocess.check_output(["aplay", "-L"]).decode().splitlines()
speakers: Dict[Any, Any] = {}
name, description = None, None
        # Parse output of aplay -L
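        # Illustrative `aplay -L` output the loop below expects: device names start
        # at column 0 and their descriptions follow on indented lines (the devices
        # shown here are made-up examples):
        #
        #   default
        #       Default ALSA Output (currently PulseAudio Sound Server)
        #   sysdefault:CARD=PCH
        #       HDA Intel PCH, ALC892 Analog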
first_speaker = True
for line in output:
line = line.rstrip()
if re.match(r"^\s", line):
description = line.strip()
if first_speaker:
description = description + "*"
first_speaker = False
else:
if name is not None:
speakers[name] = description
                name = line.strip()

        # Store the final device; the loop above only saves a device once the next
        # device name line is reached, so the last entry would otherwise be dropped.
        if name is not None:
            speakers[name] = description

        return speakers
# -----------------------------------------------------------------------------
# MQTT audio player for Snips.AI Hermes Protocol
# https://docs.snips.ai/reference/hermes
# -----------------------------------------------------------------------------
class HermesAudioPlayer(RhasspyActor):
"""Sends audio data over MQTT via Hermes (Snips) protocol."""
def __init__(self):
super().__init__()
self.mqtt: Optional[RhasspyActor] = None
self.site_ids: List[str] = []
def to_started(self, from_state: str) -> None:
"""Transition to started state."""
self.mqtt = self.config["mqtt"]
self.site_ids = self.profile.get("mqtt.site_id", "default").split(",")
def in_started(self, message: Any, sender: RhasspyActor) -> None:
"""Handle messages in started state."""
if isinstance(message, PlayWavFile):
self.play_file(message.wav_path)
self.send(message.receiver or sender, WavPlayed())
elif isinstance(message, PlayWavData):
self.play_data(message.wav_data)
self.send(message.receiver or sender, WavPlayed())
# -------------------------------------------------------------------------
def play_file(self, path: str) -> None:
"""Send WAV file over MQTT."""
if not os.path.exists(path):
self._logger.warning("Path does not exist: %s", path)
return
with open(path, "rb") as wav_file:
self.play_data(wav_file.read())
def play_data(self, wav_data: bytes) -> None:
"""Send WAV buffer over MQTT."""
request_id = str(uuid.uuid4())
# Send to all site ids
for site_id in self.site_ids:
topic = f"hermes/audioServer/{site_id}/playBytes/{request_id}"
self.send(self.mqtt, MqttPublish(topic, wav_data))
# -------------------------------------------------------------------------
@classmethod
def get_speakers(cls) -> Dict[Any, Any]:
"""Get list of possible audio output devices."""
return {}
|
the-stack_0_18999 | # -*- coding: UTF-8 -*-
# ------------------------(max to 80 columns)-----------------------------------
# author by : (student ID)
# created: 2019.11
# Description:
#     Preliminary study of WinForm programming ( Window )
# ------------------------(max to 80 columns)-----------------------------------
import tkinter as tk
from tkinter import ttk
import tkinter.messagebox
# create root window
top_win = tk.Tk()
# naming root window
top_win.title('Hello World Window')
# resize root window
win_size_pos = '800x600'
#win_size_pos = '360x60'
top_win.geometry(win_size_pos)
#------------------------------
# balablabla
title = 'My Label Frame'
lfm_step1 = ttk.Labelframe(
top_win, text=title, width=240, height=500)
#lfm_step1.place(x=20, y=20)
lfm_step1.grid(row=0, column=0, padx=10, pady=10)
lbl_test0 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test0.grid(row=0, column=0, pady=5, padx=5)
lbl_test1 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test1.grid(row=1, column=0, pady=5, padx=5)
lbl_test2 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test2.grid(row=2, column=0, pady=5, padx=5)
lbl_test3 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test3.grid(row=3, column=0, pady=5, padx=5)
lbl_test4 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test4.grid(row=4, column=0, pady=5, padx=5)
lbl_test0 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test0.grid(row=0, column=1, pady=5, padx=5)
lbl_test1 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test1.grid(row=1, column=1, pady=5, padx=5)
lbl_test2 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test2.grid(row=2, column=1, pady=5, padx=5)
lbl_test3 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test3.grid(row=3, column=1, pady=5, padx=5)
lbl_test4 = tk.Label(lfm_step1, text='Oh! My god', bg='blue', fg='white')
lbl_test4.grid(row=4, column=1, pady=5, padx=5)
title = 'My Label Frame generated by a for loop'
lfm_step2 = ttk.Labelframe(
top_win, text=title, width=240, height=500)
#lfm_step2.place(x=400, y=20)
lfm_step2.grid(row=0, column=1, padx=10, pady=10)
for r in range(5):
for c in range(3):
ent_row = tk.Entry(lfm_step2, show=None, width=8, bg='red', fg='white')
#ent_row.place(x=20, y=20)
ent_row.grid(row=r, column=c, padx=5, pady=5)
#------------------------------
# show window and get into event loop
top_win.mainloop()
|
the-stack_0_19001 | from __future__ import absolute_import, division, print_function
import sys
import re
import subprocess
from os.path import join, basename
from conda_build.conda_interface import memoized
from conda_build.conda_interface import untracked
from conda_build.conda_interface import linked_data
from conda_build import post
from conda_build.os_utils.macho import otool
from conda_build.os_utils.pyldd import inspect_linkages
LDD_RE = re.compile(r'\s*(.*?)\s*=>\s*(.*?)\s*\(.*\)')
LDD_NOT_FOUND_RE = re.compile(r'\s*(.*?)\s*=>\s*not found')
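# Illustrative ldd lines the two patterns above are meant to match (library
# names, paths and load addresses below are made up):
#
#   "\tlibz.so.1 => /lib/x86_64-linux-gnu/libz.so.1 (0x00007f0a2c1d2000)"  -> LDD_RE
#   "\tlibmissing.so.2 => not found"                                       -> LDD_NOT_FOUND_RE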
def ldd(path):
"thin wrapper around ldd"
lines = subprocess.check_output(['ldd', path]).decode('utf-8').splitlines()
res = []
for line in lines:
if '=>' not in line:
continue
assert line[0] == '\t', (path, line)
m = LDD_RE.match(line)
if m:
res.append(m.groups())
continue
m = LDD_NOT_FOUND_RE.match(line)
if m:
res.append((m.group(1), 'not found'))
continue
if 'ld-linux' in line:
continue
raise RuntimeError("Unexpected output from ldd: %s" % line)
return res
@memoized
def get_linkages(obj_files, prefix, sysroot):
res = {}
for f in obj_files:
path = join(prefix, f)
# ldd quite often fails on foreign architectures.
ldd_failed = False
try:
if sys.platform.startswith('linux'):
res[f] = ldd(path)
elif sys.platform.startswith('darwin'):
links = otool(path)
res[f] = [(basename(l['name']), l['name']) for l in links]
        except Exception:
ldd_failed = True
finally:
res_py = inspect_linkages(path, sysroot=sysroot)
res_py = [(basename(lp), lp) for lp in res_py]
# print("set(res_py) {}".format(set(res_py)))
if ldd_failed:
res[f] = res_py
# else:
# print("set(res[f]) = {}".format(set(res[f])))
# if set(res[f]) != set(res_py):
# print("WARNING: pyldd disagrees with ldd/otool. This will not cause any")
# print("WARNING: problems for this build, but please file a bug at:")
# print("WARNING: https://github.com/conda/conda-build")
# print("WARNING: and (if possible) attach file {}".format(path))
# print("WARNING: ldd/tool gives {}, pyldd gives {}"
# .format(set(res[f]), set(res_py)))
return res
@memoized
def get_package_obj_files(dist, prefix):
data = linked_data(prefix).get(dist)
res = []
if data:
for f in data.get('files', []):
path = join(prefix, f)
if post.is_obj(path):
res.append(f)
return res
@memoized
def get_untracked_obj_files(prefix):
res = []
files = untracked(prefix)
for f in files:
path = join(prefix, f)
if post.is_obj(path):
res.append(f)
return res
|
the-stack_0_19003 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import json
import pytest
import sagemaker
import os
import warnings
from mock import (
Mock,
PropertyMock,
patch,
)
from sagemaker.debugger import DEBUGGER_FLAG, ProfilerConfig
from sagemaker.estimator import Estimator
from sagemaker.tensorflow import TensorFlow
from sagemaker.inputs import TrainingInput, TransformInput, CreateModelInput
from sagemaker.model import Model
from sagemaker.processing import (
Processor,
ProcessingInput,
ProcessingOutput,
ScriptProcessor,
)
from sagemaker.tuner import (
HyperparameterTuner,
ContinuousParameter,
WarmStartConfig,
WarmStartTypes,
)
from sagemaker.network import NetworkConfig
from sagemaker.transformer import Transformer
from sagemaker.workflow.functions import Join
from sagemaker.workflow.pipeline import Pipeline, PipelineGraph
from sagemaker.workflow.properties import Properties, PropertyFile
from sagemaker.workflow.parameters import ParameterString, ParameterInteger, ParameterBoolean
from sagemaker.workflow.retry import (
StepRetryPolicy,
StepExceptionTypeEnum,
SageMakerJobStepRetryPolicy,
SageMakerJobExceptionTypeEnum,
)
from sagemaker.workflow.steps import (
ProcessingStep,
ConfigurableRetryStep,
StepTypeEnum,
TrainingStep,
TuningStep,
TransformStep,
CreateModelStep,
CacheConfig,
)
from sagemaker.pipeline import PipelineModel
from sagemaker.sparkml import SparkMLModel
from sagemaker.predictor import Predictor
from sagemaker.model import FrameworkModel
from tests.unit import DATA_DIR
from tests.unit.sagemaker.workflow.helpers import ordered
DUMMY_SCRIPT_PATH = os.path.join(DATA_DIR, "dummy_script.py")
REGION = "us-west-2"
BUCKET = "my-bucket"
IMAGE_URI = "fakeimage"
ROLE = "DummyRole"
MODEL_NAME = "gisele"
class CustomStep(ConfigurableRetryStep):
def __init__(self, name, display_name=None, description=None, retry_policies=None):
super(CustomStep, self).__init__(
name, StepTypeEnum.TRAINING, display_name, description, None, retry_policies
)
self._properties = Properties(name)
@property
def arguments(self):
return dict()
@property
def properties(self):
return self._properties
class DummyFrameworkModel(FrameworkModel):
def __init__(self, sagemaker_session, **kwargs):
super(DummyFrameworkModel, self).__init__(
"s3://bucket/model_1.tar.gz",
"mi-1",
ROLE,
os.path.join(DATA_DIR, "dummy_script.py"),
sagemaker_session=sagemaker_session,
**kwargs,
)
def create_predictor(self, endpoint_name):
return Predictor(endpoint_name, self.sagemaker_session)
@pytest.fixture
def boto_session():
role_mock = Mock()
type(role_mock).arn = PropertyMock(return_value=ROLE)
resource_mock = Mock()
resource_mock.Role.return_value = role_mock
session_mock = Mock(region_name=REGION)
session_mock.resource.return_value = resource_mock
return session_mock
@pytest.fixture
def client():
"""Mock client.
Considerations when appropriate:
* utilize botocore.stub.Stubber
* separate runtime client from client
"""
client_mock = Mock()
client_mock._client_config.user_agent = (
"Boto3/1.14.24 Python/3.8.5 Linux/5.4.0-42-generic Botocore/1.17.24 Resource"
)
return client_mock
@pytest.fixture
def sagemaker_session(boto_session, client):
return sagemaker.session.Session(
boto_session=boto_session,
sagemaker_client=client,
sagemaker_runtime_client=client,
default_bucket=BUCKET,
)
@pytest.fixture
def script_processor(sagemaker_session):
return ScriptProcessor(
role=ROLE,
image_uri="012345678901.dkr.ecr.us-west-2.amazonaws.com/my-custom-image-uri",
command=["python3"],
instance_type="ml.m4.xlarge",
instance_count=1,
volume_size_in_gb=100,
volume_kms_key="arn:aws:kms:us-west-2:012345678901:key/volume-kms-key",
output_kms_key="arn:aws:kms:us-west-2:012345678901:key/output-kms-key",
max_runtime_in_seconds=3600,
base_job_name="my_sklearn_processor",
env={"my_env_variable": "my_env_variable_value"},
tags=[{"Key": "my-tag", "Value": "my-tag-value"}],
network_config=NetworkConfig(
subnets=["my_subnet_id"],
security_group_ids=["my_security_group_id"],
enable_network_isolation=True,
encrypt_inter_container_traffic=True,
),
sagemaker_session=sagemaker_session,
)
def test_custom_step():
step = CustomStep(
name="MyStep", display_name="CustomStepDisplayName", description="CustomStepDescription"
)
assert step.to_request() == {
"Name": "MyStep",
"DisplayName": "CustomStepDisplayName",
"Description": "CustomStepDescription",
"Type": "Training",
"Arguments": dict(),
}
def test_custom_step_without_display_name():
step = CustomStep(name="MyStep", description="CustomStepDescription")
assert step.to_request() == {
"Name": "MyStep",
"Description": "CustomStepDescription",
"Type": "Training",
"Arguments": dict(),
}
def test_custom_step_without_description():
step = CustomStep(name="MyStep", display_name="CustomStepDisplayName")
assert step.to_request() == {
"Name": "MyStep",
"DisplayName": "CustomStepDisplayName",
"Type": "Training",
"Arguments": dict(),
}
def test_custom_step_with_retry_policy():
step = CustomStep(
name="MyStep",
retry_policies=[
StepRetryPolicy(
exception_types=[
StepExceptionTypeEnum.SERVICE_FAULT,
StepExceptionTypeEnum.THROTTLING,
],
expire_after_mins=1,
),
SageMakerJobStepRetryPolicy(
exception_types=[SageMakerJobExceptionTypeEnum.CAPACITY_ERROR],
max_attempts=3,
),
],
)
assert step.to_request() == {
"Name": "MyStep",
"Type": "Training",
"RetryPolicies": [
{
"ExceptionType": ["Step.SERVICE_FAULT", "Step.THROTTLING"],
"IntervalSeconds": 1,
"BackoffRate": 2.0,
"ExpireAfterMin": 1,
},
{
"ExceptionType": ["SageMaker.CAPACITY_ERROR"],
"IntervalSeconds": 1,
"BackoffRate": 2.0,
"MaxAttempts": 3,
},
],
"Arguments": dict(),
}
step.add_retry_policy(
SageMakerJobStepRetryPolicy(
exception_types=[SageMakerJobExceptionTypeEnum.INTERNAL_ERROR],
interval_seconds=5,
backoff_rate=2.0,
expire_after_mins=5,
)
)
assert step.to_request() == {
"Name": "MyStep",
"Type": "Training",
"RetryPolicies": [
{
"ExceptionType": ["Step.SERVICE_FAULT", "Step.THROTTLING"],
"IntervalSeconds": 1,
"BackoffRate": 2.0,
"ExpireAfterMin": 1,
},
{
"ExceptionType": ["SageMaker.CAPACITY_ERROR"],
"IntervalSeconds": 1,
"BackoffRate": 2.0,
"MaxAttempts": 3,
},
{
"ExceptionType": ["SageMaker.JOB_INTERNAL_ERROR"],
"IntervalSeconds": 5,
"BackoffRate": 2.0,
"ExpireAfterMin": 5,
},
],
"Arguments": dict(),
}
step = CustomStep(name="MyStep")
assert step.to_request() == {
"Name": "MyStep",
"Type": "Training",
"Arguments": dict(),
}
def test_training_step_base_estimator(sagemaker_session):
custom_step1 = CustomStep("TestStep")
custom_step2 = CustomStep("AnotherTestStep")
instance_type_parameter = ParameterString(name="InstanceType", default_value="c4.4xlarge")
instance_count_parameter = ParameterInteger(name="InstanceCount", default_value=1)
data_source_uri_parameter = ParameterString(
name="DataSourceS3Uri", default_value=f"s3://{BUCKET}/train_manifest"
)
training_epochs_parameter = ParameterInteger(name="TrainingEpochs", default_value=5)
training_batch_size_parameter = ParameterInteger(name="TrainingBatchSize", default_value=500)
use_spot_instances = ParameterBoolean(name="UseSpotInstances", default_value=False)
output_path = Join(on="/", values=["s3:/", "a", "b"])
estimator = Estimator(
image_uri=IMAGE_URI,
role=ROLE,
instance_count=instance_count_parameter,
instance_type=instance_type_parameter,
profiler_config=ProfilerConfig(system_monitor_interval_millis=500),
hyperparameters={
"batch-size": training_batch_size_parameter,
"epochs": training_epochs_parameter,
},
rules=[],
sagemaker_session=sagemaker_session,
output_path=output_path,
use_spot_instances=use_spot_instances,
)
inputs = TrainingInput(s3_data=data_source_uri_parameter)
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
step = TrainingStep(
name="MyTrainingStep",
depends_on=["TestStep"],
description="TrainingStep description",
display_name="MyTrainingStep",
estimator=estimator,
inputs=inputs,
cache_config=cache_config,
)
step.add_depends_on(["AnotherTestStep"])
pipeline = Pipeline(
name="MyPipeline",
parameters=[
instance_type_parameter,
instance_count_parameter,
data_source_uri_parameter,
training_epochs_parameter,
training_batch_size_parameter,
use_spot_instances,
],
steps=[step, custom_step1, custom_step2],
sagemaker_session=sagemaker_session,
)
assert json.loads(pipeline.definition())["Steps"][0] == {
"Name": "MyTrainingStep",
"Type": "Training",
"Description": "TrainingStep description",
"DisplayName": "MyTrainingStep",
"DependsOn": ["TestStep", "AnotherTestStep"],
"Arguments": {
"AlgorithmSpecification": {"TrainingImage": IMAGE_URI, "TrainingInputMode": "File"},
"EnableManagedSpotTraining": {"Get": "Parameters.UseSpotInstances"},
"HyperParameters": {
"batch-size": {
"Std:Join": {
"On": "",
"Values": [{"Get": "Parameters.TrainingBatchSize"}],
},
},
"epochs": {
"Std:Join": {
"On": "",
"Values": [{"Get": "Parameters.TrainingEpochs"}],
},
},
},
"InputDataConfig": [
{
"ChannelName": "training",
"DataSource": {
"S3DataSource": {
"S3DataDistributionType": "FullyReplicated",
"S3DataType": "S3Prefix",
"S3Uri": {"Get": "Parameters.DataSourceS3Uri"},
}
},
}
],
"OutputDataConfig": {
"S3OutputPath": {"Std:Join": {"On": "/", "Values": ["s3:/", "a", "b"]}}
},
"ResourceConfig": {
"InstanceCount": {"Get": "Parameters.InstanceCount"},
"InstanceType": {"Get": "Parameters.InstanceType"},
"VolumeSizeInGB": 30,
},
"RoleArn": ROLE,
"StoppingCondition": {"MaxRuntimeInSeconds": 86400},
"ProfilerConfig": {
"ProfilingIntervalInMilliseconds": 500,
"S3OutputPath": {"Std:Join": {"On": "/", "Values": ["s3:/", "a", "b"]}},
},
},
"CacheConfig": {"Enabled": True, "ExpireAfter": "PT1H"},
}
assert step.properties.TrainingJobName.expr == {"Get": "Steps.MyTrainingStep.TrainingJobName"}
assert step.properties.HyperParameters.expr == {"Get": "Steps.MyTrainingStep.HyperParameters"}
adjacency_list = PipelineGraph.from_pipeline(pipeline).adjacency_list
assert ordered(adjacency_list) == ordered(
{
"AnotherTestStep": ["MyTrainingStep"],
"MyTrainingStep": [],
"TestStep": ["MyTrainingStep"],
}
)
def test_training_step_tensorflow(sagemaker_session):
instance_type_parameter = ParameterString(name="InstanceType", default_value="ml.p3.16xlarge")
instance_count_parameter = ParameterInteger(name="InstanceCount", default_value=1)
data_source_uri_parameter = ParameterString(
name="DataSourceS3Uri", default_value=f"s3://{BUCKET}/train_manifest"
)
training_epochs_parameter = ParameterInteger(name="TrainingEpochs", default_value=5)
training_batch_size_parameter = ParameterInteger(name="TrainingBatchSize", default_value=500)
estimator = TensorFlow(
entry_point=DUMMY_SCRIPT_PATH,
role=ROLE,
model_dir=False,
image_uri=IMAGE_URI,
source_dir="s3://mybucket/source",
framework_version="2.4.1",
py_version="py37",
instance_count=instance_count_parameter,
instance_type=instance_type_parameter,
sagemaker_session=sagemaker_session,
hyperparameters={
"batch-size": training_batch_size_parameter,
"epochs": training_epochs_parameter,
},
debugger_hook_config=False,
# Training using SMDataParallel Distributed Training Framework
distribution={"smdistributed": {"dataparallel": {"enabled": True}}},
)
inputs = TrainingInput(s3_data=data_source_uri_parameter)
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
step = TrainingStep(
name="MyTrainingStep", estimator=estimator, inputs=inputs, cache_config=cache_config
)
pipeline = Pipeline(
name="MyPipeline",
parameters=[
instance_type_parameter,
instance_count_parameter,
data_source_uri_parameter,
training_epochs_parameter,
training_batch_size_parameter,
],
steps=[step],
sagemaker_session=sagemaker_session,
)
dsl = json.loads(pipeline.definition())["Steps"][0]
dsl["Arguments"]["HyperParameters"].pop("sagemaker_program", None)
dsl["Arguments"].pop("ProfilerRuleConfigurations", None)
assert dsl == {
"Name": "MyTrainingStep",
"Type": "Training",
"Arguments": {
"AlgorithmSpecification": {
"TrainingInputMode": "File",
"TrainingImage": "fakeimage",
"EnableSageMakerMetricsTimeSeries": True,
},
"OutputDataConfig": {"S3OutputPath": "s3://my-bucket/"},
"StoppingCondition": {"MaxRuntimeInSeconds": 86400},
"ResourceConfig": {
"InstanceCount": {"Get": "Parameters.InstanceCount"},
"InstanceType": {"Get": "Parameters.InstanceType"},
"VolumeSizeInGB": 30,
},
"RoleArn": "DummyRole",
"InputDataConfig": [
{
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": {"Get": "Parameters.DataSourceS3Uri"},
"S3DataDistributionType": "FullyReplicated",
}
},
"ChannelName": "training",
}
],
"HyperParameters": {
"batch-size": {
"Std:Join": {"On": "", "Values": [{"Get": "Parameters.TrainingBatchSize"}]}
},
"epochs": {
"Std:Join": {"On": "", "Values": [{"Get": "Parameters.TrainingEpochs"}]}
},
"sagemaker_submit_directory": '"s3://mybucket/source"',
"sagemaker_container_log_level": "20",
"sagemaker_region": '"us-west-2"',
"sagemaker_distributed_dataparallel_enabled": "true",
"sagemaker_instance_type": {"Get": "Parameters.InstanceType"},
"sagemaker_distributed_dataparallel_custom_mpi_options": '""',
},
"ProfilerConfig": {"S3OutputPath": "s3://my-bucket/"},
"Environment": {DEBUGGER_FLAG: "0"},
},
"CacheConfig": {"Enabled": True, "ExpireAfter": "PT1H"},
}
assert step.properties.TrainingJobName.expr == {"Get": "Steps.MyTrainingStep.TrainingJobName"}
def test_training_step_profiler_warning(sagemaker_session):
estimator = TensorFlow(
entry_point=DUMMY_SCRIPT_PATH,
role=ROLE,
model_dir=False,
image_uri=IMAGE_URI,
source_dir="s3://mybucket/source",
framework_version="2.4.1",
py_version="py37",
disable_profiler=False,
instance_count=1,
instance_type="ml.p3.16xlarge",
sagemaker_session=sagemaker_session,
hyperparameters={
"batch-size": 500,
"epochs": 5,
},
debugger_hook_config=False,
distribution={"smdistributed": {"dataparallel": {"enabled": True}}},
)
inputs = TrainingInput(s3_data=f"s3://{BUCKET}/train_manifest")
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
with warnings.catch_warnings(record=True) as w:
TrainingStep(
name="MyTrainingStep", estimator=estimator, inputs=inputs, cache_config=cache_config
)
assert len(w) == 2
assert issubclass(w[0].category, UserWarning)
assert "Profiling is enabled on the provided estimator" in str(w[0].message)
assert issubclass(w[1].category, DeprecationWarning)
assert "We are deprecating the instantiation" in str(w[1].message)
def test_training_step_no_profiler_warning(sagemaker_session):
estimator = TensorFlow(
entry_point=DUMMY_SCRIPT_PATH,
role=ROLE,
model_dir=False,
image_uri=IMAGE_URI,
source_dir="s3://mybucket/source",
framework_version="2.4.1",
py_version="py37",
disable_profiler=True,
instance_count=1,
instance_type="ml.p3.16xlarge",
sagemaker_session=sagemaker_session,
hyperparameters={
"batch-size": 500,
"epochs": 5,
},
debugger_hook_config=False,
distribution={"smdistributed": {"dataparallel": {"enabled": True}}},
)
inputs = TrainingInput(s3_data=f"s3://{BUCKET}/train_manifest")
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
with warnings.catch_warnings(record=True) as w:
# profiler disabled, cache config not None
TrainingStep(
name="MyTrainingStep", estimator=estimator, inputs=inputs, cache_config=cache_config
)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "We are deprecating the instantiation" in str(w[-1].message)
with warnings.catch_warnings(record=True) as w:
# profiler enabled, cache config is None
estimator.disable_profiler = False
TrainingStep(name="MyTrainingStep", estimator=estimator, inputs=inputs, cache_config=None)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "We are deprecating the instantiation" in str(w[-1].message)
def test_processing_step(sagemaker_session):
custom_step1 = CustomStep("TestStep")
custom_step2 = CustomStep("SecondTestStep")
custom_step3 = CustomStep("ThirdTestStep")
processing_input_data_uri_parameter = ParameterString(
name="ProcessingInputDataUri", default_value=f"s3://{BUCKET}/processing_manifest"
)
instance_type_parameter = ParameterString(name="InstanceType", default_value="ml.m4.4xlarge")
instance_count_parameter = ParameterInteger(name="InstanceCount", default_value=1)
processor = Processor(
image_uri=IMAGE_URI,
role=ROLE,
instance_count=instance_count_parameter,
instance_type=instance_type_parameter,
sagemaker_session=sagemaker_session,
)
inputs = [
ProcessingInput(
source=processing_input_data_uri_parameter,
destination="processing_manifest",
)
]
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
evaluation_report = PropertyFile(
name="EvaluationReport", output_name="evaluation", path="evaluation.json"
)
with warnings.catch_warnings(record=True) as w:
step = ProcessingStep(
name="MyProcessingStep",
description="ProcessingStep description",
display_name="MyProcessingStep",
depends_on=["TestStep", "SecondTestStep"],
processor=processor,
inputs=inputs,
outputs=[],
cache_config=cache_config,
property_files=[evaluation_report],
)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "We are deprecating the instantiation" in str(w[-1].message)
step.add_depends_on(["ThirdTestStep"])
pipeline = Pipeline(
name="MyPipeline",
parameters=[
processing_input_data_uri_parameter,
instance_type_parameter,
instance_count_parameter,
],
steps=[step, custom_step1, custom_step2, custom_step3],
sagemaker_session=sagemaker_session,
)
assert json.loads(pipeline.definition())["Steps"][0] == {
"Name": "MyProcessingStep",
"Description": "ProcessingStep description",
"DisplayName": "MyProcessingStep",
"Type": "Processing",
"DependsOn": ["TestStep", "SecondTestStep", "ThirdTestStep"],
"Arguments": {
"AppSpecification": {"ImageUri": "fakeimage"},
"ProcessingInputs": [
{
"InputName": "input-1",
"AppManaged": False,
"S3Input": {
"LocalPath": "processing_manifest",
"S3CompressionType": "None",
"S3DataDistributionType": "FullyReplicated",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3Uri": {"Get": "Parameters.ProcessingInputDataUri"},
},
}
],
"ProcessingResources": {
"ClusterConfig": {
"InstanceCount": {"Get": "Parameters.InstanceCount"},
"InstanceType": {"Get": "Parameters.InstanceType"},
"VolumeSizeInGB": 30,
}
},
"RoleArn": "DummyRole",
},
"CacheConfig": {"Enabled": True, "ExpireAfter": "PT1H"},
"PropertyFiles": [
{
"FilePath": "evaluation.json",
"OutputName": "evaluation",
"PropertyFileName": "EvaluationReport",
}
],
}
assert step.properties.ProcessingJobName.expr == {
"Get": "Steps.MyProcessingStep.ProcessingJobName"
}
adjacency_list = PipelineGraph.from_pipeline(pipeline).adjacency_list
assert ordered(adjacency_list) == ordered(
{
"SecondTestStep": ["MyProcessingStep"],
"TestStep": ["MyProcessingStep"],
"ThirdTestStep": ["MyProcessingStep"],
"MyProcessingStep": [],
}
)
@patch("sagemaker.processing.ScriptProcessor._normalize_args")
def test_processing_step_normalizes_args_with_local_code(mock_normalize_args, script_processor):
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
inputs = [
ProcessingInput(
source=f"s3://{BUCKET}/processing_manifest",
destination="processing_manifest",
)
]
outputs = [
ProcessingOutput(
source=f"s3://{BUCKET}/processing_manifest",
destination="processing_manifest",
)
]
step = ProcessingStep(
name="MyProcessingStep",
processor=script_processor,
code=DUMMY_SCRIPT_PATH,
inputs=inputs,
outputs=outputs,
job_arguments=["arg1", "arg2"],
cache_config=cache_config,
)
mock_normalize_args.return_value = [step.inputs, step.outputs]
step.to_request()
mock_normalize_args.assert_called_with(
job_name="MyProcessingStep-3e89f0c7e101c356cbedf27d9d27e9db",
arguments=step.job_arguments,
inputs=step.inputs,
outputs=step.outputs,
code=step.code,
kms_key=None,
)
def test_processing_step_normalizes_args_with_param_str_local_code(
sagemaker_session, script_processor
):
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
code_param = ParameterString(name="Script", default_value="S3://my-bucket/file_name.py")
inputs = [
ProcessingInput(
source=f"s3://{BUCKET}/processing_manifest",
destination="processing_manifest",
)
]
outputs = [
ProcessingOutput(
source=f"s3://{BUCKET}/processing_manifest",
destination="processing_manifest",
)
]
with pytest.raises(ValueError) as error:
step = ProcessingStep(
name="MyProcessingStep",
processor=script_processor,
code=code_param,
inputs=inputs,
outputs=outputs,
job_arguments=["arg1", "arg2"],
cache_config=cache_config,
)
pipeline = Pipeline(
name="MyPipeline",
parameters=[code_param],
steps=[step],
sagemaker_session=sagemaker_session,
)
pipeline.definition()
assert "has to be a valid S3 URI or local file path" in str(error.value)
@patch("sagemaker.processing.ScriptProcessor._normalize_args")
def test_processing_step_normalizes_args_with_s3_code(mock_normalize_args, script_processor):
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
inputs = [
ProcessingInput(
source=f"s3://{BUCKET}/processing_manifest",
destination="processing_manifest",
)
]
outputs = [
ProcessingOutput(
source=f"s3://{BUCKET}/processing_manifest",
destination="processing_manifest",
)
]
step = ProcessingStep(
name="MyProcessingStep",
processor=script_processor,
code="s3://foo",
inputs=inputs,
outputs=outputs,
job_arguments=["arg1", "arg2"],
cache_config=cache_config,
kms_key="arn:aws:kms:us-west-2:012345678901:key/s3-kms-key",
)
mock_normalize_args.return_value = [step.inputs, step.outputs]
step.to_request()
mock_normalize_args.assert_called_with(
job_name=None,
arguments=step.job_arguments,
inputs=step.inputs,
outputs=step.outputs,
code=step.code,
kms_key=step.kms_key,
)
@patch("sagemaker.processing.ScriptProcessor._normalize_args")
def test_processing_step_normalizes_args_with_no_code(mock_normalize_args, script_processor):
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
inputs = [
ProcessingInput(
source=f"s3://{BUCKET}/processing_manifest",
destination="processing_manifest",
)
]
outputs = [
ProcessingOutput(
source=f"s3://{BUCKET}/processing_manifest",
destination="processing_manifest",
)
]
step = ProcessingStep(
name="MyProcessingStep",
processor=script_processor,
inputs=inputs,
outputs=outputs,
job_arguments=["arg1", "arg2"],
cache_config=cache_config,
)
mock_normalize_args.return_value = [step.inputs, step.outputs]
step.to_request()
mock_normalize_args.assert_called_with(
job_name=None,
arguments=step.job_arguments,
inputs=step.inputs,
outputs=step.outputs,
code=None,
kms_key=None,
)
def test_create_model_step(sagemaker_session):
model = Model(
image_uri=IMAGE_URI,
role=ROLE,
sagemaker_session=sagemaker_session,
)
inputs = CreateModelInput(
instance_type="c4.4xlarge",
accelerator_type="ml.eia1.medium",
)
step = CreateModelStep(
name="MyCreateModelStep",
depends_on=["TestStep"],
display_name="MyCreateModelStep",
description="TestDescription",
model=model,
inputs=inputs,
)
step.add_depends_on(["SecondTestStep"])
assert step.to_request() == {
"Name": "MyCreateModelStep",
"Type": "Model",
"Description": "TestDescription",
"DisplayName": "MyCreateModelStep",
"DependsOn": ["TestStep", "SecondTestStep"],
"Arguments": {
"ExecutionRoleArn": "DummyRole",
"PrimaryContainer": {"Environment": {}, "Image": "fakeimage"},
},
}
assert step.properties.ModelName.expr == {"Get": "Steps.MyCreateModelStep.ModelName"}
def test_create_model_step_with_invalid_input(sagemaker_session):
# without both step_args and any of the old required arguments
with pytest.raises(ValueError) as error:
CreateModelStep(
name="MyRegisterModelStep",
)
assert "Either of them should be provided" in str(error.value)
# with both step_args and the old required arguments
with pytest.raises(ValueError) as error:
CreateModelStep(
name="MyRegisterModelStep",
step_args=dict(),
model=Model(image_uri=IMAGE_URI),
)
assert "Either of them should be provided" in str(error.value)
@patch("tarfile.open")
@patch("time.strftime", return_value="2017-10-10-14-14-15")
def test_create_model_step_with_model_pipeline(tfo, time, sagemaker_session):
framework_model = DummyFrameworkModel(sagemaker_session)
sparkml_model = SparkMLModel(
model_data="s3://bucket/model_2.tar.gz",
role=ROLE,
sagemaker_session=sagemaker_session,
env={"SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT": "text/csv"},
)
model = PipelineModel(
models=[framework_model, sparkml_model], role=ROLE, sagemaker_session=sagemaker_session
)
inputs = CreateModelInput(
instance_type="c4.4xlarge",
accelerator_type="ml.eia1.medium",
)
step = CreateModelStep(
name="MyCreateModelStep",
depends_on=["TestStep"],
display_name="MyCreateModelStep",
description="TestDescription",
model=model,
inputs=inputs,
)
step.add_depends_on(["SecondTestStep"])
assert step.to_request() == {
"Name": "MyCreateModelStep",
"Type": "Model",
"Description": "TestDescription",
"DisplayName": "MyCreateModelStep",
"DependsOn": ["TestStep", "SecondTestStep"],
"Arguments": {
"Containers": [
{
"Environment": {
"SAGEMAKER_PROGRAM": "dummy_script.py",
"SAGEMAKER_SUBMIT_DIRECTORY": "s3://my-bucket/mi-1-2017-10-10-14-14-15/sourcedir.tar.gz",
"SAGEMAKER_CONTAINER_LOG_LEVEL": "20",
"SAGEMAKER_REGION": "us-west-2",
},
"Image": "mi-1",
"ModelDataUrl": "s3://bucket/model_1.tar.gz",
},
{
"Environment": {"SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT": "text/csv"},
"Image": "246618743249.dkr.ecr.us-west-2.amazonaws.com/sagemaker-sparkml-serving:2.4",
"ModelDataUrl": "s3://bucket/model_2.tar.gz",
},
],
"ExecutionRoleArn": "DummyRole",
},
}
assert step.properties.ModelName.expr == {"Get": "Steps.MyCreateModelStep.ModelName"}
def test_transform_step(sagemaker_session):
transformer = Transformer(
model_name=MODEL_NAME,
instance_count=1,
instance_type="c4.4xlarge",
sagemaker_session=sagemaker_session,
)
inputs = TransformInput(data=f"s3://{BUCKET}/transform_manifest")
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
with warnings.catch_warnings(record=True) as w:
step = TransformStep(
name="MyTransformStep",
depends_on=["TestStep"],
transformer=transformer,
display_name="TransformStep",
description="TestDescription",
inputs=inputs,
cache_config=cache_config,
)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "We are deprecating the instantiation" in str(w[-1].message)
step.add_depends_on(["SecondTestStep"])
assert step.to_request() == {
"Name": "MyTransformStep",
"Type": "Transform",
"Description": "TestDescription",
"DisplayName": "TransformStep",
"DependsOn": ["TestStep", "SecondTestStep"],
"Arguments": {
"ModelName": "gisele",
"TransformInput": {
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": "s3://my-bucket/transform_manifest",
}
}
},
"TransformOutput": {"S3OutputPath": None},
"TransformResources": {
"InstanceCount": 1,
"InstanceType": "c4.4xlarge",
},
},
"CacheConfig": {"Enabled": True, "ExpireAfter": "PT1H"},
}
assert step.properties.TransformJobName.expr == {
"Get": "Steps.MyTransformStep.TransformJobName"
}
def test_properties_describe_training_job_response():
prop = Properties(step_name="MyStep", shape_name="DescribeTrainingJobResponse")
some_prop_names = ["TrainingJobName", "TrainingJobArn", "HyperParameters", "OutputDataConfig"]
for name in some_prop_names:
assert name in prop.__dict__.keys()
assert prop.CreationTime.expr == {"Get": "Steps.MyStep.CreationTime"}
assert prop.OutputDataConfig.S3OutputPath.expr == {
"Get": "Steps.MyStep.OutputDataConfig.S3OutputPath"
}
def test_properties_describe_processing_job_response():
prop = Properties(step_name="MyStep", shape_name="DescribeProcessingJobResponse")
some_prop_names = ["ProcessingInputs", "ProcessingOutputConfig", "ProcessingEndTime"]
for name in some_prop_names:
assert name in prop.__dict__.keys()
assert prop.ProcessingJobName.expr == {"Get": "Steps.MyStep.ProcessingJobName"}
assert prop.ProcessingOutputConfig.Outputs["MyOutputName"].S3Output.S3Uri.expr == {
"Get": "Steps.MyStep.ProcessingOutputConfig.Outputs['MyOutputName'].S3Output.S3Uri"
}
def test_add_depends_on(sagemaker_session):
processing_input_data_uri_parameter = ParameterString(
name="ProcessingInputDataUri", default_value=f"s3://{BUCKET}/processing_manifest"
)
instance_type_parameter = ParameterString(name="InstanceType", default_value="ml.m4.4xlarge")
instance_count_parameter = ParameterInteger(name="InstanceCount", default_value=1)
processor = Processor(
image_uri=IMAGE_URI,
role=ROLE,
instance_count=instance_count_parameter,
instance_type=instance_type_parameter,
sagemaker_session=sagemaker_session,
)
inputs = [
ProcessingInput(
source=processing_input_data_uri_parameter,
destination="processing_manifest",
)
]
cache_config = CacheConfig(enable_caching=True, expire_after="PT1H")
step_1 = ProcessingStep(
name="MyProcessingStep-1",
processor=processor,
inputs=inputs,
outputs=[],
cache_config=cache_config,
)
step_2 = ProcessingStep(
name="MyProcessingStep-2",
depends_on=[step_1],
processor=processor,
inputs=inputs,
outputs=[],
cache_config=cache_config,
)
step_3 = ProcessingStep(
name="MyProcessingStep-3",
depends_on=[step_1],
processor=processor,
inputs=inputs,
outputs=[],
cache_config=cache_config,
)
step_3.add_depends_on([step_2.name])
assert "DependsOn" not in step_1.to_request()
assert step_2.to_request()["DependsOn"] == ["MyProcessingStep-1"]
assert step_3.to_request()["DependsOn"] == ["MyProcessingStep-1", "MyProcessingStep-2"]
def test_single_algo_tuning_step(sagemaker_session):
data_source_uri_parameter = ParameterString(
name="DataSourceS3Uri", default_value=f"s3://{BUCKET}/train_manifest"
)
use_spot_instances = ParameterBoolean(name="UseSpotInstances", default_value=False)
estimator = Estimator(
image_uri=IMAGE_URI,
role=ROLE,
instance_count=1,
instance_type="ml.c5.4xlarge",
profiler_config=ProfilerConfig(system_monitor_interval_millis=500),
rules=[],
sagemaker_session=sagemaker_session,
use_spot_instances=use_spot_instances,
)
estimator.set_hyperparameters(
num_layers=18,
image_shape="3,224,224",
num_classes=257,
num_training_samples=15420,
mini_batch_size=128,
epochs=10,
optimizer="sgd",
top_k="2",
precision_dtype="float32",
augmentation_type="crop",
)
hyperparameter_ranges = {
"learning_rate": ContinuousParameter(0.0001, 0.05),
"momentum": ContinuousParameter(0.0, 0.99),
"weight_decay": ContinuousParameter(0.0, 0.99),
}
tuner = HyperparameterTuner(
estimator=estimator,
objective_metric_name="val:accuracy",
hyperparameter_ranges=hyperparameter_ranges,
objective_type="Maximize",
max_jobs=5,
max_parallel_jobs=2,
early_stopping_type="OFF",
strategy="Bayesian",
warm_start_config=WarmStartConfig(
warm_start_type=WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM,
parents=set(["parent-hpo"]),
),
)
inputs = TrainingInput(s3_data=data_source_uri_parameter)
with warnings.catch_warnings(record=True) as w:
tuning_step = TuningStep(
name="MyTuningStep",
tuner=tuner,
inputs=inputs,
)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "We are deprecating the instantiation" in str(w[-1].message)
pipeline = Pipeline(
name="MyPipeline",
parameters=[data_source_uri_parameter, use_spot_instances],
steps=[tuning_step],
sagemaker_session=sagemaker_session,
)
step_dsl_list = json.loads(pipeline.definition())["Steps"]
assert step_dsl_list[0] == {
"Name": "MyTuningStep",
"Type": "Tuning",
"Arguments": {
"HyperParameterTuningJobConfig": {
"Strategy": "Bayesian",
"ResourceLimits": {"MaxNumberOfTrainingJobs": 5, "MaxParallelTrainingJobs": 2},
"TrainingJobEarlyStoppingType": "OFF",
"HyperParameterTuningJobObjective": {
"Type": "Maximize",
"MetricName": "val:accuracy",
},
"ParameterRanges": {
"ContinuousParameterRanges": [
{
"Name": "learning_rate",
"MinValue": "0.0001",
"MaxValue": "0.05",
"ScalingType": "Auto",
},
{
"Name": "momentum",
"MinValue": "0.0",
"MaxValue": "0.99",
"ScalingType": "Auto",
},
{
"Name": "weight_decay",
"MinValue": "0.0",
"MaxValue": "0.99",
"ScalingType": "Auto",
},
],
"CategoricalParameterRanges": [],
"IntegerParameterRanges": [],
},
},
"TrainingJobDefinition": {
"StaticHyperParameters": {
"num_layers": "18",
"image_shape": "3,224,224",
"num_classes": "257",
"num_training_samples": "15420",
"mini_batch_size": "128",
"epochs": "10",
"optimizer": "sgd",
"top_k": "2",
"precision_dtype": "float32",
"augmentation_type": "crop",
},
"RoleArn": "DummyRole",
"OutputDataConfig": {"S3OutputPath": "s3://my-bucket/"},
"ResourceConfig": {
"InstanceCount": 1,
"InstanceType": "ml.c5.4xlarge",
"VolumeSizeInGB": 30,
},
"StoppingCondition": {"MaxRuntimeInSeconds": 86400},
"AlgorithmSpecification": {
"TrainingInputMode": "File",
"TrainingImage": "fakeimage",
},
"EnableManagedSpotTraining": {"Get": "Parameters.UseSpotInstances"},
"InputDataConfig": [
{
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": {"Get": "Parameters.DataSourceS3Uri"},
"S3DataDistributionType": "FullyReplicated",
}
},
"ChannelName": "training",
}
],
},
"WarmStartConfig": {
"WarmStartType": "IdenticalDataAndAlgorithm",
"ParentHyperParameterTuningJobs": [
{
"HyperParameterTuningJobName": "parent-hpo",
}
],
},
},
}
assert tuning_step.properties.HyperParameterTuningJobName.expr == {
"Get": "Steps.MyTuningStep.HyperParameterTuningJobName"
}
assert tuning_step.properties.TrainingJobSummaries[0].TrainingJobName.expr == {
"Get": "Steps.MyTuningStep.TrainingJobSummaries[0].TrainingJobName"
}
assert tuning_step.get_top_model_s3_uri(0, "my-bucket", "my-prefix").expr == {
"Std:Join": {
"On": "/",
"Values": [
"s3:/",
"my-bucket",
"my-prefix",
{"Get": "Steps.MyTuningStep.TrainingJobSummaries[0].TrainingJobName"},
"output/model.tar.gz",
],
}
}
def test_multi_algo_tuning_step(sagemaker_session):
data_source_uri_parameter = ParameterString(
name="DataSourceS3Uri", default_value=f"s3://{BUCKET}/train_manifest"
)
instance_count = ParameterInteger(name="InstanceCount", default_value=1)
estimator = Estimator(
image_uri=IMAGE_URI,
role=ROLE,
instance_count=instance_count,
instance_type="ml.c5.4xlarge",
profiler_config=ProfilerConfig(system_monitor_interval_millis=500),
rules=[],
sagemaker_session=sagemaker_session,
max_retry_attempts=10,
)
estimator.set_hyperparameters(
num_layers=18,
image_shape="3,224,224",
num_classes=257,
num_training_samples=15420,
mini_batch_size=128,
epochs=10,
optimizer="sgd",
top_k="2",
precision_dtype="float32",
augmentation_type="crop",
)
initial_lr_param = ParameterString(name="InitialLR", default_value="0.0001")
hyperparameter_ranges = {
"learning_rate": ContinuousParameter(initial_lr_param, 0.05),
"momentum": ContinuousParameter(0.0, 0.99),
"weight_decay": ContinuousParameter(0.0, 0.99),
}
tuner = HyperparameterTuner.create(
estimator_dict={
"estimator-1": estimator,
"estimator-2": estimator,
},
objective_type="Minimize",
objective_metric_name_dict={
"estimator-1": "val:loss",
"estimator-2": "val:loss",
},
hyperparameter_ranges_dict={
"estimator-1": hyperparameter_ranges,
"estimator-2": hyperparameter_ranges,
},
)
inputs = TrainingInput(s3_data=data_source_uri_parameter)
tuning_step = TuningStep(
name="MyTuningStep",
tuner=tuner,
inputs={
"estimator-1": inputs,
"estimator-2": inputs,
},
)
pipeline = Pipeline(
name="MyPipeline",
parameters=[data_source_uri_parameter, instance_count, initial_lr_param],
steps=[tuning_step],
sagemaker_session=sagemaker_session,
)
dsl = json.loads(pipeline.definition())
assert dsl["Steps"][0] == {
"Name": "MyTuningStep",
"Type": "Tuning",
"Arguments": {
"HyperParameterTuningJobConfig": {
"Strategy": "Bayesian",
"ResourceLimits": {"MaxNumberOfTrainingJobs": 1, "MaxParallelTrainingJobs": 1},
"TrainingJobEarlyStoppingType": "Off",
},
"TrainingJobDefinitions": [
{
"StaticHyperParameters": {
"num_layers": "18",
"image_shape": "3,224,224",
"num_classes": "257",
"num_training_samples": "15420",
"mini_batch_size": "128",
"epochs": "10",
"optimizer": "sgd",
"top_k": "2",
"precision_dtype": "float32",
"augmentation_type": "crop",
},
"RoleArn": "DummyRole",
"OutputDataConfig": {"S3OutputPath": "s3://my-bucket/"},
"ResourceConfig": {
"InstanceCount": {"Get": "Parameters.InstanceCount"},
"InstanceType": "ml.c5.4xlarge",
"VolumeSizeInGB": 30,
},
"StoppingCondition": {"MaxRuntimeInSeconds": 86400},
"AlgorithmSpecification": {
"TrainingInputMode": "File",
"TrainingImage": "fakeimage",
},
"InputDataConfig": [
{
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": {"Get": "Parameters.DataSourceS3Uri"},
"S3DataDistributionType": "FullyReplicated",
}
},
"ChannelName": "training",
}
],
"DefinitionName": "estimator-1",
"TuningObjective": {"Type": "Minimize", "MetricName": "val:loss"},
"HyperParameterRanges": {
"ContinuousParameterRanges": [
{
"Name": "learning_rate",
"MinValue": {"Get": "Parameters.InitialLR"},
"MaxValue": "0.05",
"ScalingType": "Auto",
},
{
"Name": "momentum",
"MinValue": "0.0",
"MaxValue": "0.99",
"ScalingType": "Auto",
},
{
"Name": "weight_decay",
"MinValue": "0.0",
"MaxValue": "0.99",
"ScalingType": "Auto",
},
],
"CategoricalParameterRanges": [],
"IntegerParameterRanges": [],
},
"RetryStrategy": {
"MaximumRetryAttempts": 10,
},
},
{
"StaticHyperParameters": {
"num_layers": "18",
"image_shape": "3,224,224",
"num_classes": "257",
"num_training_samples": "15420",
"mini_batch_size": "128",
"epochs": "10",
"optimizer": "sgd",
"top_k": "2",
"precision_dtype": "float32",
"augmentation_type": "crop",
},
"RoleArn": "DummyRole",
"OutputDataConfig": {"S3OutputPath": "s3://my-bucket/"},
"ResourceConfig": {
"InstanceCount": {"Get": "Parameters.InstanceCount"},
"InstanceType": "ml.c5.4xlarge",
"VolumeSizeInGB": 30,
},
"StoppingCondition": {"MaxRuntimeInSeconds": 86400},
"AlgorithmSpecification": {
"TrainingInputMode": "File",
"TrainingImage": "fakeimage",
},
"InputDataConfig": [
{
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": {"Get": "Parameters.DataSourceS3Uri"},
"S3DataDistributionType": "FullyReplicated",
}
},
"ChannelName": "training",
}
],
"DefinitionName": "estimator-2",
"TuningObjective": {"Type": "Minimize", "MetricName": "val:loss"},
"HyperParameterRanges": {
"ContinuousParameterRanges": [
{
"Name": "learning_rate",
"MinValue": {"Get": "Parameters.InitialLR"},
"MaxValue": "0.05",
"ScalingType": "Auto",
},
{
"Name": "momentum",
"MinValue": "0.0",
"MaxValue": "0.99",
"ScalingType": "Auto",
},
{
"Name": "weight_decay",
"MinValue": "0.0",
"MaxValue": "0.99",
"ScalingType": "Auto",
},
],
"CategoricalParameterRanges": [],
"IntegerParameterRanges": [],
},
"RetryStrategy": {
"MaximumRetryAttempts": 10,
},
},
],
},
}
|
the-stack_0_19004 | #!/usr/bin/env python3
#
# ===============LICENSE_START=======================================================
# Acumos
# ===================================================================================
# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
# ===================================================================================
# This Acumos software file is distributed by AT&T
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END=========================================================
from flask_restplus import fields
from modelbuilder.api.namespaces import model_builder_namespace as api
error_response_body = api.model('Error Response - General', {
'errorId': fields.String(description='Identifier for the error.',
required=True,
example="H2O-0003"),
'message': fields.String(description='Brief description about the error.',
required=True,
example="Invalid Value for parameter(s), %1, %2"),
'variable': fields.List(fields.String, description='Values for the parameters in the error message field.',
required=False,
example="['modelKey', 'modelVersion']"),
'errorUrl': fields.String(description='Url to a web page where there is detailed information \
about the cause and resolution for the given error.',
required=False,
example="https://acumos.org/error/h2o-0001"),
})
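# Illustrative JSON payload described by the model above; the values simply reuse
# the field examples and would be replaced by the concrete error details at runtime:
#
#   {
#       "errorId": "H2O-0003",
#       "message": "Invalid Value for parameter(s), %1, %2",
#       "variable": ["modelKey", "modelVersion"],
#       "errorUrl": "https://acumos.org/error/h2o-0001"
#   }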
error_response_body_500 = api.model('Error Response - 500', {
'errorId': fields.String(description='Identifier for the error.',
required=True,
example="H2O-0001"),
'message': fields.String(description='Brief description about the error.',
required=True,
example="Please provide the reference id ( %1 ) to the support team"),
'variable': fields.List(fields.String,
description='The unique error reference id to be provided to the support team.',
required=False,
example="['24234234234234']"),
'errorUrl': fields.String(description='Url to a web page where there is detailed information \
about the cause and resolution for the given error.',
required=False,
example="https://acumos.org/error/h2o-0001"),
})
|
the-stack_0_19008 | '''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <[email protected]>
.. py:module:: fantastico.contrib.dynamic_menu.menu_controller
'''
from fantastico.contrib.dynamic_menu.menu_exceptions import FantasticoMenuNotFoundException
from fantastico.contrib.dynamic_menu.models.menus import DynamicMenuItem, DynamicMenu
from fantastico.mvc.base_controller import BaseController
from fantastico.mvc.controller_decorators import ControllerProvider, Controller
from fantastico.mvc.models.model_filter import ModelFilter
from webob.response import Response
import json
@ControllerProvider()
class DynamicMenuController(BaseController):
'''This class provides the controller for dynamic menus. The following routes are automatically made available
when dynamic menu component is deployed:
**/dynamic-menu/menus/<menu_id>/items/** -- This route loads menu items from database and retrieve them in json format.
Below you can see a diagram describing relation model of the menu:
.. image:: /images/components/dynamic_menu/erd.png
'''
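    # Illustrative response body returned by retrieve_menu_items below; the menu
    # item values are invented for the example:
    #
    #   {"items": [{"url": "/page/home", "target": "_self",
    #               "title": "Home page", "label": "Home"}]}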
ITEMS_URL = "/dynamic-menu/menus/(?P<menu_id>\\d{1,})/items/$"
@property
def max_items(self):
'''This property retrieves the maximum number of items allowed for a menu.'''
return 100
@Controller(url=ITEMS_URL, method="GET",
models={"Menus": "fantastico.contrib.dynamic_menu.models.menus.DynamicMenu",
"Items": "fantastico.contrib.dynamic_menu.models.menus.DynamicMenuItem"})
def retrieve_menu_items(self, request, menu_id):
'''This method is used to retrieve all items associated with a specified menu.
:param request: Http request being processed.
:type request: HTTP request
:param menu_id: Menu unique identifier we want to retrieve information for.
:type menu_id: int
:returns: A JSON array containing all available menu items.
:raises fantastico.contrib.dynamic_menu.menu_exceptions.FantasticoMenuNotFoundException:
Whenever the requested menu does not exist.
'''
menu_id = int(menu_id)
menus_facade = request.models.Menus
if not menus_facade.find_by_pk({DynamicMenu.id: menu_id}):
raise FantasticoMenuNotFoundException("Menu %s does not exist." % menu_id)
items_facade = request.models.Items
items = items_facade.get_records_paged(start_record=0, end_record=self.max_items,
filter_expr=[ModelFilter(DynamicMenuItem.menu_id, menu_id, ModelFilter.EQ)])
items = [{"url": item.url,
"target": item.target,
"title": item.title,
"label": item.label} for item in items or []]
body = json.dumps({"items": items})
return Response(body, content_type="application/json")
|
the-stack_0_19010 | import json
import logging
from collections import OrderedDict
from decimal import ROUND_HALF_UP, Decimal
from typing import Any, Dict, Union
import pytz
from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.forms import Form
from django.http import HttpRequest
from django.template.loader import get_template
from django.utils.timezone import now
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from django_countries import Countries
from i18nfield.forms import I18nFormField, I18nTextarea, I18nTextInput
from i18nfield.strings import LazyI18nString
from pretix.base.forms import PlaceholderValidator
from pretix.base.models import (
CartPosition, Event, InvoiceAddress, Order, OrderPayment, OrderRefund,
Quota,
)
from pretix.base.reldate import RelativeDateField, RelativeDateWrapper
from pretix.base.settings import SettingsSandbox
from pretix.base.signals import register_payment_providers
from pretix.base.templatetags.money import money_filter
from pretix.base.templatetags.rich_text import rich_text
from pretix.helpers.money import DecimalTextInput
from pretix.presale.views import get_cart_total
from pretix.presale.views.cart import cart_session, get_or_create_cart_id
logger = logging.getLogger(__name__)
class PaymentProviderForm(Form):
def clean(self):
cleaned_data = super().clean()
for k, v in self.fields.items():
val = cleaned_data.get(k)
if v._required and not val:
self.add_error(k, _('This field is required.'))
class BasePaymentProvider:
"""
This is the base class for all payment providers.
"""
def __init__(self, event: Event):
self.event = event
self.settings = SettingsSandbox('payment', self.identifier, event)
# Default values
if self.settings.get('_fee_reverse_calc') is None:
self.settings.set('_fee_reverse_calc', True)
def __str__(self):
return self.identifier
@property
def is_implicit(self) -> bool:
"""
        Returns whether or not this payment provider is an "implicit" payment provider that will
*always* and unconditionally be used if is_allowed() returns True and does not require any input.
This is intended to be used by the FreePaymentProvider, which skips the payment choice page.
By default, this returns ``False``. Please do not set this if you don't know exactly what you are doing.
"""
return False
@property
def is_meta(self) -> bool:
"""
Returns whether or whether not this payment provider is a "meta" payment provider that only
works as a settings holder for other payment providers and should never be used directly. This
is a trick to implement payment gateways with multiple payment methods but unified payment settings.
Take a look at the built-in stripe provider to see how this might be used.
By default, this returns ``False``.
"""
return False
@property
def is_enabled(self) -> bool:
"""
Returns whether or whether not this payment provider is enabled.
By default, this is determined by the value of the ``_enabled`` setting.
"""
return self.settings.get('_enabled', as_type=bool)
@property
def test_mode_message(self) -> str:
"""
If this property is set to a string, this will be displayed when this payment provider is selected
while the event is in test mode. You should use it to explain to your user how your plugin behaves,
e.g. if it falls back to a test mode automatically as well or if actual payments will be performed.
If you do not set this (or, return ``None``), pretix will show a default message warning the user
that this plugin does not support test mode payments.
"""
return None
def calculate_fee(self, price: Decimal) -> Decimal:
"""
Calculate the fee for this payment provider which will be added to
final price before fees (but after taxes). It should include any taxes.
The default implementation makes use of the setting ``_fee_abs`` for an
absolute fee and ``_fee_percent`` for a percentage.
:param price: The total value without the payment method fee, after taxes.
"""
fee_abs = self.settings.get('_fee_abs', as_type=Decimal, default=0)
fee_percent = self.settings.get('_fee_percent', as_type=Decimal, default=0)
fee_reverse_calc = self.settings.get('_fee_reverse_calc', as_type=bool, default=True)
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
if fee_reverse_calc:
return ((price + fee_abs) * (1 / (1 - fee_percent / 100)) - price).quantize(
Decimal('1') / 10 ** places, ROUND_HALF_UP
)
else:
return (price * fee_percent / 100 + fee_abs).quantize(
Decimal('1') / 10 ** places, ROUND_HALF_UP
)
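    # Illustrative numbers (an example only, not part of pretix): with ``_fee_abs`` = 0.30,
    # ``_fee_percent`` = 2.9 and reverse calculation enabled, a price of 100.00 yields
    #
    #     fee = (Decimal('100.00') + Decimal('0.30')) * (1 / (1 - Decimal('2.9') / 100)) - Decimal('100.00')
    #     # ~= 3.30 after quantizing to two decimal places
    #
    # i.e. the percentage is effectively charged on the total *including* the fee,
    # which mirrors how most payment gateways bill the merchant.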
@property
def verbose_name(self) -> str:
"""
A human-readable name for this payment provider. This should
be short but self-explaining. Good examples include 'Bank transfer'
and 'Credit card via Stripe'.
"""
raise NotImplementedError() # NOQA
@property
def public_name(self) -> str:
"""
A human-readable name for this payment provider to be shown to the public.
This should be short but self-explaining. Good examples include 'Bank transfer'
        and 'Credit card', but 'Credit card via Stripe' might be too explicit. By default,
this is the same as ``verbose_name``
"""
return self.verbose_name
@property
def identifier(self) -> str:
"""
A short and unique identifier for this payment provider.
This should only contain lowercase letters and in most
cases will be the same as your package name.
"""
raise NotImplementedError() # NOQA
@property
def abort_pending_allowed(self) -> bool:
"""
        Whether or not a user can abort a payment in pending state to switch to another
        payment method. This returns ``False`` by default, which is no guarantee that
        aborting a pending payment can never happen; it just hides the frontend button
to avoid users accidentally committing double payments.
"""
return False
@property
def settings_form_fields(self) -> dict:
"""
When the event's administrator visits the event configuration
page, this method is called to return the configuration fields available.
It should therefore return a dictionary where the keys should be (unprefixed)
settings keys and the values should be corresponding Django form fields.
The default implementation returns the appropriate fields for the ``_enabled``,
``_fee_abs``, ``_fee_percent`` and ``_availability_date`` settings mentioned above.
We suggest that you return an ``OrderedDict`` object instead of a dictionary
and make use of the default implementation. Your implementation could look
like this::
@property
def settings_form_fields(self):
return OrderedDict(
list(super().settings_form_fields.items()) + [
('bank_details',
forms.CharField(
widget=forms.Textarea,
label=_('Bank account details'),
required=False
))
]
)
.. WARNING:: It is highly discouraged to alter the ``_enabled`` field of the default
implementation.
"""
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
d = OrderedDict([
('_enabled',
forms.BooleanField(
label=_('Enable payment method'),
required=False,
)),
('_availability_date',
RelativeDateField(
label=_('Available until'),
help_text=_('Users will not be able to choose this payment provider after the given date.'),
required=False,
)),
('_invoice_text',
I18nFormField(
label=_('Text on invoices'),
help_text=_('Will be printed just below the payment figures and above the closing text on invoices. '
'This will only be used if the invoice is generated before the order is paid. If the '
'invoice is generated later, it will show a text stating that it has already been paid.'),
required=False,
widget=I18nTextarea,
widget_kwargs={'attrs': {'rows': '2'}}
)),
('_total_min',
forms.DecimalField(
label=_('Minimum order total'),
help_text=_('This payment will be available only if the order total is equal to or exceeds the given '
'value. The order total for this purpose may be computed without taking the fees imposed '
'by this payment method into account.'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_total_max',
forms.DecimalField(
label=_('Maximum order total'),
help_text=_('This payment will be available only if the order total is equal to or below the given '
'value. The order total for this purpose may be computed without taking the fees imposed '
'by this payment method into account.'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_fee_abs',
forms.DecimalField(
label=_('Additional fee'),
help_text=_('Absolute value'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_fee_percent',
forms.DecimalField(
label=_('Additional fee'),
help_text=_('Percentage of the order total.'),
localize=True,
required=False,
)),
('_fee_reverse_calc',
forms.BooleanField(
label=_('Calculate the fee from the total value including the fee.'),
                 help_text=_('We recommend enabling this if you want your users to pay the payment fees of your '
'payment provider. <a href="{docs_url}" target="_blank" rel="noopener">Click here '
'for detailed information on what this does.</a> Don\'t forget to set the correct fees '
'above!').format(docs_url='https://docs.pretix.eu/en/latest/user/payments/fees.html'),
required=False
)),
('_restricted_countries',
forms.MultipleChoiceField(
label=_('Restrict to countries'),
choices=Countries(),
help_text=_('Only allow choosing this payment provider for invoice addresses in the selected '
'countries. If you don\'t select any country, all countries are allowed. This is only '
'enabled if the invoice address is required.'),
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
required=False,
disabled=not self.event.settings.invoice_address_required
)),
])
d['_restricted_countries']._as_type = list
return d
def settings_form_clean(self, cleaned_data):
"""
Overriding this method allows you to inject custom validation into the settings form.
:param cleaned_data: Form data as per previous validations.
:return: Please return the modified cleaned_data
"""
return cleaned_data
def settings_content_render(self, request: HttpRequest) -> str:
"""
When the event's administrator visits the event configuration
page, this method is called. It may return HTML containing additional information
that is displayed below the form fields configured in ``settings_form_fields``.
"""
return ""
def render_invoice_text(self, order: Order, payment: OrderPayment) -> str:
"""
This is called when an invoice for an order with this payment provider is generated.
The default implementation returns the content of the _invoice_text configuration
variable (an I18nString), or an empty string if unconfigured. For paid orders, the
default implementation always renders a string stating that the invoice is already paid.
"""
if order.status == Order.STATUS_PAID:
return pgettext_lazy('invoice', 'The payment for this invoice has already been received.')
return self.settings.get('_invoice_text', as_type=LazyI18nString, default='')
@property
def payment_form_fields(self) -> dict:
"""
This is used by the default implementation of :py:meth:`payment_form`.
It should return an object similar to :py:attr:`settings_form_fields`.
The default implementation returns an empty dictionary.
"""
return {}
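    # Illustrative sketch (not part of pretix; the field name is made up): a provider
    # that needs an IBAN during checkout could override this property like so::
    #
    #     @property
    #     def payment_form_fields(self):
    #         return OrderedDict([
    #             ('iban', forms.CharField(label=_('IBAN'), required=True)),
    #         ])
    #
    # The returned fields are rendered by the default ``payment_form_render``.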
def payment_form(self, request: HttpRequest) -> Form:
"""
This is called by the default implementation of :py:meth:`payment_form_render`
to obtain the form that is displayed to the user during the checkout
process. The default implementation constructs the form using
:py:attr:`payment_form_fields` and sets appropriate prefixes for the form
        and all fields and fills the form with data from the user's session.
If you overwrite this, we strongly suggest that you inherit from
``PaymentProviderForm`` (from this module) that handles some nasty issues about
required fields for you.
"""
form = PaymentProviderForm(
data=(request.POST if request.method == 'POST' and request.POST.get("payment") == self.identifier else None),
prefix='payment_%s' % self.identifier,
initial={
k.replace('payment_%s_' % self.identifier, ''): v
for k, v in request.session.items()
if k.startswith('payment_%s_' % self.identifier)
}
)
form.fields = self.payment_form_fields
for k, v in form.fields.items():
v._required = v.required
v.required = False
v.widget.is_required = False
return form
def _is_still_available(self, now_dt=None, cart_id=None, order=None):
now_dt = now_dt or now()
tz = pytz.timezone(self.event.settings.timezone)
availability_date = self.settings.get('_availability_date', as_type=RelativeDateWrapper)
if availability_date:
if self.event.has_subevents and cart_id:
availability_date = min([
availability_date.datetime(se).date()
for se in self.event.subevents.filter(
id__in=CartPosition.objects.filter(
cart_id=cart_id, event=self.event
).values_list('subevent', flat=True)
)
])
elif self.event.has_subevents and order:
availability_date = min([
availability_date.datetime(se).date()
for se in self.event.subevents.filter(
id__in=order.positions.values_list('subevent', flat=True)
)
])
elif self.event.has_subevents:
logger.error('Payment provider is not subevent-ready.')
return False
else:
availability_date = availability_date.datetime(self.event).date()
return availability_date >= now_dt.astimezone(tz).date()
return True
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
"""
You can use this method to disable this payment provider for certain groups
of users, products or other criteria. If this method returns ``False``, the
user will not be able to select this payment method. This will only be called
during checkout, not on retrying.
The default implementation checks for the _availability_date setting to be either unset or in the future
        and for the _total_max and _total_min requirements to be met. It also checks the ``_restricted_countries``
setting.
:param total: The total value without the payment method fee, after taxes.
.. versionchanged:: 1.17.0
The ``total`` parameter has been added. For backwards compatibility, this method is called again
without this parameter if it raises a ``TypeError`` on first try.
"""
timing = self._is_still_available(cart_id=get_or_create_cart_id(request))
pricing = True
if (self.settings._total_max is not None or self.settings._total_min is not None) and total is None:
raise ImproperlyConfigured('This payment provider does not support maximum or minimum amounts.')
if self.settings._total_max is not None:
pricing = pricing and total <= Decimal(self.settings._total_max)
if self.settings._total_min is not None:
pricing = pricing and total >= Decimal(self.settings._total_min)
def get_invoice_address():
if not hasattr(request, '_checkout_flow_invoice_address'):
cs = cart_session(request)
iapk = cs.get('invoice_address')
if not iapk:
request._checkout_flow_invoice_address = InvoiceAddress()
else:
try:
request._checkout_flow_invoice_address = InvoiceAddress.objects.get(pk=iapk, order__isnull=True)
except InvoiceAddress.DoesNotExist:
request._checkout_flow_invoice_address = InvoiceAddress()
return request._checkout_flow_invoice_address
if self.event.settings.invoice_address_required:
restricted_countries = self.settings.get('_restricted_countries', as_type=list)
if restricted_countries:
ia = get_invoice_address()
if str(ia.country) not in restricted_countries:
return False
return timing and pricing
def payment_form_render(self, request: HttpRequest, total: Decimal) -> str:
"""
When the user selects this provider as their preferred payment method,
they will be shown the HTML you return from this method.
The default implementation will call :py:meth:`payment_form`
and render the returned form. If your payment method doesn't require
the user to fill out form fields, you should just return a paragraph
of explanatory text.
"""
form = self.payment_form(request)
template = get_template('pretixpresale/event/checkout_payment_form_default.html')
ctx = {'request': request, 'form': form}
return template.render(ctx)
def checkout_confirm_render(self, request) -> str:
"""
If the user has successfully filled in their payment data, they will be redirected
to a confirmation page which lists all details of their order for a final review.
This method should return the HTML which should be displayed inside the
'Payment' box on this page.
In most cases, this should include a short summary of the user's input and
a short explanation on how the payment process will continue.
"""
raise NotImplementedError() # NOQA
def payment_pending_render(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
Render customer-facing instructions on how to proceed with a pending payment
:return: HTML
"""
return ""
def checkout_prepare(self, request: HttpRequest, cart: Dict[str, Any]) -> Union[bool, str]:
"""
Will be called after the user selects this provider as their payment method.
If you provided a form to the user to enter payment data, this method should
at least store the user's input into their session.
This method should return ``False`` if the user's input was invalid, ``True``
if the input was valid and the frontend should continue with default behavior
or a string containing a URL if the user should be redirected somewhere else.
On errors, you should use Django's message framework to display an error message
to the user (or the normal form validation error messages).
The default implementation stores the input into the form returned by
:py:meth:`payment_form` in the user's session.
If your payment method requires you to redirect the user to an external provider,
this might be the place to do so.
.. IMPORTANT:: If this is called, the user has not yet confirmed their order.
You may NOT do anything which actually moves money.
:param cart: This dictionary contains at least the following keys:
positions:
A list of ``CartPosition`` objects that are annotated with the special
attributes ``count`` and ``total`` because multiple objects of the
same content are grouped into one.
raw:
                The raw list of ``CartPosition`` objects in the user's cart
total:
The overall total *including* the fee for the payment method.
payment_fee:
The fee for the payment method.
"""
form = self.payment_form(request)
if form.is_valid():
for k, v in form.cleaned_data.items():
request.session['payment_%s_%s' % (self.identifier, k)] = v
return True
else:
return False
def payment_is_valid_session(self, request: HttpRequest) -> bool:
"""
This is called at the time the user tries to place the order. It should return
``True`` if the user's session is valid and all data your payment provider requires
in future steps is present.
"""
raise NotImplementedError() # NOQA
def execute_payment(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
After the user has confirmed their purchase, this method will be called to complete
the payment process. This is the place to actually move the money if applicable.
You will be passed an :py:class:`pretix.base.models.OrderPayment` object that contains
the amount of money that should be paid.
If you need any special behavior, you can return a string
containing the URL the user will be redirected to. If you are done with your process
you should return the user to the order's detail page.
        If the payment is completed, you should call ``payment.confirm()``. Please note that this might
raise a ``Quota.QuotaExceededException`` if (and only if) the payment term of this order is over and
some of the items are sold out. You should use the exception message to display a meaningful error
to the user.
The default implementation just returns ``None`` and therefore leaves the
order unpaid. The user will be redirected to the order's detail page by default.
On errors, you should raise a ``PaymentException``.
        :param request: The HTTP request
:param payment: An ``OrderPayment`` instance
"""
return None
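    # Illustrative sketch (an assumption, not prescribed by pretix itself): a provider
    # that settles the payment immediately could implement this method much like the
    # built-in ``FreeOrderProvider`` further down in this module::
    #
    #     def execute_payment(self, request, payment):
    #         try:
    #             payment.confirm()
    #         except Quota.QuotaExceededException as e:
    #             raise PaymentException(str(e))
    #
    # Providers that redirect to an external gateway would instead return the redirect
    # URL here and confirm the payment later, e.g. from a webhook view.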
def order_pending_mail_render(self, order: Order, payment: OrderPayment) -> str:
"""
After the user has submitted their order, they will receive a confirmation
email. You can return a string from this method if you want to add additional
information to this email.
:param order: The order object
:param payment: The payment object
"""
return ""
def order_change_allowed(self, order: Order) -> bool:
"""
Will be called to check whether it is allowed to change the payment method of
an order to this one.
The default implementation checks for the _availability_date setting to be either unset or in the future,
as well as for the _total_max, _total_min and _restricted_countries settings.
:param order: The order object
"""
ps = order.pending_sum
if self.settings._total_max is not None and ps > Decimal(self.settings._total_max):
return False
if self.settings._total_min is not None and ps < Decimal(self.settings._total_min):
return False
restricted_countries = self.settings.get('_restricted_countries', as_type=list)
if restricted_countries:
try:
ia = order.invoice_address
except InvoiceAddress.DoesNotExist:
return True
else:
if str(ia.country) not in restricted_countries:
return False
return self._is_still_available(order=order)
def payment_prepare(self, request: HttpRequest, payment: OrderPayment) -> Union[bool, str]:
"""
Will be called if the user retries to pay an unpaid order (after the user filled in
e.g. the form returned by :py:meth:`payment_form`) or if the user changes the payment
method.
It should return and report errors the same way as :py:meth:`checkout_prepare`, but
receives an ``Order`` object instead of a cart object.
Note: The ``Order`` object given to this method might be different from the version
        stored in the database as its total will already contain the payment fee for the
new payment method.
"""
form = self.payment_form(request)
if form.is_valid():
for k, v in form.cleaned_data.items():
request.session['payment_%s_%s' % (self.identifier, k)] = v
return True
else:
return False
def payment_control_render(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
Will be called if the *event administrator* views the details of a payment.
It should return HTML code containing information regarding the current payment
status and, if applicable, next steps.
The default implementation returns the verbose name of the payment provider.
        :param payment: The payment object
"""
return ''
def payment_refund_supported(self, payment: OrderPayment) -> bool:
"""
Will be called to check if the provider supports automatic refunding for this
payment.
"""
return False
def payment_partial_refund_supported(self, payment: OrderPayment) -> bool:
"""
Will be called to check if the provider supports automatic partial refunding for this
payment.
"""
return False
def execute_refund(self, refund: OrderRefund):
"""
        Will be called to execute a refund. Note that refunds have an amount property and can be partial.
This should transfer the money back (if possible).
On success, you should call ``refund.done()``.
On failure, you should raise a PaymentException.
"""
raise PaymentException(_('Automatic refunds are not supported by this payment provider.'))
def shred_payment_info(self, obj: Union[OrderPayment, OrderRefund]):
"""
When personal data is removed from an event, this method is called to scrub payment-related data
from a payment or refund. By default, it removes all info from the ``info`` attribute. You can override
this behavior if you want to retain attributes that are not personal data on their own, i.e. a
reference to a transaction in an external system. You can also override this to scrub more data, e.g.
data from external sources that is saved in LogEntry objects or other places.
        :param obj: An ``OrderPayment`` or ``OrderRefund`` object
"""
obj.info = '{}'
obj.save(update_fields=['info'])
def api_payment_details(self, payment: OrderPayment):
"""
Will be called to populate the ``details`` parameter of the payment in the REST API.
:param payment: The payment in question.
:return: A serializable dictionary
"""
return {}
class PaymentException(Exception):
pass
class FreeOrderProvider(BasePaymentProvider):
is_implicit = True
is_enabled = True
identifier = "free"
def checkout_confirm_render(self, request: HttpRequest) -> str:
return _("No payment is required as this order only includes products which are free of charge.")
def payment_is_valid_session(self, request: HttpRequest) -> bool:
return True
@property
def verbose_name(self) -> str:
return _("Free of charge")
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm(send_mail=False)
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
from .services.cart import get_fees
total = get_cart_total(request)
total += sum([f.value for f in get_fees(self.event, request, total, None, None)])
return total == 0
def order_change_allowed(self, order: Order) -> bool:
return False
class BoxOfficeProvider(BasePaymentProvider):
is_implicit = True
is_enabled = True
identifier = "boxoffice"
verbose_name = _("Box office")
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm(send_mail=False)
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
return False
def order_change_allowed(self, order: Order) -> bool:
return False
def api_payment_details(self, payment: OrderPayment):
return {
"pos_id": payment.info_data.get('pos_id', None),
"receipt_id": payment.info_data.get('receipt_id', None),
}
def payment_control_render(self, request, payment) -> str:
if not payment.info:
return
payment_info = json.loads(payment.info)
template = get_template('pretixcontrol/boxoffice/payment.html')
ctx = {
'request': request,
'event': self.event,
'settings': self.settings,
'payment_info': payment_info,
'payment': payment,
'provider': self,
}
return template.render(ctx)
class ManualPayment(BasePaymentProvider):
identifier = 'manual'
verbose_name = _('Manual payment')
@property
def test_mode_message(self):
return _('In test mode, you can just manually mark this order as paid in the backend after it has been '
'created.')
@property
def is_implicit(self):
return 'pretix.plugins.manualpayment' not in self.event.plugins
def is_allowed(self, request: HttpRequest, total: Decimal=None):
return 'pretix.plugins.manualpayment' in self.event.plugins and super().is_allowed(request, total)
def order_change_allowed(self, order: Order):
return 'pretix.plugins.manualpayment' in self.event.plugins and super().order_change_allowed(order)
@property
def public_name(self):
return str(self.settings.get('public_name', as_type=LazyI18nString))
@property
def settings_form_fields(self):
d = OrderedDict(
[
('public_name', I18nFormField(
label=_('Payment method name'),
widget=I18nTextInput,
)),
('checkout_description', I18nFormField(
label=_('Payment process description during checkout'),
help_text=_('This text will be shown during checkout when the user selects this payment method. '
                                'It should give a short explanation of this payment method.'),
widget=I18nTextarea,
)),
('email_instructions', I18nFormField(
label=_('Payment process description in order confirmation emails'),
help_text=_('This text will be included for the {payment_info} placeholder in order confirmation '
                                'mails. It should instruct the user on how to proceed with the payment. You can use '
'the placeholders {order}, {total}, {currency} and {total_with_currency}'),
widget=I18nTextarea,
validators=[PlaceholderValidator(['{order}', '{total}', '{currency}', '{total_with_currency}'])],
)),
('pending_description', I18nFormField(
label=_('Payment process description for pending orders'),
help_text=_('This text will be shown on the order confirmation page for pending orders. '
                                'It should instruct the user on how to proceed with the payment. You can use '
'the placeholders {order}, {total}, {currency} and {total_with_currency}'),
widget=I18nTextarea,
validators=[PlaceholderValidator(['{order}', '{total}', '{currency}', '{total_with_currency}'])],
)),
] + list(super().settings_form_fields.items())
)
d.move_to_end('_enabled', last=False)
return d
def payment_form_render(self, request) -> str:
return rich_text(
str(self.settings.get('checkout_description', as_type=LazyI18nString))
)
def checkout_prepare(self, request, total):
return True
def payment_is_valid_session(self, request):
return True
def checkout_confirm_render(self, request):
return self.payment_form_render(request)
def format_map(self, order):
return {
'order': order.code,
'total': order.total,
'currency': self.event.currency,
'total_with_currency': money_filter(order.total, self.event.currency)
}
def order_pending_mail_render(self, order) -> str:
msg = str(self.settings.get('email_instructions', as_type=LazyI18nString)).format_map(self.format_map(order))
return msg
def payment_pending_render(self, request, payment) -> str:
return rich_text(
str(self.settings.get('pending_description', as_type=LazyI18nString)).format_map(self.format_map(payment.order))
)
class OffsettingProvider(BasePaymentProvider):
is_enabled = True
identifier = "offsetting"
verbose_name = _("Offsetting")
is_implicit = True
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm()
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
def execute_refund(self, refund: OrderRefund):
code = refund.info_data['orders'][0]
try:
order = Order.objects.get(code=code, event__organizer=self.event.organizer)
except Order.DoesNotExist:
raise PaymentException(_('You entered an order that could not be found.'))
p = order.payments.create(
state=OrderPayment.PAYMENT_STATE_PENDING,
amount=refund.amount,
payment_date=now(),
provider='offsetting',
info=json.dumps({'orders': [refund.order.code]})
)
p.confirm()
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
return False
def order_change_allowed(self, order: Order) -> bool:
return False
def api_payment_details(self, payment: OrderPayment):
return {
"orders": payment.info_data.get('orders', []),
}
def payment_control_render(self, request: HttpRequest, payment: OrderPayment) -> str:
        return _('Balanced against orders: %s') % ', '.join(payment.info_data['orders'])
@receiver(register_payment_providers, dispatch_uid="payment_free")
def register_payment_provider(sender, **kwargs):
return [FreeOrderProvider, BoxOfficeProvider, OffsettingProvider, ManualPayment]
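# Illustrative example (an assumption, not part of this module): a pretix plugin that
# ships its own provider would hook into the same signal, e.g.::
#
#     @receiver(register_payment_providers, dispatch_uid="payment_myprovider")
#     def register_my_provider(sender, **kwargs):
#         return [MyBankTransferProvider]
#
# where ``MyBankTransferProvider`` is a hypothetical ``BasePaymentProvider`` subclass.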
|
the-stack_0_19011 | import tensorflow as tf
from tfoptests.persistor import TensorFlowPersistor
tf.set_random_seed(1)
def test_simple_while():
i1 = tf.Variable(tf.constant(0), name='loop_var')
c = lambda i: tf.less(i, 10)
b = lambda i: tf.add(i, 1)
r = tf.while_loop(c, b, [i1])
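    # The loop increments i from its initial value 0 while i < 10, so r evaluates to 10
    # once the graph is run.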
out_node = tf.identity(r, name="output")
predictions = [out_node]
# Run and persist
tfp = TensorFlowPersistor(save_dir="simple_while")
tfp.set_placeholders([]) \
.set_output_tensors(predictions) \
.set_test_data({}) \
.build_save_frozen_graph()
if __name__ == '__main__':
test_simple_while()
|
the-stack_0_19022 | # coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Most of this work is copyright (C) 2013-2019 David R. MacIver
# ([email protected]), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import absolute_import, division, print_function
import unittest
from functools import partial
import django.db.models as dm
import django.forms as df
import django.test as dt
from django.core.exceptions import ValidationError
from django.db import IntegrityError
import hypothesis._strategies as st
from hypothesis import reject
from hypothesis.errors import InvalidArgument
from hypothesis.extra.django._fields import from_field
from hypothesis.utils.conventions import infer
if False:
from datetime import tzinfo # noqa
from typing import Any, Type, Optional, List, Text, Callable, Union # noqa
from hypothesis.utils.conventions import InferType # noqa
class HypothesisTestCase(object):
def setup_example(self):
self._pre_setup()
def teardown_example(self, example):
self._post_teardown()
def __call__(self, result=None):
testMethod = getattr(self, self._testMethodName)
if getattr(testMethod, u"is_hypothesis_test", False):
return unittest.TestCase.__call__(self, result)
else:
return dt.SimpleTestCase.__call__(self, result)
class TestCase(HypothesisTestCase, dt.TestCase):
pass
class TransactionTestCase(HypothesisTestCase, dt.TransactionTestCase):
pass
@st.defines_strategy
def from_model(
model, # type: Type[dm.Model]
**field_strategies # type: Union[st.SearchStrategy[Any], InferType]
):
# type: (...) -> st.SearchStrategy[Any]
"""Return a strategy for examples of ``model``.
.. warning::
Hypothesis creates saved models. This will run inside your testing
transaction when using the test runner, but if you use the dev console
this will leave debris in your database.
    ``model`` must be a subclass of :class:`~django:django.db.models.Model`.
Strategies for fields may be passed as keyword arguments, for example
``is_staff=st.just(False)``.
    Hypothesis can often infer a strategy based on the field type and validators,
and will attempt to do so for any required fields. No strategy will be
inferred for an :class:`~django:django.db.models.AutoField`, nullable field,
foreign key, or field for which a keyword
argument is passed to ``from_model()``. For example,
a Shop type with a foreign key to Company could be generated with::
shop_strategy = from_model(Shop, company=from_model(Company))
Like for :func:`~hypothesis.strategies.builds`, you can pass
:obj:`~hypothesis.infer` as a keyword argument to infer a strategy for
a field which has a default value instead of using the default.
"""
if not issubclass(model, dm.Model):
raise InvalidArgument("model=%r must be a subtype of Model" % (model,))
fields_by_name = {f.name: f for f in model._meta.concrete_fields}
for name, value in sorted(field_strategies.items()):
if value is infer:
field_strategies[name] = from_field(fields_by_name[name])
for name, field in sorted(fields_by_name.items()):
if (
name not in field_strategies
and not field.auto_created
and field.default is dm.fields.NOT_PROVIDED
):
field_strategies[name] = from_field(field)
for field in field_strategies:
if model._meta.get_field(field).primary_key:
# The primary key is generated as part of the strategy. We
# want to find any existing row with this primary key and
# overwrite its contents.
kwargs = {field: field_strategies.pop(field)}
kwargs["defaults"] = st.fixed_dictionaries(field_strategies) # type: ignore
return _models_impl(st.builds(model.objects.update_or_create, **kwargs))
# The primary key is not generated as part of the strategy, so we
# just match against any row that has the same value for all
# fields.
return _models_impl(st.builds(model.objects.get_or_create, **field_strategies))
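# Illustrative usage sketch (``Company`` is a hypothetical model; this assumes the
# public re-exports in ``hypothesis.extra.django``)::
#
#     from hypothesis import given
#     from hypothesis.extra.django import TestCase, from_model
#
#     class TestCompany(TestCase):
#         @given(from_model(Company))
#         def test_company_is_saved(self, company):
#             assert company.pk is not None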
@st.composite
def _models_impl(draw, strat):
"""Handle the nasty part of drawing a value for models()"""
try:
return draw(strat)[0]
except IntegrityError:
reject()
@st.defines_strategy
def from_form(
    form,  # type: Type[df.Form]
form_kwargs=None, # type: dict
**field_strategies # type: Union[st.SearchStrategy[Any], InferType]
):
# type: (...) -> st.SearchStrategy[Any]
"""Return a strategy for examples of ``form``.
    ``form`` must be a subclass of :class:`~django:django.forms.Form`.
Strategies for fields may be passed as keyword arguments, for example
``is_staff=st.just(False)``.
    Hypothesis can often infer a strategy based on the field type and validators,
and will attempt to do so for any required fields. No strategy will be
inferred for a disabled field or field for which a keyword argument
is passed to ``from_form()``.
This function uses the fields of an unbound ``form`` instance to determine
field strategies, any keyword arguments needed to instantiate the unbound
``form`` instance can be passed into ``from_form()`` as a dict with the
keyword ``form_kwargs``. E.g.::
shop_strategy = from_form(Shop, form_kwargs={"company_id": 5})
Like for :func:`~hypothesis.strategies.builds`, you can pass
:obj:`~hypothesis.infer` as a keyword argument to infer a strategy for
a field which has a default value instead of using the default.
"""
# currently unsupported:
# ComboField
# FilePathField
# FileField
# ImageField
form_kwargs = form_kwargs or {}
if not issubclass(form, df.BaseForm):
raise InvalidArgument("form=%r must be a subtype of Form" % (form,))
# Forms are a little bit different from models. Model classes have
# all their fields defined, whereas forms may have different fields
# per-instance. So, we ought to instantiate the form and get the
# fields from the instance, thus we need to accept the kwargs for
# instantiation as well as the explicitly defined strategies
unbound_form = form(**form_kwargs)
fields_by_name = {}
for name, field in unbound_form.fields.items():
if isinstance(field, df.MultiValueField):
# PS: So this is a little strange, but MultiValueFields must
# have their form data encoded in a particular way for the
# values to actually be picked up by the widget instances'
# ``value_from_datadict``.
# E.g. if a MultiValueField named 'mv_field' has 3
# sub-fields then the ``value_from_datadict`` will look for
# 'mv_field_0', 'mv_field_1', and 'mv_field_2'. Here I'm
# decomposing the individual sub-fields into the names that
# the form validation process expects
for i, _field in enumerate(field.fields):
fields_by_name["%s_%d" % (name, i)] = _field
else:
fields_by_name[name] = field
for name, value in sorted(field_strategies.items()):
if value is infer:
field_strategies[name] = from_field(fields_by_name[name])
for name, field in sorted(fields_by_name.items()):
if name not in field_strategies and not field.disabled:
field_strategies[name] = from_field(field)
return _forms_impl(
st.builds(
partial(form, **form_kwargs),
data=st.fixed_dictionaries(field_strategies), # type: ignore
)
)
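# Illustrative usage sketch (``ShopForm`` and its kwargs are hypothetical): the drawn
# value is a bound form instance built from generated field data::
#
#     @given(from_form(ShopForm, form_kwargs={"company_id": 5}))
#     def test_generated_form_is_bound(self, form):
#         assert form.is_bound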
@st.composite
def _forms_impl(draw, strat):
"""Handle the nasty part of drawing a value for from_form()"""
try:
return draw(strat)
except ValidationError:
reject()
|
the-stack_0_19024 |
# The MIT License (MIT)
#
# Author: Baozhu Zuo ([email protected])
#
# Copyright (C) 2020 Seeed Technology Co.,Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
import os
import sys
import json
import demjson
import stat
from pip._internal.cli import cmdoptions
from functools import partial
from aip.parser import parser
from aip.logger import log
from optparse import Option
from pip._internal.commands.list import tabulate
from pip._internal.utils.misc import write_output
board = partial(
Option,
'-b', '--board',
dest='board',
action='store',
default="",
help='The name of the ArduPy board.',
) # type: Callable[..., Option]
verbose = partial(
Option,
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Output compilation information'
) # type: Callable[..., Option]
if os.name == 'nt': # sys.platform == 'win32':
from serial.tools.list_ports_windows import comports
elif os.name == 'posix':
from serial.tools.list_ports_posix import comports
#~ elif os.name == 'java':
else:
raise ImportError("Sorry: no implementation for your platform ('{}') available".format(os.name))
def readonly_handler(func, path, execinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def dealGenericOptions():
cmdoptions.general_group['options'].insert(6, board)
cmdoptions.general_group['options'].insert(1, verbose)
cmdoptions.general_group['options'].remove(cmdoptions.isolated_mode)
cmdoptions.general_group['options'].remove(cmdoptions.verbose)
cmdoptions.general_group['options'].remove(cmdoptions.no_python_version_warning)
def windows_full_port_name(portname):
# Helper function to generate proper Windows COM port paths. Apparently
# Windows requires COM ports above 9 to have a special path, where ports below
# 9 are just referred to by COM1, COM2, etc. (wacky!) See this post for
# more info and where this code came from:
# http://eli.thegreenplace.net/2009/07/31/listing-all-serial-ports-on-windows-with-python/
m = re.match("^COM(\d+)$", portname)
if m and int(m.group(1)) < 10:
return portname
else:
return "\\\\.\\{0}".format(portname)
def output_package_listing_columns(data, header):
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
for val in pkg_strings:
write_output(val)
class SerialUtils(object):
def __init__(self):
super().__init__()
if len(parser.boards) == 0:
log.error("Unable to find any ardupy boards, please refer to aip core!")
sys.exit(1)
def getAllPortInfo(self):
return comports(include_links=False)
def listAvailableBoard(self):
list = []
for info in self.getAllPortInfo():
port, desc, hwid = info
ii = hwid.find("VID:PID")
#hwid: USB VID:PID=2886:002D SER=4D68990C5337433838202020FF123244 LOCATION=7-3.1.3:1.
#print(hwid)
if ii != -1:
for b in parser.boards:
(vid, pid) = b["hwids"]["application"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
list.append({"port":port, "desc":desc, "hwid":hwid, "state":False})
(vid, pid) = b["hwids"]["bootloader"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
list.append({"port":port, "desc":desc, "hwid":hwid, "state":True})
return demjson.encode(list)
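    # Illustrative breakdown of the slicing above, using the hwid shown in the comment
    # ("USB VID:PID=2886:002D SER=..."): ii points at "VID:PID", so
    # hwid[ii + 8:ii + 12] == "2886" is the VID and hwid[ii + 13:ii + 17] == "002D" is
    # the PID; both are compared against the hwids declared for each board in
    # parser.boards.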
def getBootloaderBoard(self):
for info in self.getAllPortInfo():
port, desc, hwid = info
ii = hwid.find("VID:PID")
#hwid: USB VID:PID=2886:002D SER=4D68990C5337433838202020FF123244 LOCATION=7-3.1.3:1.
#print(hwid)
if ii != -1:
for b in parser.boards:
(vid, pid) = b["hwids"]["bootloader"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
return port,desc, hwid, True
return None, None, None, None
def getAvailableBoard(self):
for info in self.getAllPortInfo():
port, desc, hwid = info
ii = hwid.find("VID:PID")
#hwid: USB VID:PID=2886:002D SER=4D68990C5337433838202020FF123244 LOCATION=7-3.1.3:1.
#print(hwid)
if ii != -1:
for b in parser.boards:
(vid, pid) = b["hwids"]["application"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
return port,desc, hwid, False
(vid, pid) = b["hwids"]["bootloader"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
return port,desc, hwid, True
return None, None, None, None
def listBoard(self):
list = [];
for b in parser.boards:
list.append(b["name"])
return demjson.encode(list)
def listDesignatedBoard(self, designated):
list = []
for info in self.getAllPortInfo():
port, desc, hwid = info
ii = hwid.find("VID:PID")
#hwid: USB VID:PID=2886:002D SER=4D68990C5337433838202020FF123244 LOCATION=7-3.1.3:1.
#print(hwid)
if ii != -1:
for b in parser.boards:
if b["name"] != designated:
continue
(vid, pid) = b["hwids"]["application"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
list.append({"port":port, "desc":desc, "hwid":hwid, "state":False})
(vid, pid) = b["hwids"]["bootloader"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
list.append({"port":port, "desc":desc, "hwid":hwid, "state":True})
return demjson.encode(list)
def getDesignatedBoard(self, designated):
for info in self.getAllPortInfo():
port, desc, hwid = info
ii = hwid.find("VID:PID")
#hwid: USB VID:PID=2886:002D SER=4D68990C5337433838202020FF123244 LOCATION=7-3.1.3:1.
#print(hwid)
if ii != -1:
for b in parser.boards:
if b["name"] != designated:
continue
(vid, pid) = b["hwids"]["application"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
return port,desc, hwid, False
(vid, pid) = b["hwids"]["bootloader"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
return port,desc, hwid, True
        return None, None, None, None
def isBootloaderStatus(self):
return True
def getBoardByPort(self, _port):
for info in self.getAllPortInfo():
port, desc, hwid = info
if _port != port:
continue
ii = hwid.find("VID:PID")
#hwid: USB VID:PID=2886:002D SER=4D68990C5337433838202020FF123244 LOCATION=7-3.1.3:1.
#print(hwid)
if ii != -1:
for b in parser.boards:
(vid, pid) = b["hwids"]["application"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
return (b["name"], b["version"], b["firmware_url"])
(vid, pid) = b["hwids"]["bootloader"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
return (b["name"], b["version"], b["firmware_url"])
return ""
def getBoardIdByPort(self, _port):
for info in self.getAllPortInfo():
port, desc, hwid = info
if _port != port:
continue
ii = hwid.find("VID:PID")
#hwid: USB VID:PID=2886:002D SER=4D68990C5337433838202020FF123244 LOCATION=7-3.1.3:1.
#print(hwid)
if ii != -1:
for b in parser.boards:
(vid, pid) = b["hwids"]["application"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
return (b["id"])
(vid, pid) = b["hwids"]["bootloader"]
if vid == hwid[ii + 8: ii + 8 + 4] and pid == hwid[ii + 8 + 5 :ii + 8 + 5 + 4 ]:
#print(port,desc, hwid)
return (b["id"])
return ""
def getBoardIdByName(self, _name):
for b in parser.boards:
if b["name"] == _name:
return b["id"]
return -1
# def getFirmwareByBoard(self, Board):
# for b in parser.boards:
# (_vid, _pid) = b["application"]
# if (_vid, _pid) == (vid, pid):
# return (b["version"], b["Firmware_url"])
# (_vid, _pid) = b["bootloader"]
# if (_vid, _pid) == (vid, pid):
# return (b["version"], b["Firmware_url"])
# return ""
if __name__ == '__main__':
ser = SerialUtils()
for info in ser.getAllPortInfo():
port, desc, hwid = info
print("port: {}, desc: {}, hwid: {}".format(port, desc, hwid))
print(ser.getAvailableBoard())
print(ser.getDesignatedBoard("wio terminal"))
|
the-stack_0_19025 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 28 12:54:35 2020
@author: rjovelin
"""
import argparse
import os
import json
import pysam
import time
from smmips.smmips_libs import align_fastqs, assign_reads_to_smmips, create_tree, read_panel, \
count_alleles_across_panel, write_table_variants, parse_cosmic, get_genomic_positions, remove_bam_extension, \
sort_index_bam, merge_smmip_counts, merge_stats, merge_bams
def align_reads(outdir, fastq1, fastq2, reference, bwa, prefix, remove):
'''
    (str | None, str, str, str, str, str, bool) -> None
Parameters
----------
- outdir (str): Path to directory where directory structure is created
- fastq1 (str): Path to Fastq1
- fastq2 (str): Path to Fastq2
- reference (str): Path to the reference genome
- bwa (str): Path to the bwa script
- prefix (str): Prefix used to name the output files
- remove (bool): Remove intermediate files if True
Align fastq1 and fastq2 using bwa mem into coordinate-sorted and indexed bam in outdir/out.
'''
# use current directory if outdir not provided
if outdir is None:
outdir = os.getcwd()
else:
outdir = outdir
# align fastqs
prefix = os.path.basename(prefix)
align_fastqs(fastq1, fastq2, reference, outdir, bwa, prefix, remove)
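# Example invocation of the "align" sub-command (paths and sample name are hypothetical;
# the script name follows prog='smmip.py' declared in main()):
#
#   python smmip.py align -f1 sample_R1.fastq.gz -f2 sample_R2.fastq.gz \
#       -r /path/to/reference.fa -bwa /path/to/bwa -pf sample_A --remove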
def assign_smmips(outdir, sortedbam, prefix, remove, panel, upstream_nucleotides,
umi_length, max_subs, match, mismatch, gap_opening, gap_extension,
alignment_overlap_threshold, matches_threshold, chromosome, start, end, region):
'''
    (str | None, str, str, bool, str, int, int, int, float | int, float | int, float | int, float | int, float | int, float | int, str | None, int | None, int | None, str | None) -> None
Parameters
----------
- outdir (str): Path to directory where directory structure is created
- sortedbam (str): Coordinate-sorted bam with all reads
- prefix (str): Prefix used to name the output files
- remove (bool): Remove intermediate files if True
- panel (str): Path to file with smmip information
- upstream_nucleotides (int): Maximum number of nucleotides upstream the UMI sequence
- umi_length (int): Length of the UMI
- max_subs (int): Maximum number of substitutions allowed in the probe sequence
- match (float or int): Score of identical characters
- mismatch (float or int): Score of non-identical characters
- gap_opening (float or int): Score for opening a gap
- gap_extension (float or int): Score for extending an open gap
- alignment_overlap_threshold (float or int): Cut-off value for the length of the de-gapped overlap between read1 and read2
- matches_threshold (float or int): Cut-off value for the number of matching positions within the de-gapped overlap between read1 and read2
- chromosome (str | None): Specifies the genomic region in the alignment file where reads are mapped.
Examine reads on chromosome if used and on all chromosomes if None
Chromosome format must match format in the bam header
- start (int | None): Start position of region on chromosome if defined
- end (int | None): End position of region on chromosome if defined
- region (str | None): Chromosomal region "chrN.start.end". Dot-separated string with chromosome, start and end.
Overrides chromosome, start and end parameters if used.
Chromosome, start and end must be defined if region is used
Write assigned reads and empty smmips to 2 separate coordinate-sorted and indexed bams.
Assigned reads are tagged with the smMip name and the extracted UMI sequence.
Also write 2 json files in outdir/stats for QC with counts of total, assigned
and unassigned read along with empty smmips, and read count for each smmip in the panel
'''
# define genomic coordinates
if chromosome is None:
start, end = None, None
if region:
chromosome, start, end = region.split('.')
start = int(start) if start is not None else start
end = int(end) if end is not None else end
start_pos = 'start' if start is None else str(start)
end_pos = 'end' if end is None else str(end)
if chromosome:
genomic_region = '.'.join([chromosome, start_pos, end_pos])
# record time spent smmip assignment
start_time = time.time()
# use current directory if outdir not provided
if outdir is None:
outdir = os.getcwd()
else:
outdir = outdir
# create directory structure within outdir, including outdir if doesn't exist
finaldir, statsdir, aligndir = create_tree(outdir)
    # use the basename of the prefix to name the output files
prefix = os.path.basename(prefix)
# open files for writing
# create AlignmentFile object to read input bam
infile = pysam.AlignmentFile(sortedbam, 'rb')
# create AlignmentFile objects for writing reads
if chromosome is None:
# create a new file, use header from bamfile
assigned_filename = remove_bam_extension(sortedbam) + '.assigned_reads.bam'
# open bam for writing assigned but empty reads
empty_filename = remove_bam_extension(sortedbam) + '.empty_reads.bam'
else:
# create a new file, use header from bamfile
assigned_filename = remove_bam_extension(sortedbam) + '.{0}.temp.assigned_reads.bam'.format(genomic_region)
# open bam for writing assigned but empty reads
empty_filename = remove_bam_extension(sortedbam) + '.{0}.temp.empty_reads.bam'.format(genomic_region)
assigned_file = pysam.AlignmentFile(assigned_filename, 'wb', template=infile)
empty_file = pysam.AlignmentFile(empty_filename, 'wb', template=infile)
# close sortedbam
infile.close()
# assign reads to smmips
metrics, smmip_counts = assign_reads_to_smmips(sortedbam, assigned_file, empty_file, read_panel(panel), upstream_nucleotides, umi_length, max_subs, match, mismatch, gap_opening, gap_extension, alignment_overlap_threshold, matches_threshold, chromosome, start, end)
# close bams
for i in [assigned_file, empty_file]:
i.close()
# sort and index bams
if chromosome is None:
sort_index_bam(assigned_filename, '.assigned_reads.sorted.bam')
sort_index_bam(empty_filename, '.empty_reads.sorted.bam')
else:
sort_index_bam(assigned_filename, '.temp.assigned_reads.sorted.bam')
sort_index_bam(empty_filename, '.temp.empty_reads.sorted.bam')
# remove intermediate files
if remove:
os.remove(assigned_filename)
os.remove(empty_filename)
# record time after smmip assignment and update QC metrics
end_time = time.time()
run_time = round(end_time - start_time, 3)
metrics.update({'run_time': run_time})
# write json to files
if chromosome is None:
statsfile1 = os.path.join(statsdir, '{0}_extraction_metrics.json'.format(prefix))
statsfile2 = os.path.join(statsdir, '{0}_smmip_counts.json'.format(prefix))
else:
statsfile1 = os.path.join(statsdir, '{0}_temp.{1}.extraction_metrics.json'.format(prefix, genomic_region))
statsfile2 = os.path.join(statsdir, '{0}_temp.{1}.smmip_counts.json'.format(prefix, genomic_region))
with open(statsfile1, 'w') as newfile:
json.dump(metrics, newfile, indent=4)
with open(statsfile2, 'w') as newfile:
json.dump(smmip_counts, newfile, indent=4)
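# Example invocation of the "assign" sub-command (paths, sample name and region are
# hypothetical; note that -r here is the chromosomal region in "chrN.start.end" format):
#
#   python smmip.py assign -b sample_A.sorted.bam -pa panel.txt -pf sample_A \
#       -r chr1.1000000.2000000 --remove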
def merge_files(prefix, files, file_type, remove):
'''
(str, list, str, bool) -> None
Merges the files in L into a single stats file or a single bam.
The output bam is indexed and coordinate-sorted
Precondition: All files must be the same type of files and file_type is part of the file name
Parameters
----------
- prefix (str): Prefix used to name the output files
- files (list): List of stats files or bams to be merged
- file_type (str): Type of the files to be merged. Valid options:
- "assigned": merges bams with assigned non-empty smmips
- "empty": merges bams with empty smmips
- "counts": merges stats files with smmip counts
- "extraction": merges stats files with extraction metrics
- remove (bool): Remove intermediate files if True
'''
F = [i for i in files if file_type in i]
if F:
if file_type == 'counts' or file_type == 'extraction':
# make a list of dictionaries with smmip counts
L = []
for i in F:
infile = open(i)
L.append(json.load(infile))
infile.close()
if file_type == 'counts':
statsfile = '{0}_smmip_counts.json'.format(prefix)
merged = merge_smmip_counts(L)
elif file_type == 'extraction':
statsfile = '{0}_extraction_metrics.json'.format(prefix)
merged = merge_stats(L)
with open(statsfile, 'w') as newfile:
json.dump(merged, newfile, indent=4)
elif file_type == 'assigned' or file_type == 'empty':
if file_type == 'assigned':
bam_suffix = '{0}.assigned_reads.bam'
sorted_suffix = '.assigned_reads.sorted.bam'
elif file_type == 'empty':
bam_suffix = '{0}.empty_reads.bam'
sorted_suffix = '.empty_reads.sorted.bam'
bamfile = bam_suffix.format(prefix)
merge_bams(bamfile, F)
# sort and index merged bam
sort_index_bam(bamfile, sorted_suffix)
# remove intermediate files
if remove:
discard = []
if file_type in ['extraction', 'counts']:
if file_type == 'extraction':
discard = [i for i in F if 'temp' in i and 'extraction_metrics.json' in i]
            elif file_type == 'counts':
discard = [i for i in F if 'temp' in i and 'smmip_counts.json' in i]
elif file_type in ['empty', 'assigned']:
if file_type == 'empty':
discard = [i for i in F if 'temp.empty_reads.sorted.bam' in i]
elif file_type == 'assigned':
discard = [i for i in F if 'temp.assigned_reads.sorted.bam' in i]
if discard:
for i in discard:
os.remove(i)
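# Example invocation of the "merge" sub-command (file names are hypothetical but follow
# the "<prefix>_temp.<region>.smmip_counts.json" pattern written by assign_smmips):
#
#   python smmip.py merge -pf sample_A -ft counts \
#       -t sample_A_temp.chr1.1.1000000.smmip_counts.json sample_A_temp.chr2.1.1000000.smmip_counts.json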
# this function generates a table with nucleotide counts. not currently being used.
def count_variants(bamfile, panel, outdir, max_depth, truncate, ignore_orphans,
stepper, prefix, reference, cosmicfile):
'''
    (str, str, str, int, bool, bool, str, str, str, str) -> None
Parameters
----------
- bamfile (str): Path to the coordinate-sorted and indexed bam file with annotated reads with smMIP and UMI tags
- panel (str): Path to panel file with smMIP information
    - outdir (str): Path to the output directory where the directory structure is written
- max_depth (int): Maximum read depth
- truncate: Consider only pileup columns within interval defined by region start and end if True
- ignore_orphans: Ignore orphan reads (paired reads not in proper pair) if True
- stepper: Controls how the iterator advances. Accepted values:
'all': skip reads with following flags: BAM_FUNMAP, BAM_FSECONDARY, BAM_FQCFAIL, BAM_FDUP
'nofilter': uses every single read turning off any filtering
- prefix (str): Prefix used to name the output bam file
- reference (str): Reference genome. Must be the same reference used in panel. Accepted values: 37 or 38
- cosmicfile (str): Cosmic file. Tab separated table of all COSMIC coding
point mutations from targeted and genome wide screens
Write a summary table with nucleotide and indel counts at each unique position of
the target regions in panel.
'''
# use current directory if outdir not provided
    if outdir is None:
outdir = os.getcwd()
else:
outdir = outdir
# create directory structure within outdir, including outdir if doesn't exist
finaldir, statsdir, aligndir = create_tree(outdir)
# get the allele counts at each position across all target regions
Counts = count_alleles_across_panel(bamfile, read_panel(panel), max_depth, truncate, ignore_orphans, stepper)
# get positions at each chromosome with variant information
positions = get_genomic_positions(Counts)
# get cosmic mutation information
mutations = parse_cosmic(reference, cosmicfile, positions)
# write base counts to file
outputfile = os.path.join(finaldir, '{0}_Variant_Counts.txt'.format(prefix))
write_table_variants(Counts, outputfile, mutations)
def main():
'''
main function to run the smmips script
'''
# create main parser
parser = argparse.ArgumentParser(prog='smmip.py', description="A tool to generate QC metrics for smMIP libraries")
subparsers = parser.add_subparsers(help='sub-command help', dest='subparser_name')
# align reads
al_parser = subparsers.add_parser('align', help='Align reads to reference genome')
al_parser.add_argument('-f1', '--Fastq1', dest='fastq1', help = 'Path to Fastq1', required=True)
al_parser.add_argument('-f2', '--Fastq2', dest='fastq2', help = 'Path to Fastq2', required=True)
    al_parser.add_argument('-o', '--Outdir', dest='outdir', help = 'Path to output directory. Current directory if not provided')
al_parser.add_argument('-r', '--Reference', dest='reference', help = 'Path to the reference genome', required=True)
al_parser.add_argument('-bwa', '--Bwa', dest='bwa', help = 'Path to the bwa script', required=True)
al_parser.add_argument('--remove', dest='remove', action='store_true', help = 'Remove intermediate files. Default is False, becomes True if used')
al_parser.add_argument('-pf', '--Prefix', dest='prefix', help = 'Prefix used to name the output files', required=True)
# assign smMips to reads
a_parser = subparsers.add_parser('assign', help='Extract UMIs from reads and assign reads to smmips')
a_parser.add_argument('-pa', '--Panel', dest='panel', help = 'Path to panel file with smmip information', required=True)
    a_parser.add_argument('-o', '--Outdir', dest='outdir', help = 'Path to output directory. Current directory if not provided')
a_parser.add_argument('-b', '--BamFile', dest='sortedbam', help = 'Coordinate-sorted and indexed bam with all reads', required=True)
a_parser.add_argument('--remove', dest='remove', action='store_true', help = 'Remove intermediate files. Default is False, becomes True if used')
a_parser.add_argument('-pf', '--Prefix', dest='prefix', help = 'Prefix used to name the output files', required=True)
a_parser.add_argument('-ms', '--Subs', dest='max_subs', type=int, default=0, help = 'Maximum number of substitutions allowed in the probe sequence. Default is 0')
a_parser.add_argument('-up', '--Upstream', dest='upstream_nucleotides', type=int, default=0, help = 'Maximum number of nucleotides upstream the UMI sequence. Default is 0')
a_parser.add_argument('-umi', '--Umi', dest='umi_length', type=int, default=4, help = 'Length of the UMI sequence in bp. Default is 4')
a_parser.add_argument('-m', '--Matches', dest='match', type=float, default=2, \
help = 'Score of identical characters during local alignment. Used only if report is True. Default is 2')
a_parser.add_argument('-mm', '--Mismatches', dest='mismatch', type=float, default=-1, \
help = 'Score of non-identical characters during local alignment. Used only if report is True. Default is -1')
a_parser.add_argument('-go', '--Gap_opening', dest='gap_opening', type=float, default=-5, \
help = 'Score for opening a gap during local alignment. Used only if report is True. Default is -5')
a_parser.add_argument('-ge', '--Gap_extension', dest='gap_extension', type=float, default=-1, \
help = 'Score for extending an open gap during local alignment. Used only if report is True. Default is -1')
a_parser.add_argument('-ao', '--Alignment_overlap', dest='alignment_overlap_threshold', type=int, default=60, \
help = 'Cut-off value for the length of the de-gapped overlap between read1 and read2. Default is 60bp')
a_parser.add_argument('-mt', '--Matches_threshold', dest='matches_threshold', type=float, default=0.7, \
help = 'Cut-off value for the number of matching positions within the de-gapped overlap between read1 and read2. Used only if report is True. Default is 0.7')
a_parser.add_argument('-c', '--Chromosome', dest='chromosome', help = 'Considers only the reads mapped to chromosome. All chromosomes are used if omitted')
a_parser.add_argument('-s', '--Start', dest='start', help = 'Start position of region on chromosome. Start of chromosome if omitted')
a_parser.add_argument('-e', '--End', dest='end', help = 'End position of region on chromosome. End of chromosome if omitted')
a_parser.add_argument('-r', '--Region', dest='region', help = 'Chromosomal region "chrN.start.end". Must follow this format and overrides chromosome, start and end parameters')
# merge chromosome-level files
m_parser = subparsers.add_parser('merge', help='Merges all the chromosome-level stats and alignment files')
m_parser.add_argument('-pf', '--Prefix', dest='prefix', help = 'Prefix used to name the output files', required=True)
m_parser.add_argument('-ft', '--FileType', dest='file_type', choices = ["assigned", "empty", "counts", "extraction"], help = 'Type of the files to be merged', required=True)
m_parser.add_argument('-t', '--Files', dest='files', nargs='*', help = 'List of stats files or bams to be merged', required = True)
m_parser.add_argument('--remove', dest='remove', action='store_true', help = 'Remove intermediate files. Default is False, becomes True if used')
args = parser.parse_args()
if args.subparser_name == 'align':
try:
align_reads(args.outdir, args.fastq1, args.fastq2, args.reference, args.bwa, args.prefix, args.remove)
except AttributeError as e:
print('#############\n')
print('AttributeError: {0}\n'.format(e))
print('#############\n\n')
print(parser.format_help())
elif args.subparser_name == 'assign':
try:
assign_smmips(args.outdir, args.sortedbam, args.prefix, args.remove,
args.panel, args.upstream_nucleotides, args.umi_length, args.max_subs,
args.match, args.mismatch, args.gap_opening, args.gap_extension,
args.alignment_overlap_threshold, args.matches_threshold, args.chromosome, args.start, args.end, args.region)
except AttributeError as e:
print('#############\n')
print('AttributeError: {0}\n'.format(e))
print('#############\n\n')
print(parser.format_help())
elif args.subparser_name == 'merge':
try:
merge_files(args.prefix, args.files, args.file_type, args.remove)
except AttributeError as e:
print('#############\n')
print('AttributeError: {0}\n'.format(e))
print('#############\n\n')
print(parser.format_help())
elif args.subparser_name is None:
print(parser.format_help()) |
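# A hedged usage sketch (not part of the original script): assuming the module is
# saved as smmips.py and main() is wired up as the console entry point, the
# subcommands defined above could be invoked roughly as follows. All paths,
# prefixes and file names are placeholders.
#
#   python smmips.py align -f1 sample_R1.fastq -f2 sample_R2.fastq \
#       -r genome.fa -bwa /usr/bin/bwa -pf sample1 -o out_dir
#   python smmips.py assign -pa panel.txt -b out_dir/sample1.sorted.bam \
#       -pf sample1 -o out_dir
#   python smmips.py merge -pf sample1 -ft assigned -t stats_chr1.json stats_chr2.json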
the-stack_0_19028 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for eager execution using XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import convolutional
from tensorflow.python.layers import pooling
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import adam
class EagerTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
def testGradientTape(self):
with self.test_scope():
x = constant_op.constant(1.0)
y = constant_op.constant(10.0)
with backprop.GradientTape(persistent=True) as tape:
tape.watch(x)
tape.watch(y)
a = x + y + x * y
da_dx = tape.gradient(a, x)
da_dy = tape.gradient(a, y)
self.assertEqual(11.0, da_dx.numpy())
self.assertEqual(2.0, da_dy.numpy())
def testExecuteListOutputLen0(self):
with self.test_scope():
empty = constant_op.constant([], dtype=dtypes.float32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteListOutputLen1(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen3(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 3, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(3, len(result))
self.assertAllEqual([[0], [3]], result[0])
self.assertAllEqual([[1], [4]], result[1])
self.assertAllEqual([[2], [5]], result[2])
def testBasicGraph(self):
# Run some ops eagerly
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
# Run some ops graphly
with context.graph_mode(), self.session():
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, self.evaluate(product))
def testDegenerateSlices(self):
with self.test_scope():
npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
t = constant_op.constant(npt)
# degenerate by offering a forward interval with a negative stride
self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
# degenerate with a reverse interval with a positive stride
self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
# empty interval in every dimension
self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
def testIdentity(self):
with self.test_scope():
self.assertAllEqual(2, array_ops.identity(2))
def testRandomOps(self):
with self.test_scope():
tensor = gen_random_ops.random_uniform((2, 2), dtypes.float32)
row0 = tensor[0].numpy()
row1 = tensor[1].numpy()
      # It should be very unlikely for the rng to generate two equal rows.
self.assertFalse((row0 == row1).all())
def testIdentityOnVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(True)
i = array_ops.identity(v)
self.assertAllEqual(True, i.numpy())
def testAssignAddVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
v.assign_add(2.0)
self.assertEqual(3.0, v.numpy())
def testReadAssignRead(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
val1 = v.read_value()
v.assign_add(2.0)
val2 = v.read_value()
self.assertEqual(1.0, val1.numpy())
self.assertEqual(3.0, val2.numpy())
def testGradient(self):
def f(x):
return x
with self.test_scope():
grad_fn = backprop.gradients_function(f)
self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
def testVariableGradient(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(1.0)
def f():
x = v0 * v0
return x
grads = backprop.implicit_grad(f)()
self.assertEqual(2., grads[0][0].numpy())
def testMultipleVariableReads(self):
# This test makes sure consecutive variable reads don't copy
# the underlying memory.
with self.test_scope():
# Create 128MiB variables
var = resource_variable_ops.ResourceVariable(
array_ops.ones([32, 1024, 1024]))
# Read the same variable 100 times. If the underlying tensor
# is not copied, this is a trivial operation. If it is copied,
# this will eat over 13GB and OOM.
values = []
for _ in range(100):
values.append(var.value())
# The shape, shape_n, size, and rank are tested here because their
# execution kernels (as opposed to compilation only tf2xla kernels)
  # are distinct from tf2xla kernels.
def testShape(self):
def const(value):
return array_ops.shape(
constant_op.constant(value)).numpy()
def ones(value):
return array_ops.shape(
array_ops.ones(value)).numpy()
with self.test_scope():
# Shapes of directly constructed tensors
self.assertAllEqual([], const(3))
self.assertAllEqual([3], const([1.0, 2.0, 3.0]))
self.assertAllEqual([2, 2], const([[1.0, 2.0], [3.0, 4.0]]))
self.assertAllEqual([2, 1, 2], const([[[1.0, 2.0]], [[3.0, 4.0]]]))
# Shapes of tensors created by op running on device
# We make this distinction because directly constructed tensors
# are treated differently in a few places that can influence shape:
# - they always have on_host_tensor
# - they and their shapes can be cached
# - they end up on device via a copy, instead of as program output
self.assertAllEqual([], ones([]))
self.assertAllEqual([3], ones([3]))
self.assertAllEqual([2, 2], ones([2, 2]))
self.assertAllEqual([2, 1, 2], ones([2, 1, 2]))
def testShapeN(self):
with self.test_scope():
# Shapes of directly constructed tensors
shapes = array_ops.shape_n([
constant_op.constant(1.0),
constant_op.constant([1.0, 2.0, 3.0]),
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])])
self.assertAllEqual(
[[], [3], [2, 2]],
[x.numpy().tolist() for x in shapes])
# Shapes of tensors created by op running on device
shapes = array_ops.shape_n([
array_ops.ones([]),
array_ops.ones([3]),
array_ops.ones([2, 2])])
self.assertAllEqual(
[[], [3], [2, 2]],
[x.numpy().tolist() for x in shapes])
def testSize(self):
with self.test_scope():
self.assertEqual(
1, array_ops.size(constant_op.constant(1.0)).numpy())
self.assertEqual(
3, array_ops.size(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertEqual(
4, array_ops.size(
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
def testRank(self):
with self.test_scope():
self.assertEqual(
0, array_ops.rank(constant_op.constant(1.0)).numpy())
self.assertEqual(
1, array_ops.rank(constant_op.constant([1.0, 2.0, 3.0])).numpy())
self.assertEqual(
2, array_ops.rank(
constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
def testAdam(self):
with self.test_scope():
optimizer = adam.AdamOptimizer(0.1)
x = resource_variable_ops.ResourceVariable(10.0)
with backprop.GradientTape() as tape:
y = x * x
dy_dx = tape.gradient(y, x)
optimizer.apply_gradients([(dy_dx, x)])
self.assertAlmostEqual(9.9, x.numpy(), places=3)
def testAdamSparse(self):
with ops.device('/cpu:0'):
# Create 2-D embedding for 3 objects on CPU because sparse/sliced updates
# are not implemented on TPU.
embedding_matrix = resource_variable_ops.ResourceVariable(
array_ops.ones([3, 2]))
with self.test_scope():
with backprop.GradientTape() as tape:
embedding = embedding_ops.embedding_lookup(embedding_matrix, [1])
y = math_ops.reduce_sum(embedding)
dy_dx = tape.gradient(y, embedding_matrix)
self.assertIsInstance(dy_dx, ops.IndexedSlices)
optimizer = adam.AdamOptimizer(0.1)
# The gradient application operations will run on CPU because optimizer
# updates are always collocated with the variable.
optimizer.apply_gradients([(dy_dx, embedding_matrix)])
# This assign_add will run on CPU because when an input to an
# operation is a resource, this operation is placed on the resource's
# device by the eager runtime.
embedding_matrix.assign_add(array_ops.ones([3, 2]))
self.assertAllClose([[2.0, 2.0],
[1.9, 1.9],
[2.0, 2.0]], embedding_matrix.numpy())
class EagerFunctionTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
matmul = function.defun(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
def testConv(self):
if 'GPU' in self.device:
# TODO(b/32333178)
self.skipTest('Current implementation of RandomStandardNormal kernel '
'is very slow on GPU, and has been denylisted.')
with self.test_scope():
data_format = 'channels_last'
conv = convolutional.Conv2D(
filters=1, kernel_size=2, padding='VALID',
data_format=data_format, activation=nn_ops.relu,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
pool = pooling.MaxPooling2D(2, 2, data_format=data_format)
def model(x):
x = conv(x)
return pool(x)
model = function.defun(model)
x = array_ops.ones([1, 4, 4, 1])
y = model(x)
self.assertAllEqual(y.numpy(), [[[[4.]]]])
def testReadVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
@function.defun
def f():
return v.read_value()
var = f()
self.assertEqual(1.0, var.numpy())
def testResourceVariableNoInlineReadWrite(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g(x):
w.assign(w.read_value() + x)
return v.read_value() + x * w.read_value()
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
return g(1.0) + g(2.0) + g(3.0) + g(4.0) + g(5.0)
# 1 + 1*1 + 1 + 2*3 + 1 + 3*6 + 1 + 4*10 + 1 + 5*15
self.assertEqual(145.0, f().numpy())
self.assertEqual(15.0, w.read_value().numpy())
def testResourceVariableNoInlineReadOnly(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(10.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g():
return v.read_value()
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
return g() + g() + g() + g() + g()
self.assertEqual(50.0, f().numpy())
def testResourceVariableNoInlineWriteOnly(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(0.0)
@function.defun_with_attributes(attributes={'_noinline': True})
def g(x):
v.assign(x)
@function.defun_with_attributes(attributes={'_noinline': True})
def f():
g(1.0)
g(2.0)
g(3.0)
g(4.0)
g(5.0)
f()
self.assertEqual(5.0, v.read_value().numpy())
def testUpdateVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
def f(v):
v.assign_add(1.0)
return v
f = function.defun(f)
var = f(v)
self.assertEqual(2.0, var.numpy())
def testReturnResourceHandle(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable([[1.0, 2.0], [3.0, 4.0]])
def f(v):
return v.handle
f = function.defun(f)
handle = f(v)
self.assertAllEqual(v.numpy(),
resource_variable_ops.read_variable_op(
handle, dtypes.float32).numpy())
def testReturnMultipleResourceHandles(self):
with self.test_scope():
v1 = resource_variable_ops.ResourceVariable(1.25)
v2 = resource_variable_ops.ResourceVariable(2.0)
def f(v):
return v.handle, 3.0 * v, v2.handle, v + v2
f = function.defun(f)
v1_handle, v1_times_3, v2_handle, variable_sum = f(v1)
self.assertAllEqual(v1.numpy(),
resource_variable_ops.read_variable_op(
v1_handle, dtypes.float32).numpy())
self.assertEqual(3.75, v1_times_3.numpy())
self.assertAllEqual(v2.numpy(),
resource_variable_ops.read_variable_op(
v2_handle, dtypes.float32).numpy())
self.assertEqual(3.25, variable_sum.numpy())
def testAllArgumentKinds(self):
"""Test a complex function that takes different argument kinds.
tf2xla machinery that translates, compiles, and runs defuns
classifies arguments into: compile-time constants, regular tensors,
and resources. This test creates a function with a mix of all these
kinds. Moreover, the order of function arguments is intentionally mixed up.
This also tests the case when the same argument is a compile-time constant
as well as used in an operation that normally expects its inputs to be
in device memory - addition in this case.
"""
with self.test_scope():
def foo(c1, r1, v1, c2, v2, r2):
# c1 and c2 are compile-time constants
# r1 and r2 are regular tensors
# v1 and v2 are resource variables
a = c1 + r1
b = math_ops.cast(c2, dtypes.float32) + v2
c = array_ops.slice(v1, c1, c2)
d = r2 * v2
return a, b, c, d
foo = function.defun(foo)
c1 = [0, 0]
c2 = array_ops.ones([2], dtype=dtypes.int32)
r1 = array_ops.ones([2])
r2 = [[2., 2.], [3., 3.]]
v1 = resource_variable_ops.ResourceVariable([[1., 2.], [3., 4.]])
v2 = resource_variable_ops.ResourceVariable([[10., 20.], [30., 40.]])
a, b, c, d = foo(c1, r1, v1, c2, v2, r2)
self.assertAllEqual([1, 1], a.numpy())
self.assertAllEqual([[11., 21.], [31., 41.]], b.numpy())
self.assertAllEqual([[1.]], c.numpy())
self.assertAllEqual([[20., 40.], [90., 120.]], d.numpy())
def testDefunInGradientTape(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def f(x):
x = v0 * v0 * x
return x
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = f(x)
dy = tape.gradient(y, v0)
self.assertEqual(75, y.numpy())
self.assertEqual(30, dy.numpy())
def testGradientTapeInDefun(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def f():
x = constant_op.constant(1.0)
with backprop.GradientTape() as tape:
y = v0 * x
dy = tape.gradient(y, v0)
return dy
dy = f()
self.assertEqual(1.0, dy.numpy())
def testSliceInDefun(self):
with self.test_scope():
@function.defun
def f(x, y):
return x[0::2, y:, ...]
x = array_ops.ones([2, 3, 4], dtype=dtypes.float32)
y = array_ops.ones([], dtype=dtypes.int32)
with backprop.GradientTape() as tape:
tape.watch(x)
tape.watch(y)
z = f(x, y)
dz = tape.gradient(z, x)
self.assertAllEqual(np.ones([1, 2, 4]), z.numpy())
self.assertAllEqual((2, 3, 4), dz.shape.as_list())
def testNestedDefun(self):
with self.test_scope():
@function.defun
def times_two(x):
return 2. * x
@function.defun
def two_x_plus_1(x):
return times_two(x) + 1.
x = constant_op.constant([2., 3., 4.])
y = two_x_plus_1(x)
self.assertAllEqual([5., 7., 9.], y.numpy())
def testNestedDefunWithVariable(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def g(x):
x = v0 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
y = f(x)
self.assertEqual(75.0, y.numpy())
def testNestedDefunInGradientTape(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def g(x):
x = v0 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
y = f(x)
dy = tape.gradient(y, v0)
self.assertEqual(75, y.numpy())
self.assertEqual(30, dy.numpy())
def testNestedDefunInGradientTapeDifferentVars(self):
with self.test_scope():
v0 = resource_variable_ops.ResourceVariable(5.0)
v1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun
def g(x):
x = v1 * x
return x
@function.defun
def f(x):
x = g(v0 * x)
return x
x = constant_op.constant(3.0)
with backprop.GradientTape(persistent=True) as tape:
y = f(x)
dy_v0 = tape.gradient(y, v0)
dy_v1 = tape.gradient(y, v1)
self.assertEqual(45, y.numpy())
self.assertEqual(9, dy_v0.numpy())
self.assertEqual(15, dy_v1.numpy())
def testWhileInDefun(self):
with self.test_scope():
@def_function.function
def f(start):
c = lambda x: math_ops.less(x, 13.0)
b = lambda x: math_ops.add(x, 1.0)
return control_flow_ops.while_loop(c, b, [start])
y = f(constant_op.constant(3.0))
self.assertEqual(13.0, y.numpy())
def testAutoGraphWhileInDefun(self):
with self.test_scope():
@def_function.function
def f(start):
x = start
while x < 13.0:
x += 1.0
return x
y = f(constant_op.constant(3.0))
self.assertEqual(13.0, y.numpy())
def testCondInDefun(self):
with self.test_scope():
@def_function.function
def f(pred, value):
fn1 = lambda: math_ops.add(value, 1.0)
fn2 = lambda: math_ops.subtract(value, 1.0)
return control_flow_ops.cond(pred, fn1, fn2)
plus_one = f(constant_op.constant(True), constant_op.constant(10.0))
minus_one = f(constant_op.constant(False), constant_op.constant(10.0))
self.assertEqual(11.0, plus_one.numpy())
self.assertEqual(9.0, minus_one.numpy())
def testAutoGraphCondInDefun(self):
with self.test_scope():
@def_function.function
def f(pred, value):
if pred:
return value + 1.0
else:
return value - 1.0
plus_one = f(constant_op.constant(True), constant_op.constant(10.0))
minus_one = f(constant_op.constant(False), constant_op.constant(10.0))
self.assertEqual(11.0, plus_one.numpy())
self.assertEqual(9.0, minus_one.numpy())
def testScanInDefun(self):
with self.test_scope():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name='data')
v = constant_op.constant(2.0, name='v')
@def_function.function
def f(y):
# pylint: disable=unnecessary-lambda
return functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), y, initializer=v)
# pylint: enable=unnecessary-lambda
r = f(elems)
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
def testFeedDeviceMemoryToOpExpectingHostMemory(self):
@function.defun
def f(dims, value):
return array_ops.fill(dims, value)
with self.test_scope():
x = constant_op.constant([4], dtype=dtypes.int64)
y = f(x, 3)
self.assertAllEqual([3, 3, 3, 3], y)
def testRequestNotToCompile(self):
with self.test_scope():
def f(x):
with ops.device('device:CPU:0'):
y = 2.0 * x
return x, y
wholly_compiled_f = def_function.function(f)
op_by_op_f = def_function.function(f, jit_compile=False)
x = array_ops.identity([0.0, 2.0], name='data')
# When function is wholly compiled, all outputs will be on the
# device on which it is run.
r_x, r_y = wholly_compiled_f(x)
self.assertAllEqual([0.0, 2.0], r_x)
self.assertAllEqual([0.0, 4.0], r_y)
if context.executing_eagerly():
# backing_device is only available for eager tensors.
self.assertRegex(r_x.backing_device, self.device)
self.assertRegex(r_y.backing_device, self.device)
# When function is executed op-by-op, requested devices will be
# respected.
r_x, r_y = op_by_op_f(x)
self.assertAllEqual([0.0, 2.0], r_x)
self.assertAllEqual([0.0, 4.0], r_y)
if context.executing_eagerly():
# backing_device is only available for eager tensors.
self.assertRegex(r_x.backing_device, self.device)
self.assertRegex(r_y.backing_device, 'device:CPU:0')
class ExcessivePaddingTest(xla_test.XLATestCase):
"""Test that eager execution works with TPU flattened tensors.
Tensors that would normally be excessively padded when written
to TPU memory are reshaped to 1-D flat tensors.
This test case verifies that such tensors work with eager execution.
The flattening currently only happens on TPU, but tests should work
fine with all backends as flattening is transparent.
"""
def testFromConstant(self):
with self.test_scope():
# Create constant of shape [100, 2, 1]. This tensor would be
# excessively padded on TPU.
tensor = constant_op.constant(100 * [[[10.0], [2.0]]])
# Use reduce_sum since it requires correctly working with
# a particular dimension.
reduced = math_ops.reduce_sum(tensor, axis=1)
self.assertAllEqual(100 * [[12.0]], reduced)
def testFromOperation(self):
with self.test_scope():
tensor = array_ops.ones([3, 100, 2, 2])
reduced = math_ops.reduce_sum(tensor, axis=[0, 2, 3])
self.assertAllEqual(100 * [12.0], reduced)
def testAsFunctionInput(self):
with self.test_scope():
@function.defun
def f(x):
return math_ops.reduce_sum(x, axis=2)
tensor = constant_op.constant(100 * [[[10.0, 2.0]]])
reduced = f(tensor)
self.assertAllEqual(100 * [[12.0]], reduced)
def testAsFunctionOutput(self):
with self.test_scope():
@function.defun
def f(x):
return x * constant_op.constant(100 * [[[10.0, 2.0]]])
y = f(3)
reduced = math_ops.reduce_sum(y, axis=2)
self.assertAllEqual(100 * [[36.0]], reduced)
def multiple_tpus():
devices = context.context().devices()
return len([d for d in devices if 'device:TPU:' in d]) > 1
class MultiDeviceTest(xla_test.XLATestCase):
"""Test running TPU computation on more than one core."""
def testBasic(self):
if not multiple_tpus():
self.skipTest('MultiDeviceTest requires multiple TPU devices.')
# Compute 10 on TPU core 0
with ops.device('device:TPU:0'):
two = constant_op.constant(2)
five = constant_op.constant(5)
ten = two * five
self.assertAllEqual(10, ten)
# Compute 6 on TPU core 1
with ops.device('device:TPU:1'):
two = constant_op.constant(2)
three = constant_op.constant(3)
six = two * three
self.assertAllEqual(6, six)
# Copy 10 and 6 to CPU and sum them
self.assertAllEqual(16, ten + six)
if __name__ == '__main__':
ops.enable_eager_execution(
config=config_pb2.ConfigProto(log_device_placement=True))
googletest.main()
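# A hedged note (not part of the original test file): XLA compiler tests such as
# this one are normally driven through bazel; the exact target name below is an
# assumption, not taken from the source tree.
#
#   bazel test //tensorflow/compiler/tests:eager_test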
|
the-stack_0_19031 |
import json
import asyncio
import traceback
import aiohttp.web
from guajiro.core.response_types import ResponseType
@asyncio.coroutine
def handle_response_annotations(app, view):
@asyncio.coroutine
def middleware(request):
response = yield from view(request)
if not isinstance(response, aiohttp.web.Response):
attributes = getattr(view, "__attributes__", None)
if attributes:
returns = attributes.get("returns")
if isinstance(returns, ResponseType):
handler = returns.value.get("handler", None)
if handler:
response = handler(response)
headers = returns.value.get("headers", {})
return aiohttp.web.Response(
text=response,
headers=headers,
)
return response
return middleware
@asyncio.coroutine
def handle_exceptions(app, view):
@asyncio.coroutine
def middleware(request):
try:
response = yield from view(request)
except Exception as exception:
if app.SETTINGS["DEBUG"]:
plain_traceback = traceback.format_exc()
return aiohttp.web.Response(
text=plain_traceback,
status=500,
)
raise
return response
return middleware |
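# A hedged usage sketch (not part of the original module): both factories follow
# the old-style aiohttp middleware signature (app, handler), so they can be passed
# to the application constructor. The import path below is an assumption.
#
#   import aiohttp.web
#   from guajiro.core.middlewares import (
#       handle_exceptions, handle_response_annotations)
#
#   app = aiohttp.web.Application(
#       middlewares=[handle_exceptions, handle_response_annotations])
#   aiohttp.web.run_app(app)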
the-stack_0_19035 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gc
import os
import statistics
import sys
import time
from argparse import Namespace
from operator import attrgetter
import click
MAX_DAG_RUNS_ALLOWED = 1
class ShortCircuitExecutorMixin:
'''
Mixin class to manage the scheduler state during the performance test run.
'''
def __init__(self, dag_ids_to_watch, num_runs):
super().__init__()
self.num_runs_per_dag = num_runs
self.reset(dag_ids_to_watch)
def reset(self, dag_ids_to_watch):
'''
Capture the value that will determine when the scheduler is reset.
'''
self.dags_to_watch = {
dag_id: Namespace(
waiting_for=self.num_runs_per_dag,
# A "cache" of DagRun row, so we don't have to look it up each
# time. This is to try and reduce the impact of our
# benchmarking code on runtime,
runs={}
) for dag_id in dag_ids_to_watch
}
def change_state(self, key, state, info=None):
'''
        Change the state of the scheduler by waiting till the task is complete
and then shut down the scheduler after the task is complete
'''
from airflow.utils.state import State
super().change_state(key, state, info=info)
dag_id, _, execution_date, __ = key
if dag_id not in self.dags_to_watch:
return
# This fn is called before the DagRun state is updated, so we can't
# check the DR.state - so instead we need to check the state of the
# tasks in that run
run = self.dags_to_watch[dag_id].runs.get(execution_date)
if not run:
import airflow.models
# odd `list()` is to work across Airflow versions.
run = list(airflow.models.DagRun.find(dag_id=dag_id, execution_date=execution_date))[0]
self.dags_to_watch[dag_id].runs[execution_date] = run
if run and all(t.state == State.SUCCESS for t in run.get_task_instances()):
self.dags_to_watch[dag_id].runs.pop(execution_date)
self.dags_to_watch[dag_id].waiting_for -= 1
if self.dags_to_watch[dag_id].waiting_for == 0:
self.dags_to_watch.pop(dag_id)
if not self.dags_to_watch:
self.log.warning("STOPPING SCHEDULER -- all runs complete")
self.scheduler_job.processor_agent._done = True # pylint: disable=protected-access
return
self.log.warning("WAITING ON %d RUNS",
sum(map(attrgetter('waiting_for'), self.dags_to_watch.values())))
def get_executor_under_test():
'''
Create and return a MockExecutor
'''
try:
# Run against master and 1.10.x releases
from tests.test_utils.mock_executor import MockExecutor
except ImportError:
from tests.executors.test_executor import TestExecutor as MockExecutor
# from airflow.executors.local_executor import LocalExecutor
# Change this to try other executors
class ShortCircuitExecutor(ShortCircuitExecutorMixin, MockExecutor):
'''
Placeholder class that implements the inheritance hierarchy
'''
scheduler_job = None
return ShortCircuitExecutor
def reset_dag(dag, session):
'''
Delete all dag and task instances and then un_pause the Dag.
'''
import airflow.models
DR = airflow.models.DagRun
DM = airflow.models.DagModel
TI = airflow.models.TaskInstance
TF = airflow.models.TaskFail
dag_id = dag.dag_id
session.query(DM).filter(DM.dag_id == dag_id).update({'is_paused': False})
session.query(DR).filter(DR.dag_id == dag_id).delete()
session.query(TI).filter(TI.dag_id == dag_id).delete()
session.query(TF).filter(TF.dag_id == dag_id).delete()
def pause_all_dags(session):
'''
Pause all Dags
'''
from airflow.models.dag import DagModel
session.query(DagModel).update({'is_paused': True})
def create_dag_runs(dag, num_runs, session):
'''
    Create `num_runs` dag runs for subsequent schedules
'''
from airflow.utils import timezone
from airflow.utils.state import State
try:
from airflow.utils.types import DagRunType
ID_PREFIX = f'{DagRunType.SCHEDULED.value}__'
except ImportError:
from airflow.models.dagrun import DagRun
ID_PREFIX = DagRun.ID_PREFIX
next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
for _ in range(num_runs):
dag.create_dagrun(
run_id=ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
)
next_run_date = dag.following_schedule(next_run_date)
@click.command()
@click.option('--num-runs', default=1, help='number of DagRun, to run for each DAG')
@click.option('--repeat', default=3, help='number of times to run test, to reduce variance')
@click.option('--pre-create-dag-runs', is_flag=True, default=False,
help='''Pre-create the dag runs and stop the scheduler creating more.
Warning: this makes the scheduler do (slightly) less work so may skew your numbers. Use sparingly!
''')
@click.argument('dag_ids', required=True, nargs=-1)
def main(num_runs, repeat, pre_create_dag_runs, dag_ids): # pylint: disable=too-many-locals
"""
This script can be used to measure the total "scheduler overhead" of Airflow.
By overhead we mean if the tasks executed instantly as soon as they are
    scheduled (i.e. they do nothing) how quickly could we schedule them.
It will monitor the task completion of the Mock/stub executor (no actual
tasks are run) and after the required number of dag runs for all the
specified dags have completed all their tasks, it will cleanly shut down
the scheduler.
The dags you run with need to have an early enough start_date to create the
desired number of runs.
Care should be taken that other limits (DAG concurrency, pool size etc) are
not the bottleneck. This script doesn't help you in that regard.
It is recommended to repeat the test at least 3 times (`--repeat=3`, the
default) so that you can get somewhat-accurate variance on the reported
timing numbers, but this can be disabled for longer runs if needed.
"""
# Turn on unit test mode so that we don't do any sleep() in the scheduler
# loop - not needed on master, but this script can run against older
# releases too!
os.environ['AIRFLOW__CORE__UNIT_TEST_MODE'] = 'True'
os.environ['AIRFLOW__CORE__DAG_CONCURRENCY'] = '500'
# Set this so that dags can dynamically configure their end_date
os.environ['AIRFLOW_BENCHMARK_MAX_DAG_RUNS'] = str(num_runs)
os.environ['PERF_MAX_RUNS'] = str(num_runs)
if pre_create_dag_runs:
os.environ['AIRFLOW__SCHEDULER__USE_JOB_SCHEDULE'] = 'False'
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models.dagbag import DagBag
from airflow.utils import db
dagbag = DagBag()
dags = []
with db.create_session() as session:
pause_all_dags(session)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.sync_to_db(session=session)
dags.append(dag)
reset_dag(dag, session)
next_run_date = dag.normalize_schedule(dag.start_date or min(t.start_date for t in dag.tasks))
for _ in range(num_runs - 1):
next_run_date = dag.following_schedule(next_run_date)
end_date = dag.end_date or dag.default_args.get('end_date')
if end_date != next_run_date:
message = (
f"DAG {dag_id} has incorrect end_date ({end_date}) for number of runs! "
f"It should be "
f" {next_run_date}")
sys.exit(message)
if pre_create_dag_runs:
create_dag_runs(dag, num_runs, session)
ShortCircutExecutor = get_executor_under_test()
executor = ShortCircutExecutor(dag_ids_to_watch=dag_ids, num_runs=num_runs)
scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
executor.scheduler_job = scheduler_job
total_tasks = sum(len(dag.tasks) for dag in dags)
if 'PYSPY' in os.environ:
pid = str(os.getpid())
filename = os.environ.get('PYSPY_O', 'flame-' + pid + '.html')
os.spawnlp(os.P_NOWAIT, 'sudo', 'sudo', 'py-spy', 'record', '-o', filename, '-p', pid, '--idle')
times = []
    # Need a lambda to refer to the _latest_ value of scheduler_job, not just
# the initial one
code_to_test = lambda: scheduler_job.run() # pylint: disable=unnecessary-lambda
for count in range(repeat):
gc.disable()
start = time.perf_counter()
code_to_test()
times.append(time.perf_counter() - start)
gc.enable()
print("Run %d time: %.5f" % (count + 1, times[-1]))
if count + 1 != repeat:
with db.create_session() as session:
for dag in dags:
reset_dag(dag, session)
executor.reset(dag_ids)
scheduler_job = SchedulerJob(dag_ids=dag_ids, do_pickle=False, executor=executor)
executor.scheduler_job = scheduler_job
print()
print()
msg = "Time for %d dag runs of %d dags with %d total tasks: %.4fs"
if len(times) > 1:
print((msg + " (±%.3fs)") % (
num_runs,
len(dags),
total_tasks,
statistics.mean(times),
statistics.stdev(times)
))
else:
print(msg % (num_runs, len(dags), total_tasks, times[0]))
print()
print()
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
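# A hedged usage sketch (not part of the original script): the file name and dag
# ids below are placeholders, and the dags must have start/end dates matching
# --num-runs, as the docstring above explains.
#
#   python scheduler_perf.py --num-runs 10 --repeat 3 example_dag_id
#   python scheduler_perf.py --num-runs 5 --pre-create-dag-runs dag_a dag_b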
|
the-stack_0_19037 | from tester.tester import Tester
from tester.tester_params import TestingParameters
from tester.learning_params import LearningParameters
import os
def rendezvous_config(num_times, num_agents):
"""
Function setting the experiment parameters and environment.
Output
------
Tester : tester object
Object containing the information necessary to run this experiment.
"""
base_file_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
joint_rm_file = os.path.join(base_file_path, 'experiments', 'gridworld_many_agent_rendezvous', '{}_agent_rendezvous_rm.txt'.format(num_agents))
local_rm_files = []
for i in range(num_agents):
local_rm_string = os.path.join(base_file_path, 'experiments', 'gridworld_many_agent_rendezvous', 'coordination_experiment_agent{}.txt'.format(i+1))
local_rm_files.append(local_rm_string)
step_unit = 1000
# configuration of testing params
testing_params = TestingParameters()
testing_params.test = True
testing_params.test_freq = 1*step_unit
testing_params.num_steps = step_unit
# configuration of learning params
learning_params = LearningParameters()
learning_params.gamma = 0.9
learning_params.alpha = 0.8
learning_params.T = 50
learning_params.initial_epsilon = 0.0 # Set epsilon to zero to turn off epsilon-greedy exploration (only using boltzmann)
learning_params.tabular_case = True
learning_params.max_timesteps_per_task = step_unit
learning_params.relearn_period = 30
learning_params.enter_loop = 10
tester = Tester(learning_params, testing_params)
tester.total_steps = 150 * step_unit
tester.min_steps = 1
tester.rm_test_file = joint_rm_file
tester.rm_learning_file_list = local_rm_files
tester.num_times = num_times
tester.num_agents = num_agents
# Set the environment settings for the experiment
env_settings = dict()
env_settings['Nr'] = 10
env_settings['Nc'] = 10
env_settings['initial_states'] = [0, 3, 20, 8, 90, 40, 70, 49, 96, 69]
env_settings['rendezvous_loc'] = (3,4)
env_settings['goal_locations'] = [(9,7), (7,9), (2,9), (9,9), (0,9), (7,0), (4,0), (5,0), (6,9), (8,0)]
env_settings['p'] = 0.98
tester.env_settings = env_settings
tester.experiment = 'rendezvous'
return tester |
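# A hedged usage sketch (not part of the original module): the environment
# settings above define 10 initial states and goal locations, so num_agents=10
# is consistent here; num_times is a placeholder.
#
#   tester = rendezvous_config(num_times=5, num_agents=10)
#   print(tester.total_steps, tester.num_agents, tester.rm_test_file)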
the-stack_0_19039 | import pythoncom
import win32com.server.util
import win32com.test.util
import unittest
class Persists:
_public_methods_ = [ 'GetClassID', 'IsDirty', 'Load', 'Save',
'GetSizeMax', 'InitNew' ]
_com_interfaces_ = [ pythoncom.IID_IPersistStreamInit ]
def __init__(self):
self.data = "abcdefg"
self.dirty = 1
def GetClassID(self):
return pythoncom.IID_NULL
def IsDirty(self):
return self.dirty
def Load(self, stream):
self.data = stream.Read(26)
def Save(self, stream, clearDirty):
stream.Write(self.data)
if clearDirty:
self.dirty = 0
def GetSizeMax(self):
return 1024
def InitNew(self):
pass
class Stream:
_public_methods_ = [ 'Read', 'Write' ]
_com_interfaces_ = [ pythoncom.IID_IStream ]
def __init__(self, data):
self.data = data
self.index = 0
def Read(self, amount):
result = self.data[self.index : self.index + amount]
self.index = self.index + amount
return result
def Write(self, data):
self.data = data
self.index = 0
return len(data)
class StreamTest(win32com.test.util.TestCase):
def _readWrite(self, data, write_stream, read_stream = None):
if read_stream is None: read_stream = write_stream
write_stream.Write(data)
got = read_stream.Read(len(data))
self.assertEqual(data, got)
def testit(self):
mydata = 'abcdefghijklmnopqrstuvwxyz'
# First test the objects just as Python objects...
s = Stream(mydata)
p = Persists()
p.Load(s)
p.Save(s, 0)
self.assertEqual(s.data, mydata)
# Wrap the Python objects as COM objects, and make the calls as if
# they were non-Python COM objects.
s2 = win32com.server.util.wrap(s, pythoncom.IID_IStream)
p2 = win32com.server.util.wrap(p, pythoncom.IID_IPersistStreamInit)
self._readWrite(mydata, s, s)
self._readWrite(mydata, s, s2)
self._readWrite(mydata, s2, s)
self._readWrite(mydata, s2, s2)
self._readWrite("string with\0a NULL", s2, s2)
# reset the stream
s.Write(mydata)
p2.Load(s2)
p2.Save(s2, 0)
self.assertEqual(s.data, mydata)
if __name__=='__main__':
unittest.main()
|
the-stack_0_19040 | """Link user to organisation
Revision ID: 4d8b254d7e7e
Revises: 016571f41a20
Create Date: 2020-01-22 17:42:42.968199
"""
# revision identifiers, used by Alembic.
revision = '4d8b254d7e7e'
down_revision = '016571f41a20'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import orm
from app import db
from app.users.models import AppUser
Base = declarative_base()
def upgrade():
Base.metadata.bind = op.get_bind()
session = orm.Session(bind=Base.metadata.bind)
op.add_column('app_user', sa.Column('organisation_id', sa.Integer(), nullable=True))
op.create_foreign_key('fk_user_organisation', 'app_user', 'organisation', ['organisation_id'], ['id'])
# Populate with organisation 1 and make non-nullable
users = session.query(AppUser).all()
for user in users:
user.organisation_id = 1
session.commit()
op.alter_column('app_user', 'organisation_id', nullable=False)
op.add_column('organisation', sa.Column('system_name', sa.String(length=50), nullable=True))
op.execute("""UPDATE organisation SET system_name = 'Baobab' WHERE id = 1""")
op.execute("""UPDATE organisation SET system_name = 'EEML Portal' WHERE id = 2""")
op.execute("""UPDATE organisation SET system_name = 'Baobab Dev' WHERE id = 3""")
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('organisation', 'system_name')
op.drop_constraint('fk_user_organisation', 'app_user', type_='foreignkey')
op.drop_column('app_user', 'organisation_id')
# ### end Alembic commands ###
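# A hedged note (not part of the original migration): with this file on the
# migration path, the revision ids declared above map to the usual Alembic
# commands.
#
#   alembic upgrade 4d8b254d7e7e     # apply this migration
#   alembic downgrade 016571f41a20   # revert to the previous revision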
|
the-stack_0_19041 | """ This file contains Visualizer class based on Facebook's visdom.
Returns:
Visualizer(): Visualizer class to display plots and images
"""
##
import os
import time
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import torchvision.utils as vutils
from .plot import plot_confusion_matrix
from .evaluate import get_values_for_pr_curve, get_values_for_roc_curve
import seaborn as sns
##
class Visualizer():
""" Visualizer wrapper based on Visdom.
Returns:
Visualizer: Class file.
"""
# pylint: disable=too-many-instance-attributes
# Reasonable.
##
def __init__(self, opt):
self.name = opt.name
self.opt = opt
self.writer = None
# use tensorboard for now
if self.opt.display:
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(log_dir=os.path.join("../tensorboard/skip_ganomaly/", opt.outf))
# --
# Dictionaries for plotting data and results.
self.plot_data = None
self.plot_res = None
# --
# Path to train and test directories.
self.img_dir = os.path.join(opt.outf, opt.name, 'train', 'images')
self.tst_img_dir = os.path.join(opt.outf, opt.name, 'test', 'images')
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
if not os.path.exists(self.tst_img_dir):
os.makedirs(self.tst_img_dir)
# --
# Log file.
self.log_name = os.path.join(opt.outf, opt.name, 'loss_log.txt')
# with open(self.log_name, "a") as log_file:
# now = time.strftime("%c")
# log_file.write('================ Training Loss (%s) ================\n' % now)
now = time.strftime("%c")
title = f'================ {now} ================\n'
info = f'Anomalies, {opt.nz}, {opt.w_adv}, {opt.w_con}, {opt.w_lat}\n'
self.write_to_log_file(text=title + info)
##
@staticmethod
def normalize(inp):
"""Normalize the tensor
Args:
inp ([FloatTensor]): Input tensor
Returns:
[FloatTensor]: Normalized tensor.
"""
return (inp - inp.min()) / (inp.max() - inp.min() + 1e-5)
##
def plot_current_errors(self, epoch, total_steps, errors):
"""Plot current errros.
Args:
epoch (int): Current epoch
counter_ratio (float): Ratio to plot the range between two epoch.
errors (OrderedDict): Error for the current epoch.
"""
self.writer.add_scalars("Loss over time", errors, global_step=total_steps)
##
def plot_performance(self, epoch, counter_ratio, performance, tag=None):
""" Plot performance
Args:
epoch (int): Current epoch
counter_ratio (float): Ratio to plot the range between two epoch.
performance (OrderedDict): Performance for the current epoch.
"""
self.writer.add_scalars(tag if tag else "Performance Metrics", {k:v for k,v in performance.items() if ("conf_matrix" not in k and k != "Avg Run Time (ms/batch)")}, global_step=epoch)
def plot_current_conf_matrix(self, epoch, cm, tag=None, save_path=None):
plot = plot_confusion_matrix(cm, normalize=False, save_path=save_path)
self.writer.add_figure(tag if tag else "Confusion Matrix", plot, global_step=epoch)
##
def print_current_errors(self, epoch, errors):
""" Print current errors.
Args:
epoch (int): Current epoch.
errors (OrderedDict): Error for the current epoch.
batch_i (int): Current batch
batch_n (int): Total Number of batches.
"""
# message = ' [%d/%d] ' % (epoch, self.opt.niter)
message = ' Loss: [%d/%d] ' % (epoch, self.opt.niter)
for key, val in errors.items():
message += '%s: %.3f ' % (key, val)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
##
def write_to_log_file(self, text):
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % text)
##
def print_current_performance(self, performance, best):
""" Print current performance results.
Args:
performance ([OrderedDict]): Performance of the model
best ([int]): Best performance.
"""
message = ' '
#print(performance)
for key, val in performance.items():
if key == "conf_matrix":
message += '%s: %s ' % (key, val)
else:
message += '%s: %.3f ' % (key, val)
message += 'max AUC: %.3f' % best
print(message)
self.write_to_log_file(text=message)
def display_current_images(self, reals, fakes, fixed, train_or_test="train", global_step=0):
""" Display current images.
Args:
epoch (int): Current epoch
counter_ratio (float): Ratio to plot the range between two epoch.
reals ([FloatTensor]): Real Image
fakes ([FloatTensor]): Fake Image
fixed ([FloatTensor]): Fixed Fake Image
"""
reals = self.normalize(reals.cpu().numpy())
fakes = self.normalize(fakes.cpu().numpy())
# fixed = self.normalize(fixed.cpu().numpy())
self.writer.add_images("Reals from {} step: ".format(str(train_or_test)), reals, global_step=global_step)
self.writer.add_images("Fakes from {} step: ".format(str(train_or_test)), fakes, global_step=global_step)
def plot_pr_curve(self, y_trues, y_preds, thresholds, global_step, tag=None):
tp_counts, fp_counts, tn_counts, fn_counts, precisions, recalls, n_thresholds = get_values_for_pr_curve(y_trues, y_preds, thresholds)
self.writer.add_pr_curve_raw(tag if tag else "Precision_recall_curve", true_positive_counts=tp_counts, false_positive_counts=fp_counts, true_negative_counts=tn_counts, false_negative_counts= fn_counts,
precision=precisions, recall=recalls, num_thresholds=n_thresholds, global_step=global_step)
def save_current_images(self, epoch, reals, fakes, fixed):
""" Save images for epoch i.
Args:
epoch ([int]) : Current epoch
reals ([FloatTensor]): Real Image
fakes ([FloatTensor]): Fake Image
fixed ([FloatTensor]): Fixed Fake Image
"""
vutils.save_image(reals, '%s/reals.png' % self.img_dir, normalize=True)
vutils.save_image(fakes, '%s/fakes.png' % self.img_dir, normalize=True)
vutils.save_image(fixed, '%s/fixed_fakes_%03d.png' %(self.img_dir, epoch+1), normalize=True)
def plot_histogram(self, y_trues, y_preds, threshold, global_step=1, save_path=None, tag=None):
scores = dict()
scores["scores"] = y_preds
scores["labels"] = y_trues
hist = pd.DataFrame.from_dict(scores)
plt.ion()
# Filter normal and abnormal scores.
abn_scr = hist.loc[hist.labels == 1]['scores']
nrm_scr = hist.loc[hist.labels == 0]['scores']
# Create figure and plot the distribution.
fig = plt.figure(figsize=(4,4))
sns.distplot(nrm_scr, label=r'Normal Scores')
sns.distplot(abn_scr, label=r'Abnormal Scores')
plt.axvline(threshold, 0, 1, label='threshold', color="red")
plt.legend()
plt.yticks([])
plt.xlabel(r'Anomaly Scores')
plt.savefig(save_path)
self.writer.add_figure(tag if tag else "Histogram", fig, global_step)
def plot_roc_curve(self, y_trues, y_preds, global_step=1, tag=None, save_path=None):
fpr, tpr, roc_auc = get_values_for_roc_curve(y_trues, y_preds)
fig = plt.figure(figsize=(4,4))
lw = 2
plt.plot(fpr, tpr, color='darkorange', lw=lw, label='(AUC = %0.2f)' % (roc_auc))
plt.plot([0, 1], [1, 0], color='navy', lw=1, linestyle=':')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
if save_path:
plt.savefig(save_path)
self.writer.add_figure(tag if tag else "ROC-Curve", fig, global_step) |
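# A hedged usage sketch (not part of the original module): `opt` is normally an
# argparse Namespace produced elsewhere in the project; the attribute values
# below are placeholders covering only what __init__ and print_current_errors
# read. With display=False the tensorboardX writer is never created.
#
#   from argparse import Namespace
#   opt = Namespace(name='exp1', display=False, outf='./output', nz=100,
#                   w_adv=1, w_con=50, w_lat=1, niter=15)
#   vis = Visualizer(opt)
#   vis.print_current_errors(epoch=0, errors={'err_d': 0.5, 'err_g': 1.2})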
the-stack_0_19043 | #!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.S3D2.FF-Prior
import os
import logging
from config import SEED
from config import _ERROR
from config import _TRUTH
import numpy as np
import pandas as pd
from visual.misc import set_plot_config
set_plot_config()
from ..common import load_estimations
from ..common import load_conditional_estimations
from utils.log import set_logger
from utils.log import flush
from utils.log import print_line
from utils.model import get_model
from utils.model import train_or_load_classifier
from utils.evaluation import evaluate_classifier
from utils.evaluation import evaluate_neural_net
from utils.evaluation import evaluate_config
from utils.evaluation import evaluate_summary_computer
from utils.evaluation import evaluate_minuit
from utils.evaluation import evaluate_estimator
from utils.evaluation import evaluate_conditional_estimation
from utils.images import gather_images
from visual.misc import plot_params
from problem.synthetic3D import S3D2Config as Config
from problem.synthetic3D import get_minimizer
from problem.synthetic3D import get_minimizer_no_nuisance
from problem.synthetic3D import Generator
from problem.synthetic3D import Parameter
from problem.synthetic3D import param_generator
from problem.synthetic3D import S3D2NLL as NLLComputer
from visual.special.synthetic3D import plot_nll_around_min
from model.feature_filter import FeatureModel
from ..my_argparser import FF_parse_args
from archi.classic import L4 as ARCHI
from .common import N_BINS
DATA_NAME = 'S3D2'
BENCHMARK_NAME = DATA_NAME+'-prior'
N_ITER = 30
def build_model(args, i_cv):
model = get_model(args, FeatureModel)
model.set_info(DATA_NAME, BENCHMARK_NAME, i_cv)
return model
# =====================================================================
# MAIN
# =====================================================================
def main():
# BASIC SETUP
logger = set_logger()
    args = FF_parse_args(main_description="Training launcher for Feature Filter on S3D2 benchmark")
logger.info(args)
flush(logger)
# INFO
model = build_model(args, -1)
os.makedirs(model.results_directory, exist_ok=True)
config = Config()
config_table = evaluate_config(config)
config_table.to_csv(os.path.join(model.results_directory, 'config_table.csv'))
# RUN
if not args.conditional_only:
eval_table = get_eval_table(args, model.results_directory)
if not args.estimate_only:
eval_conditional = get_eval_conditional(args, model.results_directory)
if not args.estimate_only and not args.conditional_only:
eval_table = pd.concat([eval_table, eval_conditional], axis=1)
# EVALUATION
print_line()
print_line()
print(eval_table)
print_line()
print_line()
eval_table.to_csv(os.path.join(model.results_directory, 'evaluation.csv'))
gather_images(model.results_directory)
def get_eval_table(args, results_directory):
logger = logging.getLogger()
if args.load_run:
logger.info(f'Loading previous runs [{args.start_cv},{args.end_cv}[')
estimations = load_estimations(results_directory, start_cv=args.start_cv, end_cv=args.end_cv)
else:
logger.info(f'Running runs [{args.start_cv},{args.end_cv}[')
estimations = [run_estimation(args, i_cv) for i_cv in range(args.start_cv, args.end_cv)]
estimations = pd.concat(estimations, ignore_index=True)
estimations.to_csv(os.path.join(results_directory, 'estimations.csv'))
# EVALUATION
eval_table = evaluate_estimator(Config.INTEREST_PARAM_NAME, estimations)
print_line()
print_line()
print(eval_table)
print_line()
print_line()
eval_table.to_csv(os.path.join(results_directory, 'estimation_evaluation.csv'))
return eval_table
def get_eval_conditional(args, results_directory):
logger = logging.getLogger()
if args.load_run:
logger.info(f'Loading previous runs [{args.start_cv},{args.end_cv}[')
conditional_estimations = load_conditional_estimations(results_directory, start_cv=args.start_cv, end_cv=args.end_cv)
else:
logger.info(f'Running runs [{args.start_cv},{args.end_cv}[')
conditional_estimations = [run_conditional_estimation(args, i_cv) for i_cv in range(args.start_cv, args.end_cv)]
conditional_estimations = pd.concat(conditional_estimations, ignore_index=True)
conditional_estimations.to_csv(os.path.join(results_directory, 'conditional_estimations.csv'))
# EVALUATION
eval_conditional = evaluate_conditional_estimation(conditional_estimations, interest_param_name=Config.INTEREST_PARAM_NAME)
print_line()
print_line()
print(eval_conditional)
print_line()
print_line()
eval_conditional.to_csv(os.path.join(results_directory, 'conditional_evaluation.csv'))
return eval_conditional
def run_estimation(args, i_cv):
logger = logging.getLogger()
print_line()
logger.info('Running iter n°{}'.format(i_cv))
print_line()
result_row = {'i_cv': i_cv}
# LOAD/GENERATE DATA
logger.info('Set up data generator')
config = Config()
seed = SEED + i_cv * 5
train_generator = Generator(seed)
valid_generator = Generator(seed+1)
test_generator = Generator(seed+2)
# SET MODEL
logger.info('Set up classifier')
model = build_model(args, i_cv)
os.makedirs(model.results_path, exist_ok=True)
flush(logger)
# TRAINING / LOADING
train_or_load_classifier(model, train_generator, config.CALIBRATED, config.N_TRAINING_SAMPLES, retrain=args.retrain)
# CHECK TRAINING
logger.info('Generate validation data')
X_valid, y_valid, w_valid = valid_generator.generate(*config.CALIBRATED, n_samples=config.N_VALIDATION_SAMPLES, no_grad=True)
result_row.update(evaluate_classifier(model, X_valid, y_valid, w_valid, prefix='valid'))
# MEASUREMENT
evaluate_summary_computer(model, X_valid, y_valid, w_valid, n_bins=N_BINS, prefix='valid_', suffix='')
iter_results = [run_estimation_iter(model, result_row, i, test_config, valid_generator, test_generator, n_bins=N_BINS)
for i, test_config in enumerate(config.iter_test_config())]
result_table = pd.DataFrame(iter_results)
result_table.to_csv(os.path.join(model.results_path, 'estimations.csv'))
logger.info('Plot params')
param_names = config.PARAM_NAMES
for name in param_names:
plot_params(name, result_table, title=model.full_name, directory=model.results_path)
logger.info('DONE')
return result_table
def run_estimation_iter(model, result_row, i_iter, config, valid_generator, test_generator, n_bins=N_BINS):
logger = logging.getLogger()
logger.info('-'*45)
logger.info(f'iter : {i_iter}')
flush(logger)
iter_directory = os.path.join(model.results_path, f'iter_{i_iter}')
os.makedirs(iter_directory, exist_ok=True)
result_row['i'] = i_iter
result_row['n_test_samples'] = config.N_TESTING_SAMPLES
suffix = f'-mu={config.TRUE.mu:1.2f}_r={config.TRUE.r}_lambda={config.TRUE.lam}'
logger.info('Generate testing data')
test_generator.reset()
X_test, y_test, w_test = test_generator.generate(*config.TRUE, n_samples=config.N_TESTING_SAMPLES, no_grad=True)
# PLOT SUMMARIES
evaluate_summary_computer(model, X_test, y_test, w_test, n_bins=n_bins, prefix='', suffix=suffix, directory=iter_directory)
logger.info('Set up NLL computer')
compute_summaries = model.summary_computer(n_bins=n_bins)
compute_nll = NLLComputer(compute_summaries, valid_generator, X_test, w_test, config=config)
# NLL PLOTS
plot_nll_around_min(compute_nll, config.TRUE, iter_directory, suffix)
# MINIMIZE NLL
logger.info('Prepare minuit minimizer')
minimizer = get_minimizer(compute_nll, config.CALIBRATED, config.CALIBRATED_ERROR)
result_row.update(evaluate_minuit(minimizer, config.TRUE, iter_directory, suffix=suffix))
return result_row.copy()
def run_conditional_estimation(args, i_cv):
logger = logging.getLogger()
print_line()
logger.info('Running iter n°{}'.format(i_cv))
print_line()
result_row = {'i_cv': i_cv}
# LOAD/GENERATE DATA
logger.info('Set up data generator')
config = Config()
seed = SEED + i_cv * 5
train_generator = Generator(seed)
valid_generator = Generator(seed+1)
test_generator = Generator(seed+2)
# SET MODEL
logger.info('Set up classifier')
model = build_model(args, i_cv)
os.makedirs(model.results_path, exist_ok=True)
flush(logger)
# TRAINING / LOADING
train_or_load_classifier(model, train_generator, config.CALIBRATED, config.N_TRAINING_SAMPLES, retrain=args.retrain)
# CHECK TRAINING
logger.info('Generate validation data')
X_valid, y_valid, w_valid = valid_generator.generate(*config.CALIBRATED, n_samples=config.N_VALIDATION_SAMPLES, no_grad=True)
result_row.update(evaluate_classifier(model, X_valid, y_valid, w_valid, prefix='valid'))
# MEASUREMENT
evaluate_summary_computer(model, X_valid, y_valid, w_valid, n_bins=N_BINS, prefix='valid_', suffix='')
iter_results = [run_conditional_estimation_iter(model, result_row, i, test_config, valid_generator, test_generator, n_bins=N_BINS)
for i, test_config in enumerate(config.iter_test_config())]
conditional_estimate = pd.concat(iter_results)
conditional_estimate['i_cv'] = i_cv
fname = os.path.join(model.results_path, "conditional_estimations.csv")
conditional_estimate.to_csv(fname)
logger.info('DONE')
return conditional_estimate
def run_conditional_estimation_iter(model, result_row, i_iter, config, valid_generator, test_generator, n_bins=N_BINS):
logger = logging.getLogger()
logger.info('-'*45)
logger.info(f'iter : {i_iter}')
flush(logger)
iter_directory = os.path.join(model.results_path, f'iter_{i_iter}')
os.makedirs(iter_directory, exist_ok=True)
logger.info('Generate testing data')
test_generator.reset()
X_test, y_test, w_test = test_generator.generate(*config.TRUE, n_samples=config.N_TESTING_SAMPLES, no_grad=True)
# SUMMARIES
logger.info('Set up NLL computer')
compute_summaries = model.summary_computer(n_bins=n_bins)
compute_nll = NLLComputer(compute_summaries, valid_generator, X_test, w_test, config=config)
# MEASURE STAT/SYST VARIANCE
logger.info('MEASURE STAT/SYST VARIANCE')
conditional_results = make_conditional_estimation(compute_nll, config)
fname = os.path.join(iter_directory, "no_nuisance.csv")
conditional_estimate = pd.DataFrame(conditional_results)
conditional_estimate['i'] = i_iter
conditional_estimate.to_csv(fname)
return conditional_estimate
def make_conditional_estimation(compute_nll, config):
results = []
for j, nuisance_parameters in enumerate(config.iter_nuisance()):
compute_nll_no_nuisance = lambda mu : compute_nll(*nuisance_parameters, mu)
minimizer = get_minimizer_no_nuisance(compute_nll_no_nuisance, config.CALIBRATED, config.CALIBRATED_ERROR)
results_row = evaluate_minuit(minimizer, config.TRUE, do_hesse=False)
results_row['j'] = j
for name, value in zip(config.CALIBRATED.nuisance_parameters_names, nuisance_parameters):
results_row[name] = value
results_row[name+_TRUTH] = config.TRUE[name]
results.append(results_row)
print(f"ncalls = {results_row['ncalls']}", flush=True)
return results
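# Hedged reading aid (not part of the original script): each row returned by
# make_conditional_estimation corresponds to one point j of the nuisance grid and
# combines the minuit fit summary from evaluate_minuit (which at least reports
# 'ncalls', as printed above) with
#     results_row['j'] = j
#     results_row[name] = fixed nuisance value
#     results_row[name + _TRUTH] = true nuisance value
# so the resulting DataFrame can be grouped by j to study the stat-only variance.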
if __name__ == '__main__':
main()
|
the-stack_0_19045 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Train
for _ in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images,
y_: mnist.test.labels}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='../cache/mnist',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
the-stack_0_19048 | # C2SMART Lab, NYU
# NCHRP 03-137
# @file SSM_DRAC_CorrCoef.py
# @author Fan Zuo
# @author Di Yang
# @date 2020-10-18
import pandas as pd
import numpy as np
from shapely.geometry import Polygon
import math
import time
import multiprocessing as mp
import glob
from scipy.stats import spearmanr
from scipy.stats import kde
import matplotlib.pyplot as plt
import matplotlib as mpl
# 2 mpss = 6.56168 fpss
# 3 mpss = 9.84252 fpss
# 4 mpss = 13.1234 fpss
def main(dataset, dataset_ref, time_interval, threshold, ssm_type, start_point, end_point):
"""
The main processing function of estimating the correlation coefficient.
    Returns the correlation coefficient of the number of Surrogate Safety Measure (SSM) events
    between the target and the reference (100 MPR) data.
Keyword arguments:
>>> dataset: The generated SSM from the BSM data. (Other MPR file, str)
>>> dataset_ref: The generated SSM from the BSM data for comparison usage. (the 100MPR file, str)
>>> time_interval: The time window that split the dataset, generated from SSM_DRAC_Opt.py. (5/10/15 min, int)
>>> threshold: The threshold of identifying the unsafe movements, generated from SSM_DRAC_Opt.py. (float)
>>> ssm_type: The type of the SSM output.
>>> start_point: The start time of the dataset (second)
>>> end_point: The end time of the dataset (second)
RETURN: Spearmans correlation coefficient of number of SSM events between target and reference data
"""
if ssm_type == '1':
# Read the whole file using useful columns.
df = pd.read_csv(dataset, usecols=['transtime', 'Avg_Acceleration'])
df = df.sort_values(by=['transtime'])
# Select rows with acceleration lower than threshold
# If the start time is not from 0, user can use the following sentence to modify the starting time point
df = df[df.transtime > start_point]
# Filter the the data by threshold, select rows with acceleration lower than threshold
df = df[df.Avg_Acceleration < (-1 * threshold * 3.28)]
# Define the time interval from minutes to seconds
time_jump = time_interval * 60
# Define the time ranges and bins, be careful of number of words in the file name
ranges = np.arange(start_point, end_point+1, time_jump).tolist()
df_sim = df.groupby(pd.cut(df.transtime, ranges)).count()
df_sim_corr = df_sim['transtime'].tolist()
# Read the whole reference file using useful columns.
df_ref = pd.read_csv(dataset_ref, usecols=['transtime', 'X', 'Y', 'Avg_Acceleration'])
        df_ref = df_ref.sort_values(by=['transtime'])
# Select rows with acceleration lower than threshold
# If the start time is not from 0, user can use the following sentence to modify the starting time point
df_ref = df_ref[df_ref.transtime > start_point]
# Filter the the data by threshold
df_ref = df_ref[df_ref.Avg_Acceleration < (-1 * threshold * 3.28)]
# Define the time interval from minutes to seconds
df_uni_ref = df_ref
time_jump = time_interval * 60
# Define the time ranges and bins, be careful of number of words in the file name
ranges = np.arange(start_point, end_point+1, time_jump).tolist()
df_sim_ref = df_uni_ref.groupby(pd.cut(df_uni_ref.transtime, ranges)).count()
df_sim_corr_ref = df_sim_ref['transtime'].tolist()
elif ssm_type == '2':
# Read the whole file using useful columns.
df = pd.read_csv(dataset, usecols=['Time', 'DRAC'])
df = df.sort_values(by=['Time'])
# Select rows with acceleration lower than threshold
# If the start time is not from 0, user can use the following sentence to modify the starting time point
df = df[df.Time > start_point]
# Filter the the data by threshold
df = df[df.DRAC > (threshold * 3.28)]
# Define the time interval from minutes to seconds
time_jump = time_interval * 60
# Define the time ranges and bins, be careful of number of words in the file name
ranges = np.arange(start_point, end_point + 1, time_jump).tolist()
df_sim = df.groupby(pd.cut(df.Time, ranges)).count()
df_sim_corr = df_sim['Time'].tolist()
# Read the whole reference file using useful columns.
df_ref = pd.read_csv(dataset_ref, usecols=['Time', 'DRAC'])
df_ref = df_ref.sort_values(by=['Time'])
# Select rows with acceleration lower than threshold
# If the start time is not from 0, user can use the following sentence to modify the starting time point
df_ref = df_ref[df_ref.Time > start_point]
# Filter the the data by threshold
df_ref = df_ref[df_ref.DRAC > (threshold * 3.28)]
# Define the time interval from minutes to seconds
df_uni_ref = df_ref
time_jump = time_interval * 60
# Define the time ranges and bins, be careful of number of words in the file name
ranges = np.arange(start_point, end_point + 1, time_jump).tolist()
df_sim_ref = df_uni_ref.groupby(pd.cut(df_uni_ref.Time, ranges)).count()
df_sim_corr_ref = df_sim_ref['Time'].tolist()
elif ssm_type == '3':
# Read the whole file using useful columns.
df = pd.read_csv(dataset, usecols=['Time'])
df = df.sort_values(by=['Time'])
# Select rows with acceleration lower than threshold
# If the start time is not from 0, user can use the following sentence to modify the starting time point
df = df[df.Time > start_point]
# Define the time interval from minutes to seconds
time_jump = time_interval * 60
# Define the time ranges and bins, be careful of number of words in the file name
ranges = np.arange(start_point, end_point + 1, time_jump).tolist()
df_sim = df.groupby(pd.cut(df.Time, ranges)).count()
df_sim_corr = df_sim['Time'].tolist()
# Read the whole reference file using useful columns.
df_ref = pd.read_csv(dataset_ref, usecols=['Time'])
df_ref = df_ref.sort_values(by=['Time'])
# Select rows with acceleration lower than threshold
# If the start time is not from 0, user can use the following sentence to modify the starting time point
df_ref = df_ref[df_ref.Time > start_point]
# Define the time interval from minutes to seconds
df_uni_ref = df_ref
time_jump = time_interval * 60
# Define the time ranges and bins, be careful of number of words in the file name
ranges = np.arange(start_point, end_point + 1, time_jump).tolist()
df_sim_ref = df_uni_ref.groupby(pd.cut(df_uni_ref.Time, ranges)).count()
df_sim_corr_ref = df_sim_ref['Time'].tolist()
print (df_sim_corr, len(df_sim_corr))
print (df_sim_corr_ref, len(df_sim_corr_ref))
# calculate spearman's correlation
coef, p = spearmanr(df_sim_corr, df_sim_corr_ref)
    print('For time interval', time_interval, 'min, threshold', threshold, ', spearmans correlation coefficient: %.3f' % coef)
# interpret the significance
alpha = 0.05
if p > alpha:
print('Samples are uncorrelated (fail to reject H0) p=%.3f' % p)
else:
print('Samples are correlated (reject H0) p=%.3f' % p)
return(coef)
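# Hedged usage sketch (not part of the original script); the file names below are
# hypothetical placeholders for SSM outputs produced by SSM_DRAC_Opt.py:
#
#   coef = main('SSM_DRAC_75MPR.csv', 'SSM_DRAC_100MPR.csv',
#               time_interval=5, threshold=3.0, ssm_type='2',
#               start_point=0, end_point=3600)
#
# i.e. 5-minute bins, a 3 m/s^2 DRAC threshold and a one-hour analysis window.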
if __name__ == "__main__":
program_st = time.time()
print("******************* Start Program *******************")
print("Start time %s" % (time.strftime('%X', time.localtime(program_st))))
s_t = input("Please select the type of the SSM (1 - Hard Braking, 2 - DRAC, 3 - TTCD):")
time_window = int(input("Please input the optimal time interval:"))
opt_threshold = float(input("Please input the optimal threshold:"))
ssm_file_100 = input("Please input the name of the 100MPR SSM file(*.csv):")
ssm_file_75 = input("Please input the name of the 75MPR SSM file(*.csv):")
ssm_file_50 = input("Please input the name of the 50MPR SSM file(*.csv):")
ssm_file_20 = input("Please input the name of the 20MPR SSM file(*.csv):")
ssm_file_5 = input("Please input the name of the 5MPR SSM file(*.csv):")
    Start_Point = float(input("Please input the start time of the sub-interval (seconds): "))
    End_Point = float(input("Please input the end time of the sub-interval (seconds): "))
Diff_100_75 = main(ssm_file_75, ssm_file_100, time_window, opt_threshold, s_t, Start_Point, End_Point)
Diff_100_50 = main(ssm_file_50, ssm_file_100, time_window, opt_threshold, s_t, Start_Point, End_Point)
Diff_100_20 = main(ssm_file_20, ssm_file_100, time_window, opt_threshold, s_t, Start_Point, End_Point)
Diff_100_5 = main(ssm_file_5, ssm_file_100, time_window, opt_threshold, s_t, Start_Point, End_Point)
Diff_result = [Diff_100_75, Diff_100_50, Diff_100_20, Diff_100_5]
print(Diff_result)
x_label = ["100% & 75%", "100% & 50%", "100% & 20%", "100% & 5%"]
x_pos = [i for i, _ in enumerate(x_label)]
mpl.rcParams['font.size'] = 18.0
mpl.rcParams['axes.titlesize'] = 18.0
csfont = {'fontname': 'Times New Roman'}
plt.plot(x_pos, Diff_result, 'o-')
for x,y in zip(x_pos,Diff_result):
label = "{:.2f}".format(y)
plt.annotate(label, # this is the text
(x,y), # this is the point to label
textcoords="offset points", # how to position the text
xytext=(0,10), # distance from text to points (x,y)
ha='center', va='bottom', size = 16,**csfont)
plt.xlabel("Market Penetration Rate Pair",**csfont)
plt.ylabel("Correlation Coefficient",**csfont)
plt.title("Correlation coefficient between\n 100% MPR and each MPR level",**csfont)
plt.grid()
plt.xticks(x_pos, x_label,**csfont)
plt.yticks(**csfont)
plt.ylim(0, 1)
figure = plt.gcf()
figure.set_size_inches(7, 6)
# Adjust the output name if there are multiple results will be generated.
plt.savefig('CorrCoef_Typ%s.png'%(s_t),bbox_inches='tight',dpi=100)
plt.show()
ed_time = time.time()
print("End time %s (%f)" % (time.strftime('%X', time.localtime(ed_time)), (ed_time - program_st)))
print("******************* End Program *******************") |
the-stack_0_19049 | # adds gdb "info registers eflags" feature to lldb
import lldb
# See: http://en.wikipedia.org/wiki/FLAGS_register
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
FLAGS = [ 'CF', '', 'PF', '', 'AF', '', 'ZF', 'SF', 'TF', 'IF', 'DF', 'OF', 'IOPL', 'IOPL', 'NT', '', 'RF', 'VM', 'AC', 'VIF', 'VIP', 'ID' ]
def ifl(debugger, command, result, internal_dict):
ci = debugger.GetCommandInterpreter()
res = lldb.SBCommandReturnObject()
ci.HandleCommand('register read --format b rflags', res)
flags = str(res.GetOutput())[::-1].strip()
syms = []
    for i in range(0, 22):  # check all 22 documented flag bits, including ID (bit 21)
if flags[i] == '1' and FLAGS[i] != '': syms.append(FLAGS[i])
res = lldb.SBCommandReturnObject()
ci.HandleCommand('register read --format x rflags', res)
print('rflags: %s %s' % (res.GetOutput()[11:-1], str(syms)))
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand('command script add -f ifl.ifl ifl')
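# Hedged usage sketch (assumed typical lldb workflow, not part of this file):
#   (lldb) command script import /path/to/ifl.py
#   (lldb) ifl
# The command prints something like
#   rflags: 0x0000000000000246 ['PF', 'ZF', 'IF']
# where the listed mnemonics depend on which bits of rflags are currently set.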
|
the-stack_0_19050 | from algorithms.utils import read_fasta, BLOSUM62
import numpy as np
def main_GCON(s, t, scoring_matrix, gap):
"""
Global Alignment with Constant Gap Penalty.
Inputs: Two protein strings s and t in FASTA format.
Return: The maximum alignment score between s and t using BLOSUM62 scoring matrix
and a constant gap penalty equal to 5.
"""
neg_infinity = -999999
M = np.zeros((len(s) + 1, len(t) + 1), dtype=int) # main table
L = np.full((len(s) + 1, len(t) + 1), neg_infinity, dtype=int) # lower scores
U = np.full((len(s) + 1, len(t) + 1), neg_infinity, dtype=int) # upper scores
# setting constant gap penalty
M[1:, 0] = gap
M[0, 1:] = gap
for i in range(1, len(s) + 1):
for j in range(1, len(t) + 1):
# row horizontal
L[i][j] = max([M[i - 1][j] + gap, L[i - 1][j]])
# column vertical
U[i][j] = max([M[i][j - 1] + gap, U[i][j - 1]])
# diagonal
M[i][j] = max([M[i - 1][j - 1] + scoring_matrix.loc[s[i - 1], t[j - 1]],
L[i][j], U[i][j]])
# bottom-right corner of the M is the max score.
return M[-1][-1]
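# Hedged worked example (not part of the original solution): for the Rosalind GCON
# sample pair s = 'PLEASANTLY', t = 'MEANLY' with BLOSUM62 and a constant gap
# penalty of -5, the published sample answer is a maximum alignment score of 13:
#   main_GCON('PLEASANTLY', 'MEANLY', BLOSUM62(), -5)   # expected 13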
if __name__ == '__main__': # pragma: no cover
s, t = read_fasta('../datasets/GCON_1.txt') # pragma: no cover
max_score = main_GCON(s, t, BLOSUM62(), -5) # pragma: no cover
print(max_score) # pragma: no cover
|
the-stack_0_19052 | import os
import xml.etree.ElementTree as ET
from PIL import Image
from tqdm import tqdm
from yolo import YOLO
from utils.utils import get_classes
from utils.utils_map import get_coco_map, get_map
if __name__ == "__main__":
'''
    Unlike AP, Recall and Precision are not area-based quantities: their values change with the confidence threshold.
    The Recall and Precision reported in the mAP results correspond to a prediction confidence threshold of 0.5.
    The txt files obtained here in ./map_out/detection-results/ contain more boxes than a direct predict run, because a
    deliberately low threshold is used so that Recall and Precision can be evaluated at different thresholds to compute the mAP.
'''
#------------------------------------------------------------------------------------------------------------------#
    # map_mode specifies what this script computes when it is run
    # map_mode = 0: the full mAP pipeline, i.e. obtain predictions, obtain ground truth and compute the VOC mAP.
    # map_mode = 1: only obtain the prediction results.
    # map_mode = 2: only obtain the ground-truth boxes.
    # map_mode = 3: only compute the VOC mAP.
    # map_mode = 4: use the COCO toolbox to compute the 0.50:0.95 mAP of the current dataset; this requires the
    #               predictions, the ground truth and an installed pycocotools.
#-------------------------------------------------------------------------------------------------------------------#
map_mode = 0
#-------------------------------------------------------#
    # classes_path specifies the classes for which the VOC mAP is measured
    # it should normally match the classes_path used for training and prediction
#-------------------------------------------------------#
classes_path = 'model_data/my_classes.txt'
#-------------------------------------------------------#
    # MINOVERLAP specifies which mAP0.x to compute
    # for example, set MINOVERLAP = 0.75 to compute mAP0.75
#-------------------------------------------------------#
MINOVERLAP = 0.5
#-------------------------------------------------------#
    # map_vis toggles visualisation of the VOC mAP computation; enabling it may make the program appear unresponsive
#-------------------------------------------------------#
map_vis = False
#-------------------------------------------------------#
    # points to the folder containing the VOC dataset
    # by default this is the VOC dataset under the repository root
#-------------------------------------------------------#
VOCdevkit_path = 'VOCdevkit'
#-------------------------------------------------------#
    # folder where the results are written, map_out by default
#-------------------------------------------------------#
map_out_path = 'map_out'
image_ids = open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Main/test.txt")).read().strip().split()
if not os.path.exists(map_out_path):
os.makedirs(map_out_path)
if not os.path.exists(os.path.join(map_out_path, 'ground-truth')):
os.makedirs(os.path.join(map_out_path, 'ground-truth'))
if not os.path.exists(os.path.join(map_out_path, 'detection-results')):
os.makedirs(os.path.join(map_out_path, 'detection-results'))
if not os.path.exists(os.path.join(map_out_path, 'images-optional')):
os.makedirs(os.path.join(map_out_path, 'images-optional'))
class_names, _ = get_classes(classes_path)
if map_mode == 0 or map_mode == 1:
print("Load model.")
yolo = YOLO(confidence = 0.001, nms_iou = 0.5)
print("Load model done.")
print("Get predict result.")
for image_id in tqdm(image_ids):
image_path = os.path.join(VOCdevkit_path, "VOC2007/JPEGImages/"+image_id+".jpg")
image = Image.open(image_path)
if map_vis:
image.save(os.path.join(map_out_path, "images-optional/" + image_id + ".jpg"))
yolo.get_map_txt(image_id, image, class_names, map_out_path)
print("Get predict result done.")
if map_mode == 0 or map_mode == 2:
print("Get ground truth result.")
for image_id in tqdm(image_ids):
with open(os.path.join(map_out_path, "ground-truth/"+image_id+".txt"), "w") as new_f:
root = ET.parse(os.path.join(VOCdevkit_path, "VOC2007/Annotations/"+image_id+".xml")).getroot()
for obj in root.findall('object'):
difficult_flag = False
if obj.find('difficult')!=None:
difficult = obj.find('difficult').text
if int(difficult)==1:
difficult_flag = True
obj_name = obj.find('name').text
if obj_name not in class_names:
continue
bndbox = obj.find('bndbox')
left = bndbox.find('xmin').text
top = bndbox.find('ymin').text
right = bndbox.find('xmax').text
bottom = bndbox.find('ymax').text
if difficult_flag:
new_f.write("%s %s %s %s %s difficult\n" % (obj_name, left, top, right, bottom))
else:
new_f.write("%s %s %s %s %s\n" % (obj_name, left, top, right, bottom))
print("Get ground truth result done.")
if map_mode == 0 or map_mode == 3:
print("Get map.")
get_map(MINOVERLAP, True, path = map_out_path)
print("Get map done.")
if map_mode == 4:
print("Get map.")
get_coco_map(class_names = class_names, path = map_out_path)
print("Get map done.")
|
the-stack_0_19054 | from collections import defaultdict
import subprocess
import lsst_camera.cp_pipe_drivers as cpd
repo = '/lsstdata/offline/teststand/BOT/gen2repo'
run = '6790D'
imageType = 'FLAT'
raftName = 'R22'
selection = (f'imageType=="{imageType}" and run=="{run}" '
             f'and raftName=="{raftName}"')
visit_dict = cpd.VisitDict(repo, selection)
visits = sorted(list(visit_dict.keys()))
visit_pairs = defaultdict(list)
for visit in visits:
my_df = visit_dict.df.query(f'visit=={visit}')
if len(my_df) == 9:
row = my_df.query(f'detectorName=="S00"').iloc[0]
visit_pairs[(row['filter'], row.expTime)].append(row.visit)
visit_pair_list = []
for items in visit_pairs.values():
if len(items) != 2:
continue
visit_pair_list.append(','.join([str(_) for _ in items]))
visit_pair_list = sorted(visit_pair_list)
visit_list = ' '.join(visit_pair_list[::4])
outdir = 'calib_products'
rerun_folder = 'bf_kernels'
command = (f'makeBrighterFatterKernel.py {outdir} --rerun {rerun_folder} '
f'--longlog --id raftName={raftName} '
f'--visit-pairs {visit_list} '
'--config isr.doCrosstalk=False isr.doBias=True '
'isr.doDark=True level=AMP --clobber-config '
'--calib CALIB')
print(command)
subprocess.check_call(command, shell=True)
|
the-stack_0_19055 | # first use of slow_net and fast_net
import nengo
from nengo import spa
from nengo.dists import Exponential, Choice, Uniform
from mem_net import MemNet
from adder_env import create_adder_env
# Note that D is equal to the dimensions of the addend
from constants import *
import numpy as np
from collections import OrderedDict
import itertools
import ipdb
## Generate the vocab awkwardly
rng = np.random.RandomState(0)
vocab = spa.Vocabulary(D, rng=rng)
number_dict = {"ONE": 1, "TWO": 2, "THREE": 3, "FOUR": 4, "FIVE": 5,
"SIX": 6, "SEVEN": 7, "EIGHT": 8, "NINE": 9}
number_ordered = OrderedDict(sorted(number_dict.items(), key=lambda t: t[1]))
# This should be set to 10 for the actual final test
number_range = 4
number_list = list(number_ordered.keys())
def nearest(d):
from scipy.linalg import sqrtm
p = nengo.dists.UniformHypersphere(surface=True).sample(d, d)
return np.dot(p, np.linalg.inv(sqrtm(np.dot(p.T, p))))
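# nearest(d) above returns P (P^T P)^(-1/2), i.e. the orthogonal factor of the
# polar decomposition of a random matrix, so its rows form d mutually orthogonal
# unit vectors. A hedged sanity check (illustrative only, not part of the model):
#   V = nearest(D)
#   assert np.allclose(np.dot(V, V.T), np.eye(D), atol=1e-6)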
orth_vecs = nearest(D)
for i in range(number_range):
print(number_list[i])
vocab.add(number_list[i], orth_vecs[i])
join_num = "+".join(number_list[0:number_range])
q_list = []
ans_list = []
for val in itertools.product(number_list, number_list):
# Filter for max count
if val[0] >= val[1]:
ans_val = number_dict[val[0]] + number_dict[val[1]]
if ans_val <= number_range:
q_list.append(
np.concatenate(
(vocab.parse(val[0]).v, vocab.parse(val[1]).v)
)
)
ans_list.append(
vocab.parse(number_list[ans_val-1]).v
)
print("%s+%s=%s" %(val[0], val[1], number_list[ans_val-1]))
# TESTING
q_list[0] = q_list[3]
ans_list[0] = ans_list[3]
## Generate specialised vocabs
state_vocab = spa.Vocabulary(less_D)
state_vocab.parse("RUN+NONE")
with nengo.Network(label="Root Net", seed=0) as model:
env = create_adder_env(q_list, ans_list, state_vocab.parse("NONE").v, vocab, ans_dur=0.1)
with spa.SPA(vocabs=[vocab, state_vocab], label="Count Net", seed=0) as slow_net:
slow_net.q1 = spa.State(D, vocab=vocab)
slow_net.q2 = spa.State(D, vocab=vocab)
slow_net.answer = spa.State(D, vocab=vocab)
slow_net.op_state = MemNet(less_D, state_vocab, label="op_state")
input_keys = number_list[:-1]
output_keys = number_list[1:]
### Result circuit
## Incrementing memory
slow_net.res_assoc = spa.AssociativeMemory(input_vocab=vocab, output_vocab=vocab,
input_keys=input_keys, output_keys=output_keys,
wta_output=True)
## Starting memory
slow_net.count_res = MemNet(D, vocab, label="count_res")
## Increment result memory
slow_net.res_mem = MemNet(D, vocab, label="res_mem")
## Cleanup memory
slow_net.rmem_assoc = spa.AssociativeMemory(input_vocab=vocab,
wta_output=True)
### Total circuit
## Total memory
slow_net.tot_assoc = spa.AssociativeMemory(input_vocab=vocab, output_vocab=vocab,
input_keys=input_keys, output_keys=output_keys,
wta_output=True)
## Starting memory
slow_net.count_tot = MemNet(D, vocab, label="count_tot")
## Increment result memory
slow_net.tot_mem = MemNet(D, vocab, label="tot_mem")
## Cleanup memory
slow_net.tmem_assoc = spa.AssociativeMemory(input_vocab=vocab,
wta_output=True)
slow_net.ans_assoc = spa.AssociativeMemory(input_vocab=vocab,
wta_output=True)
## The memory that says when to stop incrementing
slow_net.count_fin = MemNet(D, vocab, label="count_fin")
### Comparison circuit
## State for easier insertion into Actions after threshold
slow_net.tot_fin_simi = spa.State(1)
slow_net.comp_tot_fin = spa.Compare(D)
# this network is only used during the on_input action, is it really necessary?
slow_net.fin_assoc = spa.AssociativeMemory(input_vocab=vocab,
wta_output=True)
### Compares that set the speed of the increment
## Compare for loading into start memory
slow_net.comp_load_res = spa.Compare(D)
## Compare for loading into incrementing memory
slow_net.comp_inc_res = spa.Compare(D)
## Cleanup for compare
slow_net.comp_assoc = spa.AssociativeMemory(input_vocab=vocab,
wta_output=True)
## Increment for compare and input
slow_net.gen_inc_assoc = spa.AssociativeMemory(input_vocab=vocab, output_vocab=vocab,
input_keys=input_keys, output_keys=output_keys,
wta_output=True)
main_actions = spa.Actions(
## If the input isn't blank, read it in
on_input=
"(dot(q1, %s) + dot(q2, %s))/2 "
"--> count_res = gen_inc_assoc, gen_inc_assoc = q1, count_tot = ONE, count_fin = fin_assoc, fin_assoc = 2.5*q2, op_state = RUN" % (join_num, join_num,),
## If not done, prepare next increment
cmp_fail=
"dot(op_state, RUN) - tot_fin_simi + 1.25*comp_inc_res - comp_load_res"
"--> op_state = 0.5*RUN - NONE, rmem_assoc = 2.5*count_res, tmem_assoc = 2.5*count_tot, "
"count_res_gate = CLOSE, count_tot_gate = CLOSE, op_state_gate = CLOSE, count_fin_gate = CLOSE, "
"comp_load_res_A = res_mem, comp_load_res_B = comp_assoc, comp_assoc = 2.5*count_res",
## If we're done incrementing write it to the answer
cmp_good=
"0.5*dot(op_state, RUN) + tot_fin_simi"
"--> ans_assoc = 8*count_res, op_state = 0.5*RUN,"
"count_res_gate = CLOSE, count_tot_gate = CLOSE, op_state_gate = CLOSE, count_fin_gate = CLOSE",
## Increment memory transfer
increment=
"0.3*dot(op_state, RUN) + 1.2*comp_load_res - comp_inc_res"
"--> res_assoc = 2.5*res_mem, tot_assoc = 2.5*tot_mem, "
"res_mem_gate = CLOSE, tot_mem_gate = CLOSE, op_state_gate = CLOSE, count_fin_gate = CLOSE, "
"comp_load_res_A = 0.75*ONE, comp_load_res_B = 0.75*ONE, "
"comp_inc_res_A = gen_inc_assoc, gen_inc_assoc = 2.5*res_mem, comp_inc_res_B = count_res",
)
slow_net.bg_main = spa.BasalGanglia(main_actions)
slow_net.thal_main = spa.Thalamus(slow_net.bg_main)
## Threshold preventing premature influence from comp_tot_fin similarity
thr = 0.25
thresh_ens = nengo.Ensemble(100, 1, encoders=Choice([[1]]), intercepts=Exponential(scale=(1 - thr) / 5.0, shift=thr, high=1),
eval_points=Uniform(thr, 1.1), n_eval_points=5000)
nengo.Connection(slow_net.comp_tot_fin.output, thresh_ens)
nengo.Connection(thresh_ens, slow_net.tot_fin_simi.input)
## Because the answer is being continuously output, we've got to threshold it by the comp_tot_fin similarity
ans_boost = nengo.networks.Product(200, dimensions=D, input_magnitude=2)
ans_boost.label = "ans_boost"
nengo.Connection(slow_net.ans_assoc.output, ans_boost.A)
nengo.Connection(thresh_ens, ans_boost.B,
transform=np.ones((D,1)))
nengo.Connection(ans_boost.output, slow_net.answer.input, transform=2.5)
# had to put the assoc connections here because bugs
# ideally they should be routable
cortical_actions = spa.Actions(
"res_mem = rmem_assoc, tot_mem = tmem_assoc",
"count_res = res_assoc, count_tot = tot_assoc",
"comp_tot_fin_A = count_fin",
"comp_tot_fin_B = 0.5*count_tot",
)
slow_net.cortical = spa.Cortical(cortical_actions)
nengo.Connection(env.q_in[D:], slow_net.q1.input)
nengo.Connection(env.q_in[:D], slow_net.q2.input)
nengo.Connection(env.op_in, slow_net.op_state.mem.input)
with spa.SPA(vocabs=[vocab], label="Fast Net", seed=0) as fast_net:
fast_net.final_cleanup = spa.AssociativeMemory(input_vocab=vocab,
threshold=0.2,
wta_output=True)
fast_net.speech = MemNet(D, vocab, label="speech")
nengo.Connection(slow_net.answer.output, fast_net.final_cleanup.input)
nengo.Connection(fast_net.final_cleanup.output, fast_net.speech.mem.input,
transform=2.5)
nengo.Connection(fast_net.speech.mem.output, env.set_ans)
# I don't know if sustaining this is absolutely necessary...
# The actual answer is comming out of the env anyways
nengo.Connection(env.reset, fast_net.speech.mem.reset, synapse=None)
# reset all the counting network
nengo.Connection(env.count_reset, slow_net.count_res.mem.reset, synapse=None)
nengo.Connection(env.count_reset, slow_net.count_fin.mem.reset, synapse=None)
nengo.Connection(env.count_reset, slow_net.count_tot.mem.reset, synapse=None)
nengo.Connection(env.count_reset, slow_net.op_state.mem.reset, synapse=None)
nengo.Connection(env.gate, fast_net.speech.mem.gate, synapse=None)
#sim = nengo.Simulator(model, dt=dt)
"""
get_data = "probe"
if get_data == "probe":
p_keys = nengo.Probe(env.env_keys, synapse=None)
p_final_ans = nengo.Probe(fast_net.final_cleanup.output)
p_speech = nengo.Probe(fast_net.speech.mem.output)
p_count_res = nengo.Probe(slow_net.count_res.mem.output)
p_count_fin = nengo.Probe(slow_net.count_fin.mem.output)
p_count_tot = nengo.Probe(slow_net.count_tot.mem.output)
p_ans_assoc = nengo.Probe(slow_net.ans_assoc.output)
p_thres_ens = nengo.Probe(thresh_ens)
else:
def file_func(filename):
fi = open("data/%s" %filename, "w")
def f(t, x):
fi.write("%s\n" %x)
return f
p_keys = nengo.Node(file_func("p_keys"), size_in=2*D)
nengo.Connection(env.env_keys, p_keys, synapse=None)
p_final_ans = nengo.Node(file_func("p_final_ans"), size_in=D)
nengo.Connection(fast_net.final_cleanup.output, p_final_ans)
p_speech = nengo.Node(file_func("p_speech"), size_in=D)
nengo.Connection(fast_net.speech.mem.output, p_speech)
p_count_res = nengo.Node(file_func("p_count_res"), size_in=D)
nengo.Connection(slow_net.count_res.mem.output, p_count_res)
p_count_fin = nengo.Node(file_func("p_count_fin"), size_in=D)
nengo.Connection(slow_net.count_fin.mem.output, p_count_fin)
p_count_tot = nengo.Node(file_func("p_count_tot"), size_in=D)
nengo.Connection(slow_net.count_tot.mem.output, p_count_tot)
p_ans_assoc = nengo.Node(file_func("p_ans_assoc"), size_in=D)
nengo.Connection(slow_net.ans_assoc.output, p_ans_assoc)
p_thres_ens = nengo.Node(file_func("p_thres_ens"), size_in=1)
nengo.Connection(thresh_ens, p_thres_ens)
print("Building")
sim = nengo.Simulator(model, dt=dt)
print("Running")
while env.env_cls.questions_answered < 4:
sim.step()
if env.env_cls.time_since_last_answer > 7.0:
print("UH OH")
ipdb.set_trace()
ipdb.set_trace()
np.savez_compressed("data/count_fig_data", p_count_res=sim.data[p_count_res], p_count_fin=sim.data[p_count_fin], p_count_tot=sim.data[p_count_tot], p_ans_assoc=sim.data[p_ans_assoc], p_thres_ens=sim.data[p_thres_ens])
np.savez_compressed("data/count_fig_env_data", t=t, p_keys=sim.data[p_keys], p_final_ans=sim.data[p_final_ans], p_speech=sim.data[p_speech])
""" |
the-stack_0_19056 | #!/usr/bin/env python
####################################################################
### This is the PYTHON version of program 6.2 from page 197 of #
### "Modeling Infectious Disease in humans and animals" #
### by Keeling & Rohani. #
### #
### % It is the SIR epidemic model with constant additive noise #
### added all the various rates. #
### Given the difficulties in integrating the dynamics, the user #
### is prompted for a integration time-step. #
####################################################################
###################################
### Written by Ilias Soumpasis #
### [email protected] (work) #
### [email protected] #
###################################
import scipy.integrate as spi
import numpy as np
import pylab as pl
beta = 1.0
gamma = 1 / 10.0
mu = 1 / (50 * 365.0)
X0 = 1e5
Y0 = 500
N0 = 1e6
Step = 1
ND = MaxTime = 5 * 365.0
TS = 1.0
INPUT0 = np.hstack((X0, Y0))
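# Underlying deterministic skeleton that the noise terms perturb (a reading aid,
# not part of the original program):
#   dX/dt = mu*N0 - beta*X*Y/N0 - mu*X
#   dY/dt = beta*X*Y/N0 - gamma*Y - mu*Y
# Each rate r additionally receives an additive noise term sqrt(r)*P, where the
# P values are drawn below as Normal(0, 1/Step) once per integration step.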
def diff_eqs(INP, t):
"""The main set of equations"""
Y = np.zeros((2))
V = INP
Y[0] = (
(mu * N0 + np.sqrt(mu * N0) * P[0])
- (beta * V[0] * V[1] / N0 + np.sqrt(beta * V[0] * V[1] / N0) * P[1])
        - (mu * V[0] + np.sqrt(mu * V[0]) * P[2])  # deaths of susceptibles, proportional to V[0]
)
Y[1] = (
(beta * V[0] * V[1] / N0 + np.sqrt(beta * V[0] * V[1] / N0) * P[1])
- (gamma * V[1] + np.sqrt(gamma * V[1]) * P[3])
- (mu * V[1] + np.sqrt(mu * V[1]) * P[4])
)
return Y # For odeint
T = np.zeros((int(np.ceil(ND / Step)), 1))
RES = np.zeros((int(np.ceil(ND / Step)), 2))
INPUT = INPUT0
t = 0
loop = 0
sqrtStep = np.sqrt(Step)
while t < ND and INPUT[0] > 0 and INPUT[1] > 0:
t_start = 0.0
t_end = t_start + Step
t_inc = TS
t_range = np.arange(t_start, t_end + t_inc, t_inc)
P = np.random.normal(size=5) / sqrtStep
PRES = spi.odeint(diff_eqs, INPUT, t_range)
T[loop] = t = t + Step
INPUT = PRES[-1]
RES[loop] = PRES[-1]
loop += 1
print(RES)
### plotting
pl.subplot(211)
pl.plot(T / 365.0, RES[:, 0], ".-g")
pl.xlabel("Time (Years)")
pl.ylabel("Susceptibles")
pl.subplot(212)
pl.plot(T / 365.0, RES[:, 1], ".-r")
pl.ylabel("Infected")
pl.xlabel("Time (Years)")
pl.show()
|
the-stack_0_19057 | # -*- coding: utf-8 -*-
"""
Chromaticity Coordinates of the Colour Checkers
===============================================
Defines the chromaticity coordinates of the colour checkers.
Each colour checker data is in the form of an :class:`OrderedDict` class
instance of 24 samples as follows::
{'name': 'xyY', ..., 'name': 'xyY'}
The following colour checkers are available:
- :attr:`colour.characterisation.datasets.colour_checkers.\
chromaticity_coordinates.CCS_COLORCHECKER1976`: *ColorChecker Classic*
developed by *McCamy et al. (1976)* at Macbeth, a Division of Kollmorgen.
- :attr:`colour.characterisation.datasets.colour_checkers.\
chromaticity_coordinates.CCS_COLORCHECKER2005`: *ColorChecker Classic*
reference data from *GretagMacbeth* published in 2005.
- :attr:`colour.characterisation.datasets.colour_checkers.\
chromaticity_coordinates.CCS_BABELCOLOR_AVERAGE`: Average data derived from
measurements of 30 *ColorChecker Classic* charts.
- :attr:`colour.characterisation.datasets.colour_checkers.\
chromaticity_coordinates.CCS_COLORCHECKER24_BEFORE_NOV2014`:
*ColorChecker Classic* reference data from *X-Rite* published in 2015 and
matching the data from *GretagMacbeth* published in 2005.
- :attr:`colour.characterisation.datasets.colour_checkers.\
chromaticity_coordinates.CCS_COLORCHECKER24_AFTER_NOV2014`:
*ColorChecker Classic* reference data from *X-Rite* published in 2015 and
matching the *ColorChecker Classic* edition after November 2014.
References
----------
- :cite:`BabelColor2012b` : BabelColor. (2012). The ColorChecker (since
1976!). Retrieved September 26, 2014, from
http://www.babelcolor.com/main_level/ColorChecker.htm
- :cite:`BabelColor2012c` : BabelColor. (2012). ColorChecker RGB and
spectra.
http://www.babelcolor.com/download/ColorChecker_RGB_and_spectra.xls
- :cite:`X-Rite2016` : X-Rite. (2016). New color specifications for
ColorChecker SG and Classic Charts. Retrieved October 29, 2018, from
http://xritephoto.com/ph_product_overview.aspx?ID=938&Action=Support&\
SupportID=5884#
"""
from __future__ import division, unicode_literals
import numpy as np
from collections import OrderedDict, namedtuple
from colour.colorimetry import CCS_ILLUMINANTS
from colour.models import Lab_to_XYZ, XYZ_to_xyY
from colour.utilities import CaseInsensitiveMapping
__author__ = 'Colour Developers, Danny Pascale '
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__copyright__ += ', '
__copyright__ += (
'BabelColor ColorChecker data: Copyright (C) 2004-2012 Danny Pascale '
'(www.babelcolor.com); used by permission.')
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'ColourChecker', 'SAMPLE_LABELS_COLORCHECKER_CLASSIC',
'DATA_COLORCHECKER1976', 'CCS_ILLUMINANT_COLORCHECKER1976',
'CCS_COLORCHECKER1976', 'DATA_COLORCHECKER2005',
'CCS_ILLUMINANT_COLORCHECKER2005', 'CCS_COLORCHECKER2005',
'DATA_BABELCOLOR_AVERAGE', 'CCS_ILLUMINANT_BABELCOLOR_AVERAGE',
'CCS_BABELCOLOR_AVERAGE', 'DATA_COLORCHECKER24_BEFORE_NOV2014',
'DATA_COLORCHECKER24_BEFORE_NOV2014',
'CCS_ILLUMINANT_COLORCHECKER24_BEFORE_NOV2014',
'CCS_COLORCHECKER24_BEFORE_NOV2014', 'DATA_COLORCHECKER24_AFTER_NOV2014',
'DATA_COLORCHECKER24_AFTER_NOV2014',
'CCS_ILLUMINANT_COLORCHECKER24_AFTER_NOV2014',
'CCS_COLORCHECKER24_AFTER_NOV2014', 'CCS_COLOURCHECKERS'
]
class ColourChecker(
namedtuple('ColourChecker', ('name', 'data', 'illuminant'))):
"""
*Colour Checker* data.
Parameters
----------
name : unicode
*Colour Checker* name.
data : OrderedDict
Chromaticity coordinates in *CIE xyY* colourspace.
illuminant : array_like
*Colour Checker* illuminant chromaticity coordinates.
"""
SAMPLE_LABELS_COLORCHECKER_CLASSIC = (
'dark skin',
'light skin',
'blue sky',
'foliage',
'blue flower',
'bluish green',
'orange',
'purplish blue',
'moderate red',
'purple',
'yellow green',
'orange yellow',
'blue',
'green',
'red',
'yellow',
'magenta',
'cyan',
'white 9.5 (.05 D)',
'neutral 8 (.23 D)',
'neutral 6.5 (.44 D)',
'neutral 5 (.70 D)',
'neutral 3.5 (1.05 D)',
'black 2 (1.5 D)',
)
"""
*ColorChecker Classic* sample labels.
SAMPLE_LABELS_COLORCHECKER_CLASSIC : tuple
"""
DATA_COLORCHECKER1976 = OrderedDict(
zip(SAMPLE_LABELS_COLORCHECKER_CLASSIC, [
np.array([0.4002, 0.3504, 0.1005]),
np.array([0.3773, 0.3446, 0.3582]),
np.array([0.2470, 0.2514, 0.1933]),
np.array([0.3372, 0.4220, 0.1329]),
np.array([0.2651, 0.2400, 0.2427]),
np.array([0.2608, 0.3430, 0.4306]),
np.array([0.5060, 0.4070, 0.3005]),
np.array([0.2110, 0.1750, 0.1200]),
np.array([0.4533, 0.3058, 0.1977]),
np.array([0.2845, 0.2020, 0.0656]),
np.array([0.3800, 0.4887, 0.4429]),
np.array([0.4729, 0.4375, 0.4306]),
np.array([0.1866, 0.1285, 0.0611]),
np.array([0.3046, 0.4782, 0.2339]),
np.array([0.5385, 0.3129, 0.1200]),
np.array([0.4480, 0.4703, 0.5910]),
np.array([0.3635, 0.2325, 0.1977]),
np.array([0.1958, 0.2519, 0.1977]),
np.array([0.3101, 0.3163, 0.9001]),
np.array([0.3101, 0.3163, 0.5910]),
np.array([0.3101, 0.3163, 0.3620]),
np.array([0.3101, 0.3163, 0.1977]),
np.array([0.3101, 0.3163, 0.0900]),
np.array([0.3101, 0.3163, 0.0313]),
]))
CCS_ILLUMINANT_COLORCHECKER1976 = (
CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['C'])
"""
*ColorChecker Classic 1976* illuminant.
CCS_ILLUMINANT_COLORCHECKER1976 : ndarray
"""
CCS_COLORCHECKER1976 = ColourChecker('ColorChecker 1976',
DATA_COLORCHECKER1976,
CCS_ILLUMINANT_COLORCHECKER1976)
"""
*ColorChecker Classic* developed by *McCamy et al.* (1976) at Macbeth, a
Division of Kollmorgen.
CCS_COLORCHECKER1976 : ColourChecker
"""
DATA_COLORCHECKER2005 = OrderedDict(
zip(SAMPLE_LABELS_COLORCHECKER_CLASSIC, [
np.array([0.4316, 0.3777, 0.1008]),
np.array([0.4197, 0.3744, 0.3495]),
np.array([0.2760, 0.3016, 0.1836]),
np.array([0.3703, 0.4499, 0.1325]),
np.array([0.2999, 0.2856, 0.2304]),
np.array([0.2848, 0.3911, 0.4178]),
np.array([0.5295, 0.4055, 0.3118]),
np.array([0.2305, 0.2106, 0.1126]),
np.array([0.5012, 0.3273, 0.1938]),
np.array([0.3319, 0.2482, 0.0637]),
np.array([0.3984, 0.5008, 0.4446]),
np.array([0.4957, 0.4427, 0.4357]),
np.array([0.2018, 0.1692, 0.0575]),
np.array([0.3253, 0.5032, 0.2318]),
np.array([0.5686, 0.3303, 0.1257]),
np.array([0.4697, 0.4734, 0.5981]),
np.array([0.4159, 0.2688, 0.2009]),
np.array([0.2131, 0.3023, 0.1930]),
np.array([0.3469, 0.3608, 0.9131]),
np.array([0.3440, 0.3584, 0.5894]),
np.array([0.3432, 0.3581, 0.3632]),
np.array([0.3446, 0.3579, 0.1915]),
np.array([0.3401, 0.3548, 0.0883]),
np.array([0.3406, 0.3537, 0.0311]),
]))
CCS_ILLUMINANT_COLORCHECKER2005 = (
CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['ICC D50'])
"""
*ColorChecker Classic 2005* illuminant.
CCS_ILLUMINANT_COLORCHECKER2005 : ndarray
"""
CCS_COLORCHECKER2005 = ColourChecker('ColorChecker 2005',
DATA_COLORCHECKER2005,
CCS_ILLUMINANT_COLORCHECKER2005)
"""
*ColorChecker Classic* data from *GretagMacbeth (2005)*.
CCS_COLORCHECKER2005 : ColourChecker
"""
DATA_BABELCOLOR_AVERAGE = OrderedDict(
zip(SAMPLE_LABELS_COLORCHECKER_CLASSIC, [
np.array([0.4325, 0.3788, 0.1034]),
np.array([0.4191, 0.3748, 0.3525]),
np.array([0.2761, 0.3004, 0.1847]),
np.array([0.3700, 0.4501, 0.1335]),
np.array([0.3020, 0.2877, 0.2324]),
np.array([0.2856, 0.3910, 0.4174]),
np.array([0.5291, 0.4075, 0.3117]),
np.array([0.2339, 0.2155, 0.1140]),
np.array([0.5008, 0.3293, 0.1979]),
np.array([0.3326, 0.2556, 0.0644]),
np.array([0.3989, 0.4998, 0.4435]),
np.array([0.4962, 0.4428, 0.4358]),
np.array([0.2040, 0.1696, 0.0579]),
np.array([0.3270, 0.5033, 0.2307]),
np.array([0.5709, 0.3298, 0.1268]),
np.array([0.4694, 0.4732, 0.6081]),
np.array([0.4177, 0.2704, 0.2007]),
np.array([0.2151, 0.3037, 0.1903]),
np.array([0.3488, 0.3628, 0.9129]),
np.array([0.3451, 0.3596, 0.5885]),
np.array([0.3446, 0.3590, 0.3595]),
np.array([0.3438, 0.3589, 0.1912]),
np.array([0.3423, 0.3576, 0.0893]),
np.array([0.3439, 0.3565, 0.0320]),
]))
CCS_ILLUMINANT_BABELCOLOR_AVERAGE = (
CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['ICC D50'])
"""
*BabelColor Average* illuminant.
CCS_ILLUMINANT_BABELCOLOR_AVERAGE : ndarray
"""
CCS_BABELCOLOR_AVERAGE = ColourChecker('BabelColor Average',
DATA_BABELCOLOR_AVERAGE,
CCS_ILLUMINANT_BABELCOLOR_AVERAGE)
"""
Average data derived from measurements of 30 *ColorChecker Classic* charts.
CCS_BABELCOLOR_AVERAGE : ColourChecker
"""
DATA_COLORCHECKER24_BEFORE_NOV2014 = OrderedDict(
zip(SAMPLE_LABELS_COLORCHECKER_CLASSIC, [
np.array([37.986, 13.555, 14.059]),
np.array([65.711, 18.13, 17.81]),
np.array([49.927, -4.88, -21.905]),
np.array([43.139, -13.095, 21.905]),
np.array([55.112, 8.844, -25.399]),
np.array([70.719, -33.397, -0.199]),
np.array([62.661, 36.067, 57.096]),
np.array([40.02, 10.41, -45.964]),
np.array([51.124, 48.239, 16.248]),
np.array([30.325, 22.976, -21.587]),
np.array([72.532, -23.709, 57.255]),
np.array([71.941, 19.363, 67.857]),
np.array([28.778, 14.179, -50.297]),
np.array([55.261, -38.342, 31.37]),
np.array([42.101, 53.378, 28.19]),
np.array([81.733, 4.039, 79.819]),
np.array([51.935, 49.986, -14.574]),
np.array([51.038, -28.631, -28.638]),
np.array([96.539, -0.425, 1.186]),
np.array([81.257, -0.638, -0.335]),
np.array([66.766, -0.734, -0.504]),
np.array([50.867, -0.153, -0.27]),
np.array([35.656, -0.421, -1.231]),
np.array([20.461, -0.079, -0.973]),
]))
"""
*ColorChecker24 - Before November 2014* colour checker data.
Notes
-----
- *X-Rite* data is given as *CIE L\\*a\\*b\\** colourspace values under
*CIE Illuminant D Series D50* for the
*CIE 1931 2 Degree Standard Observer*.
DATA_COLORCHECKER24_BEFORE_NOV2014 : ndarray
"""
DATA_COLORCHECKER24_BEFORE_NOV2014 = OrderedDict(
zip(
SAMPLE_LABELS_COLORCHECKER_CLASSIC,
XYZ_to_xyY(
Lab_to_XYZ(
list(DATA_COLORCHECKER24_BEFORE_NOV2014.values()),
CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer'][
'ICC D50']))))
CCS_ILLUMINANT_COLORCHECKER24_BEFORE_NOV2014 = (
CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['ICC D50'])
"""
*ColorChecker24 - Before November 2014* illuminant.
CCS_ILLUMINANT_COLORCHECKER24_BEFORE_NOV2014 : ndarray
"""
CCS_COLORCHECKER24_BEFORE_NOV2014 = ColourChecker(
'ColorChecker24 - Before November 2014',
DATA_COLORCHECKER24_BEFORE_NOV2014,
CCS_ILLUMINANT_COLORCHECKER24_BEFORE_NOV2014)
"""
Reference *ColorChecker Classic* data from *X-Rite (2015)*.
Notes
-----
- The rounded *ColorChecker24 - Before November 2014* values should match the
*ColorChecker Classic 2005* values. They are given for reference of the
original *CIE L\\*a\\*b\\** colourspace values.
CCS_COLORCHECKER24_BEFORE_NOV2014 : ColourChecker
"""
DATA_COLORCHECKER24_AFTER_NOV2014 = OrderedDict((
('dark skin', np.array([37.54, 14.37, 14.92])),
('light skin', np.array([64.66, 19.27, 17.5])),
('blue sky', np.array([49.32, -3.82, -22.54])),
('foliage', np.array([43.46, -12.74, 22.72])),
('blue flower', np.array([54.94, 9.61, -24.79])),
('bluish green', np.array([70.48, -32.26, -0.37])),
('orange', np.array([62.73, 35.83, 56.5])),
('purplish blue', np.array([39.43, 10.75, -45.17])),
('moderate red', np.array([50.57, 48.64, 16.67])),
('purple', np.array([30.1, 22.54, -20.87])),
('yellow green', np.array([71.77, -24.13, 58.19])),
('orange yellow', np.array([71.51, 18.24, 67.37])),
('blue', np.array([28.37, 15.42, -49.8])),
('green', np.array([54.38, -39.72, 32.27])),
('red', np.array([42.43, 51.05, 28.62])),
('yellow', np.array([81.8, 2.67, 80.41])),
('magenta', np.array([50.63, 51.28, -14.12])),
('cyan', np.array([49.57, -29.71, -28.32])),
('white 9.5 (.05 D)', np.array([95.19, -1.03, 2.93])),
('neutral 8 (.23 D)', np.array([81.29, -0.57, 0.44])),
('neutral 6.5 (.44 D)', np.array([66.89, -0.75, -0.06])),
('neutral 5 (.70 D)', np.array([50.76, -0.13, 0.14])),
('neutral 3.5 (1.05 D)', np.array([35.63, -0.46, -0.48])),
('black 2 (1.5 D)', np.array([20.64, 0.07, -0.46])),
))
"""
*ColorChecker24 - After November 2014* colour checker data.
Notes
-----
- *X-Rite* data is given as *CIE L\\*a\\*b\\** colourspace values under
*CIE Illuminant D Series D50* for the
*CIE 1931 2 Degree Standard Observer*.
DATA_COLORCHECKER24_AFTER_NOV2014 : ndarray
"""
DATA_COLORCHECKER24_AFTER_NOV2014 = OrderedDict(
zip(
DATA_COLORCHECKER24_AFTER_NOV2014.keys(),
XYZ_to_xyY(
Lab_to_XYZ(
list(DATA_COLORCHECKER24_AFTER_NOV2014.values()),
CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer'][
'ICC D50']))))
CCS_ILLUMINANT_COLORCHECKER24_AFTER_NOV2014 = (
CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['ICC D50'])
"""
*ColorChecker24 - After November 2014* illuminant.
CCS_ILLUMINANT_COLORCHECKER24_AFTER_NOV2014 : ndarray
"""
CCS_COLORCHECKER24_AFTER_NOV2014 = ColourChecker(
'ColorChecker24 - After November 2014', DATA_COLORCHECKER24_AFTER_NOV2014,
CCS_ILLUMINANT_COLORCHECKER24_AFTER_NOV2014)
"""
Reference *ColorChecker Classic* data from *X-Rite (2015)* and matching the
*ColorChecker Classic* edition after November 2014.
CCS_COLORCHECKER24_AFTER_NOV2014 : ColourChecker
"""
CCS_COLOURCHECKERS = CaseInsensitiveMapping({
'ColorChecker 1976':
CCS_COLORCHECKER1976,
'ColorChecker 2005':
CCS_COLORCHECKER2005,
'BabelColor Average':
CCS_BABELCOLOR_AVERAGE,
'ColorChecker24 - Before November 2014':
CCS_COLORCHECKER24_BEFORE_NOV2014,
'ColorChecker24 - After November 2014':
CCS_COLORCHECKER24_AFTER_NOV2014,
})
CCS_COLOURCHECKERS.__doc__ = """
Chromaticity coordinates of the colour checkers.
References
----------
:cite:`BabelColor2012b`, :cite:`BabelColor2012c`, :cite:`X-Rite2016`
CCS_COLOURCHECKERS : CaseInsensitiveMapping
**{'ColorChecker 1976', 'ColorChecker 2005', 'BabelColor Average',
'ColorChecker24 - Before November 2014',
'ColorChecker24 - After November 2014'}**
Aliases:
- 'babel_average': 'BabelColor Average'
- 'cc2005': 'ColorChecker 2005'
- 'ccb2014': 'ColorChecker24 - Before November 2014'
- 'cca2014': 'ColorChecker24 - After November 2014'
"""
CCS_COLOURCHECKERS['babel_average'] = CCS_COLOURCHECKERS['BabelColor Average']
CCS_COLOURCHECKERS['cc2005'] = CCS_COLOURCHECKERS['ColorChecker 2005']
CCS_COLOURCHECKERS['ccb2014'] = CCS_COLOURCHECKERS[
'ColorChecker24 - Before November 2014']
CCS_COLOURCHECKERS['cca2014'] = CCS_COLOURCHECKERS[
'ColorChecker24 - After November 2014']
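# Hedged usage sketch (illustrative, not part of the original module): the
# registry above is case-insensitive and each entry exposes its name, per-sample
# *CIE xyY* data and illuminant, e.g.
#   checker = CCS_COLOURCHECKERS['cc2005']
#   checker.name                # 'ColorChecker 2005'
#   checker.data['dark skin']   # array([0.4316, 0.3777, 0.1008])
#   checker.illuminant          # *ICC D50* chromaticity coordinates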
|
the-stack_0_19058 | # Author: Decebal Constantin Mocanu et al.;
# Proof of concept implementation of Sparse Evolutionary Training (SET) of Multi Layer Perceptron (MLP) on CIFAR10 using Keras and a mask over weights.
# This implementation can be used to test SET in varying conditions, using the Keras framework versatility, e.g. various optimizers, activation layers, tensorflow
# Also it can be easily adapted for Convolutional Neural Networks or other models which have dense layers
# However, due to the fact that the weights are stored in the standard Keras format (dense matrices), this implementation cannot scale properly.
# If you would like to build and SET-MLP with over 100000 neurons, please use the pure Python implementation from the folder "SET-MLP-Sparse-Python-Data-Structures"
# This is a pre-alpha free software and was tested with Python 3.5.2, Keras 2.1.3, Keras_Contrib 0.0.2, Tensorflow 1.5.0, Numpy 1.14;
# The code is distributed in the hope that it may be useful, but WITHOUT ANY WARRANTIES; The use of this software is entirely at the user's own risk;
# For an easy understanding of the code functionality please read the following articles.
# If you use parts of this code please cite the following articles:
#@article{Mocanu2018SET,
# author = {Mocanu, Decebal Constantin and Mocanu, Elena and Stone, Peter and Nguyen, Phuong H. and Gibescu, Madeleine and Liotta, Antonio},
# journal = {Nature Communications},
# title = {Scalable Training of Artificial Neural Networks with Adaptive Sparse Connectivity inspired by Network Science},
# year = {2018},
# doi = {10.1038/s41467-018-04316-3}
#}
#@Article{Mocanu2016XBM,
#author="Mocanu, Decebal Constantin and Mocanu, Elena and Nguyen, Phuong H. and Gibescu, Madeleine and Liotta, Antonio",
#title="A topological insight into restricted Boltzmann machines",
#journal="Machine Learning",
#year="2016",
#volume="104",
#number="2",
#pages="243--270",
#doi="10.1007/s10994-016-5570-z",
#url="https://doi.org/10.1007/s10994-016-5570-z"
#}
#@phdthesis{Mocanu2017PhDthesis,
#title = "Network computations in artificial intelligence",
#author = "D.C. Mocanu",
#year = "2017",
#isbn = "978-90-386-4305-2",
#publisher = "Eindhoven University of Technology",
#}
from __future__ import division
from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras import optimizers
import numpy as np
from keras import backend as K
from keras_contrib.layers.advanced_activations import SReLU
from keras.datasets import cifar10
from keras.utils import np_utils
class Constraint(object):
def __call__(self, w):
return w
def get_config(self):
return {}
class MaskWeights(Constraint):
def __init__(self, mask):
self.mask = mask
self.mask = K.cast(self.mask, K.floatx())
def __call__(self, w):
w *= self.mask
return w
def get_config(self):
return {'mask': self.mask}
def find_first_pos(array, value):
idx = (np.abs(array - value)).argmin()
return idx
def find_last_pos(array, value):
idx = (np.abs(array - value))[::-1].argmin()
return array.shape[0] - idx
def createWeightsMask(epsilon,noRows, noCols):
# generate an Erdos Renyi sparse weights mask
mask_weights = np.random.rand(noRows, noCols)
    prob = 1 - (epsilon * (noRows + noCols)) / (noRows * noCols)  # on average epsilon * (noRows + noCols) connections survive
mask_weights[mask_weights < prob] = 0
mask_weights[mask_weights >= prob] = 1
noParameters = np.sum(mask_weights)
print ("Create Sparse Matrix: No parameters, NoRows, NoCols ",noParameters,noRows,noCols)
return [noParameters,mask_weights]
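# Hedged worked example (illustrative only): the expected number of non-zero
# weights is roughly epsilon * (noRows + noCols). For the first layer below
# (32*32*3 = 3072 inputs, 4000 hidden units) and epsilon = 20 that is about
# 20 * (3072 + 4000) ~= 141,440 parameters, versus 3072 * 4000 = 12,288,000 for
# the dense equivalent (about 1.2% density).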
class SET_MLP_CIFAR10:
def __init__(self):
# set model parameters
self.epsilon = 20 # control the sparsity level as discussed in the paper
self.zeta = 0.3 # the fraction of the weights removed
self.batch_size = 100 # batch size
self.maxepoches = 1000 # number of epochs
self.learning_rate = 0.01 # SGD learning rate
self.num_classes = 10 # number of classes
self.momentum=0.9 # SGD momentum
# generate an Erdos Renyi sparse weights mask for each layer
[self.noPar1, self.wm1] = createWeightsMask(self.epsilon,32 * 32 *3, 4000)
[self.noPar2, self.wm2] = createWeightsMask(self.epsilon,4000, 1000)
[self.noPar3, self.wm3] = createWeightsMask(self.epsilon,1000, 4000)
# initialize layers weights
self.w1 = None
self.w2 = None
self.w3 = None
self.w4 = None
# initialize weights for SReLu activation function
self.wSRelu1 = None
self.wSRelu2 = None
self.wSRelu3 = None
# create a SET-MLP model
self.create_model()
# train the SET-MLP model
self.train()
def create_model(self):
# create a SET-MLP model for CIFAR10 with 3 hidden layers
self.model = Sequential()
self.model.add(Flatten(input_shape=(32, 32, 3)))
self.model.add(Dense(4000, name="sparse_1",kernel_constraint=MaskWeights(self.wm1),weights=self.w1))
self.model.add(SReLU(name="srelu1",weights=self.wSRelu1))
self.model.add(Dropout(0.3))
self.model.add(Dense(1000, name="sparse_2",kernel_constraint=MaskWeights(self.wm2),weights=self.w2))
self.model.add(SReLU(name="srelu2",weights=self.wSRelu2))
self.model.add(Dropout(0.3))
self.model.add(Dense(4000, name="sparse_3",kernel_constraint=MaskWeights(self.wm3),weights=self.w3))
self.model.add(SReLU(name="srelu3",weights=self.wSRelu3))
self.model.add(Dropout(0.3))
self.model.add(Dense(self.num_classes, name="dense_4",weights=self.w4)) #please note that there is no need for a sparse output layer as the number of classes is much smaller than the number of input hidden neurons
self.model.add(Activation('softmax'))
def rewireMask(self,weights, noWeights):
# rewire weight matrix
# remove zeta largest negative and smallest positive weights
values = np.sort(weights.ravel())
firstZeroPos = find_first_pos(values, 0)
lastZeroPos = find_last_pos(values, 0)
largestNegative = values[int((1-self.zeta) * firstZeroPos)]
smallestPositive = values[int(min(values.shape[0] - 1, lastZeroPos +self.zeta * (values.shape[0] - lastZeroPos)))]
        rewiredWeights = weights.copy()
        rewiredWeights[rewiredWeights > smallestPositive] = 1
        rewiredWeights[rewiredWeights < largestNegative] = 1
        rewiredWeights[rewiredWeights != 1] = 0
weightMaskCore = rewiredWeights.copy()
# add zeta random weights
nrAdd = 0
noRewires = noWeights - np.sum(rewiredWeights)
while (nrAdd < noRewires):
i = np.random.randint(0, rewiredWeights.shape[0])
j = np.random.randint(0, rewiredWeights.shape[1])
if (rewiredWeights[i, j] == 0):
rewiredWeights[i, j] = 1
nrAdd += 1
return [rewiredWeights, weightMaskCore]
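    # Summary of the method above (no added behaviour): each call removes the
    # zeta fraction (here 30%) of the smallest-magnitude surviving weights and
    # regrows the same number of connections at random positions, keeping the
    # total parameter count equal to noWeights.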
def weightsEvolution(self):
# this represents the core of the SET procedure. It removes the weights closest to zero in each layer and add new random weights
self.w1 = self.model.get_layer("sparse_1").get_weights()
self.w2 = self.model.get_layer("sparse_2").get_weights()
self.w3 = self.model.get_layer("sparse_3").get_weights()
self.w4 = self.model.get_layer("dense_4").get_weights()
self.wSRelu1 = self.model.get_layer("srelu1").get_weights()
self.wSRelu2 = self.model.get_layer("srelu2").get_weights()
self.wSRelu3 = self.model.get_layer("srelu3").get_weights()
[self.wm1, self.wm1Core] = self.rewireMask(self.w1[0], self.noPar1)
[self.wm2, self.wm2Core] = self.rewireMask(self.w2[0], self.noPar2)
[self.wm3, self.wm3Core] = self.rewireMask(self.w3[0], self.noPar3)
self.w1[0] = self.w1[0] * self.wm1Core
self.w2[0] = self.w2[0] * self.wm2Core
self.w3[0] = self.w3[0] * self.wm3Core
def train(self):
# read CIFAR10 data
[x_train,x_test,y_train,y_test]=self.read_data()
#data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=10, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
datagen.fit(x_train)
self.model.summary()
# training process in a for loop
self.accuracies_per_epoch=[]
for epoch in range(0,self.maxepoches):
sgd = optimizers.SGD(lr=self.learning_rate, momentum=self.momentum)
self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
historytemp = self.model.fit_generator(datagen.flow(x_train, y_train,
batch_size=self.batch_size),
steps_per_epoch=x_train.shape[0]//self.batch_size,
epochs=epoch,
validation_data=(x_test, y_test),
initial_epoch=epoch-1)
self.accuracies_per_epoch.append(historytemp.history['val_acc'][0])
            # ugly hack to avoid TensorFlow memory growth across repeated fit_generator calls; Theano handles this more gracefully but is outdated in general
self.weightsEvolution()
K.clear_session()
self.create_model()
self.accuracies_per_epoch=np.asarray(self.accuracies_per_epoch)
def read_data(self):
#read CIFAR10 data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, self.num_classes)
y_test = np_utils.to_categorical(y_test, self.num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
#normalize data
xTrainMean = np.mean(x_train, axis=0)
        xTrainStd = np.std(x_train, axis=0)
        x_train = (x_train - xTrainMean) / xTrainStd
        x_test = (x_test - xTrainMean) / xTrainStd
return [x_train, x_test, y_train, y_test]
if __name__ == '__main__':
# create and run a SET-MLP model on CIFAR10
model=SET_MLP_CIFAR10()
# save accuracies over for all training epochs
# in "results" folder you can find the output of running this file
np.savetxt("results/set_mlp_srelu_sgd_cifar10_acc.txt", np.asarray(model.accuracies_per_epoch))
|
the-stack_0_19061 | import numpy as np
import cv2
import os
from utils import ACTION_TO_ID
class Demonstration():
def __init__(self, path, demo_num, check_z_height, task_type='stack'):
try:
# path is expected to be <logs/exp_name>
self.action_log = np.loadtxt(os.path.join(path, 'transitions',
'executed-actions-' + str(demo_num) + '.log.txt'))
except OSError:
raise OSError("Demo Number " + str(demo_num) + " does not exist.")
self.rgb_dir = os.path.join(path, 'data', 'color-heightmaps')
self.depth_dir = os.path.join(path, 'data', 'depth-heightmaps')
self.demo_num = demo_num
self.check_z_height = check_z_height
self.task_type = task_type
# image_names should contain all heightmaps that have demo_num as their poststring
self.image_names = sorted([i for i in os.listdir(self.rgb_dir) if int(i.split('.')[-3]) == demo_num])
# get number of actions in demo
self.num_actions = len(self.action_log)
# check to make sure action log and image_names line up
if len(self.image_names) != self.num_actions:
raise ValueError("MISMATCH: Number of images does not match number of actions in demo for demo number:", demo_num)
# populate actions in dict keyed by action_pair number {action_pair : {action : (x, y, z, theta)}}
# divide num actions by 2 to get number of grasp/place pairs
self.action_dict = {}
# start at 1 since the structure starts with size 1
for action_pair in range(1, (self.num_actions // 2) + 1):
demo_ind = (action_pair - 1) * 2
grasp_image_ind = int(self.image_names[demo_ind].split('.')[0])
place_image_ind = int(self.image_names[demo_ind + 1].split('.')[0])
self.action_dict[action_pair] = {ACTION_TO_ID['grasp'] : self.action_log[demo_ind],
ACTION_TO_ID['place'] : self.action_log[demo_ind + 1],
'grasp_image_ind': grasp_image_ind, 'place_image_ind': place_image_ind}
def get_heightmaps(self, action_str, stack_height, use_hist=False, history_len=3):
# e.g. initial rgb filename is 000000.orig.color.png, only for stack demos
if action_str != 'orig' and self.task_type == 'stack':
action_str = str(stack_height) + action_str
rgb_filename = os.path.join(self.rgb_dir,
'%06d.%s.%d.color.png' % (stack_height, action_str, self.demo_num))
depth_filename = os.path.join(self.depth_dir,
'%06d.%s.%d.depth.png' % (stack_height, action_str, self.demo_num))
# read rgb and depth heightmap
rgb_heightmap = cv2.cvtColor(cv2.imread(rgb_filename), cv2.COLOR_BGR2RGB)
depth_heightmap = cv2.imread(depth_filename, -1).astype(np.float32)/100000
# if using history, need to modify depth heightmap
if use_hist:
depth_heightmap_history = [depth_heightmap]
image_ind = self.image_names.index(rgb_filename.split('/')[-1])
hist_ind = image_ind
# iterate through last history_len frames and add to list
for i in range(history_len - 1):
# calculate previous index
hist_ind = max(0, hist_ind - 1)
# load heightmap and add to list
                heightmap_path = os.path.join(self.depth_dir, self.image_names[hist_ind].replace('color', 'depth'))
hist_depth = cv2.imread(heightmap_path, -1).astype(np.float32)/100000
depth_heightmap_history.append(hist_depth)
return rgb_heightmap, np.stack(depth_heightmap_history, axis=-1)
return rgb_heightmap, np.stack([depth_heightmap] * 3, axis=-1)
def get_action(self, workspace_limits, primitive_action, stack_height, stack_trainer=None,
row_trainer=None, unstack_trainer=None, vertical_square_trainer=None, use_hist=False,
demo_mask=True, cycle_consistency=False):
# ensure one of stack trainer or row trainer is provided
if stack_trainer is None and row_trainer is None and unstack_trainer is None and vertical_square_trainer is None:
raise ValueError("Must provide at least one trainer")
if primitive_action == 'grasp':
color_heightmap, valid_depth_heightmap = self.get_heightmaps(primitive_action,
self.action_dict[stack_height]['grasp_image_ind'], use_hist=use_hist)
elif primitive_action == 'place':
color_heightmap, valid_depth_heightmap = self.get_heightmaps(primitive_action,
self.action_dict[stack_height]['place_image_ind'], use_hist=use_hist)
# get stack features if stack_trainer is provided
# TODO(adit98) can add specific rotation to these forward calls for speedup
if stack_trainer is not None:
# to get vector of 64 vals, run trainer.forward with get_action_feat
stack_push, stack_grasp, stack_place = stack_trainer.forward(color_heightmap,
valid_depth_heightmap, is_volatile=True, keep_action_feat=True,
demo_mask=demo_mask)[:3]
# fill all masked arrays (convert to regular np arrays)
stack_push, stack_grasp, stack_place = stack_push.filled(0.0), \
stack_grasp.filled(0.0), stack_place.filled(0.0)
# get row features if row_trainer is provided
if row_trainer is not None:
# to get vector of 64 vals, run trainer.forward with get_action_feat
row_push, row_grasp, row_place = row_trainer.forward(color_heightmap,
valid_depth_heightmap, is_volatile=True, keep_action_feat=True,
demo_mask=demo_mask)[:3]
# fill all masked arrays (convert to regular np arrays)
row_push, row_grasp, row_place = row_push.filled(0.0), \
row_grasp.filled(0.0), row_place.filled(0.0)
# get unstack features if unstack_trainer is provided
if unstack_trainer is not None:
# to get vector of 64 vals, run trainer.forward with get_action_feat
unstack_push, unstack_grasp, unstack_place = unstack_trainer.forward(color_heightmap,
valid_depth_heightmap, is_volatile=True, keep_action_feat=True,
demo_mask=demo_mask)[:3]
# fill all masked arrays (convert to regular np arrays)
unstack_push, unstack_grasp, unstack_place = unstack_push.filled(0.0), \
unstack_grasp.filled(0.0), unstack_place.filled(0.0)
# get vertical_square features if vertical_square_trainer is provided
if vertical_square_trainer is not None:
# to get vector of 64 vals, run trainer.forward with get_action_feat
vertical_square_push, vertical_square_grasp, vertical_square_place = vertical_square_trainer.forward(color_heightmap,
valid_depth_heightmap, is_volatile=True, keep_action_feat=True,
demo_mask=demo_mask)[:3]
# fill all masked arrays (convert to regular np arrays)
vertical_square_push, vertical_square_grasp, vertical_square_place = vertical_square_push.filled(0.0), \
vertical_square_grasp.filled(0.0), vertical_square_place.filled(0.0)
# get demo action index vector
action_vec = self.action_dict[stack_height][ACTION_TO_ID[primitive_action]]
# convert rotation angle to index
best_rot_ind = np.around((np.rad2deg(action_vec[-2]) % 360) * 16 / 360).astype(int)
# convert robot coordinates to pixel
workspace_pixel_offset = workspace_limits[:2, 0] * -1 * 1000
best_action_xy = ((workspace_pixel_offset + 1000 * action_vec[:2]) / 2).astype(int)
# initialize best actions for stacking and row making
best_action_stack, best_action_row, best_action_unstack, best_action_vertical_square = None, None, None, None
# initialize embedding arrays for each policy (for selected primitive_action)
stack_feat, row_feat, unstack_feat, vertical_square_feat = None, None, None, None
# index predictions to obtain best action
if primitive_action == 'grasp':
# NOTE that we swap the order that the best_action_xy coordinates are passed in since
# the NN output expects (theta, :, y, x)
if stack_trainer is not None:
best_action_stack = stack_grasp[best_rot_ind, :, best_action_xy[1],
best_action_xy[0]]
stack_feat = stack_grasp
if row_trainer is not None:
best_action_row = row_grasp[best_rot_ind, :, best_action_xy[1],
best_action_xy[0]]
row_feat = row_grasp
            if unstack_trainer is not None:
                best_action_unstack = unstack_grasp[best_rot_ind, :, best_action_xy[1],
                        best_action_xy[0]]
                unstack_feat = unstack_grasp
if vertical_square_trainer is not None:
best_action_vertical_square = vertical_square_grasp[best_rot_ind, :,
best_action_xy[1], best_action_xy[0]]
vertical_square_feat = vertical_square_grasp
elif primitive_action == 'place':
if stack_trainer is not None:
best_action_stack = stack_place[best_rot_ind, :, best_action_xy[1],
best_action_xy[0]]
stack_feat = stack_place
if row_trainer is not None:
best_action_row = row_place[best_rot_ind, :, best_action_xy[1],
best_action_xy[0]]
row_feat = row_place
if unstack_trainer is not None:
best_action_unstack = unstack_place[best_rot_ind, :, best_action_xy[1],
best_action_xy[0]]
unstack_feat = unstack_place
if vertical_square_trainer is not None:
best_action_vertical_square = vertical_square_place[best_rot_ind, :,
best_action_xy[1], best_action_xy[0]]
vertical_square_feat = vertical_square_place
# if we aren't using cycle consistency, return best action's embedding
if not cycle_consistency:
# return best action for each model, primitive_action
return best_action_row, best_action_stack, best_action_unstack, best_action_vertical_square, ACTION_TO_ID[primitive_action]
# otherwise, return the entire 16x224x224 embedding space (only for selected primitive action)
else:
return stack_feat, row_feat, unstack_feat, vertical_square_feat, ACTION_TO_ID[primitive_action]
def load_all_demos(demo_path, check_z_height, task_type):
"""
Function to load all demonstrations in a given path and return a list of demo objects.
    Arguments:
        demo_path: Path to folder with demonstrations
        check_z_height: Flag passed through to each Demonstration
        task_type: Type of task the demos were collected for (e.g. 'stack')
"""
demos = []
demo_ind = 0
while True:
try:
demos.append(Demonstration(path=demo_path, demo_num=demo_ind,
check_z_height=check_z_height, task_type=task_type))
except OSError:
# demo does not exist, we loaded all the demos in the directory
break
# increment demo_ind
demo_ind += 1
return demos
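# Hypothetical usage sketch (the path and flag values below are illustrative
# assumptions, not taken from the original project):
#
#   demos = load_all_demos('logs/stack-demos', check_z_height=False, task_type='stack')
#   first_demo = demos[0]
#   rgb, depth = first_demo.get_heightmaps('grasp',
#           first_demo.action_dict[1]['grasp_image_ind'])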
|
the-stack_0_19062 | class Solution:
"""
@param A: A string
@param B: A string
@return: the length of the longest common substring.
"""
def longestCommonSubstring(self, A, B):
m = len(A)
n = len(B)
dp = [[0] * (n+1) for _ in range(m+1)]
ans = 0
for i in range(m):
for j in range(n):
if A[i] == B[j]:
dp[i+1][j+1] = dp[i][j] + 1
ans = max(ans, dp[i+1][j+1])
return ans |
the-stack_0_19064 | # Needed on case-insensitive filesystems
from __future__ import absolute_import
# Try to import PIL in either of the two ways it can be installed.
try:
from PIL import Image
except ImportError: # pragma: no cover
import Image
import math
class QRColorMask:
"""
QRColorMask is used to color in the QRCode.
By the time apply_mask is called, the QRModuleDrawer of the StyledPilImage
will have drawn all of the modules on the canvas (the color of these
modules will be mostly black, although antialiasing may result in
    gradients). In the base class, apply_mask is implemented such that the
    background color will remain, but the foreground pixels will be replaced by
    a color determined by a call to get_fg_pixel. There is additional
    calculation done to preserve the gradient artifacts of antialiasing.
All QRColorMask objects should be careful about RGB vs RGBA color spaces.
For examples of what these look like, see doc/color_masks.png
"""
back_color = (255, 255, 255)
has_transparency = False
paint_color = back_color
def initialize(self, styledPilImage, image):
self.paint_color = styledPilImage.paint_color
def apply_mask(self, image):
width, height = image.size
for x in range(width):
for y in range(height):
norm = self.extrap_color(
self.back_color, self.paint_color, image.getpixel((x, y))
)
if norm is not None:
image.putpixel(
(x, y),
self.interp_color(
self.get_bg_pixel(image, x, y),
self.get_fg_pixel(image, x, y),
norm,
),
)
else:
image.putpixel((x, y), self.get_bg_pixel(image, x, y))
def get_fg_pixel(self, image, x, y):
raise NotImplementedError("QRModuleDrawer.paint_fg_pixel")
def get_bg_pixel(self, image, x, y):
return self.back_color
# The following functions are helpful for color calculation:
# interpolate a number between two numbers
def interp_num(self, n1, n2, norm):
return int(n2 * norm + n1 * (1 - norm))
    # interpolate a color between two colors
def interp_color(self, col1, col2, norm):
return tuple(self.interp_num(col1[i], col2[i], norm) for i in range(len(col1)))
# find the interpolation coefficient between two numbers
def extrap_num(self, n1, n2, interped_num):
if n2 == n1:
return None
else:
return (interped_num - n1) / (n2 - n1)
    # find the interpolation coefficient between two colors
def extrap_color(self, col1, col2, interped_color):
normed = list(
filter(
lambda i: i is not None,
[
self.extrap_num(col1[i], col2[i], interped_color[i])
for i in range(len(col1))
],
)
)
if not normed:
return None
else:
return sum(normed) / len(normed)
class SolidFillColorMask(QRColorMask):
"""
Just fills in the background with one color and the foreground with another
"""
def __init__(self, back_color=(255, 255, 255), front_color=(0, 0, 0)):
self.back_color = back_color
self.front_color = front_color
self.has_transparency = len(self.back_color) == 4
def apply_mask(self, image):
if self.back_color == (255, 255, 255) and self.front_color == (0, 0, 0):
# Optimization: the image is already drawn by QRModuleDrawer in
# black and white, so if these are also our mask colors we don't
# need to do anything. This is much faster than actually applying a
# mask.
pass
else:
# TODO there's probably a way to use PIL.ImageMath instead of doing
# the individual pixel comparisons that the base class uses, which
# would be a lot faster. (In fact doing this would probably remove
# the need for the B&W optimization above.)
QRColorMask.apply_mask(self, image)
def get_fg_pixel(self, image, x, y):
return self.front_color
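# Minimal usage sketch (hedged): this module is written for the qrcode package's
# StyledPilImage factory; the import paths and calls below reflect that package
# layout and are assumptions outside this file.
#
#   import qrcode
#   from qrcode.image.styledpil import StyledPilImage
#
#   qr = qrcode.QRCode()
#   qr.add_data("https://example.com")
#   img = qr.make_image(
#       image_factory=StyledPilImage,
#       color_mask=SolidFillColorMask(front_color=(0, 0, 128)),
#   )
#   img.save("styled.png")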
class RadialGradiantColorMask(QRColorMask):
"""
    Fills in the foreground with a radial gradient from the center to the edge
"""
def __init__(
self, back_color=(255, 255, 255), center_color=(0, 0, 0), edge_color=(0, 0, 255)
):
self.back_color = back_color
self.center_color = center_color
self.edge_color = edge_color
self.has_transparency = len(self.back_color) == 4
def get_fg_pixel(self, image, x, y):
width, _ = image.size
normedDistanceToCenter = math.sqrt(
(x - width / 2) ** 2 + (y - width / 2) ** 2
) / (math.sqrt(2) * width / 2)
return self.interp_color(
self.center_color, self.edge_color, normedDistanceToCenter
)
class SquareGradiantColorMask(QRColorMask):
"""
    Fills in the foreground with a square gradient from the center to the edge
"""
def __init__(
self, back_color=(255, 255, 255), center_color=(0, 0, 0), edge_color=(0, 0, 255)
):
self.back_color = back_color
self.center_color = center_color
self.edge_color = edge_color
self.has_transparency = len(self.back_color) == 4
def get_fg_pixel(self, image, x, y):
width, _ = image.size
normedDistanceToCenter = max(abs(x - width / 2), abs(y - width / 2)) / (
width / 2
)
return self.interp_color(
self.center_color, self.edge_color, normedDistanceToCenter
)
class HorizontalGradiantColorMask(QRColorMask):
"""
    Fills in the foreground with a gradient sweeping from the left to the right
"""
def __init__(
self, back_color=(255, 255, 255), left_color=(0, 0, 0), right_color=(0, 0, 255)
):
self.back_color = back_color
self.left_color = left_color
self.right_color = right_color
self.has_transparency = len(self.back_color) == 4
def get_fg_pixel(self, image, x, y):
width, _ = image.size
return self.interp_color(self.left_color, self.right_color, x / width)
class VerticalGradiantColorMask(QRColorMask):
"""
    Fills in the foreground with a gradient sweeping from the top to the bottom
"""
def __init__(
self, back_color=(255, 255, 255), top_color=(0, 0, 0), bottom_color=(0, 0, 255)
):
self.back_color = back_color
self.top_color = top_color
self.bottom_color = bottom_color
self.has_transparency = len(self.back_color) == 4
def get_fg_pixel(self, image, x, y):
width, _ = image.size
return self.interp_color(self.top_color, self.bottom_color, y / width)
class ImageColorMask(QRColorMask):
"""
Fills in the foreground with pixels from another image, either passed by
path or passed by image object.
"""
def __init__(
self, back_color=(255, 255, 255), color_mask_path=None, color_mask_image=None
):
self.back_color = back_color
if color_mask_image:
self.color_img = color_mask_image
else:
self.color_img = Image.open(color_mask_path)
self.has_transparency = len(self.back_color) == 4
def initialize(self, styledPilImage, image):
self.paint_color = styledPilImage.paint_color
self.color_img = self.color_img.resize(image.size)
def get_fg_pixel(self, image, x, y):
width, _ = image.size
return self.color_img.getpixel((x, y))
|
the-stack_0_19065 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0322-Coin-Change.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-31
=================================================================="""
import sys
import time
from typing import List
# import collections
"""
LeetCode - 0322 - (Medium) - Coin Change
https://leetcode.com/problems/coin-change/
Description & Requirement:
You are given an integer array coins representing coins of different denominations
and an integer amount representing a total amount of money.
Return the fewest number of coins that you need to make up that amount.
If that amount of money cannot be made up by any combination of the coins, return -1.
You may assume that you have an infinite number of each kind of coin.
Example 1:
Input: coins = [1,2,5], amount = 11
Output: 3
Explanation: 11 = 5 + 5 + 1
Example 2:
Input: coins = [2], amount = 3
Output: -1
Example 3:
Input: coins = [1], amount = 0
Output: 0
Constraints:
1 <= coins.length <= 12
1 <= coins[i] <= 2^31 - 1
0 <= amount <= 10^4
Related Problem:
LC-0518-Coin-Change-2
"""
class Solution:
def coinChange(self, coins: List[int], amount: int) -> int:
# exception case
if not isinstance(amount, int) or amount <= 0:
return 0 # amount == 0, no change is needed
if not isinstance(coins, list) or len(coins) <= 0:
return -1 # Error input type (now amount >= 1)
# main method: (Knapsack problem - Dynamic Programming)
# dp[i] is minimum number of coins to get total amount i (i = 0, 1, 2, ..., amount)
# dp equation: dp[i] = min(1 + dp[i-j]), where j in coins (i - j >= 0), means a valid coin denomination
# dp init: dp[0] == 0.
# dp aim: get dp[-1]
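        # e.g. coins=[1,2,5], amount=11 gives dp=[0,1,1,2,2,1,2,2,3,3,2,3], so dp[11]=3 (5+5+1)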
return self._coinChange(coins, amount)
def _coinChange(self, coins: List[int], amount: int) -> int:
len_coins = len(coins)
assert len_coins >= 1 and amount >= 1
# default list coins is sorted. if not, sort it first (and remove duplicated denominations)
# coins.sort()
# dp[i] is minimum number of coins to get total amount i (i = 0, 1, 2, ..., amount)
dp = [-1 for _ in range(amount + 1)] # -1 means can't get this total coin amount
dp[0] = 0 # need no coin to get amount 0
for i in range(1, amount + 1):
for j in coins:
if i - j < 0 or dp[i - j] == -1: # index out of range, or dp[i - j] itself can't be reached
continue
if dp[i] == -1: # the state of dp[i] change from "can't reach" to "can reach"
dp[i] = dp[i - j] + 1
else: # update dp[i] if dp[i - j] + 1 is smaller
dp[i] = min(dp[i], dp[i - j] + 1)
return dp[-1]
def main():
# Example 1: Output: 3
# Explanation: 11 = 5 + 5 + 1
coins = [1, 2, 5]
amount = 11
# Example 2: Output: -1
# coins = [2]
# amount = 3
# Example 3: Output: 0
# coins = [1]
# amount = 0
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.coinChange(coins, amount)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_19066 | # -*- coding: utf-8 -*-
from __future__ import print_function
from matplotlib import rcParams
FONT_FAMILY='DejaVu Serif'
rcParams["font.family"] = FONT_FAMILY
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
import numpy as np
import datetime
import itertools
import utils as u
#markers=['.','x','o','v','^','<','>','1','2','3','4','8','s','p','*']
markers=[None]
colors = ['b', 'g', 'r', 'm', 'y', 'k', 'orange', 'purple', 'olive']
markeriter = itertools.cycle(markers)
coloriter = itertools.cycle(colors)
fixed_colors = {
'S-SGD': '#ff3300',
'ssgd': '#ff3300',
'gTopK': '#009900',
'blue': 'b',
0.001: 'C2',
0.002: 'C5',
0.00025: 'C3',
0.0001: 'C0',
0.00005: 'C1',
0.00001: 'C4',
}
OUTPUTPATH='/tmp/ijcai2019'
LOGHOME='/tmp/logs'
FONTSIZE=14
HOSTNAME='localhost'
num_batches_per_epoch = None
global_max_epochs=150
global_density=0.001
#NFIGURES=4;NFPERROW=2
NFIGURES=6;NFPERROW=2
#NFIGURES=1;NFPERROW=1
#FIGSIZE=(5*NFPERROW,3.8*NFIGURES/NFPERROW)
PLOT_NORM=False
PLOT_NORM=True
if PLOT_NORM:
#FIGSIZE=(5*NFPERROW,3.1*NFIGURES/NFPERROW)
FIGSIZE=(5*NFPERROW,3.2*NFIGURES/NFPERROW)
else:
#FIGSIZE=(5*NFPERROW,2.9*NFIGURES/NFPERROW)
FIGSIZE=(5*NFPERROW,3.0*NFIGURES/NFPERROW)
fig, group_axs = plt.subplots(NFIGURES/NFPERROW, NFPERROW,figsize=FIGSIZE)
if NFIGURES > 1 and PLOT_NORM:
ax = None
group_axtwins = []
for i in range(NFIGURES/NFPERROW):
tmp = []
for a in group_axs[i]:
tmp.append(a.twinx())
group_axtwins.append(tmp)
global_index = 0
else:
ax = group_axs
ax1 = ax
global_index = None
ax2 = None
STANDARD_TITLES = {
'resnet20': 'ResNet-20',
'vgg16': 'VGG-16',
'alexnet': 'AlexNet',
'resnet50': 'ResNet-50',
'lstmptb': 'LSTM-PTB',
'lstm': 'LSTM-PTB',
'lstman4': 'LSTM-AN4'
}
def get_real_title(title):
return STANDARD_TITLES.get(title, title)
def seconds_between_datetimestring(a, b):
a = datetime.datetime.strptime(a, '%Y-%m-%d %H:%M:%S')
b = datetime.datetime.strptime(b, '%Y-%m-%d %H:%M:%S')
delta = b - a
return delta.days*86400+delta.seconds
sbd = seconds_between_datetimestring
def get_loss(line, isacc=False):
valid = line.find('val acc: ') > 0 if isacc else line.find('loss: ') > 0
if line.find('Epoch') > 0 and valid:
items = line.split(' ')
loss = float(items[-1])
t = line.split(' I')[0].split(',')[0]
t = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
return loss, t
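# Note on the expected log format (inferred from the parsing below, not from any
# documentation): each relevant line starts with a "%Y-%m-%d %H:%M:%S" timestamp
# followed by " I", mentions "Epoch", and carries the metric as the last token,
# e.g. roughly:
#   2019-01-01 12:00:00 I ... Epoch 10 ... lr: 0.1000, ... average loss: 1.2345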
def read_losses_from_log(logfile, isacc=False):
global num_batches_per_epoch
f = open(logfile)
losses = []
times = []
average_delays = []
lrs = []
i = 0
time0 = None
max_epochs = global_max_epochs
counter = 0
for line in f.readlines():
if line.find('num_batches_per_epoch: ') > 0:
num_batches_per_epoch = int(line[0:-1].split('num_batches_per_epoch:')[-1])
valid = line.find('val acc: ') > 0 if isacc else line.find('average loss: ') > 0
if line.find('num_batches_per_epoch: ') > 0:
num_batches_per_epoch = int(line[0:-1].split('num_batches_per_epoch:')[-1])
if line.find('Epoch') > 0 and valid:
t = line.split(' I')[0].split(',')[0]
t = datetime.datetime.strptime(t, '%Y-%m-%d %H:%M:%S')
if not time0:
time0 = t
if line.find('lr: ') > 0:
try:
lr = float(line.split(',')[-2].split('lr: ')[-1])
lrs.append(lr)
except:
pass
if line.find('average delay: ') > 0:
delay = int(line.split(':')[-1])
average_delays.append(delay)
loss, t = get_loss(line, isacc)
if loss and t:
counter += 1
losses.append(loss)
times.append(t)
if counter > max_epochs:
break
f.close()
if len(times) > 0:
t0 = time0 if time0 else times[0] #times[0]
for i in range(0, len(times)):
delta = times[i]- t0
times[i] = delta.days*86400+delta.seconds
return losses, times, average_delays, lrs
def read_norm_from_log(logfile):
f = open(logfile)
means = []
stds = []
for line in f.readlines():
if line.find('gtopk-dense norm mean') > 0:
items = line.split(',')
mean = float(items[-2].split(':')[-1])
            std = float(items[-1].split(':')[-1])
means.append(mean)
stds.append(std)
print('means: ', means)
print('stds: ', stds)
return means, stds
def plot_loss(logfile, label, isacc=False, title='ResNet-20', fixed_color=None):
losses, times, average_delays, lrs = read_losses_from_log(logfile, isacc=isacc)
norm_means, norm_stds = read_norm_from_log(logfile)
print('times: ', times)
print('losses: ', losses)
if len(average_delays) > 0:
delay = int(np.mean(average_delays))
else:
delay = 0
if delay > 0:
label = label + ' (delay=%d)' % delay
if isacc:
ax.set_ylabel('top-1 Validation Accuracy')
else:
ax.set_ylabel('training loss')
ax.set_title(get_real_title(title))
marker = markeriter.next()
if fixed_color:
color = fixed_color
else:
color = coloriter.next()
iterations = np.arange(len(losses))
line = ax.plot(iterations, losses, label=label, marker=marker, markerfacecolor='none', color=color, linewidth=1)
if False and len(norm_means) > 0:
global ax2
if ax2 is None:
ax2 = ax.twinx()
ax2.set_ylabel('L2-Norm of : gTopK-Dense')
ax2.plot(norm_means, label=label+' norms', color=color)
ax.set_xlabel('# of epochs')
if len(lrs) > 0:
lr_indexes = [0]
lr = lrs[0]
for i in range(len(lrs)):
clr = lrs[i]
if lr != clr:
lr_indexes.append(i)
lr = clr
u.update_fontsize(ax, FONTSIZE)
return line
def plot_with_params(dnn, nworkers, bs, lr, hostname, legend, isacc=False, prefix='', title='ResNet-20', sparsity=None, nsupdate=None, sg=None, density=None, force_legend=False):
global global_density
global_density = density
postfix='5922'
color = None
if prefix.find('allreduce')>=0:
postfix='0'
elif prefix.find('single') >= 0:
postfix = None
if sparsity:
logfile = LOGHOME+'/%s/%s-n%d-bs%d-lr%.4f-s%.5f' % (prefix, dnn, nworkers, bs, lr, sparsity)
elif nsupdate:
logfile = LOGHOME+'/%s/%s-n%d-bs%d-lr%.4f-ns%d' % (prefix, dnn, nworkers, bs, lr, nsupdate)
else:
logfile = LOGHOME+'/%s/%s-n%d-bs%d-lr%.4f' % (prefix, dnn, nworkers, bs, lr)
if sg is not None:
logfile += '-sg%.2f' % sg
if density is not None:
logfile += '-ds%s' % str(density)
color = fixed_colors[density]
else:
color = fixed_colors['S-SGD']
if postfix is None:
logfile += '/%s.log' % (hostname)
else:
logfile += '/%s-%s.log' % (hostname, postfix)
print('logfile: ', logfile)
if force_legend:
l = legend
else:
l = legend+ '(lr=%.4f, bs=%d, %d workers)'%(lr, bs, nworkers)
line = plot_loss(logfile, l, isacc=isacc, title=dnn, fixed_color=color)
return line
def plot_group_norm_diff():
global ax
networks = ['vgg16', 'resnet20', 'lstm', 'lstman4']
networks = ['vgg16', 'resnet20', 'alexnet', 'resnet50', 'lstm', 'lstman4']
for i, network in enumerate(networks):
ax_row = i / NFPERROW
ax_col = i % NFPERROW
ax = group_axs[ax_row][ax_col]
ax1 = group_axtwins[ax_row][ax_col]
plts = plot_norm_diff(ax1, network)
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = ax1.get_legend_handles_labels()
fig.legend(lines + lines2, labels + labels2, ncol=4, loc='upper center', fontsize=FONTSIZE, frameon=True)
plt.subplots_adjust(bottom=0.09, left=0.08, right=0.90, top=0.88, wspace=0.49, hspace=0.42)
plt.savefig('%s/multiple_normdiff.pdf'%OUTPUTPATH)
def plot_norm_diff(lax=None, network=None, subfig=None):
global global_index
global global_max_epochs
density = 0.001
nsupdate=1
prefix='allreduce-comp-gtopk-baseline-gwarmup-dc1-model-ijcai2019'
if network == 'lstm':
network = 'lstm';bs =100;lr=30.0;epochs =40
elif network == 'lstman4':
network = 'lstman4';bs =8;lr=0.0002;epochs = 80
elif network == 'resnet20':
network = 'resnet20';bs =32;lr=0.1;epochs=140
elif network == 'vgg16':
network = 'vgg16';bs=128;lr=0.1;epochs=140
elif network == 'alexnet':
network = 'alexnet';bs=256;lr=0.01;epochs =40
elif network == 'resnet50':
nsupdate=16
network = 'resnet50';bs=512;lr=0.01;epochs =35
global_max_epochs = epochs
path = LOGHOME+'/%s/%s-n4-bs%d-lr%.4f-ns%d-sg1.50-ds%s' % (prefix, network,bs,lr, nsupdate,density)
print(network, path)
plts = []
if network == 'lstm':
line = plot_with_params(network, 4, 100, 30.0, HOSTNAME, r'S-SGD loss', prefix='allreduce-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, force_legend=True)
plts.append(line)
line = plot_with_params(network, 4, 100, 30.0, HOSTNAME, r'gTop-$k$ S-SGD loss', prefix='allreduce-comp-gtopk-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, sg=1.5, density=density, force_legend=True)
plts.append(line)
elif network == 'resnet20':
line = plot_with_params(network, 4, 32, lr, HOSTNAME, 'S-SGD loss', prefix='allreduce-baseline-gwarmup-dc1-model-ijcai2019', force_legend=True)
plts.append(line)
line = plot_with_params(network, 4, bs, lr, HOSTNAME, r'gTop-$k$ S-SGD loss', prefix='allreduce-comp-topk-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, sg=1.5, density=density, force_legend=True)
plts.append(line)
pass
elif network == 'vgg16':
line = plot_with_params(network, 4, bs, lr, HOSTNAME, 'S-SGD loss', prefix='allreduce-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, force_legend=True)
plts.append(line)
line = plot_with_params(network, 4, bs, lr, HOSTNAME, r'gTop-$k$ S-SGD loss', prefix=prefix, nsupdate=1, sg=1.5, density=density, force_legend=True)
plts.append(line)
elif network == 'lstman4':
line = plot_with_params(network, 4, 8, 0.0002, HOSTNAME, 'S-SGD loss', prefix='allreduce-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, force_legend=True)
plts.append(line)
line = plot_with_params(network, 4, 8, 0.0002, HOSTNAME, r'gTop-$k$ S-SGD loss', prefix='allreduce-comp-gtopk-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, sg=1.5, density=density, force_legend=True)
plts.append(line)
elif network == 'resnet50':
line = plot_with_params(network, 4, 512, lr, HOSTNAME, 'S-SGD loss', prefix='allreduce-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=nsupdate, force_legend=True)
line = plot_with_params(network, 4, 512, lr, HOSTNAME, r'gTop-$k$ S-SGD loss', prefix=prefix, nsupdate=nsupdate, sg=1.5, density=density, force_legend=True)
plts.append(line)
elif network == 'alexnet':
plot_with_params(network, 4, 256, lr, HOSTNAME, 'S-SGD', prefix='allreduce-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, force_legend=True)
line = plot_with_params(network, 4, 256, lr, HOSTNAME, r'gTop-$k$ S-SGD loss', prefix=prefix, nsupdate=nsupdate, sg=1.5, density=density, force_legend=True)
plts.append(line)
arr = []
arr2 = []
for i in range(1, epochs+1):
fn = '%s/gtopknorm-rank0-epoch%d.npy' % (path, i)
fn2 = '%s/randknorm-rank0-epoch%d.npy' % (path, i)
arr.append(np.mean(np.power(np.load(fn), 2)))
arr2.append(np.mean(np.power(np.load(fn2), 2)))
arr = np.array(arr)
arr2 = np.array(arr2)
cax = lax if lax is not None else ax1
cax.plot(arr/arr2, label=r'$\delta$', color=fixed_colors['blue'],linewidth=1)
cax.set_ylim(bottom=0.97, top=1.001)
zero_x = np.arange(len(arr), step=1)
ones = np.ones_like(zero_x)
cax.plot(zero_x, ones, ':', label='1 ref.', color='black', linewidth=1)
if True or network.find('lstm') >= 0:
subaxes = inset_axes(cax,
width='50%',
height='30%',
bbox_to_anchor=(-0.04,0,1,0.95),
bbox_transform=cax.transAxes,
loc='upper right')
half = epochs //2
subx = np.arange(half, len(arr))
subaxes.plot(subx, (arr/arr2)[half:], color=fixed_colors['blue'], linewidth=1)
subaxes.plot(subx, ones[half:], ':', color='black', linewidth=1)
subaxes.set_ylim(bottom=subaxes.get_ylim()[0])
cax.set_xlabel('# of iteration')
cax.set_ylabel(r'$\delta$')
u.update_fontsize(cax, FONTSIZE)
if global_index is not None:
global_index += 1
return plts
def plot_group_lr_sensitivies():
def _plot_with_network(network):
global global_max_epochs
global global_density
densities = [0.001, 0.00025, 0.0001, 0.00005]
if network == 'vgg16':
global_max_epochs = 140
for density in densities:
legend=r'$c$=%d'%(1/density)
plot_with_params(network, 4, 128, 0.1, HOSTNAME, legend, prefix='allreduce-comp-gtopk-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, sg=1.5, density=density, force_legend=True)
elif network == 'resnet20':
global_max_epochs = 140
for density in densities:
legend=r'$c$=%d'%(1/density)
plot_with_params(network, 4, 32, 0.1, HOSTNAME, legend, prefix='allreduce-comp-gtopk-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, sg=1.5, density=density, force_legend=True)
elif network == 'lstm':
global_max_epochs = 40
for density in densities:
legend=r'$c$=%d'%(1/density)
plot_with_params(network, 4, 100, 30.0, HOSTNAME, legend, prefix='allreduce-comp-gtopk-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, sg=1.5, density=density, force_legend=True)
elif network == 'lstman4':
global_max_epochs = 80
for density in densities:
legend=r'$c$=%d'%(1/density)
plot_with_params(network, 4, 8, 0.0002, HOSTNAME, legend, prefix='allreduce-comp-gtopk-baseline-gwarmup-dc1-model-ijcai2019', nsupdate=1, sg=1.5, density=density, force_legend=True)
global ax
networks = ['vgg16', 'resnet20', 'lstm', 'lstman4']
for i, network in enumerate(networks):
ax_row = i / NFPERROW
ax_col = i % NFPERROW
ax = group_axs[ax_row][ax_col]
_plot_with_network(network)
ax.legend(ncol=2, loc='upper right', fontsize=FONTSIZE-2)
plt.subplots_adjust(bottom=0.10, left=0.10, right=0.94, top=0.95, wspace=0.37, hspace=0.42)
plt.savefig('%s/multiple_lrs.pdf'%OUTPUTPATH)
if __name__ == '__main__':
if PLOT_NORM:
plot_group_norm_diff()
else:
plot_group_lr_sensitivies()
plt.show()
|
the-stack_0_19068 | import torch
from torch import nn
class BasicCritic(nn.Module):
"""
The BasicCritic module takes an image and predicts whether it is a cover
image or a steganographic image (N, 1).
Input: (N, 3, H, W)
Output: (N, 1)
"""
def _conv2d(self, in_channels, out_channels):
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3
)
def _build_models(self):
self.conv1 = nn.Sequential(
self._conv2d(3, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv2 = nn.Sequential(
self._conv2d(self.hidden_size, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv3 = nn.Sequential(
self._conv2d(self.hidden_size, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv4 = nn.Sequential(
self._conv2d(self.hidden_size, 1)
)
return self.conv1,self.conv2,self.conv3,self.conv4
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self._models = self._build_models()
def forward(self, image):
x = self._models[0](image)
x_1 = self._models[1](x)
x_2 = self._models[2](x_1)
x_3 = self._models[3](x_2)
return torch.mean(x_3.view(x_3.size(0), -1), dim=1)
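# Minimal usage sketch (hedged; the batch size, image size and hidden size below
# are arbitrary assumptions):
#
#   critic = BasicCritic(hidden_size=32)
#   images = torch.randn(4, 3, 64, 64)
#   scores = critic(images)  # tensor of shape (4,), one realness score per image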
|
the-stack_0_19069 | import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
import pymzml.run as run
try:
import numpy as np
except:
np = None
import unittest
import test_file_paths
class ChromatogramTest(unittest.TestCase):
def assertPeaksIdentical(self, peaks1, peaks2, msg=None):
self.assertEqual(
len(peaks1), len(peaks2)
) # , msg='List have different number of peaks!')
for x in range(len(peaks1)):
self.assertCountEqual(peaks1[x], peaks2[x], msg=msg)
def setUp(self):
self.paths = test_file_paths.paths
path = self.paths[2]
self.Run_np = run.Reader(path)
self.chrom = self.Run_np["TIC"]
def test_time(self):
time = self.chrom.time
mz = self.chrom.mz
self.assertCountEqual(time, mz)
intensity = self.chrom.i
def test_i(self):
self.chrom.profile = [(1, 10), (2, 20), (3, 30)]
peaks = self.chrom.peaks()
print(self.chrom.peaks())
self.assertPeaksIdentical(peaks, [(1, 10), (2, 20), (3, 30)])
def test_profile(self):
profile = self.chrom.profile
self.assertIsNotNone(len(profile))
if np:
self.assertIsInstance(profile, np.ndarray)
else:
self.assertIsInstance(profile, list)
if __name__ == "__main__":
unittest.main(verbosity=3)
|
the-stack_0_19074 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The worldwideweb Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
Test that the CHECKLOCKTIMEVERIFY soft-fork activates.
"""
from test_framework.blocktools import (
create_block,
create_coinbase,
)
from test_framework.messages import (
CTransaction,
msg_block,
)
from test_framework.p2p import P2PInterface
from test_framework.script import (
CScript,
CScriptNum,
OP_1NEGATE,
OP_CHECKLOCKTIMEVERIFY,
OP_DROP,
)
from test_framework.test_framework import worldwidewebTestFramework
from test_framework.util import assert_equal
from test_framework.wallet import (
MiniWallet,
MiniWalletMode,
)
# Helper function to modify a transaction by
# 1) prepending a given script to the scriptSig of vin 0 and
# 2) (optionally) modify the nSequence of vin 0 and the tx's nLockTime
def cltv_modify_tx(tx, prepend_scriptsig, nsequence=None, nlocktime=None):
assert_equal(len(tx.vin), 1)
if nsequence is not None:
tx.vin[0].nSequence = nsequence
tx.nLockTime = nlocktime
tx.vin[0].scriptSig = CScript(prepend_scriptsig + list(CScript(tx.vin[0].scriptSig)))
tx.rehash()
def cltv_invalidate(tx, failure_reason):
# Modify the signature in vin 0 and nSequence/nLockTime of the tx to fail CLTV
#
# According to BIP65, OP_CHECKLOCKTIMEVERIFY can fail due the following reasons:
# 1) the stack is empty
# 2) the top item on the stack is less than 0
# 3) the lock-time type (height vs. timestamp) of the top stack item and the
# nLockTime field are not the same
# 4) the top stack item is greater than the transaction's nLockTime field
# 5) the nSequence field of the txin is 0xffffffff
assert failure_reason in range(5)
scheme = [
# | Script to prepend to scriptSig | nSequence | nLockTime |
# +-------------------------------------------------+------------+--------------+
[[OP_CHECKLOCKTIMEVERIFY], None, None],
[[OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP], None, None],
[[CScriptNum(100), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, 1296688602], # timestamp of genesis block
[[CScriptNum(100), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, 50],
[[CScriptNum(50), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0xffffffff, 50],
][failure_reason]
cltv_modify_tx(tx, prepend_scriptsig=scheme[0], nsequence=scheme[1], nlocktime=scheme[2])
def cltv_validate(tx, height):
# Modify the signature in vin 0 and nSequence/nLockTime of the tx to pass CLTV
scheme = [[CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, height]
cltv_modify_tx(tx, prepend_scriptsig=scheme[0], nsequence=scheme[1], nlocktime=scheme[2])
CLTV_HEIGHT = 111
class BIP65Test(worldwidewebTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
f'-testactivationheight=cltv@{CLTV_HEIGHT}',
'[email protected]',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
]]
self.setup_clean_chain = True
self.rpc_timeout = 480
def test_cltv_info(self, *, is_active):
assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip65'], {
"active": is_active,
"height": CLTV_HEIGHT,
"type": "buried",
},
)
def run_test(self):
peer = self.nodes[0].add_p2p_connection(P2PInterface())
wallet = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_OP_TRUE)
self.test_cltv_info(is_active=False)
self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
self.generate(wallet, 10)
self.generate(self.nodes[0], CLTV_HEIGHT - 2 - 10)
assert_equal(self.nodes[0].getblockcount(), CLTV_HEIGHT - 2)
self.log.info("Test that invalid-according-to-CLTV transactions can still appear in a block")
# create one invalid tx per CLTV failure reason (5 in total) and collect them
invalid_cltv_txs = []
for i in range(5):
spendtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
cltv_invalidate(spendtx, i)
invalid_cltv_txs.append(spendtx)
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time)
block.nVersion = 3
block.vtx.extend(invalid_cltv_txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.test_cltv_info(is_active=False) # Not active as of current tip and next block does not need to obey rules
peer.send_and_ping(msg_block(block))
self.test_cltv_info(is_active=True) # Not active as of current tip, but next block must obey rules
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 4")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
block.nVersion = 3
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=[f'{block.hash}, bad-version(0x00000003)']):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that invalid-according-to-CLTV transactions cannot appear in a block")
block.nVersion = 4
block.vtx.append(CTransaction()) # dummy tx after coinbase that will be replaced later
# create and test one invalid tx per CLTV failure reason (5 in total)
for i in range(5):
spendtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
cltv_invalidate(spendtx, i)
expected_cltv_reject_reason = [
"non-mandatory-script-verify-flag (Operation not valid with the current stack size)",
"non-mandatory-script-verify-flag (Negative locktime)",
"non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
"non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
"non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
][i]
# First we show that this tx is valid except for CLTV by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{
'txid': spendtx.hash,
'wtxid': spendtx.getwtxid(),
'allowed': False,
'reject-reason': expected_cltv_reject_reason,
}],
self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
)
# Now we verify that a block with this transaction is also invalid.
block.vtx[1] = spendtx
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=[f'CheckInputScripts on {block.vtx[-1].hash} failed with {expected_cltv_reject_reason}']):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
cltv_validate(spendtx, CLTV_HEIGHT - 1)
block.vtx.pop(1)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.test_cltv_info(is_active=True) # Not active as of current tip, but next block must obey rules
peer.send_and_ping(msg_block(block))
self.test_cltv_info(is_active=True) # Active as of current tip
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP65Test().main()
|
the-stack_0_19075 | import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
import torchvision
from torchvision.utils import save_image
import os
import sys
import subprocess
import signal
from pathlib import Path
from tqdm import trange
from collections import namedtuple
from big_sleep.biggan import BigGAN
from big_sleep.clip import load, tokenize, normalize_image
from einops import rearrange
from adabelief_pytorch import AdaBelief
assert torch.cuda.is_available(), 'CUDA must be available in order to use Big Sleep'
# graceful keyboard interrupt
terminate = False
def signal_handling(signum,frame):
global terminate
terminate = True
signal.signal(signal.SIGINT,signal_handling)
# helpers
def exists(val):
return val is not None
def open_folder(path):
if os.path.isfile(path):
path = os.path.dirname(path)
if not os.path.isdir(path):
return
cmd_list = None
if sys.platform == 'darwin':
cmd_list = ['open', '--', path]
elif sys.platform == 'linux2' or sys.platform == 'linux':
cmd_list = ['xdg-open', path]
elif sys.platform in ['win32', 'win64']:
cmd_list = ['explorer', path.replace('/','\\')]
if cmd_list == None:
return
try:
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError:
pass
except OSError:
pass
# tensor helpers
def differentiable_topk(x, k, temperature=1.):
n, dim = x.shape
topk_tensors = []
for i in range(k):
is_last = i == (k - 1)
values, indices = (x / temperature).softmax(dim=-1).topk(1, dim=-1)
topks = torch.zeros_like(x).scatter_(-1, indices, values)
topk_tensors.append(topks)
if not is_last:
x = x.scatter(-1, indices, float('-inf'))
topks = torch.cat(topk_tensors, dim=-1)
return topks.reshape(n, k, dim).sum(dim = 1)
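# Illustrative note (added, not part of the original source): for x of shape
# (1, 4) and k=2, differentiable_topk returns a (1, 4) tensor whose two largest
# positions hold softmax-derived weights and all other positions are zero, so the
# "selection" remains differentiable for the class-vector optimization below.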
# load clip
perceptor, preprocess = load()
# load biggan
class Latents(torch.nn.Module):
def __init__(
self,
num_latents = 32,
num_classes = None,
class_temperature = 2.
):
super().__init__()
self.normu = torch.nn.Parameter(torch.zeros(num_latents, 128).normal_(std = 1))
self.cls = torch.nn.Parameter(torch.zeros(num_latents, 1000).normal_(mean = -3.9, std = .3))
self.register_buffer('thresh_lat', torch.tensor(1))
assert not exists(num_classes) or num_classes > 0 and num_classes <= 1000, 'num classes must be between 0 and 1000'
self.num_classes = num_classes
self.class_temperature = class_temperature
def forward(self):
if exists(self.num_classes):
classes = differentiable_topk(self.cls, self.num_classes, temperature = self.class_temperature)
else:
classes = torch.sigmoid(self.cls)
return self.normu, classes
class Model(nn.Module):
def __init__(
self,
image_size,
num_classes = None,
class_temperature = 2.
):
super().__init__()
assert image_size in (128, 256, 512), 'image size must be one of 128, 256, or 512'
self.biggan = BigGAN.from_pretrained(f'biggan-deep-{image_size}')
self.num_classes = num_classes
self.class_temperature = class_temperature
self.init_latents()
def init_latents(self):
self.latents = Latents(
num_classes = self.num_classes,
class_temperature = self.class_temperature
)
def forward(self):
self.biggan.eval()
out = self.biggan(*self.latents(), 1)
return (out + 1) / 2
# load siren
class BigSleep(nn.Module):
def __init__(
self,
num_cutouts = 128,
loss_coef = 100,
image_size = 512,
bilinear = False,
num_classes = None,
class_temperature = 2.
):
super().__init__()
self.loss_coef = loss_coef
self.image_size = image_size
self.num_cutouts = num_cutouts
self.interpolation_settings = {'mode': 'bilinear', 'align_corners': False} if bilinear else {'mode': 'nearest'}
self.model = Model(
image_size = image_size,
num_classes = num_classes,
class_temperature = class_temperature
)
def reset(self):
self.model.init_latents()
def forward(self, text, return_loss = True):
width, num_cutouts = self.image_size, self.num_cutouts
out = self.model()
if not return_loss:
return out
pieces = []
for ch in range(num_cutouts):
size = int(width * torch.zeros(1,).normal_(mean=.8, std=.3).clip(.5, .95))
offsetx = torch.randint(0, width - size, ())
offsety = torch.randint(0, width - size, ())
apper = out[:, :, offsetx:offsetx + size, offsety:offsety + size]
apper = F.interpolate(apper, (224, 224), **self.interpolation_settings)
pieces.append(apper)
into = torch.cat(pieces)
into = normalize_image(into)
image_embed = perceptor.encode_image(into)
text_embed = perceptor.encode_text(text)
latents, soft_one_hot_classes = self.model.latents()
num_latents = latents.shape[0]
latent_thres = self.model.latents.thresh_lat
lat_loss = torch.abs(1 - torch.std(latents, dim=1)).mean() + \
torch.abs(torch.mean(latents)).mean() + \
4 * torch.max(torch.square(latents).mean(), latent_thres)
for array in latents:
mean = torch.mean(array)
diffs = array - mean
var = torch.mean(torch.pow(diffs, 2.0))
std = torch.pow(var, 0.5)
zscores = diffs / std
skews = torch.mean(torch.pow(zscores, 3.0))
kurtoses = torch.mean(torch.pow(zscores, 4.0)) - 3.0
lat_loss = lat_loss + torch.abs(kurtoses) / num_latents + torch.abs(skews) / num_latents
cls_loss = ((50 * torch.topk(soft_one_hot_classes, largest = False, dim = 1, k = 999)[0]) ** 2).mean()
sim_loss = -self.loss_coef * torch.cosine_similarity(text_embed, image_embed, dim = -1).mean()
return (lat_loss, cls_loss, sim_loss)
class Imagine(nn.Module):
def __init__(
self,
text,
*,
lr = .07,
image_size = 512,
gradient_accumulate_every = 1,
save_every = 50,
epochs = 20,
iterations = 1050,
save_progress = False,
bilinear = False,
open_folder = True,
seed = None,
adabelief=True,
save_latents=False,
adabelief_args = None,
clip_grad = None,
lr_scheduling = False,
torch_deterministic = False,
num_classes = None,
class_temperature = 2.
):
super().__init__()
self.seed = seed
self.save_latents = save_latents
if exists(seed):
assert not bilinear, 'the deterministic (seeded) operation does not work with interpolation, yet (ask pytorch)'
print(f'setting seed of {seed}')
if seed == 0:
print('you can override this with --seed argument in the command line, or --random for a randomly chosen one')
torch.manual_seed(seed)
torch.set_deterministic(torch_deterministic)
self.epochs = epochs
self.iterations = iterations
model = BigSleep(
image_size = image_size,
bilinear = bilinear,
num_classes = num_classes,
class_temperature = class_temperature
).cuda()
self.model = model
self.lr = lr
self.adabelief=adabelief
self.clip_grad = clip_grad
self.lr_scheduling = lr_scheduling
if self.adabelief:
if adabelief_args != None:
self.adabelief_args = adabelief_args
self.optimizer = AdaBelief(model.model.latents.parameters(), lr=self.adabelief_args.lr, betas=(self.adabelief_args.b1, self.adabelief_args.b2), eps=self.adabelief_args.eps,
weight_decay=self.adabelief_args.weight_decay, amsgrad=self.adabelief_args.amsgrad, weight_decouple=self.adabelief_args.weight_decouple,
fixed_decay=self.adabelief_args.fixed_decay, rectify=self.adabelief_args.rectify)
else:
self.optimizer = AdaBelief(model.model.latents.parameters(), lr=self.lr, betas=(0.5, 0.999), eps=1e-12,
weight_decay=0, amsgrad=False, weight_decouple=True, fixed_decay=False, rectify=True)
else:
self.optimizer = Adam(model.model.latents.parameters(), self.lr)
if lr_scheduling:
#self.lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer, max_lr=self.lr, steps_per_epoch=self.iterations, epochs=self.epochs)
self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma = .96)
self.gradient_accumulate_every = gradient_accumulate_every
self.save_every = save_every
self.save_progress = save_progress
self.open_folder = open_folder
self.set_text(text)
def set_text(self, text):
self.text = text
textpath = self.text.replace(' ','_').replace('.','_')[:30]
#textpath = datetime.now().strftime("%Y%m%d-%H%M%S-") + textpath
if exists(self.seed):
textpath = str(self.seed) + '-' + textpath
self.textpath = textpath
self.filename = Path(f'./{textpath}.png')
self.encoded_text = tokenize(text).cuda()
def reset(self):
self.model.reset()
if self.adabelief:
            if getattr(self, 'adabelief_args', None) is not None:
self.optimizer = AdaBelief(self.model.model.latents.parameters(), lr=self.adabelief_args.lr, betas=(self.adabelief_args.b1, self.adabelief_args.b2), eps=self.adabelief_args.eps,
weight_decay=self.adabelief_args.weight_decay, amsgrad=self.adabelief_args.amsgrad, weight_decouple=self.adabelief_args.weight_decouple,
fixed_decay=self.adabelief_args.fixed_decay, rectify=self.adabelief_args.rectify)
else:
self.optimizer = AdaBelief(self.model.model.latents.parameters(), lr=self.lr, betas=(0.5, 0.999), eps=1e-12,
weight_decay=0, amsgrad=False, weight_decouple=True, fixed_decay=False, rectify=True)
else:
self.optimizer = Adam(self.model.model.latents.parameters(), self.lr)
if self.lr_scheduling:
#self.lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer, max_lr=self.lr, steps_per_epoch=self.iterations, epochs=self.epochs)
self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma = .96)
def train_step(self, epoch, i):
total_loss = 0
for _ in range(self.gradient_accumulate_every):
losses = self.model(self.encoded_text)
loss = sum(losses) / self.gradient_accumulate_every
total_loss += loss
loss.backward()
if self.clip_grad != None:
torch.nn.utils.clip_grad_norm_(self.model.model.latents.parameters(), self.clip_grad)
self.optimizer.step()
self.optimizer.zero_grad()
if self.lr_scheduling and epoch!=0 and i== 0: self.lr_scheduler.step()
if (i + 1) % self.save_every == 0:
with torch.no_grad():
best = torch.topk(losses[2], k = 1, largest = False)[1]
image = self.model.model()[best].cpu()
save_image(image, str(self.filename))
print(f'image updated at "./{str(self.filename)}"')
if self.save_latents:
# save latents
lats = self.model.model.latents
lats.best = best # saving this just in case it might be useful
torch.save(lats, Path(f'./{self.textpath}.pth'))
if self.save_progress:
total_iterations = epoch * self.iterations + i
num = total_iterations // self.save_every
save_image(image, Path(f'./{self.textpath}.{num:03d}.png'))
if self.save_latents:
# save latents
lats = self.model.model.latents
lats.best = best # saving this just in case it might be useful
torch.save(lats, Path(f'./{self.textpath}.{num:03d}.pth'))
return total_loss
def forward(self):
print(f'Imagining "{self.text}" from the depths of my weights...')
if self.open_folder:
open_folder('./')
self.open_folder = False
for epoch in trange(self.epochs, desc = 'epochs'):
pbar = trange(self.iterations, desc='iteration')
for i in pbar:
loss = self.train_step(epoch, i)
pbar.set_description(f'loss: {loss.item():.2f}')
if terminate:
print('detecting keyboard interrupt, gracefully exiting')
return
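# Minimal usage sketch (hedged; the prompt and hyperparameters are illustrative,
# and a CUDA GPU is required by the assert at the top of this module):
#
#   dream = Imagine(text='a pyramid made of ice', lr=0.07, epochs=1, iterations=200)
#   dream()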
|
the-stack_0_19077 | import json
import redis
from allocation import config
r = redis.Redis(**config.get_redis_host_and_port())
def subscribe_to(channel):
pubsub = r.pubsub()
pubsub.subscribe(channel)
confirmation = pubsub.get_message(timeout=3)
assert confirmation["type"] == "subscribe"
return pubsub
def publish_message(channel, message):
r.publish(channel, json.dumps(message))
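# Usage sketch (illustrative; the channel names and payload below are assumptions):
#
#   pubsub = subscribe_to("line_allocated")
#   publish_message("change_batch_quantity", {"batchref": "batch1", "qty": 10})
#   message = pubsub.get_message(timeout=1)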
|
the-stack_0_19079 | from django import forms
from django.conf import settings
from django.db.models import Q
from django.utils.html import mark_safe
from django.utils.translation import gettext_lazy as _
from django_scopes import scopes_disabled
from django_scopes.forms import SafeModelMultipleChoiceField
from i18nfield.forms import I18nModelForm
from pretalx.common.forms.fields import IMAGE_EXTENSIONS, ExtensionFileField
from pretalx.common.mixins.forms import ReadOnlyFlag
from pretalx.event.models import Event, Organiser, Team, TeamInvite
from pretalx.orga.forms.widgets import HeaderSelect, MultipleLanguagesWidget
from pretalx.submission.models import ReviewPhase, Track
class TeamForm(ReadOnlyFlag, I18nModelForm):
def __init__(self, *args, organiser=None, instance=None, **kwargs):
super().__init__(*args, instance=instance, **kwargs)
self.fields['organiser'].widget = forms.HiddenInput()
if instance and getattr(instance, 'pk', None):
self.fields.pop('organiser')
self.fields['limit_events'].queryset = instance.organiser.events.all()
else:
self.fields['organiser'].initial = organiser
self.fields['limit_events'].queryset = organiser.events.all()
if instance and instance.pk:
self.fields['is_reviewer'].help_text = mark_safe(
f' (<a href="{instance.orga_urls.base}tracks">'
+ str(_('Limit to certain tracks?'))
+ '</a>)'
)
class Meta:
model = Team
fields = [
'name',
'organiser',
'all_events',
'limit_events',
'can_create_events',
'can_change_teams',
'can_change_organiser_settings',
'can_change_event_settings',
'can_change_submissions',
'is_reviewer',
'review_override_votes',
]
class TeamTrackForm(I18nModelForm):
def __init__(self, *args, organiser=None, **kwargs):
with scopes_disabled():
super().__init__(*args, **kwargs)
instance = kwargs.get('instance')
if instance and not instance.all_events and instance.limit_events.count():
self.fields['limit_tracks'].queryset = Track.objects.filter(
event__in=instance.limit_events.all()
)
else:
self.fields['limit_tracks'].queryset = Track.objects.filter(
event__organiser=organiser
).order_by('-event__date_from', 'name')
class Meta:
model = Team
fields = ['limit_tracks']
field_classes = {
'limit_tracks': SafeModelMultipleChoiceField,
}
class TeamInviteForm(ReadOnlyFlag, forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['email'].required = True
class Meta:
model = TeamInvite
fields = ('email',)
class OrganiserForm(ReadOnlyFlag, I18nModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if kwargs.get('instance'):
self.fields['slug'].disabled = True
class Meta:
model = Organiser
fields = ('name', 'slug')
class EventWizardInitialForm(forms.Form):
locales = forms.MultipleChoiceField(
choices=settings.LANGUAGES,
label=_('Use languages'),
help_text=_('Choose all languages that your event should be available in.'),
widget=MultipleLanguagesWidget,
)
def __init__(self, *args, user=None, **kwargs):
super().__init__(*args, **kwargs)
self.fields['organiser'] = forms.ModelChoiceField(
label=_('Organiser'),
queryset=Organiser.objects.filter(
id__in=user.teams.filter(can_create_events=True).values_list(
'organiser', flat=True
)
)
if not user.is_administrator
else Organiser.objects.all(),
widget=forms.RadioSelect,
empty_label=None,
required=True,
help_text=_(
'The organiser running the event can copy settings from previous events and share team permissions across all or multiple events.'
),
)
if len(self.fields['organiser'].choices) == 1:
self.fields['organiser'].initial = self.fields['organiser'].queryset.first()
class EventWizardBasicsForm(I18nModelForm):
def __init__(self, *args, user=None, locales=None, organiser=None, **kwargs):
self.locales = locales
super().__init__(*args, **kwargs, locales=locales)
self.fields['locale'].choices = [
(a, b) for a, b in settings.LANGUAGES if a in locales
]
self.fields['slug'].help_text = str(_(
'This is the address your event will be available at. '
'Should be short, only contain lowercase letters and numbers, and must be unique. '
'We recommend some kind of abbreviation with less than 10 characters that can be easily remembered.'
)) + ' <strong>' + str(_('You cannot change the slug later on!')) + '</strong>'
def clean_slug(self):
slug = self.cleaned_data['slug']
qs = Event.objects.all()
if qs.filter(slug__iexact=slug).exists():
raise forms.ValidationError(
_(
'This short name is already taken, please choose another one (or ask the owner of that event to add you to their team).'
)
)
return slug.lower()
class Meta:
model = Event
fields = ('name', 'slug', 'timezone', 'email', 'locale')
class EventWizardTimelineForm(forms.ModelForm):
deadline = forms.DateTimeField(
required=False, help_text=_('The default deadline for your Call for Papers. You can assign additional deadlines to individual submission types, which will take precedence over this deadline.')
)
def __init__(self, *args, user=None, locales=None, organiser=None, **kwargs):
super().__init__(*args, **kwargs)
self.fields['deadline'].widget.attrs['class'] = 'datetimepickerfield'
class Meta:
model = Event
fields = ('date_from', 'date_to')
widgets = {
'date_from': forms.DateInput(attrs={'class': 'datepickerfield'}),
'date_to': forms.DateInput(
attrs={'class': 'datepickerfield', 'data-date-after': '#id_date_from'}
),
}
class EventWizardDisplayForm(forms.Form):
show_on_dashboard = forms.BooleanField(
initial=True,
required=False,
label=_('Show on dashboard'),
help_text=_('Show this event on this website\'s dashboard, once it is public?'),
)
primary_color = forms.CharField(
max_length=7,
label=_('Main event colour'),
help_text=_(
'Provide a hex value like #00ff00 if you want to style pretalx in your event\'s colour scheme.'
),
required=False,
)
logo = ExtensionFileField(
required=False,
extensions=IMAGE_EXTENSIONS,
label=_('Logo'),
help_text=_(
'If you provide a logo image, we will by default not show your event\'s name and date in the page header. '
'We will show your logo in its full size if possible, scaled down to the full header width otherwise.'
),
)
display_header_pattern = forms.ChoiceField(
label=_('Frontpage header pattern'),
help_text=_(
'Choose how the frontpage header banner will be styled. Pattern source: <a href="http://www.heropatterns.com/">heropatterns.com</a>, CC BY 4.0.'
),
choices=(
('', _('Plain')),
('pcb', _('Circuits')),
('bubbles', _('Circles')),
('signal', _('Signal')),
('topo', _('Topography')),
('graph', _('Graph Paper')),
),
required=False,
widget=HeaderSelect,
)
def __init__(self, *args, user=None, locales=None, organiser=None, **kwargs):
super().__init__(*args, **kwargs)
self.fields['primary_color'].widget.attrs['class'] = 'colorpickerfield'
class EventWizardCopyForm(forms.Form):
@staticmethod
def copy_from_queryset(user):
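        # events the user may configure: via an all-events team or an explicit per-event team assignment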
return Event.objects.filter(
Q(
organiser_id__in=user.teams.filter(
all_events=True, can_change_event_settings=True
).values_list('organiser', flat=True)
)
| Q(
id__in=user.teams.filter(can_change_event_settings=True).values_list(
'limit_events__id', flat=True
)
)
)
def __init__(self, *args, user=None, locales=None, organiser=None, **kwargs):
super().__init__(*args, **kwargs)
self.fields['copy_from_event'] = forms.ModelChoiceField(
label=_('Copy configuration from'),
queryset=EventWizardCopyForm.copy_from_queryset(user),
widget=forms.RadioSelect,
empty_label=_('Do not copy'),
required=False,
)
class ReviewPhaseForm(I18nModelForm):
def __init__(self, *args, event=None, **kwargs):
super().__init__(*args, **kwargs)
def clean(self):
data = super().clean()
if data.get('start') and data.get('end') and data['start'] > data['end']:
self.add_error('end', forms.ValidationError(_('The end of a phase has to be after its start.')))
return data
class Meta:
model = ReviewPhase
fields = [
'name', 'start', 'end',
'can_review',
'can_see_speaker_names',
'can_change_submission_state',
'can_see_other_reviews',
'speakers_can_change_submissions',
]
widgets = {
'start': forms.DateInput(attrs={'class': 'datetimepickerfield'}),
'end': forms.DateInput(
attrs={'class': 'datetimepickerfield'}
),
}
|
the-stack_0_19080 | __all__ = ['user_ns']
from flask_restplus import Namespace
from documentation.models import (login_model, registration_model, auth_model, message_model, gallery_item, metadata,
friend_model, upload_img, user_model, comment_model, message_post, post_response,
pending_frame_model, pending_model_update, add_role)
user_ns = Namespace('user', path='/users/<string:username>', description='User level operations')
for model in [login_model, registration_model, auth_model, metadata, message_model,
friend_model, upload_img, user_model, gallery_item, comment_model, message_post, post_response,
pending_frame_model, pending_model_update, add_role]:
user_ns.add_model(model.name, model) |
the-stack_0_19082 | from django.urls import path, include
from . import views
urlpatterns = [
path('friend_requests/', views.friend_requests, name='friend_requests'),
path('following/', views.following, name='following'),
path('followers/', views.followers, name='followers'),
path('friends/', views.friends, name='friends'),
path('find/', views.find, name='find_friends'),
path('change_ModelDatabase/',views.change_ModelDatabase,name='change_ModelDatabase'),
path('getNodeList/',views.getNodeList,name='getNodeList')
]
|
the-stack_0_19083 |
import numpy as np
from sklearn import linear_model
reg = linear_model.LinearRegression(fit_intercept=True)
import pandas as pd
from scipy import signal
def get_doubling_time_via_regression(in_array):
''' Use a linear regression to approximate the doubling rate
Parameters:
----------
in_array : pandas.series
Returns:
----------
Doubling rate: double
'''
y = np.array(in_array)
X = np.arange(-1,2).reshape(-1, 1)
assert len(in_array)==3
reg.fit(X,y)
intercept=reg.intercept_
slope=reg.coef_
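    # ratio of current level (intercept) to daily increase (slope) serves as the doubling-time estimate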
return intercept/slope
def savgol_filter(df_input,column='confirmed',window=5):
''' Savgol Filter which can be used in groupby apply function (data structure kept)
parameters:
----------
df_input : pandas.series
column : str
window : int
used data points to calculate the filter result
Returns:
----------
df_result: pd.DataFrame
the index of the df_input has to be preserved in result
'''
degree=1
df_result=df_input
filter_in=df_input[column].fillna(0) # attention with the neutral element here
    result=signal.savgol_filter(np.array(filter_in),
                           window, # window size used for filtering
                           degree) # polynomial order of the fit (linear)
df_result[str(column+'_filtered')]=result
return df_result
def rolling_reg(df_input,col='confirmed'):
''' Rolling Regression to approximate the doubling time'
Parameters:
----------
df_input: pd.DataFrame
col: str
defines the used column
Returns:
----------
result: pd.DataFrame
'''
days_back=3
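    # a window of three consecutive days matches the three-point assertion in get_doubling_time_via_regression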
result=df_input[col].rolling(
window=days_back,
min_periods=days_back).apply(get_doubling_time_via_regression,raw=False)
return result
def calc_filtered_data(df_input,filter_on='confirmed'):
''' Calculate savgol filter and return merged data frame
Parameters:
----------
df_input: pd.DataFrame
filter_on: str
defines the used column
Returns:
----------
df_output: pd.DataFrame
the result will be joined as a new column on the input data frame
'''
must_contain=set(['state','country',filter_on])
    assert must_contain.issubset(set(df_input.columns)), ' Error in calc_filtered_data: not all columns in data frame'
df_output=df_input.copy() # we need a copy here otherwise the filter_on column will be overwritten
pd_filtered_result=df_output[['state','country',filter_on]].groupby(['state','country']).apply(savgol_filter)#.reset_index()
df_output=pd.merge(df_output,pd_filtered_result[[str(filter_on+'_filtered')]],left_index=True,right_index=True,how='left')
return df_output.copy()
def calc_doubling_rate(df_input,filter_on='confirmed'):
''' Calculate approximated doubling rate and return merged data frame
Parameters:
----------
df_input: pd.DataFrame
filter_on: str
defines the used column
Returns:
----------
df_output: pd.DataFrame
the result will be joined as a new column on the input data frame
'''
must_contain=set(['state','country',filter_on])
    assert must_contain.issubset(set(df_input.columns)), ' Error in calc_doubling_rate: not all columns in data frame'
pd_DR_result= df_input.groupby(['state','country']).apply(rolling_reg,filter_on).reset_index()
pd_DR_result=pd_DR_result.rename(columns={filter_on:filter_on+'_DR',
'level_2':'index'})
#we do the merge on the index of our big table and on the index column after groupby
df_output=pd.merge(df_input,pd_DR_result[['index',str(filter_on+'_DR')]],left_index=True,right_on=['index'],how='left')
df_output=df_output.drop(columns=['index'])
return df_output
if __name__ == '__main__':
test_data_reg=np.array([2,4,6])
result=get_doubling_time_via_regression(test_data_reg)
    print('the test doubling time is: '+str(result))
pd_JH_data=pd.read_csv('../data/processed/COVID_relational_confirmed.csv',sep=';',parse_dates=[0])
pd_JH_data=pd_JH_data.sort_values('date',ascending=True).copy()
pd_result_large=calc_filtered_data(pd_JH_data)
pd_result_large=calc_doubling_rate(pd_result_large)
pd_result_large=calc_doubling_rate(pd_result_large,'confirmed_filtered')
mask=pd_result_large['confirmed']>250
pd_result_large['confirmed_filtered_DR']=pd_result_large['confirmed_filtered_DR'].where(mask, other=np.NaN)
pd_result_large.to_csv('../data/processed/COVID_final_set.csv',sep=';',index=False)
print(pd_result_large[pd_result_large['country']=='Korea, South'].tail())
|
the-stack_0_19085 | # -*- coding: utf-8 -*-
import re
import warnings
from six.moves import urllib_parse
import os
import os.path as osp
import re
import shutil
import sys
import tempfile
import requests
import six
from tqdm import tqdm
import argparse
import pkg_resources
from binascii import hexlify as hx, unhexlify as uhx
try:
import ujson as json
except:
import json
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from oauth2client.service_account import ServiceAccountCredentials
import Hex
import sq_tools
from Drive import Public
from Drive import DriveTools
import io
import Fs.Nca as Nca
import Fs.Nsp as Nsp
import Fs.Nacp as Nacp
from Fs.File import MemoryFile
from hashlib import sha256,sha1
from python_pick import pick
from python_pick import Picker
from listmanager import folder_to_list
squirrel_dir=os.path.abspath(os.curdir)
NSCB_dir=os.path.abspath('../'+(os.curdir))
if os.path.exists(os.path.join(squirrel_dir,'ztools')):
NSCB_dir=squirrel_dir
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
ztools_dir=os.path.join(NSCB_dir,'ztools')
squirrel_dir=ztools_dir
elif os.path.exists(os.path.join(NSCB_dir,'ztools')):
squirrel_dir=squirrel_dir
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
else:
ztools_dir=os.path.join(NSCB_dir, 'ztools')
zconfig_dir=os.path.join(NSCB_dir, 'zconfig')
credentials_dir=os.path.join(zconfig_dir, 'credentials')
if not os.path.exists(credentials_dir):
os.makedirs(credentials_dir)
credentials_json = os.path.join(credentials_dir, 'credentials.json')
def get_scopes(type):
if type=='default':
SCOPES = ['https://www.googleapis.com/auth/drive']
elif type=='read':
SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly', 'https://www.googleapis.com/auth/drive.readonly']
return SCOPES
def get_path_parameters(route):
param=route.split(":")
folder_list=list()
if len(param)>1:
token_name=param[0]
route=param[1]
else:
try:
token_name=param[0]
# print(token_name)
route='root'
except:
token_name='drive'
route='root'
if route=='drive':
pass
else:
tid1=list();tid1=[pos for pos, char in enumerate(route) if char == '/']
tid2=list();tid2=[pos for pos, char in enumerate(route) if char == '\\']
positions= list(set(tid1 + tid2));positions.sort()
for i in range(len(positions)):
if i==0:
j=positions[i];folder=route[:j]
else:
j=positions[i-1];k=positions[i]
folder=route[j+1:k]
if folder != "":
folder_list.append(folder)
if i==(len(positions)-1):
j=positions[i]+1;folder=route[j:]
if folder != "":
folder_list.append(folder)
return token_name,folder_list
def get_html_header(access_token,off1=None,off2=None):
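	# build the Bearer-auth request headers; when byte offsets are given a Range header is added for partial downloads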
tokenTotal = 'Bearer ' + str(access_token)
if off1==None or off2==None:
header={
'Authorization':tokenTotal,
'token_type':'Bearer',
'accept' : '*/*',
'accept-encoding': 'none',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'x-goog-api-client': 'gdcl/1.7.11 gl-python/3.7.3'}
else:
header={
'Range': 'bytes=%s-%s' % (off1,off2),
'Authorization':tokenTotal,
'token_type':'Bearer',
'accept' : '*/*',
'accept-encoding': 'none',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'x-goog-api-client': 'gdcl/1.7.11 gl-python/3.7.3'}
return header
class auth():
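	# handles Google authentication: interactive OAuth with pickled token caching, or a service-account JSON key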
def __init__(self,SCOPES=None,token='drive',SCOPES_type='default',headless=True,apiver='v3',json_file=None):
if SCOPES_type!='default':
SCOPES=get_scopes(SCOPES_type)
		tk = os.path.join(credentials_dir, token)
		if SCOPES!=None:
			if os.path.exists(tk):
				try:
					os.remove(tk)
				except:pass
		else:
			SCOPES = ['https://www.googleapis.com/auth/drive']
		creds = None;json_auth=False
alt_json=os.path.join(credentials_dir, (token+".json"))
if os.path.exists(alt_json):
credentials_json=alt_json
else:
credentials_json=os.path.join(credentials_dir, 'credentials.json')
if json_file!=None:
tk=json_file
if os.path.exists(tk):
try:
with open(tk) as jfile:
test=json.load(jfile)
json_auth=True
apiver='v3'
except:
with open(tk, 'rb') as tok:
creds = pickle.load(tok)
# If there are no (valid) credentials available, let the user log in.
if json_auth==False:
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credentials_json, SCOPES)
if headless==False:
creds = flow.run_local_server(port=0)
else:
creds = flow.run_console()
# Save the credentials for the next run
with open(tk, 'wb') as tok:
pickle.dump(creds, tok)
self.drive_service = build('drive', apiver, credentials=creds)
self.access_token = creds.token
else:
if os.path.exists(token):
creds = ServiceAccountCredentials.from_json_keyfile_name(token, scopes=SCOPES)
elif os.path.exists(tk):
creds = ServiceAccountCredentials.from_json_keyfile_name(tk, scopes=SCOPES)
self.drive_service = build('drive', apiver, credentials=creds)
self.access_token = None
class location():
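	# resolves a 'token:folder/file' route (optionally inside a Team Drive) to a Drive ID and offers ranged, file-like reads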
def __init__(self,ID=None,route=None,TD_ID=None,TD_Name=None,token_name=None):
self.ID=ID;self.name=None;self.token_name=token_name;drive_service=None
self.root='root';self.filetype=None;self.md5hash=None;self.access_token = None
self.size=None;self.session=None;self.position=0;self.drivename=None
self.drivename=self.token_name;self.response=None
if TD_ID==None and TD_Name==None:
self.isinTD=False
else:
self.isinTD=True
if self.ID!=None and self.name==None:
self.name,mimetype=self.get_name()
if mimetype=='application/vnd.google-apps.folder':
self.filetype='folder'
else:
self.filetype='file'
if self.ID==None and route==None:
raise("Can't get route to location")
elif ID != None:
self.ID=ID
else:
self.ID,self.name=self.get_location_ID(route,TD_ID,TD_Name)
if self.ID==None and route!=None:
tk,flist=get_path_parameters(route)
if not flist and tk==self.token_name:
self.ID=self.root
def get_name(self,ID=None,token_name=None):
if ID==None:
ID=self.ID
if token_name==None:
remote=auth()
self.access_token = remote.access_token
else:
remote=auth(token=token_name)
self.access_token = remote.access_token
drive_service=remote.drive_service
self.drive_service=drive_service
# print(self.isinTD)
if self.isinTD==False:
result=drive_service.files().get(fileId=ID, fields="name,mimeType").execute()
else:
result=drive_service.files().get(fileId=ID, fields="name,mimeType",supportsAllDrives = True).execute()
name=result['name']
mimetype=result['mimeType']
return name,mimetype
def get_hash(self,ID=None,token_name=None):
if ID==None:
ID=self.ID
if token_name==None:
remote=auth()
self.access_token = remote.access_token
else:
remote=auth(token=token_name)
self.access_token = remote.access_token
drive_service=remote.drive_service
self.drive_service=drive_service
if self.isinTD==False:
result=drive_service.files().get(fileId=ID, fields="md5Checksum").execute()
else:
result=drive_service.files().get(fileId=ID, fields="md5Checksum",supportsAllDrives = True).execute()
md5=result['md5Checksum']
self.md5hash=md5
return md5
def get_size(self,ID=None,token_name=None):
if ID==None:
ID=self.ID
if token_name==None:
remote=auth()
self.access_token = remote.access_token
else:
remote=auth(token=token_name)
self.access_token = remote.access_token
drive_service=remote.drive_service
self.drive_service=drive_service
try:
if self.isinTD==False:
result=drive_service.files().get(fileId=ID, fields="size").execute()
else:
result=drive_service.files().get(fileId=ID, fields="size",supportsAllDrives = True).execute()
size=result['size']
self.size=size
except:pass
return self.size
def get_location_ID(self,route,TD_ID=None,TD_Name=None):
token_name,folder_list=get_path_parameters(route)
self.token_name=token_name
self.drivename=self.token_name
remote=auth(token=token_name)
self.access_token = remote.access_token
drive_service=remote.drive_service
self.drive_service=drive_service
parent='root';endID=None;endName=None
if TD_ID !=None:
parent=TD_ID
self.root=parent
elif TD_Name != None:
parent=self.get_TD(TD_Name)
self.root=parent
for i in range(len(folder_list)):
# if i==0:
# j=folder_list[i]
# if self.root != 'root':
# results = drive_service.files().list(
# q="mimeType='application/vnd.google-apps.folder' and name='{}' and '{}' in parents".format(j,parent),
# pageSize=100, fields="nextPageToken, files(id, name)",includeItemsFromAllDrives = True,supportsAllDrives = True).execute()
# else:
# results = drive_service.files().list(
# q="mimeType='application/vnd.google-apps.folder' and name='{}' and '{}' in parents".format(j,parent),
# pageSize=100, fields="nextPageToken, files(id, name)").execute()
# items = results.get('files', [])
# if not items:
# return None,None
# parent=items[0]['id']
# endID=parent;endName=items[0]['name']
if i == (len(folder_list)-1):
j=folder_list[i]
if self.root != 'root':
results = drive_service.files().list(
q="mimeType='application/vnd.google-apps.folder' and name='{}' and '{}' in parents".format(j,parent),
pageSize=100, fields="nextPageToken, files(id, name)",includeItemsFromAllDrives = True,supportsAllDrives = True).execute()
else:
results = drive_service.files().list(
q="mimeType='application/vnd.google-apps.folder' and name='{}' and '{}' in parents".format(j,parent),
pageSize=100, fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
if not items:
if self.root != 'root':
results = drive_service.files().list(
q="mimeType!='application/vnd.google-apps.folder' and name='{}' and '{}' in parents".format(j,parent),
pageSize=100, fields="nextPageToken, files(id, name)",includeItemsFromAllDrives = True,supportsAllDrives = True).execute()
else:
results = drive_service.files().list(
q="mimeType!='application/vnd.google-apps.folder' and name='{}' and '{}' in parents".format(j,parent),
pageSize=100, fields="nextPageToken, files(id, name)").execute()
self.filetype='file'
else:
self.filetype='folder'
items = results.get('files', [])
if not items:
self.filetype=None
return None,None
parent=items[0]['id']
endID=parent;endName=items[0]['name']
else:
j=folder_list[i]
if self.root != 'root':
results = drive_service.files().list(
q="mimeType='application/vnd.google-apps.folder' and name='{}' and '{}' in parents".format(j,parent),
pageSize=100, fields="nextPageToken, files(id, name)",includeItemsFromAllDrives = True,supportsAllDrives = True).execute()
else:
results = drive_service.files().list(
q="mimeType='application/vnd.google-apps.folder' and name='{}' and '{}' in parents".format(j,parent),
pageSize=100, fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
if not items:
return None,None
parent=items[0]['id']
endID=parent;endName=items[0]['name']
return endID,endName
def get_TD(self,TD_Name=None):
drive_service=self.drive_service;root='root'
results = drive_service.drives().list(pageSize=100).execute()
if TD_Name=='None' or TD_Name==None:
return results['drives']
else:
TDlist= results['drives']
for drive in results['drives']:
if str(TD_Name).lower() == str(drive['name']).lower():
root=drive['id']
break
if root != 'root':
return root
def get_session(self,hd=None):
if self.filetype=='file':
if self.session==None:
self.session = requests.session()
if hd==None:
header=get_html_header(self.access_token)
else:
header=hd
self.session.headers=header
URL='https://www.googleapis.com/drive/v3/files/'+self.ID+'?alt=media'
res = self.session.get(URL, stream=True)
self.response=res
else:
self.session=None
self.response=None
def rewind(self):
hd=get_html_header(self.access_token,0,int(self.size))
self.get_session(hd)
self.position=0
def seek(self,p,off2=None):
if off2==None:
off2=self.size
p=int(p);off2=int(off2)-0x01
hd=get_html_header(self.access_token,p,off2)
self.get_session(hd)
self.position=p
def read(self,sz=None,buffer=None):
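		# request the next sz bytes with a Range header and advance the current position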
if buffer==None:
buf=64*1024
else:
buf=int(buffer)
if sz==None:
sz=self.size-self.position
sz=int(sz)
if buf>sz:
buf=sz
end=self.position+sz-0x01
hd=get_html_header(self.access_token,self.position,end)
self.get_session(hd)
data=b''
for dump in self.response.iter_content(chunk_size=buf):
data=data+dump
self.position=self.position+len(dump)
if not dump:
break
return data
def read_at(self,pos,sz,buffer=None):
if buffer==None:
buf=64*1024
else:
buf=int(buffer)
pos=int(pos);off2=int(sz)
self.position=pos
sz=int(sz)
if buf>sz:
buf=sz
end=self.position+sz-0x01
hd=get_html_header(self.access_token,self.position,end)
self.get_session(hd)
data=b''
for dump in self.response.iter_content(chunk_size=buf):
data=data+dump
self.position=self.position+len(dump)
if not dump:
break
return data
def readInt8(self, byteorder='little', signed = False):
return int.from_bytes(self.read(1), byteorder=byteorder, signed=signed)
def readInt16(self, byteorder='little', signed = False):
return int.from_bytes(self.read(2), byteorder=byteorder, signed=signed)
def readInt32(self, byteorder='little', signed = False):
return int.from_bytes(self.read(4), byteorder=byteorder, signed=signed)
def readInt64(self, byteorder='little', signed = False):
return int.from_bytes(self.read(8), byteorder=byteorder, signed=signed)
def readInt128(self, byteorder='little', signed = False):
return int.from_bytes(self.read(16), byteorder=byteorder, signed=signed)
def create_token(name,headless=True,SCOPES='default'):
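	# run the OAuth flow for a new account alias and cache the pickled credentials under zconfig/credentials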
tk = os.path.join(credentials_dir, name)
if os.path.exists(tk):
os.remove(tk)
auth(token=name,SCOPES_type=SCOPES,headless=headless)
if os.path.exists(tk):
print("\nSUCCESS!!!\n")
else:
		print("\nFAILURE!!!\n")
def token_picker():
files=folder_to_list(credentials_dir,"all")
# print(files)
tokens=list();names=list()
for file in files:
bname=os.path.basename(os.path.abspath(file))
test=bname.split(".")
if len(test)==1 and file not in tokens:
tokens.append(file)
names.append(str(os.path.basename(os.path.abspath(file))))
if len(names)>1:
title = 'Pick an account (press SPACE\RIGHT to mark\\unmark, ENTER to continue): '
options = names
selected = pick(options, title, min_selection_count=1)
tok=names[selected[1]]
elif len(names)==1:
tok=names[0]
else:
tok=False
return tok
def folder_walker(showfiles=False,Print=False):
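	# interactive browser: pick an account and Team Drive, then descend folder by folder until the user stops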
account=token_picker()
path=account+':'
TD=TD_picker(path)
while True:
folder=search_folder(path,TD=TD,mime='folders',pickmode='single',Print=False)
if folder==False:
break
path=folder
if Print==True:
print(path)
if showfiles==False:
return path,TD
else:
files=search_folder(path,TD=TD)
def search_folder(path,TD=None,ext=None,filter=None,order=None,mime='files',Pick=True,Print=True,pickmode='multi'):
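	# list files or folders below one or more drive paths, optionally filtered by name and picked interactively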
file_list=list();userfilter=filter;isroot=False;TDlist=False;file_listv2=list()
if isinstance(path, list):
paths=path
else:
paths=path.split('.+')
if isinstance(TD, list):
TDlist=TD
index=0
for path in paths:
# print(path)
try:
if userfilter==None or filter=="":
filter=""
else:
filter=" and name contains '{}'".format(userfilter)
if TDlist!=False:
TD=TDlist[index]
index+=1
if TD=="pick":
TD=TD_picker(path)
if TD!= None:
remote=location(route=path,TD_Name=TD)
else:
remote=location(route=path)
drive_service=remote.drive_service
if drive_service==None:
if remote.token_name==None:
auth=auth()
else:
auth=auth(token=token_name)
drive_service=auth.drive_service
tk,fl=get_path_parameters(path)
if not fl and TD==None:
root='root';remote.root=root;remote.ID=root
elif not fl:
root=remote.ID
remote.root=root
remote.ID=root
else:
root=remote.root
# print(remote.ID)
if mime=='files':
page_token = None;pagesize=1000
while True:
if root != 'root':
results = drive_service.files().list(
q="mimeType!='application/vnd.google-apps.folder' and '{}' in parents{}".format(remote.ID,filter),
pageSize=pagesize,pageToken=page_token,fields="nextPageToken, files(id, name, size, createdTime)",includeItemsFromAllDrives = True,supportsAllDrives = True).execute()
else:
results = drive_service.files().list(
q="mimeType!='application/vnd.google-apps.folder' and '{}' in parents{}".format(remote.ID,filter),
pageSize=pagesize,pageToken=page_token,fields="nextPageToken, files(id, name, size, createdTime)").execute()
items = results.get('files', [])
try:
page_token = results.get('nextPageToken', None)
except:pass
for file in items:
try:
file_list.append([file['name'],file['size'],path,file['createdTime']])
except:pass
if Print==True:
print(f'- {path}: Total Retrieved '+str(len(file_list)))
if page_token == None:
break
elif mime=='folders':
page_token = None;pagesize=100
while True:
if root != 'root':
results = drive_service.files().list(
q="mimeType='application/vnd.google-apps.folder' and '{}' in parents{}".format(remote.ID,filter),
pageSize=pagesize,pageToken=page_token,fields="nextPageToken, files(id, name)",includeItemsFromAllDrives = True,supportsAllDrives = True).execute()
else:
results = drive_service.files().list(
q="mimeType='application/vnd.google-apps.folder' and '{}' in parents{}".format(remote.ID,filter),
pageSize=pagesize,pageToken=page_token,fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
try:
page_token = results.get('nextPageToken', None)
except:pass
for file in items:
try:
file_list.append([file['name'],path])
except:pass
if Print==True:
print(f'- {path}: Total Retrieved '+str(len(file_list)))
if page_token == None:
break
except:
print(f'- {path}: Retrieved 0')
return False
if not file_list:
return False
file_list.sort(key=lambda x: x[0])
if Pick==True:
if pickmode!='single':
title = 'Select results (press SPACE\RIGHT to mark\\unmark, ENTER to continue): '
elif mime=="files":
title = 'Select result:'
else:
title = 'Select result:\n + Press space or right to select content \n + Press E to finish selection'
oplist=list();cleanlist=list()
if mime=='folders':
for item in file_list:
oplist.append(item[0])
cleanlist.append(clean_name(item[0]))
else:
for item in file_list:
sz=str(sq_tools.getSize(int(item[1])))
oplist.append(item[0]+' | '+sz)
cleanlist.append(clean_name(item[0])+' | '+sz)
options = cleanlist
if pickmode!='single':
selected = pick(options, title, multi_select=True, min_selection_count=0)
elif mime=="files":
selected = pick(options, title, min_selection_count=1)
if selected[0]==False:
return False
else:
picker = Picker(options, title, min_selection_count=1)
def end_selection(picker):
return False,-1
picker.register_custom_handler(ord('e'), end_selection)
picker.register_custom_handler(ord('E'), end_selection)
selected=picker.start()
if selected[0]==False:
return False
# print (selected)
oplist=file_list;file_list=list()
if pickmode=='single':
if mime=='folders':
basepath=oplist[selected[1]][1]
if basepath[-1]!="/" and basepath[-1]!="\\":
basepath=basepath+'/'
pth=basepath+oplist[selected[1]][0]
else:
basepath=oplist[selected[1]][2]
if basepath[-1]!="/" and basepath[-1]!="\\":
basepath=basepath+'/'
pth=basepath+oplist[selected[1]][0]
return pth
if mime=='folders':
for file in selected:
basepath=oplist[file[1]][1]
if basepath[-1]!="/" and basepath[-1]!="\\":
basepath=basepath+'/'
pth=basepath+oplist[file[1]][0]
file_list.append(pth)
else:
for file in selected:
basepath=oplist[file[1]][2]
if basepath[-1]!="/" and basepath[-1]!="\\":
basepath=basepath+'/'
pth=basepath+oplist[file[1]][0]
file_list.append(pth)
if not file_list:
return False
if Print==True:
print("\n- User selected the following results: ")
for file in file_list:
print(file)
else:
print("- User selected {} files".format(str(len(file_list))))
if TDlist!=False and file_list:
file_listv2.append([file_list,TD])
if TDlist!=False:
return file_listv2
return file_list
def get_TeamDrives(token='drive:',Print=False):
remote=location(route=token)
TD_list=remote.get_TD()
names=list();ids=list()
names.append('None');ids.append('root')
for drive in TD_list:
names.append(drive['name']);ids.append(drive['id'])
if Print!=False:
print(drive['name']+' id: '+drive['id'])
return names,ids
# print(remote.ID)
# print(remote.name)
def get_Data(path,TD=None,get_hash=False,Print=True):
if TD!= None:
remote=location(route=path,TD_Name=TD)
else:
remote=location(route=path)
ID=remote.ID
type=remote.filetype
name=remote.name
if Print==True:
print('- ID: '+ID);print('- Name: '+name);print('- Type: '+type)
if type!='folder':
size=remote.get_size(remote.ID,remote.token_name)
if Print==True:
print('- Size: '+size)
else:
size=None
if get_hash==True:
md5=remote.get_hash(remote.ID,remote.token_name)
if Print==True:
print('- MD5Hash: '+md5)
else:
md5=None
return ID,name,type,size,md5,remote
def download(path,ofolder,TD=None,filter=None,trimm=True):
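	# download a single file, or let the user pick from a folder; .xci files are cut at the cartridge ROM size when trimm is True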
if path=='pick':
account=token_picker()
TD=TD_picker(account)
return
test=path.split(".+")
if TD=='pick':
TD=TD_picker(path)
if len(test)>1 or path.endswith('/') or path.endswith('\\'):
type="folder"
else:
ID,name,type,size,md5,remote=get_Data(path,TD=TD,Print=False)
output=os.path.join(ofolder,name)
if type!='file':
# print('Path is a folder')
folderpicker(path,ofolder,TD,filter)
return
else:
if name.endswith(".xci") and trimm==True:
end=DriveTools.get_xci_rom_size(remote)
hd=get_html_header(remote.access_token,off1=0,off2=end)
remote.get_session(hd)
size=end
else:
remote.get_session()
buf=int(64*1024)
print("- Downloading file to {}".format(output))
t = tqdm(total=int(size), unit='B', unit_scale=True, leave=False)
with open(output,"wb") as o:
for data in remote.response.iter_content(chunk_size=buf):
o.write(data)
t.update(len(data))
if not data:
break
t.close()
print(" *Finished*")
def TD_picker(path):
remote=location(route=path)
names,ids=get_TeamDrives(remote.token_name)
if names:
title = 'Select Teamdrive (press SPACE\RIGHT to mark\\unmark, ENTER to continue): \n"None" will return the My-Drive section of the account'
options = names
selected = pick(options, title, min_selection_count=1)
TD=selected[0]
if TD=='None':
TD=None
else:
TD=None
return TD
def folderpicker(path,ofolder=None,TD=None,filter=None,mode='download'):
	pathlist=search_folder(path,TD,Pick=True,Print=False,filter=filter)
counter=len(pathlist)
for path in pathlist:
if mode=='download':
download(path,ofolder,TD)
elif mode=='get_cnmt_data':
DriveTools.get_cnmt_data(path,TD)
elif mode=='decompress':
from Drive.Decompress import decompress
decompress(path,ofolder,TD)
elif mode=='supertrimm':
from Drive.XciTools import supertrimm
supertrimm(path,ofolder,TD)
elif mode=='read_cnmt':
from Drive.Info import read_cnmt
read_cnmt(path,TD)
counter-=1
if counter>0:
print("Still {} files to download".format(str(counter)))
def add_to_drive(url=None,ID=None,filepath=None,makecopy=False,TD=None):
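	# add an already shared file to the target folder, either by linking the original ID or by making a server-side copy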
# print(url);print(ID);print(filepath);print(makecopy);print(TD);
try:
if ID==None and url==None:
return False
if filepath==None:
return False
if ID!=None:
file_id=ID
else:
file_id, is_download_link=Public.parse_url(url)
if TD==None:
remote=location(route=filepath)
else:
remote=location(route=filepath,TD_Name=TD)
# remote=location(route=filepath)
drive=remote.drivename
FolderID=remote.ID
if makecopy!=True:
file = remote.drive_service.files().update(fileId=file_id,
addParents=FolderID,
fields='id,parents,name').execute()
name=file.get('name')
else:
result=remote.drive_service.files().get(fileId=file_id, fields="name,mimeType").execute()
name=result['name']
newfile = {'name': name, 'parents' : [FolderID]}
file = remote.drive_service.files().copy(fileId=file_id,
body=newfile,
fields='id, parents').execute()
print("{} was added to drive".format(name))
except IOError as e:
print(e, file=sys.stderr)
return name
# def get_plink_data(url=None,ID=None,filepath=None,TD=None):
# try:
# if ID==None and url==None:
# return False
# if filepath==None:
# return False
# if ID!=None:
# file_id=ID
# else:
# file_id, is_download_link=Public.parse_url(url)
# if TD==None:
# remote=location(route=filepath)
# else:
# remote=location(route=filepath,TD_Name=TD)
# remote=location(route=filepath)
# drive=remote.drivename
# FolderID=remote.ID
# result=remote.drive_service.files().get(fileId=file_id, fields="name,mimeType").execute()
# name=result['name']
# id=file_id
# except IOError as e:
# print(e, file=sys.stderr)
# return name,id
def delete_from_drive(filepath=None,url=None,lkID=None,TD=None):
try:
if filepath==None and url==None and lkID==None:
return False
if TD==None:
remote=location(route=filepath)
else:
remote=location(route=filepath,TD_Name=TD)
if lkID != None:
file_id=lkID
elif url != None:
file_id, is_download_link=Public.parse_url(url)
elif filepath != None:
file_id=remote.ID
file = remote.drive_service.files().get(fileId=file_id,
fields='parents').execute()
# print(file.get('parents'))
previous_parents = ",".join(file.get('parents'))
file = remote.drive_service.files().update(fileId=file_id,
removeParents=previous_parents,
fields='name,id,parents').execute()
print("{} was removed from drive".format(file['name']))
except IOError as e:
print(e, file=sys.stderr)
def move(origin,destiny,originTD,destinyTD):
if originTD==None:
Oremote=location(route=origin)
else:
Oremote=location(route=origin,TD_Name=originTD)
fileId=Oremote.ID
# print(fileId)
# print(Oremote.name)
if destinyTD==None:
Dremote=location(route=destiny)
else:
Dremote=location(route=destiny,TD_Name=destinyTD)
folderID=Dremote.ID
# print(folderID)
file = Oremote.drive_service.files().get(fileId=fileId,
fields='parents').execute()
previous_parents = ",".join(file.get('parents'))
file = Dremote.drive_service.files().update(fileId=fileId,
removeParents=previous_parents,
addParents=folderID,
fields='id, parents').execute()
def create_folder(filepath):
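	# create any missing folders along the route one segment at a time and return the ID of the deepest one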
c=0;parent='root';endID=False
try:
remote=location(route=filepath)
if remote.ID!=None:
print("- Folder already exists")
return True,remote.ID
else:
token_name,folder_list=get_path_parameters(filepath)
remote=location(route=token_name)
if remote.ID==None:
print("Bad token")
return False,False
folder="{}:".format(token_name)
for j in folder_list:
folder+=('/'+j)
remote=location(route=folder)
if remote.ID==None:
try:
file_metadata = {
'name': j,
'mimeType': 'application/vnd.google-apps.folder',
'parents': [parent]
}
# print(file_metadata)
file = remote.drive_service.files().create(body=file_metadata,
fields='id').execute()
parent=file.get('id')
endID=parent
print("Created {} with ID: {}".format(folder,parent))
except:
return False,False
else:
parent=remote.ID
except:
token_name,folder_list=get_path_parameters(filepath)
remote=location(route=token_name)
if remote.ID==None:
print("Bad token")
return False,False
folder="{}:".format(token_name)
for j in folder_list:
folder+=('/'+j)
remote=location(route=folder)
if remote.ID==None:
try:
file_metadata = {
'name': j,
'mimeType': 'application/vnd.google-apps.folder',
'parents': [parent]
}
print(file_metadata)
file = remote.drive_service.files().create(body=file_metadata,
fields='id').execute()
parent=file.get('id')
endID=parent
print("Created {} with ID: {}".format(folder,parent))
except:
return False,False
else:
parent=remote.ID
return True,endID
def get_parents(path,ID=None,url=None):
file_id=ID
remote=location(route=path)
if ID !=None:
file = remote.drive_service.files().get(fileId=file_id,
fields='parents').execute()
return file.get('parents')
elif url !=None:
file_id, is_download_link=Public.parse_url(url)
file = remote.drive_service.files().get(fileId=file_id,
fields='parents').execute()
return file.get('parents')
else:
return False
def clean_name(name):
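	# replace accented characters and romanize Japanese text via nutdb's converter so names display safely in the picker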
import nutdb
name = re.sub(r'[àâá@äå]', 'a', name);name = re.sub(r'[ÀÂÁÄÅ]', 'A', name)
name = re.sub(r'[èêéë]', 'e', name);name = re.sub(r'[ÈÊÉË]', 'E', name)
name = re.sub(r'[ìîíï]', 'i', name);name = re.sub(r'[ÌÎÍÏ]', 'I', name)
name = re.sub(r'[òôóöø]', 'o', name);name = re.sub(r'[ÒÔÓÖØ]', 'O', name)
name = re.sub(r'[ùûúü]', 'u', name);name = re.sub(r'[ÙÛÚÜ]', 'U', name)
converter = nutdb.kakashi_conv()
name=converter.do(name)
if name[0] == ' ':
name=name[1:]
name=name[0].upper()+name[1:]
name=nutdb.set_roma_uppercases(name)
return name
|
the-stack_0_19086 | from django.urls import reverse
from rest_framework import status
from bluebottle.test.utils import BluebottleTestCase
from bluebottle.test.factory_models.news import NewsItemFactory
class NewsItemApiTestCase(BluebottleTestCase):
"""
Integration tests for the NewsItem API.
"""
def setUp(self):
super(NewsItemApiTestCase, self).setUp()
self.some_dutch_news = NewsItemFactory.create(language='nl')
self.some_other_dutch_news = NewsItemFactory.create(language='nl')
self.third_dutch_news = NewsItemFactory.create(language='nl')
self.some_english_news = NewsItemFactory.create(language='en')
self.some_other_english_news = NewsItemFactory.create(language='en')
self.some_unpublished_english_news = NewsItemFactory.create(status='draft', language='en')
class NewsItemsApiTest(NewsItemApiTestCase):
"""
Test case for the ``NewsItem`` API view
Endpoint: /api/news/items/
"""
def test_news_list_unfiltered(self):
"""
Test retrieving news items.
"""
response = self.client.get(reverse('news_item_list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 5)
def test_news_list_filtered(self):
"""
Test filtering news items by language.
"""
# Check that we have 3 dutch news items
response = self.client.get(reverse('news_item_list'),
{'language': 'nl'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 3)
self.assertEqual(response.data['results'][0]['language'], 'nl')
# Check that we have 2 english news items
response = self.client.get(reverse('news_item_list'),
{'language': 'en'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 2)
self.assertEqual(response.data['results'][0]['language'], 'en')
def test_news_post_details(self):
"""
Test retrieving a single news item.
"""
news_item_url = reverse('news_post_detail',
kwargs={'slug': self.some_dutch_news.slug})
response = self.client.get(news_item_url)
self.assertEqual(response.status_code, status.HTTP_200_OK,
response.data)
self.assertEqual(response.data['title'], self.some_dutch_news.title)
def test_news_post_by_language(self):
"""
Test retrieving a single news item.
"""
NewsItemFactory.create(language='nl', slug='update', title='Hier is een update')
NewsItemFactory.create(language='en', slug='update', title='This is happening now')
news_item_url = reverse('news_post_detail', kwargs={'slug': 'update'})
response = self.client.get(news_item_url, HTTP_X_APPLICATION_LANGUAGE='nl')
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(response.data['title'], 'Hier is een update')
response = self.client.get(news_item_url, HTTP_X_APPLICATION_LANGUAGE='en')
self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
self.assertEqual(response.data['title'], 'This is happening now')
def test_news_post_by_wrong_slug(self):
"""
Test retrieving a single news item.
"""
NewsItemFactory.create(language='nl', slug='update')
news_item_url = reverse('news_post_detail', kwargs={'slug': 'vzzbx'})
response = self.client.get(news_item_url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
|
the-stack_0_19087 | from django.core.management.base import BaseCommand
from company.models import Company, CompanyUser
from company.tests.factories import CompanyFactory, CompanyUserFactory
class Command(BaseCommand):
help = 'Masks personal company/supplier fields with test data'
def handle(self, *args, **options):
self.mask_company_data()
self.mask_supplier_data()
def mask_company_data(self):
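        # overwrite personal contact fields with values generated by CompanyFactory so no real data remains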
queryset = Company.objects.all()
failed = 0
succeded = 0
for company in queryset:
try:
message = f'Company {company.pk} updated'
company_factory = CompanyFactory.build()
company.mobile_number = company_factory.mobile_number
company.postal_full_name = company_factory.postal_full_name
company.address_line_1 = company_factory.address_line_1
company.address_line_2 = company_factory.address_line_2
company.postal_code = company_factory.postal_code
company.po_box = company_factory.po_box
company.email_address = company_factory.email_address
company.email_full_name = company_factory.email_full_name
company.save()
self.stdout.write(self.style.SUCCESS(message))
succeded += 1
except Exception as e:
self.stdout.write(self.style.ERROR(e))
failed += 1
self.stdout.write(self.style.SUCCESS(f'{succeded} companies updated'))
self.stdout.write(self.style.WARNING(f'{failed} companies failed'))
def mask_supplier_data(self):
failed = 0
succeded = 0
for supplier in CompanyUser.objects.all():
try:
supplier_factory = CompanyUserFactory.build()
message = f'supplier {supplier.pk} updated'
supplier.name = supplier_factory.name
supplier.company_email = supplier_factory.company_email
supplier.mobile_number = supplier_factory.mobile_number
supplier.save()
self.stdout.write(self.style.SUCCESS(message))
succeded += 1
except Exception as e:
self.stdout.write(self.style.ERROR(e))
failed += 1
self.stdout.write(self.style.SUCCESS(f'{succeded} supplier updated'))
self.stdout.write(self.style.WARNING(f'{failed} supplier failed'))
|