ext | sha | content
---|---|---
py
|
1a5e40c02bfcc4421c626f95635db43681da2394
|
"""Shelly Configuration Schemas."""
# pylint: disable=dangerous-default-value
from homeassistant.const import (
CONF_DEVICES, CONF_DISCOVERY, CONF_ID, CONF_NAME, CONF_PASSWORD,
CONF_SCAN_INTERVAL, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP)
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from .const import *
ALL_SENSORS_W_EXTRA = list(ALL_SENSORS.keys()) + list(EXTRA_SENSORS.keys())
SENSOR_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME): cv.string,
})
SETTING_SCHEMA = vol.Schema({
vol.Optional(CONF_DECIMALS): cv.positive_int,
vol.Optional(CONF_DIV): cv.positive_int,
vol.Optional(CONF_UNIT): cv.string
})
SETTINGS_SCHEMA = vol.Schema({
vol.Optional('temperature'): SETTING_SCHEMA,
vol.Optional('humidity'): SETTING_SCHEMA,
vol.Optional('illuminance'): SETTING_SCHEMA,
vol.Optional('current'): SETTING_SCHEMA,
vol.Optional('total_consumption'): SETTING_SCHEMA,
vol.Optional('total_returned'): SETTING_SCHEMA,
vol.Optional('current_consumption'): SETTING_SCHEMA,
vol.Optional('device_temp'): SETTING_SCHEMA,
vol.Optional('voltage'): SETTING_SCHEMA,
vol.Optional('power_factor'): SETTING_SCHEMA,
vol.Optional('uptime'): SETTING_SCHEMA,
vol.Optional('rssi'): SETTING_SCHEMA
})
DEVICE_SCHEMA = vol.Schema({
vol.Required(CONF_ID): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_LIGHT_SWITCH, default=False): cv.boolean,
vol.Optional(CONF_SENSORS):
vol.All(cv.ensure_list, [vol.In(ALL_SENSORS_W_EXTRA)]),
vol.Optional(CONF_UPGRADE_SWITCH): cv.boolean,
    vol.Optional(CONF_UNAVALABLE_AFTER_SEC): cv.positive_int,
vol.Optional(CONF_ENTITY_ID): cv.string,
vol.Optional(CONF_POWER_DECIMALS): cv.positive_int, #deprecated
vol.Optional(CONF_SETTINGS, default={}): SETTINGS_SCHEMA
})
STEP_SCHEMA = vol.Schema({
vol.Optional(CONF_OBJECT_ID_PREFIX,
default=DEFAULT_OBJECT_ID_PREFIX): str,
})
CONFIG_SCHEMA_ROOT = vol.Schema({
vol.Optional(CONF_IGMPFIX,
default=DEFAULT_IGMPFIX): cv.boolean,
vol.Optional(CONF_SHOW_ID_IN_NAME,
default=DEFAULT_SHOW_ID_IN_NAME): cv.boolean,
vol.Optional(CONF_DISCOVERY,
default=DEFAULT_DISCOVERY): cv.boolean,
vol.Optional(CONF_OBJECT_ID_PREFIX,
default=DEFAULT_OBJECT_ID_PREFIX): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_DEVICES,
default=[]): vol.All(cv.ensure_list, [DEVICE_SCHEMA]),
vol.Optional(CONF_VERSION,
default=False): cv.boolean,
vol.Optional(CONF_WIFI_SENSOR): cv.boolean, #deprecated
vol.Optional(CONF_UPTIME_SENSOR): cv.boolean, #deprecated
vol.Optional(CONF_UPGRADE_SWITCH, default=True): cv.boolean,
    vol.Optional(CONF_UNAVALABLE_AFTER_SEC, default=90): cv.positive_int,
vol.Optional(CONF_SENSORS, default=DEFAULT_SENSORS):
vol.All(cv.ensure_list, [vol.In(ALL_SENSORS_W_EXTRA)]),
vol.Optional(CONF_ATTRIBUTES, default=list(DEFAULT_ATTRIBUTES)):
vol.All(cv.ensure_list,
[vol.In(ALL_ATTRIBUTES | EXTRA_ATTRIBUTES)]),
vol.Optional(CONF_ADDITIONAL_INFO,
default=True): cv.boolean,
vol.Optional(CONF_SCAN_INTERVAL,
default=DEFAULT_SCAN_INTERVAL): cv.positive_int,
vol.Optional(CONF_POWER_DECIMALS): cv.positive_int, #deprecated
vol.Optional(CONF_LOCAL_PY_SHELLY,
default=False): cv.boolean,
    vol.Optional(CONF_ONLY_DEVICE_ID): cv.string,
    vol.Optional(CONF_CLOUD_AUTH_KEY): cv.string,
    vol.Optional(CONF_CLOUD_SERVER): cv.string,
    vol.Optional(CONF_TMPL_NAME): cv.string,
    vol.Optional(CONF_DISCOVER_BY_IP, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_MDNS, default=DEFAULT_MDNS): cv.boolean,
    vol.Optional(CONF_HOST_IP, default=''): cv.string,
vol.Optional(CONF_SETTINGS, default={}): SETTINGS_SCHEMA
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: CONFIG_SCHEMA_ROOT
}, extra=vol.ALLOW_EXTRA)
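# Illustrative sketch (not part of the component): validating a minimal
# configuration dict against the root schema. The Home Assistant constants
# used as keys resolve to their usual string values ('devices', 'id', 'name',
# 'username', 'password'); defaults declared above are filled in on the
# returned dict.
#
#     sample_conf = {
#         'devices': [{'id': '134A6B', 'name': 'Kitchen light'}],
#         'username': 'admin',
#         'password': 'secret',
#     }
#     validated = CONFIG_SCHEMA_ROOT(sample_conf)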
|
py
|
1a5e413432ebc2ad05d658bbde49b48e759ba400
|
import io
import json
import os
import subprocess
from setuptools import Command
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist as base_sdist
from wagtail import __semver__
class assets_mixin:
def compile_assets(self):
try:
subprocess.check_call(['npm', 'run', 'dist'])
except (OSError, subprocess.CalledProcessError) as e:
print('Error compiling assets: ' + str(e)) # noqa
raise SystemExit(1)
def publish_assets(self):
try:
subprocess.check_call(['npm', 'publish', 'client'])
except (OSError, subprocess.CalledProcessError) as e:
print('Error publishing front-end assets: ' + str(e)) # noqa
raise SystemExit(1)
def bump_client_version(self):
"""
Writes the current Wagtail version number into package.json
"""
        path = os.path.join('.', 'client', 'package.json')
        try:
            # io.open in text mode already returns str, so no decode() is needed
            with io.open(path, "r", encoding="utf-8") as input_file:
                package = json.loads(input_file.read())
        except ValueError as e:
            print('Unable to read ' + path + ' ' + str(e))  # noqa
            raise SystemExit(1)
package['version'] = __semver__
try:
with io.open(path, 'w', encoding='utf-8') as f:
f.write(str(json.dumps(package, indent=2, ensure_ascii=False)))
except (IOError) as e:
print('Error setting the version for front-end assets: ' + str(e)) # noqa
raise SystemExit(1)
class assets(Command, assets_mixin):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
self.bump_client_version()
self.compile_assets()
self.publish_assets()
class sdist(base_sdist, assets_mixin):
def run(self):
self.compile_assets()
base_sdist.run(self)
class check_bdist_egg(bdist_egg):
# If this file does not exist, warn the user to compile the assets
sentinel_dir = 'wagtail/wagtailadmin/static/'
def run(self):
bdist_egg.run(self)
if not os.path.isdir(self.sentinel_dir):
print("\n".join([ # noqa
"************************************************************",
"The front end assets for Wagtail are missing.",
"To generate the assets, please refer to the documentation in",
"docs/contributing/css_guidelines.rst",
"************************************************************",
]))
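# Illustrative sketch (an assumption, not taken from this file): these command
# classes would typically be registered with setuptools through the cmdclass
# argument of setup(), e.g.
#
#     setup(
#         ...,
#         cmdclass={
#             'assets': assets,
#             'sdist': sdist,
#             'bdist_egg': check_bdist_egg,
#         },
#     )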
|
py
|
1a5e41a6bbdb59a7fda26c5a08448aa72e0a1923
|
import ast
import functools
import inspect
import re
import sys
import textwrap
import numpy as np
import taichi.lang
from taichi._lib import core as _ti_core
from taichi.lang import impl, runtime_ops, util
from taichi.lang.ast import (ASTTransformerContext, KernelSimplicityASTChecker,
transform_tree)
from taichi.lang.enums import Layout
from taichi.lang.exception import TaichiSyntaxError
from taichi.lang.expr import Expr
from taichi.lang.matrix import MatrixType
from taichi.lang.shell import _shell_pop_print, oinspect
from taichi.lang.util import to_taichi_type
from taichi.linalg.sparse_matrix import sparse_matrix_builder
from taichi.tools.util import obsolete
from taichi.types import any_arr, primitive_types, template
from taichi import _logging
if util.has_pytorch():
import torch
def func(fn):
"""Marks a function as callable in Taichi-scope.
This decorator transforms a Python function into a Taichi one. Taichi
will JIT compile it into native instructions.
Args:
fn (Callable): The Python function to be decorated
Returns:
Callable: The decorated function
Example::
>>> @ti.func
>>> def foo(x):
>>> return x + 2
>>>
>>> @ti.kernel
>>> def run():
>>> print(foo(40)) # 42
"""
is_classfunc = _inside_class(level_of_class_stackframe=3)
fun = Func(fn, _classfunc=is_classfunc)
@functools.wraps(fn)
def decorated(*args):
return fun.__call__(*args)
decorated._is_taichi_function = True
return decorated
def pyfunc(fn):
"""Marks a function as callable in both Taichi and Python scopes.
When called inside the Taichi scope, Taichi will JIT compile it into
native instructions. Otherwise it will be invoked directly as a
Python function.
See also :func:`~taichi.lang.kernel_impl.func`.
Args:
fn (Callable): The Python function to be decorated
Returns:
Callable: The decorated function
"""
is_classfunc = _inside_class(level_of_class_stackframe=3)
fun = Func(fn, _classfunc=is_classfunc, _pyfunc=True)
@functools.wraps(fn)
def decorated(*args):
return fun.__call__(*args)
decorated._is_taichi_function = True
return decorated
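# Illustrative usage of pyfunc (an assumption, mirroring the ti.func example above):
#
#     @ti.pyfunc
#     def add_two(x):
#         return x + 2
#
#     add_two(40)            # callable directly from Python-scope -> 42
#
#     @ti.kernel
#     def run():
#         print(add_two(40))  # JIT-compiled when called from Taichi-scope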
def _get_tree_and_ctx(self,
excluded_parameters=(),
is_kernel=True,
arg_features=None,
args=None):
file = oinspect.getsourcefile(self.func)
src, start_lineno = oinspect.getsourcelines(self.func)
src = [textwrap.fill(line, tabsize=4, width=9999) for line in src]
tree = ast.parse(textwrap.dedent("\n".join(src)))
func_body = tree.body[0]
func_body.decorator_list = []
global_vars = _get_global_vars(self.func)
for i, arg in enumerate(func_body.args.args):
anno = arg.annotation
if isinstance(anno, ast.Name):
global_vars[anno.id] = self.argument_annotations[i]
if isinstance(func_body.returns, ast.Name):
global_vars[func_body.returns.id] = self.return_type
if is_kernel or impl.get_runtime().experimental_real_function:
# inject template parameters into globals
for i in self.template_slot_locations:
template_var_name = self.argument_names[i]
global_vars[template_var_name] = args[i]
return tree, ASTTransformerContext(excluded_parameters=excluded_parameters,
is_kernel=is_kernel,
func=self,
arg_features=arg_features,
global_vars=global_vars,
argument_data=args,
src=src,
start_lineno=start_lineno,
file=file)
class Func:
function_counter = 0
def __init__(self, _func, _classfunc=False, _pyfunc=False):
self.func = _func
self.func_id = Func.function_counter
Func.function_counter += 1
self.compiled = None
self.classfunc = _classfunc
self.pyfunc = _pyfunc
self.argument_annotations = []
self.argument_names = []
self.return_type = None
self.extract_arguments()
self.template_slot_locations = []
for i, anno in enumerate(self.argument_annotations):
if isinstance(anno, template):
self.template_slot_locations.append(i)
self.mapper = TaichiCallableTemplateMapper(
self.argument_annotations, self.template_slot_locations)
self.taichi_functions = {} # The |Function| class in C++
def __call__(self, *args):
if not impl.inside_kernel():
if not self.pyfunc:
raise TaichiSyntaxError(
"Taichi functions cannot be called from Python-scope."
" Use @ti.pyfunc if you wish to call Taichi functions "
"from both Python-scope and Taichi-scope.")
return self.func(*args)
if impl.get_runtime().experimental_real_function:
if impl.get_runtime().current_kernel.is_grad:
raise TaichiSyntaxError(
"Real function in gradient kernels unsupported.")
instance_id, _ = self.mapper.lookup(args)
key = _ti_core.FunctionKey(self.func.__name__, self.func_id,
instance_id)
if self.compiled is None:
self.compiled = {}
if key.instance_id not in self.compiled:
self.do_compile(key=key, args=args)
return self.func_call_rvalue(key=key, args=args)
tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args)
ret = transform_tree(tree, ctx)
if not impl.get_runtime().experimental_real_function:
if self.return_type and not ctx.returned:
raise TaichiSyntaxError(
"Function has a return type but does not have a return statement"
)
return ret
def func_call_rvalue(self, key, args):
# Skip the template args, e.g., |self|
assert impl.get_runtime().experimental_real_function
non_template_args = []
for i, anno in enumerate(self.argument_annotations):
if not isinstance(anno, template):
non_template_args.append(args[i])
non_template_args = impl.make_expr_group(non_template_args)
return Expr(
_ti_core.make_func_call_expr(
self.taichi_functions[key.instance_id], non_template_args))
def do_compile(self, key, args):
tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args)
self.compiled[key.instance_id] = lambda: transform_tree(tree, ctx)
self.taichi_functions[key.instance_id] = _ti_core.create_function(key)
self.taichi_functions[key.instance_id].set_function_body(
self.compiled[key.instance_id])
def extract_arguments(self):
sig = inspect.signature(self.func)
if sig.return_annotation not in (inspect._empty, None):
self.return_type = sig.return_annotation
params = sig.parameters
arg_names = params.keys()
for i, arg_name in enumerate(arg_names):
param = params[arg_name]
if param.kind == inspect.Parameter.VAR_KEYWORD:
raise KernelDefError(
'Taichi functions do not support variable keyword parameters (i.e., **kwargs)'
)
if param.kind == inspect.Parameter.VAR_POSITIONAL:
raise KernelDefError(
'Taichi functions do not support variable positional parameters (i.e., *args)'
)
if param.kind == inspect.Parameter.KEYWORD_ONLY:
raise KernelDefError(
'Taichi functions do not support keyword parameters')
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
raise KernelDefError(
'Taichi functions only support "positional or keyword" parameters'
)
annotation = param.annotation
if annotation is inspect.Parameter.empty:
if i == 0 and self.classfunc:
annotation = template()
# TODO: pyfunc also need type annotation check when real function is enabled,
# but that has to happen at runtime when we know which scope it's called from.
elif not self.pyfunc and impl.get_runtime(
).experimental_real_function:
raise KernelDefError(
f'Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated'
)
else:
if not id(annotation
) in primitive_types.type_ids and not isinstance(
annotation, template):
raise KernelDefError(
f'Invalid type annotation (argument {i}) of Taichi function: {annotation}'
)
self.argument_annotations.append(annotation)
self.argument_names.append(param.name)
class TaichiCallableTemplateMapper:
def __init__(self, annotations, template_slot_locations):
self.annotations = annotations
self.num_args = len(annotations)
self.template_slot_locations = template_slot_locations
self.mapping = {}
@staticmethod
def extract_arg(arg, anno):
if isinstance(anno, template):
if isinstance(arg, taichi.lang.snode.SNode):
return arg.ptr
if isinstance(arg, taichi.lang.expr.Expr):
return arg.ptr.get_underlying_ptr_address()
if isinstance(arg, _ti_core.Expr):
return arg.get_underlying_ptr_address()
if isinstance(arg, tuple):
return tuple(
TaichiCallableTemplateMapper.extract_arg(item, anno)
for item in arg)
return arg
if isinstance(anno, any_arr):
if isinstance(arg, taichi.lang._ndarray.ScalarNdarray):
anno.check_element_dim(arg, 0)
anno.check_element_shape(())
anno.check_field_dim(len(arg.shape))
return arg.dtype, len(arg.shape), (), Layout.AOS
if isinstance(arg, taichi.lang.matrix.VectorNdarray):
anno.check_element_dim(arg, 1)
anno.check_element_shape((arg.n, ))
anno.check_field_dim(len(arg.shape))
anno.check_layout(arg)
return arg.dtype, len(arg.shape) + 1, (arg.n, ), arg.layout
if isinstance(arg, taichi.lang.matrix.MatrixNdarray):
anno.check_element_dim(arg, 2)
anno.check_element_shape((arg.n, arg.m))
anno.check_field_dim(len(arg.shape))
anno.check_layout(arg)
return arg.dtype, len(arg.shape) + 2, (arg.n,
arg.m), arg.layout
# external arrays
element_dim = 0 if anno.element_dim is None else anno.element_dim
layout = Layout.AOS if anno.layout is None else anno.layout
shape = tuple(arg.shape)
if len(shape) < element_dim:
raise ValueError(
f"Invalid argument into ti.any_arr() - required element_dim={element_dim}, "
f"but the argument has only {len(shape)} dimensions")
element_shape = (
) if element_dim == 0 else shape[:
element_dim] if layout == Layout.SOA else shape[
-element_dim:]
return to_taichi_type(arg.dtype), len(shape), element_shape, layout
return type(arg).__name__,
def extract(self, args):
extracted = []
for arg, anno in zip(args, self.annotations):
extracted.append(self.extract_arg(arg, anno))
return tuple(extracted)
def lookup(self, args):
if len(args) != self.num_args:
raise TypeError(
f'{self.num_args} argument(s) needed but {len(args)} provided.'
)
key = self.extract(args)
if key not in self.mapping:
count = len(self.mapping)
self.mapping[key] = count
return self.mapping[key], key
class KernelDefError(Exception):
pass
class KernelArgError(Exception):
def __init__(self, pos, needed, provided):
message = f'Argument {pos} (type={provided}) cannot be converted into required type {needed}'
super().__init__(message)
self.pos = pos
self.needed = needed
self.provided = provided
def _get_global_vars(_func):
closure_vars = inspect.getclosurevars(_func)
return {
**closure_vars.globals,
**closure_vars.nonlocals,
**closure_vars.builtins
}
class Kernel:
counter = 0
def __init__(self, _func, is_grad, _classkernel=False):
self.func = _func
self.kernel_counter = Kernel.counter
Kernel.counter += 1
self.is_grad = is_grad
self.grad = None
self.argument_annotations = []
self.argument_names = []
self.return_type = None
self.classkernel = _classkernel
self.extract_arguments()
self.template_slot_locations = []
for i, anno in enumerate(self.argument_annotations):
if isinstance(anno, template):
self.template_slot_locations.append(i)
self.mapper = TaichiCallableTemplateMapper(
self.argument_annotations, self.template_slot_locations)
impl.get_runtime().kernels.append(self)
self.reset()
self.kernel_cpp = None
def reset(self):
self.runtime = impl.get_runtime()
if self.is_grad:
self.compiled_functions = self.runtime.compiled_grad_functions
else:
self.compiled_functions = self.runtime.compiled_functions
def extract_arguments(self):
sig = inspect.signature(self.func)
if sig.return_annotation not in (inspect._empty, None):
self.return_type = sig.return_annotation
params = sig.parameters
arg_names = params.keys()
for i, arg_name in enumerate(arg_names):
param = params[arg_name]
if param.kind == inspect.Parameter.VAR_KEYWORD:
raise KernelDefError(
'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)'
)
if param.kind == inspect.Parameter.VAR_POSITIONAL:
raise KernelDefError(
'Taichi kernels do not support variable positional parameters (i.e., *args)'
)
if param.default is not inspect.Parameter.empty:
raise KernelDefError(
'Taichi kernels do not support default values for arguments'
)
if param.kind == inspect.Parameter.KEYWORD_ONLY:
raise KernelDefError(
'Taichi kernels do not support keyword parameters')
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
raise KernelDefError(
'Taichi kernels only support "positional or keyword" parameters'
)
annotation = param.annotation
if param.annotation is inspect.Parameter.empty:
if i == 0 and self.classkernel: # The |self| parameter
annotation = template()
else:
raise KernelDefError(
'Taichi kernels parameters must be type annotated')
else:
if isinstance(annotation, (template, any_arr)):
pass
elif id(annotation) in primitive_types.type_ids:
pass
elif isinstance(annotation, sparse_matrix_builder):
pass
elif isinstance(annotation, MatrixType):
pass
else:
raise KernelDefError(
f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}'
)
self.argument_annotations.append(annotation)
self.argument_names.append(param.name)
def materialize(self, key=None, args=None, arg_features=None):
if key is None:
key = (self.func, 0)
self.runtime.materialize()
if key in self.compiled_functions:
return
grad_suffix = ""
if self.is_grad:
grad_suffix = "_grad"
kernel_name = f"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}"
_logging.trace(f"Compiling kernel {kernel_name}...")
tree, ctx = _get_tree_and_ctx(
self,
args=args,
excluded_parameters=self.template_slot_locations,
arg_features=arg_features)
if self.is_grad:
KernelSimplicityASTChecker(self.func).visit(tree)
# Do not change the name of 'taichi_ast_generator'
# The warning system needs this identifier to remove unnecessary messages
def taichi_ast_generator():
if self.runtime.inside_kernel:
raise TaichiSyntaxError(
"Kernels cannot call other kernels. I.e., nested kernels are not allowed. "
"Please check if you have direct/indirect invocation of kernels within kernels. "
"Note that some methods provided by the Taichi standard library may invoke kernels, "
"and please move their invocations to Python-scope.")
self.runtime.inside_kernel = True
self.runtime.current_kernel = self
try:
transform_tree(tree, ctx)
if not impl.get_runtime().experimental_real_function:
if self.return_type and not ctx.returned:
raise TaichiSyntaxError(
"Kernel has a return type but does not have a return statement"
)
finally:
self.runtime.inside_kernel = False
self.runtime.current_kernel = None
taichi_kernel = _ti_core.create_kernel(taichi_ast_generator,
kernel_name, self.is_grad)
self.kernel_cpp = taichi_kernel
assert key not in self.compiled_functions
self.compiled_functions[key] = self.get_function_body(taichi_kernel)
def get_function_body(self, t_kernel):
# The actual function body
def func__(*args):
assert len(args) == len(
self.argument_annotations
), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'
tmps = []
callbacks = []
has_external_arrays = False
actual_argument_slot = 0
launch_ctx = t_kernel.make_launch_context()
for i, v in enumerate(args):
needed = self.argument_annotations[i]
if isinstance(needed, template):
continue
provided = type(v)
# Note: do not use sth like "needed == f32". That would be slow.
if id(needed) in primitive_types.real_type_ids:
if not isinstance(v, (float, int)):
raise KernelArgError(i, needed.to_string(), provided)
launch_ctx.set_arg_float(actual_argument_slot, float(v))
elif id(needed) in primitive_types.integer_type_ids:
if not isinstance(v, int):
raise KernelArgError(i, needed.to_string(), provided)
launch_ctx.set_arg_int(actual_argument_slot, int(v))
elif isinstance(needed, sparse_matrix_builder):
# Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument
launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())
elif isinstance(needed, any_arr) and (
self.match_ext_arr(v)
or isinstance(v, taichi.lang._ndarray.Ndarray)):
is_ndarray = False
if isinstance(v, taichi.lang._ndarray.Ndarray):
v = v.arr
is_ndarray = True
has_external_arrays = True
ndarray_use_torch = self.runtime.prog.config.ndarray_use_torch
has_torch = util.has_pytorch()
is_numpy = isinstance(v, np.ndarray)
if is_numpy:
tmp = np.ascontiguousarray(v)
# Purpose: DO NOT GC |tmp|!
tmps.append(tmp)
launch_ctx.set_arg_external_array(
actual_argument_slot, int(tmp.ctypes.data),
tmp.nbytes, False)
elif is_ndarray and not ndarray_use_torch:
# Use ndarray's own memory allocator
tmp = v
launch_ctx.set_arg_external_array(
actual_argument_slot,
int(tmp.device_allocation_ptr()),
tmp.element_size() * tmp.nelement(), True)
else:
def get_call_back(u, v):
def call_back():
u.copy_(v)
return call_back
assert has_torch
assert isinstance(v, torch.Tensor)
tmp = v
taichi_arch = self.runtime.prog.config.arch
# Ndarray means its memory is allocated on the specified taichi arch.
# Since torch only supports CPU & CUDA, torch-base ndarray only supports
# taichi cpu/cuda backend as well.
# Note I put x64/arm64/cuda here to be more specific.
assert not is_ndarray or taichi_arch in (
_ti_core.Arch.cuda, _ti_core.Arch.x64,
_ti_core.Arch.arm64
), "Torch-based ndarray is only supported on taichi x64/arm64/cuda backend."
if str(v.device).startswith('cuda'):
# External tensor on cuda
if taichi_arch != _ti_core.Arch.cuda:
# copy data back to cpu
host_v = v.to(device='cpu', copy=True)
tmp = host_v
callbacks.append(get_call_back(v, host_v))
else:
# External tensor on cpu
if taichi_arch == _ti_core.Arch.cuda:
gpu_v = v.cuda()
tmp = gpu_v
callbacks.append(get_call_back(v, gpu_v))
launch_ctx.set_arg_external_array(
actual_argument_slot, int(tmp.data_ptr()),
tmp.element_size() * tmp.nelement(), False)
shape = v.shape
max_num_indices = _ti_core.get_max_num_indices()
assert len(
shape
) <= max_num_indices, f"External array cannot have > {max_num_indices} indices"
for ii, s in enumerate(shape):
launch_ctx.set_extra_arg_int(actual_argument_slot, ii,
s)
elif isinstance(needed, MatrixType):
if id(needed.dtype) in primitive_types.real_type_ids:
for a in range(needed.n):
for b in range(needed.m):
if not isinstance(v[a, b], (int, float)):
raise KernelArgError(
i, needed.dtype.to_string(),
type(v[a, b]))
launch_ctx.set_arg_float(
actual_argument_slot, float(v[a, b]))
actual_argument_slot += 1
elif id(needed.dtype) in primitive_types.integer_type_ids:
for a in range(needed.n):
for b in range(needed.m):
if not isinstance(v[a, b], int):
raise KernelArgError(
i, needed.dtype.to_string(),
type(v[a, b]))
launch_ctx.set_arg_int(actual_argument_slot,
int(v[a, b]))
actual_argument_slot += 1
else:
raise ValueError(
f'Matrix dtype {needed.dtype} is not integer type or real type.'
)
continue
else:
raise ValueError(
f'Argument type mismatch. Expecting {needed}, got {type(v)}.'
)
actual_argument_slot += 1
# Both the class kernels and the plain-function kernels are unified now.
# In both cases, |self.grad| is another Kernel instance that computes the
# gradient. For class kernels, args[0] is always the kernel owner.
if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:
self.runtime.target_tape.insert(self, args)
t_kernel(launch_ctx)
ret = None
ret_dt = self.return_type
has_ret = ret_dt is not None
if has_ret or (impl.current_cfg().async_mode
and has_external_arrays):
runtime_ops.sync()
if has_ret:
if id(ret_dt) in primitive_types.integer_type_ids:
ret = t_kernel.get_ret_int(0)
else:
ret = t_kernel.get_ret_float(0)
if callbacks:
for c in callbacks:
c()
return ret
return func__
@staticmethod
def match_ext_arr(v):
has_array = isinstance(v, np.ndarray)
if not has_array and util.has_pytorch():
has_array = isinstance(v, torch.Tensor)
return has_array
def ensure_compiled(self, *args):
instance_id, arg_features = self.mapper.lookup(args)
key = (self.func, instance_id)
self.materialize(key=key, args=args, arg_features=arg_features)
return key
# For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__
# Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU)
@_shell_pop_print
def __call__(self, *args, **kwargs):
if self.is_grad and impl.current_cfg().opt_level == 0:
_logging.warn(
"""opt_level = 1 is enforced to enable gradient computation."""
)
impl.current_cfg().opt_level = 1
assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'
key = self.ensure_compiled(*args)
return self.compiled_functions[key](*args)
# For a Taichi class definition like below:
#
# @ti.data_oriented
# class X:
# @ti.kernel
# def foo(self):
# ...
#
# When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is
# different from that of Python 3.7 and below. In 3.8+, it is 'class X:',
# whereas in <=3.7, it is '@ti.data_oriented'. More interestingly, if the class
# inherits, i.e. class X(object):, then in both versions, |code_context| is
# 'class X(object):'...
_KERNEL_CLASS_STACKFRAME_STMT_RES = [
re.compile(r'@(\w+\.)?data_oriented'),
re.compile(r'class '),
]
def _inside_class(level_of_class_stackframe):
try:
maybe_class_frame = sys._getframe(level_of_class_stackframe)
statement_list = inspect.getframeinfo(maybe_class_frame)[3]
        first_statement = statement_list[0].strip()
        for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES:
            if pat.match(first_statement):
return True
except:
pass
return False
def _kernel_impl(_func, level_of_class_stackframe, verbose=False):
# Can decorators determine if a function is being defined inside a class?
# https://stackoverflow.com/a/8793684/12003165
is_classkernel = _inside_class(level_of_class_stackframe + 1)
if verbose:
print(f'kernel={_func.__name__} is_classkernel={is_classkernel}')
primal = Kernel(_func, is_grad=False, _classkernel=is_classkernel)
adjoint = Kernel(_func, is_grad=True, _classkernel=is_classkernel)
# Having |primal| contains |grad| makes the tape work.
primal.grad = adjoint
if is_classkernel:
# For class kernels, their primal/adjoint callables are constructed
# when the kernel is accessed via the instance inside
# _BoundedDifferentiableMethod.
# This is because we need to bind the kernel or |grad| to the instance
# owning the kernel, which is not known until the kernel is accessed.
#
# See also: _BoundedDifferentiableMethod, data_oriented.
@functools.wraps(_func)
def wrapped(*args, **kwargs):
# If we reach here (we should never), it means the class is not decorated
# with @ti.data_oriented, otherwise getattr would have intercepted the call.
clsobj = type(args[0])
assert not hasattr(clsobj, '_data_oriented')
raise KernelDefError(
f'Please decorate class {clsobj.__name__} with @ti.data_oriented'
)
else:
@functools.wraps(_func)
def wrapped(*args, **kwargs):
return primal(*args, **kwargs)
wrapped.grad = adjoint
wrapped._is_wrapped_kernel = True
wrapped._is_classkernel = is_classkernel
wrapped._primal = primal
wrapped._adjoint = adjoint
return wrapped
def kernel(fn):
"""Marks a function as a Taichi kernel.
A Taichi kernel is a function written in Python, and gets JIT compiled by
Taichi into native CPU/GPU instructions (e.g. a series of CUDA kernels).
The top-level ``for`` loops are automatically parallelized, and distributed
to either a CPU thread pool or massively parallel GPUs.
Kernel's gradient kernel would be generated automatically by the AutoDiff system.
See also https://docs.taichi.graphics/lang/articles/basic/syntax#kernels.
Args:
fn (Callable): the Python function to be decorated
Returns:
Callable: The decorated function
Example::
>>> x = ti.field(ti.i32, shape=(4, 8))
>>>
>>> @ti.kernel
>>> def run():
>>> # Assigns all the elements of `x` in parallel.
>>> for i in x:
>>> x[i] = i
"""
return _kernel_impl(fn, level_of_class_stackframe=3)
classfunc = obsolete('@ti.classfunc', '@ti.func directly')
classkernel = obsolete('@ti.classkernel', '@ti.kernel directly')
class _BoundedDifferentiableMethod:
def __init__(self, kernel_owner, wrapped_kernel_func):
clsobj = type(kernel_owner)
if not getattr(clsobj, '_data_oriented', False):
raise KernelDefError(
f'Please decorate class {clsobj.__name__} with @ti.data_oriented'
)
self._kernel_owner = kernel_owner
self._primal = wrapped_kernel_func._primal
self._adjoint = wrapped_kernel_func._adjoint
self._is_staticmethod = wrapped_kernel_func._is_staticmethod
self.__name__ = None
def __call__(self, *args, **kwargs):
if self._is_staticmethod:
return self._primal(*args, **kwargs)
return self._primal(self._kernel_owner, *args, **kwargs)
def grad(self, *args, **kwargs):
return self._adjoint(self._kernel_owner, *args, **kwargs)
def data_oriented(cls):
"""Marks a class as Taichi compatible.
To allow for modularized code, Taichi provides this decorator so that
Taichi kernels can be defined inside a class.
See also https://docs.taichi.graphics/lang/articles/advanced/odop
Example::
>>> @ti.data_oriented
>>> class TiArray:
>>> def __init__(self, n):
>>> self.x = ti.field(ti.f32, shape=n)
>>>
>>> @ti.kernel
>>> def inc(self):
>>> for i in self.x:
>>> self.x[i] += 1.0
>>>
>>> a = TiArray(32)
>>> a.inc()
Args:
cls (Class): the class to be decorated
Returns:
The decorated class.
"""
def _getattr(self, item):
method = cls.__dict__.get(item, None)
is_property = method.__class__ == property
is_staticmethod = method.__class__ == staticmethod
if is_property:
x = method.fget
else:
x = super(cls, self).__getattribute__(item)
if hasattr(x, '_is_wrapped_kernel'):
if inspect.ismethod(x):
wrapped = x.__func__
else:
wrapped = x
wrapped._is_staticmethod = is_staticmethod
assert inspect.isfunction(wrapped)
if wrapped._is_classkernel:
ret = _BoundedDifferentiableMethod(self, wrapped)
ret.__name__ = wrapped.__name__
if is_property:
return ret()
return ret
if is_property:
return x(self)
return x
cls.__getattribute__ = _getattr
cls._data_oriented = True
return cls
|
py
|
1a5e41e0c0880e647777779f769b3fa3bbad592b
|
from flask import Flask
from .config import DevConfig
from flask_bootstrap import Bootstrap
# Initializing application
app = Flask(__name__, instance_relative_config=True)
# Setting up configuration
app.config.from_object(DevConfig)
# app.config.from_pyfile("config.py")
# Initializing Flask Extensions
bootstrap = Bootstrap(app)
from app import views
from app import error
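# Illustrative sketch (an assumption about app/views.py, which is not shown
# here): the views module imported above registers routes on this app object.
#
#     from app import app
#
#     @app.route('/')
#     def index():
#         return 'Hello, world'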
|
py
|
1a5e41e359548d945a28db8818a64a178f54c32d
|
__author__ = "Sudip Sinha"
from math import exp, sqrt
def tr_underlying(s0: float, sigma: float, t: float, n: int) -> list:
"""Generate the tree of stock prices"""
s = [[0] * (i+1) for i in range(n+1)]
s[0][0] = s0
dt = t / n
u = exp(sigma * sqrt(dt))
for i in range(1, n+1):
for j in range(i+1):
s[i][j] = s0 * u**(-i + 2*j)
return s
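# Illustrative check (an assumption, not part of the original module): with two
# steps the tree is symmetric around s0, so the middle node of the last level
# recombines back to the initial price.
if __name__ == '__main__':
    tree = tr_underlying(s0=100.0, sigma=0.2, t=1.0, n=2)
    print(tree)  # [[100.0], [~86.8, ~115.2], [~75.4, 100.0, ~132.7]]
    assert abs(tree[2][1] - 100.0) < 1e-9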
|
py
|
1a5e42db4a67a5195a9f27c245476ab7f7746213
|
#!/usr/bin/env python3
# Depth tolerance in km (for determining if top and bottom edges are
# horizontal)
DEPTH_TOL = 0.05
# Maximum ratio of distance off the plane (relative to edge length) allowed for
# the 4th point before it is considered non-co-planar and adjusted to lie on
# the plane.
OFFPLANE_TOLERANCE = 0.05
RAKEDICT = {"SS": 0.0, "NM": -90.0, "RS": 90.0, "ALL": None}
DEFAULT_MECH = "ALL"
DEFAULT_STRIKE = 0.0
DEFAULT_DIP = 90.0
DEFAULT_RAKE = 0.0
DEFAULT_WIDTH = 0.0
DEFAULT_ZTOR = 0.0
ORIGIN_REQUIRED_KEYS = [
"id",
"netid",
"network",
"lat",
"lon",
"depth",
"locstring",
"mag",
"time",
]
# Times can have either integer or floating point (preferred) seconds
TIMEFMT = "%Y-%m-%dT%H:%M:%S.%fZ"
ALT_TIMEFMT = "%Y-%m-%dT%H:%M:%SZ"
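# Illustrative helper (an assumption, not part of this module): try the
# preferred fractional-seconds format first, then fall back to whole seconds.
#
#     from datetime import datetime
#
#     def parse_origin_time(timestr):
#         for fmt in (TIMEFMT, ALT_TIMEFMT):
#             try:
#                 return datetime.strptime(timestr, fmt)
#             except ValueError:
#                 continue
#         raise ValueError('Unrecognized time string: ' + timestr)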
|
py
|
1a5e44a810c6aca2241d6f29caae9a5c20e7a2c2
|
import os
from mkdocs.config import config_options
from mkdocs.plugins import BasePlugin
from mkdocs.structure.nav import Section
from mkdocs.structure.pages import Page
from .utils import flatten
from . import markdown as md
class AddNumberPlugin(BasePlugin):
config_scheme = (
('strict_mode', config_options.Type(bool, default=False)),
('increment_pages', config_options.Type(bool, default=False)),
('increment_topnav', config_options.Type(bool, default=False)),
('excludes', config_options.Type(list, default=[])),
('includes', config_options.Type(list, default=[])),
('order', config_options.Type(int, default=1))
)
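    # Illustrative mkdocs.yml entry for these options (an assumption; the
    # plugin key name depends on how the package registers its entry point):
    #
    #   plugins:
    #     - add-number:
    #         strict_mode: false
    #         increment_topnav: true
    #         increment_pages: true
    #         excludes:
    #           - index.md
    #         order: 1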
def _check_config_params(self):
set_parameters = self.config.keys()
allowed_parameters = dict(self.config_scheme).keys()
if set_parameters != allowed_parameters:
unknown_parameters = [x for x in set_parameters if
x not in allowed_parameters]
raise AssertionError(
"Unknown parameter(s) set: %s" % ", ".join(unknown_parameters))
def on_nav(self, nav, config, files):
"""
The nav event is called after the site navigation is created and
can be used to alter the site navigation.
See:
https://www.mkdocs.org/user-guide/plugins/#on_nav
:param nav: global navigation object
:param config: global configuration object
:param files: global files collection
:return: global navigation object
"""
self._title2index = dict()
is_increment_topnav = self.config.get("increment_topnav", False)
is_increment_pages = self.config.get("increment_pages", False)
index = 0
while index < len(nav.items):
if is_increment_topnav:
nav.items[index].title = str(index + 1) + '. ' + \
nav.items[index].title
# Section(title='Linux')
# Page(title=[blank], url='/linux/epel%E6%BA%90/')
if type(nav.items[index]) == Section:
pages = nav.items[index].children
j = 0
while j < len(pages):
if is_increment_topnav and is_increment_pages:
self._title2index[pages[j].url] = \
str(index + 1) + '.' + str(j + 1) + ' '
elif is_increment_pages:
self._title2index[pages[j].url] = str(j + 1) + '. '
j += 1
index += 1
return nav
def on_files(self, files, config):
"""
The files event is called after the files collection is populated from the docs_dir.
Use this event to add, remove, or alter files in the collection.
See https://www.mkdocs.org/user-guide/plugins/#on_files
Args:
files (list): files: global files collection
config (dict): global configuration object
Returns:
files (list): global files collection
"""
self._check_config_params()
# Use navigation if set,
# (see https://www.mkdocs.org/user-guide/configuration/#nav)
# only these files will be displayed.
nav = config.get('nav', None)
if nav:
files_str = flatten(nav)
# Otherwise, take all source markdown pages
else:
files_str = [
file.src_path for file in files if file.is_documentation_page()
]
# Record excluded files from selection by user
self._excludes = self.config['excludes']
self._exclude_files = [os.path.normpath(file1) for file1 in
self._excludes if not file1.endswith('\\')
and not file1.endswith('/')]
self._exclude_dirs = [os.path.normpath(dir1) for dir1 in self._excludes
if dir1.endswith('\\')
or dir1.endswith('/')]
self._includes = self.config['includes']
self._include_files = [os.path.normpath(file1) for file1 in
self._includes if not file1.endswith('\\')
and not file1.endswith('/')]
self._include_dirs = [os.path.normpath(dir1) for dir1 in self._includes
if dir1.endswith('\\')
or dir1.endswith('/')]
self._order = self.config['order'] - 1
# Remove files excluded from selection by user
files_to_remove = [file for file in files_str if
self._is_exclude(file) and not self._is_include(
file)]
self.files_str = [file for file in files_str if
file not in files_to_remove]
return files
def on_page_markdown(self, markdown, page, config, files):
"""
The page_markdown event is called after the page's markdown is loaded
from file and can be used to alter the Markdown source text.
        The meta-data has been stripped off and is available as page.meta
at this point.
See:
https://www.mkdocs.org/user-guide/plugins/#on_page_markdown
Args:
markdown (str): Markdown source text of page as string
page (Page): mkdocs.nav.Page instance
config (dict): global configuration object
files (list): global files collection
Returns:
markdown (str): Markdown source text of page as string
"""
if self.config.get('increment_pages', False):
index_str = self._title2index.get(page.url, None)
if index_str:
page.title = index_str + page.title
if page.file.src_path not in self.files_str:
return markdown
lines = markdown.split('\n')
heading_lines = md.headings(lines)
if len(heading_lines) <= self._order:
return markdown
tmp_lines_values = list(heading_lines.values())
if self.config['strict_mode']:
tmp_lines_values, _ = self._searchN(tmp_lines_values, 1,
self._order, 1, [])
else:
tmp_lines_values = self._ascent(tmp_lines_values, [0], 0, [], 1,
self._order)
# replace the links of current page after numbering the titles
def _format_link_line(line):
line = line.replace(".", "")
new_line = ''
for s in line:
if s.isdigit() or s in (" ", "_") \
or (u'\u0041' <= s <= u'\u005a') \
or (u'\u0061' <= s <= u'\u007a'):
new_line += s.lower()
return '#' + '-'.join(new_line.split())
link_lines = [_format_link_line(v) for v in tmp_lines_values]
link_lines = {'#' + i.split("-", 1)[1]: i for i in link_lines
if i.count('-') > 0}
n = 0
while n < len(lines):
for k in link_lines.keys():
line_content = lines[n]
if line_content.count('[') >= 1 \
and line_content.count('(') >= 1:
lines[n] = line_content.replace(k, link_lines[k])
n += 1
# replace these new titles
n = 0
for key in heading_lines.keys():
lines[key] = tmp_lines_values[n]
n += 1
return '\n'.join(lines)
def _ascent(self, tmp_lines, parent_nums_head, level, args, num, startrow):
"""
Add number to every line.
e.g.
if number from h2, then the level is:
## level=1
### level=2
#### level=3
### level=2
        In a generated prefix such as "1.2.3 ", `args` holds the leading
        numbers (1 and 2) and `num` is the last number (3).
        :param tmp_lines: lines to process
        :param parent_nums_head: stack of heading depths seen before this line
        :param level: current heading level
        :param args: leading numbers combined into the section prefix
        :param num: the last number of the prefix
        :param startrow: row to start processing from
        :return: lines which have been numbered
"""
if startrow == len(tmp_lines):
return tmp_lines
nums_head = md.heading_depth(tmp_lines[startrow])
parent_nums = parent_nums_head[len(parent_nums_head) - 1]
chang_num = nums_head - parent_nums
# drop one level
if chang_num < 0:
if level != 1:
# for _ in range(-chang_num):
num = args.pop()
level -= 1
parent_nums_head.pop()
return self._ascent(tmp_lines, parent_nums_head, level, args, num,
startrow)
# sibling
if chang_num == 0:
num += 1
tmp_lines[startrow] = self._replace_line(tmp_lines[startrow],
'#' * nums_head + ' ',
'%d.' * len(args) % tuple(
args), num)
return self._ascent(tmp_lines, parent_nums_head, level, args, num,
startrow + 1)
# rise one level
level += 1
if level != 1:
# for _ in range(chang_num):
args.append(num)
parent_nums_head.append(nums_head)
num = 1
tmp_lines[startrow] = self._replace_line(tmp_lines[startrow],
'#' * nums_head + ' ',
'%d.' * len(args) % tuple(
args), num)
return self._ascent(tmp_lines, parent_nums_head, level, args, num,
startrow + 1)
def _replace_line(self, tmp_line, substr, prenum_str, nextnum):
re_str = (substr + "%d. " % nextnum) if (prenum_str == '') else (
substr + "%s%d " % (prenum_str, nextnum))
tmp_line = tmp_line.replace(substr, re_str)
return tmp_line
def _searchN(self, tmp_lines, num, start_row, level, args):
while True:
tmp_lines, start_row, re = self._replace(tmp_lines,
'#' * level + ' ',
'.'.join(('%d.' * (
level - 1)).split()) % tuple(
args),
num, start_row)
if not re:
break
next_num = 1
if level != 6:
args.append(num)
re_lines, start_row = self._searchN(tmp_lines, next_num,
start_row, level + 1, args)
args.pop()
num += 1
return tmp_lines, start_row
def _replace(self, tmp_lines, substr, prenum_str, nextnum, start_row):
if start_row == len(tmp_lines) or not tmp_lines[start_row].startswith(
substr):
return tmp_lines, start_row, False
re_str = (substr + "%d. " % nextnum) if (prenum_str == '') else (
substr + "%s%d " % (prenum_str, nextnum))
tmp_lines[start_row] = tmp_lines[start_row].replace(substr, re_str)
return tmp_lines, start_row + 1, True
def _is_exclude(self, file):
if len(self._excludes) == 0:
return False
url = os.path.normpath(file)
if url in self._exclude_files or '*' in self._exclude_files:
return True
for dir1 in self._exclude_dirs:
if url.find(dir1) != -1:
return True
return False
def _is_include(self, file):
if len(self._includes) == 0:
return False
url = os.path.normpath(file)
if url in self._include_files:
return True
for dir1 in self._include_dirs:
if url.find(dir1) != -1:
return True
return False
|
py
|
1a5e44cfdd71dc445a1fc4a5a78746adc26618d1
|
from ..element import Element
from dearpygui.core import add_tab, end
__all__ = [
'Tab'
]
class Tab(Element):
def __init__(self, name: str, closable: bool = False, **config):
super().__init__(name, **config)
self.closable = closable
def place(self, same_line=False):
raise RuntimeError("don't use place() with Tab, use the class with a WITH block instead: ex: with Tab('test'): ...")
def _add(self):
add_tab(self.name, closable=self.closable, **self.default_config)
def start(self):
self._add()
def end(self):
end()
def __enter__(self):
self._add()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
end()
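# Illustrative usage (an assumption; a parent tab bar / window is expected to
# be managed elsewhere in this wrapper library):
#
#     with Tab('settings', closable=True):
#         ...  # child elements added here land between add_tab() and end()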
|
py
|
1a5e4575b6ae45147111be62ba108d4ca85eed90
|
import torch
import torch.nn.functional as F
import timm
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from ace import attack_confidence_estimation
def attack_example(file_name, true_label, transform, normalization):
image = Image.open(f'./images/{file_name}.jpg').convert('RGB')
input = transform(image).unsqueeze(0).cuda() # transform and add batch dimension
with torch.no_grad():
output = model(normalization(input))
orig_prediction = torch.nn.functional.softmax(output, dim=1).max(1)
print(f'Ground truth label is {true_label}. The predicted label is {orig_prediction[1].item()} with a confidence of {orig_prediction[0].item()}')
adversarial_example = attack_confidence_estimation(model=model, input=input, label=torch.tensor(true_label), normalization=normalization)
with torch.no_grad():
attacked_prediction = torch.nn.functional.softmax(model(normalization(adversarial_example)), dim=1).max(1)
print(f'After using ACE, the predicted label is still {attacked_prediction[1].item()} with a confidence of {attacked_prediction[0].item()}')
if __name__ == '__main__':
model = timm.create_model('efficientnet_b0', pretrained=True).cuda()
model.eval()
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
normalization = transform.transforms.pop(3)
# A correct prediction example
print('=============== A correct prediction example: ===============')
attack_example(file_name='tank', true_label=847, transform=transform, normalization=normalization)
# An incorrect prediction example
print('=============== An incorrect prediction example: ===============')
attack_example(file_name='binoculars', true_label=447, transform=transform, normalization=normalization)
|
py
|
1a5e45cfe00884bab122c69c29bedbcd76878e92
|
# import streamlit as st
# import pandas as pd
# import os
# from dotenv import load_dotenv
# from alpha_vantage.timeseries import TimeSeries
# import pandas_bokeh
# from bokeh.plotting import figure, show
#
# ################################ GET DATA ######################################
#
# project_folder = os.path.expanduser('~/code/GitHub/streamlit-framework')
# load_dotenv(os.path.join(project_folder,'.env'))
# key = os.getenv("API_KEY")
# ts = TimeSeries(key, output_format='pandas')
#
# ############################### STREAMLIT ######################################
# header = st.beta_container()
# dataset = st.beta_container()
# features = st.beta_container()
# test = st.sidebar.beta_container()
#
# with header:
# st.title('Stock Ticker Milestone Project')
# st.text('Stock data acquired via the Alpha Vantage API')
# st.text('App deployed using Heroku')
#
# with test:
# st.title('Select Features')
# st.text("Type the name of a stock ticker")
# ticker = st.text_input("(i.e. 'AARP', 'AMZN', 'MSFT', 'GOOG', etc.)", 'GOOG')
# data, meta = ts.get_daily_adjusted(ticker, outputsize='full')
#
# display1 = ('January','February','March','April','May','June','July','August','September','October','November','December')
# options1 = list(range(len(display1)))
# YEAR = st.selectbox('YEAR:', ['2021','2020','2019','2018','2017','2016','2015'])
# monthh = st.selectbox('MONTH:', options1, format_func=lambda x: display1[x])
# MONTH = monthh+1
#
# cols = ['open','high','low','close','adj_close','volume','divedend','split_coeff']
# data.columns = cols
# data['day'] = data.index.date
# data['time'] = data.index.time
#
# US_daily_market = data
# US_daily_market.index = pd.to_datetime(US_daily_market.index, format='%Y-%m-%d')
#
# US_daily_market['month'] = US_daily_market.index.month
# US_daily_market['year'] = US_daily_market.index.year
# US_daily_market = US_daily_market.reset_index()
# US_daily_market = US_daily_market.loc[(US_daily_market['month'] == int(MONTH)) & (US_daily_market['year'] == int(YEAR))]
#
# date = US_daily_market['day']
# x = US_daily_market['low']
# y = US_daily_market['high']
#
# with dataset:
# st.header("Stock Market Data For: '{}'".format(ticker))
# st.text('Data visualization constructed with Bokeh, from hourly intraday stock data')
# g1_col, g2_col = st.beta_columns(2)
# p = figure(title="The Highs and Lows of: '{}'".format(ticker), x_axis_type='datetime', x_axis_label='Date', y_axis_label='Value (USD)')
# p.line(date, y, legend_label="Max / day (USD)", line_width=2)
# p.line(date, x, color= "red", legend_label="Min / day (USD)", line_width=2)
# st.bokeh_chart(p, use_container_width=True)
#
#
# ################################################################################
# # useful links:
# #
# # bokeh:
# # https://www.youtube.com/watch?v=tnOgrlqA0Bc
# # https://pythonforundergradengineers.com/streamlit-app-with-bokeh.html
# # alpha vantage:
# # https://www.youtube.com/watch?v=WJ2t_LYb__0
# # streamlit:
# # four part series, used first 2/3:
# # https://www.youtube.com/watch?v=CSv2TBA9_2E
|
py
|
1a5e483528c196a8de16918c8d57db4f2f3d7202
|
#! /usr/bin/python3
from __future__ import print_function
import sys
import time
import array
import os
#sys.path.append("shell")
import swapforth
class TetheredJ1a(swapforth.TetheredTarget):
cellsize = 2
def open_ser(self, port, speed):
try:
import serial
except:
print("This tool needs PySerial, but it was not found")
sys.exit(1)
self.ser = serial.Serial(port, 115200, timeout=None, rtscts=0)
sys.stdout.write("115200...ok")
print("")
def reset(self, fullreset = True):
ser = self.ser
'''
ser.setDTR(1)
if fullreset:
ser.setRTS(1)
ser.setRTS(0)
ser.setDTR(0)
'''
def waitcr():
while ser.read(1) != chr(10):
pass
#waitcr()
ser.write(b'\r')
#waitcr()
for c in ' 1 tth !':
ser.write(c.encode('utf-8'))
ser.flush()
time.sleep(0.001)
ser.flushInput()
        # print(repr(ser.read(ser.inWaiting())))
ser.write(b'\r')
while 1:
c = ser.read(1)
# print(repr(c))
if c == b'\x1e':
break
def boot(self, bootfile = None):
sys.stdout.write('Contacting... ')
print("")
self.reset()
print('established')
def interrupt(self):
self.reset(False)
def serialize(self):
l = self.command_response('0 here dump')
lines = l.strip().replace('\r', '').split('\n')
s = []
for l in lines:
l = l.split()
s += [int(b, 16) for b in l[1:17]]
        # array.tostring() was removed in Python 3.9; tobytes() is the equivalent
        s = array.array('B', s).tobytes().ljust(8192, b'\xff')
return array.array('H', s)
if __name__ == '__main__':
swapforth.main(TetheredJ1a)
|
py
|
1a5e484d1fe2ee32d4184f93a4c80f30477c0479
|
from numpy import repeat
from numpy import reshape
from numpy import sum
from numpy import where
from numpy import zeros
from gwlfe.Input.LandUse.NLU import NLU
from gwlfe.Input.WaterBudget.Water import Water
from gwlfe.Input.WaterBudget.Water import Water_f
from gwlfe.Memoization import memoize
from gwlfe.MultiUse_Fxns.Runoff.Qrun import Qrun
from gwlfe.MultiUse_Fxns.Runoff.Qrun import Qrun_f
from gwlfe.MultiUse_Fxns.Runoff.Retention import Retention
from gwlfe.MultiUse_Fxns.Runoff.Retention import Retention_f
@memoize
def RurQRunoff(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0):
result = zeros((NYrs, 16, 12))
nlu = NLU(NRur, NUrb)
water = Water(NYrs, DaysMonth, InitSnow_0, Temp, Prec)
retention = Retention(NYrs, DaysMonth, Temp, Prec, InitSnow_0, AntMoist_0, NRur, NUrb, CN, Grow_0)
qrun = Qrun(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, CN, AntMoist_0, Grow_0)
for Y in range(NYrs):
for i in range(12):
for l in range(nlu):
result[Y, l, i] = 0.0
for Y in range(NYrs):
for i in range(12):
for j in range(DaysMonth[Y][i]):
if Temp[Y][i][j] > 0 and water[Y][i][j] > 0.01:
for l in range(NRur):
if CN[l] > 0:
if water[Y][i][j] >= 0.2 * retention[Y][i][j][l]:
result[Y][l][i] += qrun[Y][i][j][l]
else:
pass
else:
pass
else:
pass
return result
@memoize
def RurQRunoff_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec, AntMoist_0, NRur, NUrb, CN, Grow_0):
water = reshape(repeat(Water_f(NYrs, DaysMonth, InitSnow_0, Temp, Prec), repeats=NRur, axis=2),
(NYrs, 12, 31, NRur))
retention = Retention_f(NYrs, DaysMonth, Temp, Prec, InitSnow_0, AntMoist_0, NRur, NUrb, CN, Grow_0)[:, :, :, :NRur]
qrun = Qrun_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, CN, AntMoist_0, Grow_0)[:, :, :, :NRur]
return sum(where((water >= 0.2 * retention) & (CN[:NRur] > 0), qrun, 0), axis=2)
|
py
|
1a5e4adee1a7738133d79d777152b3d0463a2c5e
|
import numpy as np

# y_train and y_test are assumed to come from an earlier train/test split.
print(np.bincount(y_train))
print(np.bincount(y_test))
# yes, they are more or less equal
|
py
|
1a5e4b07d4ffc4a3546f5663c86e9744e85608d0
|
"""
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# bookstore_project/
APPS_DIR = ROOT_DIR / "bookstore_project"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
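# Illustrative .env contents this block would read (an assumption; values are
# placeholders, and the variable names match the env lookups used elsewhere in
# this settings module):
#
#   DJANGO_DEBUG=False
#   DATABASE_URL=postgres://user:password@localhost:5432/bookstore
#   DJANGO_ACCOUNT_ALLOW_REGISTRATION=True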
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "Africa/Nairobi"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
]
LOCAL_APPS = [
"bookstore_project.users.apps.UsersConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "bookstore_project.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"bookstore_project.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Daniel Ndegwa""", "[email protected]")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "bookstore_project.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "bookstore_project.users.adapters.SocialAccountAdapter"
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ["compressor"]
STATICFILES_FINDERS += ["compressor.finders.CompressorFinder"]
# Your stuff...
# ------------------------------------------------------------------------------
|
py
|
1a5e4b28355674010ba8f92b176d5cabca3e1a8d
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
from test_softmax_op import stable_softmax
from test_softmax_with_cross_entropy_op import cross_entropy
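# Note: the local stable_softmax defined below shadows the version imported above.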
def stable_softmax(x):
shiftx = (x - np.max(x)).clip(-64.)
exps = np.exp(shiftx)
return exps / np.sum(exps)
def log_softmax(x, axis=-1):
softmax_out = np.apply_along_axis(stable_softmax, axis, x)
return np.log(softmax_out)
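# --- Added illustration (not part of the original PaddlePaddle test file) ---
# Sanity check for the two numpy helpers above: exponentiating the log-softmax
# must give probabilities that sum to 1 along the reduced axis.
_demo_logits = np.array([[2.0, 1.0, 0.1], [0.5, 0.5, 0.5]])
assert np.allclose(np.exp(log_softmax(_demo_logits)).sum(axis=-1), 1.0)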
def cross_entropy_loss_1d(input,
label,
weight=None,
reduction='mean',
ignore_index=-100):
log_softmax_out = log_softmax(input)
input_shape = log_softmax_out.shape
N = input_shape[0]
C = input_shape[1]
out = np.zeros_like(label).astype(np.float64)
total_weight = 0
###1. compute softmax cross_entropy (with weight)
    ### Note: only hard labels are supported.
for i in range(N):
cur_target = label[i]
if cur_target == ignore_index:
out[i] = 0
continue
cur_weight = weight[cur_target] if weight is not None else 1
total_weight += cur_weight
out[i] = -log_softmax_out[i][cur_target] * cur_weight
###2. deal with reduction
if reduction == 'sum':
return np.sum(out), np.array([total_weight]).astype('float64')
elif reduction == 'mean':
return out.sum() / total_weight, np.array(
[total_weight]).astype('float64')
elif reduction == 'none':
return out
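# --- Added illustration (not part of the original test file) ---
# Minimal use of the 1D reference above: one 3-class sample whose true class is
# 0; with the default reduction='mean' it returns (loss, total_weight).
_demo_loss_1d, _demo_w_1d = cross_entropy_loss_1d(
    np.array([[2.0, 1.0, 0.1]]), np.array([0]))
assert _demo_loss_1d > 0.0 and _demo_w_1d[0] == 1.0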
def cross_entropy_loss_2d(input,
label,
weight=None,
reduction='mean',
ignore_index=-100):
log_softmax_out = log_softmax(input)
input_shape = log_softmax_out.shape
N = input_shape[0]
H = input_shape[1]
W = input_shape[2]
out = np.zeros_like(label).astype(np.float64)
total_weight = 0
for i in range(N):
for h in range(H):
for w in range(W):
cur_target = label[i][h][w]
if cur_target == ignore_index:
out[i][h][w] = 0
continue
cur_weight = weight[cur_target] if weight is not None else 1
total_weight += cur_weight
out[i][h][w] = -log_softmax_out[i][h][w][
cur_target] * cur_weight
if reduction == 'sum':
return np.sum(out), np.array([total_weight]).astype('float64')
elif reduction == 'mean':
return out.sum() / total_weight, np.array(
[total_weight]).astype('float64')
elif reduction == 'none':
return out
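# --- Added illustration (not part of the original test file) ---
# The 2D reference above expects NHWC logits and NHW integer labels; with
# reduction='none' it returns the per-pixel loss map.
_demo_map_2d = cross_entropy_loss_2d(
    np.random.uniform(0.1, 1.0, (1, 2, 2, 3)),
    np.zeros((1, 2, 2), dtype=np.int64),
    reduction='none')
assert _demo_map_2d.shape == (1, 2, 2) and np.all(_demo_map_2d > 0.0)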
def cross_entropy_soft(softmax,
label,
axis,
N,
weight=None,
reduction='mean',
ignore_index=-100):
#1.loss
loss = cross_entropy(
softmax,
label,
True, #soft_label,
axis,
ignore_index)
if weight is None and reduction == 'none':
return loss
#2.weight
weighted_loss = loss
total_weight = N #for weight is None
if weight is not None:
weighted_loss = np.zeros_like(loss).astype(np.float64)
total_weight = 0
for i in range(N):
cur_soft_label = label[i]
cur_weight = np.dot(weight, cur_soft_label)
total_weight += cur_weight
weighted_loss[i] = loss[i] * cur_weight
#3.reduce
if reduction == 'none':
return weighted_loss
elif reduction == 'mean':
weighted_loss_sum = np.sum(weighted_loss)
weighted_loss_mean = weighted_loss_sum / total_weight
return weighted_loss_mean
else:
weighted_loss_sum = np.sum(weighted_loss)
return weighted_loss_sum
def cross_entropy_soft_2d(softmax,
label,
axis,
N,
H,
W,
weight=None,
reduction='mean',
ignore_index=-100):
#1.loss
loss = cross_entropy(
softmax,
label,
True, #soft_label,
axis,
ignore_index)
if weight is None and reduction == 'none':
return loss
#2.weight
weighted_loss = loss
total_weight = N #for weight is None
if weight is not None:
weighted_loss = np.zeros_like(loss).astype(np.float64)
total_weight = 0
for i in range(N):
for h in range(H):
for w in range(W):
cur_soft_label = label[i][h][w]
cur_weight = np.dot(weight, cur_soft_label)
total_weight += cur_weight
weighted_loss[i][h][w] = loss[i][h][w] * cur_weight
#3.reduce
if reduction == 'none':
return weighted_loss
elif reduction == 'mean':
weighted_loss_sum = np.sum(weighted_loss)
weighted_loss_mean = weighted_loss_sum / total_weight
return weighted_loss_mean
else:
weighted_loss_sum = np.sum(weighted_loss)
return weighted_loss_sum
class CrossEntropyLoss(unittest.TestCase):
###test for deprecated softmax_with_cross_entropy
def test_softmax_with_cross_entropy(self):
self.numeric_stable_mode = False
self.soft_label = True
self.dtype = np.float64
self.axis = -1
self.ignore_index = -100 #should not be changed
self.N = 4
self.C = 3
self.shape = [self.N, self.C]
self.use_softmax = True
self.reduction = 'none'
self.weight = None
self.logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits)
self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)
self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True)
expected = cross_entropy_soft(
softmax,
self.labels,
self.axis,
self.N,
weight=self.weight,
reduction=self.reduction,
ignore_index=self.ignore_index)
paddle.set_device("cpu")
paddle.disable_static()
paddle_loss_swce = paddle.nn.functional.softmax_with_cross_entropy(
fluid.dygraph.to_variable(self.logits),
fluid.dygraph.to_variable(self.labels),
soft_label=True,
axis=self.axis)
paddle_loss_ce = paddle.nn.functional.cross_entropy(
fluid.dygraph.to_variable(self.logits),
fluid.dygraph.to_variable(self.labels),
soft_label=True,
axis=self.axis,
weight=fluid.dygraph.to_variable(self.weight)
if self.weight is not None else None,
reduction=self.reduction)
self.assertTrue(np.allclose(paddle_loss_swce.numpy(), expected))
self.assertTrue(np.allclose(paddle_loss_ce.numpy(), expected))
###soft_label test start
###soft_label test 1
def test_cross_entropy_loss_soft_1d(self):
self.numeric_stable_mode = False
self.soft_label = True
self.dtype = np.float64
self.axis = -1
self.ignore_index = -100 #should not be changed
self.N = 4
self.C = 3
self.shape = [self.N, self.C]
self.use_softmax = True
self.reduction = 'none'
self.weight = None
self.logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits)
self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)
self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True)
expected = cross_entropy_soft(
softmax,
self.labels,
self.axis,
self.N,
weight=self.weight,
reduction=self.reduction,
ignore_index=self.ignore_index)
paddle.set_device("cpu")
#2. dygraph
paddle.disable_static()
paddle_loss_none_weight = paddle.nn.functional.cross_entropy(
fluid.dygraph.to_variable(self.logits),
fluid.dygraph.to_variable(self.labels),
soft_label=True,
axis=self.axis,
weight=fluid.dygraph.to_variable(self.weight)
if self.weight is not None else None,
reduction=self.reduction)
dy_ret_value = paddle_loss_none_weight.numpy()
#3. static
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[self.N, self.C], dtype='float64')
label = fluid.data(
name='label', shape=[self.N, self.C], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction=self.reduction, soft_label=True)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': self.logits,
'label': self.labels,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
paddle.disable_static()
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
###soft_label test 2
def test_cross_entropy_loss_soft_1d_weight(self):
self.numeric_stable_mode = False
self.soft_label = True
self.dtype = np.float64
self.axis = -1
self.ignore_index = -100 #should not be changed
self.N = 4
self.C = 3
self.shape = [self.N, self.C]
self.use_softmax = True
self.reduction = 'none'
self.weight = np.random.uniform(0.1, 1.0, self.C).astype(self.dtype)
self.logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits)
if self.soft_label:
self.labels = np.random.uniform(0.1, 1.0,
self.shape).astype(self.dtype)
self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True)
else:
axis_dim = self.shape[self.axis]
self.shape[self.axis] = 1
self.labels = np.random.randint(
0, axis_dim, self.shape, dtype="int64")
#1. numpy
expected = cross_entropy_soft(
softmax,
self.labels,
self.axis,
self.N,
weight=self.weight,
reduction=self.reduction,
ignore_index=self.ignore_index)
paddle.set_device("cpu")
#2. dygraph
paddle.disable_static()
paddle_loss_none_weight = paddle.nn.functional.cross_entropy(
fluid.dygraph.to_variable(self.logits),
fluid.dygraph.to_variable(self.labels),
soft_label=True,
axis=self.axis,
weight=fluid.dygraph.to_variable(self.weight),
reduction=self.reduction)
dy_ret_value = paddle_loss_none_weight.numpy()
# 3.static
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[self.N, self.C], dtype='float64')
label = fluid.data(
name='label', shape=[self.N, self.C], dtype='float64')
weight = fluid.data(name='weight', shape=[self.C], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction=self.reduction, soft_label=True)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': self.logits,
'label': self.labels,
"weight": self.weight
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
paddle.disable_static()
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
###soft_label test 3
def test_cross_entropy_loss_soft_1d_mean(self):
self.numeric_stable_mode = False
self.soft_label = True
self.dtype = np.float64
self.axis = -1
self.ignore_index = -100 #should not be changed
self.N = 4
self.C = 3
self.shape = [self.N, self.C]
self.use_softmax = True
self.reduction = 'mean'
self.weight = None
self.logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits)
self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)
self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True)
#1. numpy
expected = cross_entropy_soft(
softmax,
self.labels,
self.axis,
self.N,
weight=self.weight,
reduction=self.reduction,
ignore_index=self.ignore_index)
paddle.set_device("cpu")
#2 dygraph
paddle.disable_static()
paddle_loss_mean = paddle.nn.functional.cross_entropy(
fluid.dygraph.to_variable(self.logits),
fluid.dygraph.to_variable(self.labels),
soft_label=True,
axis=self.axis,
weight=self.weight,
reduction=self.reduction)
dy_ret_value = paddle_loss_mean.numpy()
#3. static
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[self.N, self.C], dtype='float64')
label = fluid.data(
name='label', shape=[self.N, self.C], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction=self.reduction, soft_label=True)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(
prog,
feed={'input': self.logits,
'label': self.labels},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
paddle.disable_static()
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
###soft_label test 4
def test_cross_entropy_loss_soft_1d_weight_mean(self):
self.numeric_stable_mode = False
self.soft_label = True
self.dtype = np.float64
self.axis = -1
self.ignore_index = -100 #should not be changed
self.N = 4
self.C = 3
self.shape = [self.N, self.C]
self.use_softmax = True
self.reduction = 'mean'
self.weight = np.random.uniform(0.1, 1.0, self.C).astype(self.dtype)
self.logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits)
self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)
self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True)
#1. numpy
expected = cross_entropy_soft(
softmax,
self.labels,
self.axis,
self.N,
weight=self.weight,
reduction=self.reduction,
ignore_index=self.ignore_index)
paddle.set_device("cpu")
paddle.disable_static()
#2. dygraph
paddle_loss_none_weight = paddle.nn.functional.cross_entropy(
fluid.dygraph.to_variable(self.logits),
fluid.dygraph.to_variable(self.labels),
soft_label=True,
axis=self.axis,
weight=fluid.dygraph.to_variable(self.weight),
reduction=self.reduction)
dy_ret_value = paddle_loss_none_weight.numpy()
#3. static
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[self.N, self.C], dtype='float64')
label = fluid.data(
name='label', shape=[self.N, self.C], dtype='float64')
weight = fluid.data(name='weight', shape=[self.C], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction=self.reduction, soft_label=True)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': self.logits,
'label': self.labels,
"weight": self.weight
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
paddle.disable_static()
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
###soft_label test 5
def test_cross_entropy_loss_soft_2d(self):
self.numeric_stable_mode = False
self.soft_label = True
self.dtype = np.float64
self.axis = -1
self.ignore_index = -100 #should not be changed
self.N = 3
self.H = 2
self.W = 2
self.C = 5
self.shape = [self.N, self.H, self.W, self.C]
self.use_softmax = True
self.reduction = 'none'
self.weight = None
self.logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits)
self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)
self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True)
#1. numpy
expected = cross_entropy_soft_2d(
softmax,
self.labels,
self.axis,
self.N,
self.H,
self.W,
weight=self.weight,
reduction=self.reduction,
ignore_index=self.ignore_index)
paddle.set_device("cpu")
paddle.disable_static()
#2. dygraph
paddle_loss_none_weight = paddle.nn.functional.cross_entropy(
fluid.dygraph.to_variable(self.logits),
fluid.dygraph.to_variable(self.labels),
soft_label=True,
axis=self.axis,
weight=fluid.dygraph.to_variable(self.weight)
if self.weight is not None else None,
reduction=self.reduction)
dy_ret_value = paddle_loss_none_weight.numpy()
#3. static
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input',
shape=[self.N, self.H, self.W, self.C],
dtype='float64')
label = fluid.data(
name='label',
shape=[self.N, self.H, self.W, self.C],
dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction=self.reduction, soft_label=True)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': self.logits,
'label': self.labels,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
paddle.disable_static()
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
###soft_label test 6
def test_cross_entropy_loss_soft_2d_weight_mean(self):
self.numeric_stable_mode = False
self.soft_label = True
self.dtype = np.float64
self.axis = -1
self.ignore_index = -100 #should not be changed
self.N = 3
self.H = 2
self.W = 2
self.C = 5
self.shape = [self.N, self.H, self.W, self.C]
self.use_softmax = True
self.reduction = 'mean'
self.weight = np.random.uniform(0.1, 1.0, self.C).astype(self.dtype)
self.logits = getattr(
self, "logits",
np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype))
softmax = np.apply_along_axis(stable_softmax, self.axis, self.logits)
self.labels = np.random.uniform(0.1, 1.0, self.shape).astype(self.dtype)
self.labels /= np.sum(self.labels, axis=self.axis, keepdims=True)
#1. numpy
expected = cross_entropy_soft_2d(
softmax,
self.labels,
self.axis,
self.N,
self.H,
self.W,
weight=self.weight,
reduction=self.reduction,
ignore_index=self.ignore_index)
paddle.set_device("cpu")
paddle.disable_static()
#2. dygraph
paddle_loss_none_weight = paddle.nn.functional.cross_entropy(
fluid.dygraph.to_variable(self.logits),
fluid.dygraph.to_variable(self.labels),
soft_label=True,
axis=self.axis,
weight=fluid.dygraph.to_variable(self.weight),
reduction=self.reduction)
dy_ret_value = paddle_loss_none_weight.numpy()
#3. static
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input',
shape=[self.N, self.H, self.W, self.C],
dtype='float64')
label = fluid.data(
name='label',
shape=[self.N, self.H, self.W, self.C],
dtype='float64')
weight = fluid.data(name='weight', shape=[self.C], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction=self.reduction, soft_label=True)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': self.logits,
'label': self.labels,
"weight": self.weight
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
paddle.disable_static()
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
###soft_label test end
def test_cross_entropy_loss_1d_with_mean_ignore(self):
input_np = np.random.random([2, 4]).astype(np.float64)
label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[2, 4], dtype='float64')
label = fluid.data(name='label', shape=[2], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(ignore_index=0)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
        expected = cross_entropy_loss_1d(input_np, label_np)[0]  # recomputed below with ignore_index=0
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
axis=1, ignore_index=0)
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(input_np, label_np, ignore_index=0)[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_with_weight_mean_ignore(self):
N = 100
C = 200
input_np = np.random.random([N, C]).astype(np.float64)
label_np = np.random.randint(0, C, size=(N)).astype(np.int64)
weight_np = np.random.random([C]).astype(np.float64)
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[N, C], dtype='float64')
label = fluid.data(name='label', shape=[N], dtype='int64')
weight = fluid.data(
name='weight', shape=[C],
dtype='float64') #weight for each class
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, ignore_index=0)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np),
axis=1,
ignore_index=0)
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np, ignore_index=0)[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_with_weight_mean(self):
input_np = np.random.random([2, 4]).astype(np.float64)
label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
weight_np = np.random.random([4]).astype(np.float64) #shape:C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[2, 4], dtype='float64')
label = fluid.data(name='label', shape=[2], dtype='int64')
weight = fluid.data(
name='weight', shape=[4],
dtype='float64') #weight for each class
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np)[0]
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np), axis=1)
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np)[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_with_weight_sum(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1
weight_np = np.random.random([200]).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
weight = fluid.data(name='weight', shape=[200], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction='sum')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np), reduction='sum')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np, reduction='sum')[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_with_weight_none(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1
weight_np = np.random.random([200]).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
weight = fluid.data(name='weight', shape=[200], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction='none')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
static_ret = np.squeeze(static_ret)
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np), reduction='none')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
dy_ret_value = np.squeeze(dy_ret_value)
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np, reduction='none')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_with_weight_none_func(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N
weight_np = np.random.random([200]).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
weight = fluid.data(name='weight', shape=[200], dtype='float64')
ret = paddle.nn.functional.cross_entropy(
input, label, weight=weight, reduction='none')
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
static_ret = np.squeeze(static_ret)
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
dy_ret = paddle.nn.functional.cross_entropy(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np),
weight=fluid.dygraph.to_variable(weight_np),
reduction='none')
dy_ret_value = dy_ret.numpy()
dy_ret_value = np.squeeze(dy_ret_value)
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np, reduction='none')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_mean(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1
weight_np = np.random.random([200]).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
            weight = fluid.data(
                name='weight', shape=[100],
                dtype='float64')  # declared but not fed or used in this test
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss()
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={'input': input_np,
'label': label_np},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss()
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(input_np, label_np)[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_sum(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='sum')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={'input': input_np,
'label': label_np},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='sum')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(input_np, label_np, reduction='sum')[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_none(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='none')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={'input': input_np,
'label': label_np},
fetch_list=[ret])
static_ret = np.squeeze(static_ret)
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='none')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
dy_ret_value = np.squeeze(dy_ret_value)
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(input_np, label_np, reduction='none')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_2d_with_weight_none(self):
input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64) #NHWC
label_np = np.random.randint(
            0, 3, size=(2, 2, 2)).astype(np.int64) #NHW
weight_np = np.random.random(size=(3, )).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[2, 2, 2, 3], dtype='float64')
label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64')
weight = fluid.data(name='weight', shape=[3], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction='none')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
static_ret = np.squeeze(static_ret)
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np), reduction='none')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
dy_ret_value = np.squeeze(dy_ret_value)
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_2d(
input_np, label_np, weight=weight_np, reduction='none')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_2d_with_weight_mean(self):
input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64) #NHWC
label_np = np.random.randint(
0, 3, size=(2, 2, 2)).astype(np.int64) #NHW
weight_np = np.random.random(size=(3, )).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[2, 2, 2, 3], dtype='float64')
label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64')
weight = fluid.data(name='weight', shape=[3], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction='mean')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np), reduction='mean')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_2d(
input_np, label_np, weight=weight_np, reduction='mean')[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_2d_with_weight_sum(self):
input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64) #NHWC
label_np = np.random.randint(
0, 3, size=(2, 2, 2)).astype(np.int64) #NHW
weight_np = np.random.random(size=(3, )).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[2, 2, 2, 3], dtype='float64')
label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64')
weight = fluid.data(name='weight', shape=[3], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction='sum')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np), reduction='sum')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_2d(
input_np, label_np, weight=weight_np, reduction='sum')[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_2d_none(self):
input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64) #NHWC
label_np = np.random.randint(
0, 3, size=(2, 2, 2)).astype(np.int64) #NHW
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[2, 2, 2, 3], dtype='float64')
label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='none')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
static_ret = np.squeeze(static_ret)
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='none')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
dy_ret_value = np.squeeze(dy_ret_value)
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_2d(input_np, label_np, reduction='none')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_2d_mean(self):
input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64) #NHWC
label_np = np.random.randint(
0, 3, size=(2, 2, 2)).astype(np.int64) #NHW
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[2, 2, 2, 3], dtype='float64')
label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='mean')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='mean')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_2d(
input_np, label_np, reduction='mean')[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_2d_sum(self):
input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64) #NHWC
label_np = np.random.randint(
0, 3, size=(2, 2, 2)).astype(np.int64) #NHW
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[2, 2, 2, 3], dtype='float64')
label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='sum')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='sum')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_2d(input_np, label_np, reduction='sum')[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
if __name__ == "__main__":
unittest.main()
|
py
|
1a5e4d950e53fbf3bfd355326d9b1baa32e1f3cb
|
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
__all__ = ["ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"]
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class ResNet():
def __init__(self, layers=50):
self.params = train_parameters
self.layers = layers
def net(self, input, args, class_dim=1000):
layers = self.layers
supported_layers = [18, 34, 50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 18:
depth = [2, 2, 2, 2]
elif layers == 34 or layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
num_filters = [64, 128, 256, 512]
conv = self.conv_bn_layer(
input=input, num_filters=64, filter_size=7, stride=2, act='relu',name="conv1", data_format=args.data_format)
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max',
data_format=args.data_format)
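        # ResNet-50/101/152 stack bottleneck blocks below; ResNet-18/34 use basic blocks.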
if layers >= 50:
for block in range(len(depth)):
for i in range(depth[block]):
if layers in [101, 152] and block == 2:
if i == 0:
conv_name="res"+str(block+2)+"a"
else:
conv_name="res"+str(block+2)+"b"+str(i)
else:
conv_name="res"+str(block+2)+chr(97+i)
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1, name=conv_name, data_format=args.data_format)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True, data_format=args.data_format)
if args.data_format == "NCHW":
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
else:
stdv = 1.0 / math.sqrt(pool.shape[-1] * 1.0)
out = fluid.layers.fc(input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)))
else:
for block in range(len(depth)):
for i in range(depth[block]):
conv_name="res"+str(block+2)+chr(97+i)
conv = self.basic_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
is_first=block==i==0,
name=conv_name,
data_format=args.data_format)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True, data_format=args.data_format)
if args.data_format == "NCHW":
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
else:
stdv = 1.0 / math.sqrt(pool.shape[-1] * 1.0)
out = fluid.layers.fc(input=pool,
size=class_dim,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv)))
return out
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None,
data_format='NCHW'):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False,
name=name + '.conv2d.output.1',
data_format=data_format)
if name == "conv1":
bn_name = "batch_norm_" + name
else:
bn_name = "batch_norm" + name[3:]
return fluid.layers.batch_norm(input=conv,
act=act,
name=bn_name+'.output.1',
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance',
data_layout=data_format)
def shortcut(self, input, ch_out, stride, is_first, name, data_format):
if data_format == 'NCHW':
ch_in = input.shape[1]
else:
ch_in = input.shape[-1]
        if ch_in != ch_out or stride != 1 or is_first:
return self.conv_bn_layer(input, ch_out, 1, stride, name=name, data_format=data_format)
else:
return input
def bottleneck_block(self, input, num_filters, stride, name, data_format):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu',name=name+"_branch2a", data_format=data_format)
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
act='relu',
name=name+"_branch2b",
data_format=data_format)
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 4, filter_size=1, act=None, name=name+"_branch2c", data_format=data_format)
short = self.shortcut(input, num_filters * 4, stride, is_first=False, name=name + "_branch1", data_format=data_format)
return fluid.layers.elementwise_add(x=short, y=conv2, act='relu',name=name+".add.output.5")
def basic_block(self, input, num_filters, stride, is_first, name, data_format):
conv0 = self.conv_bn_layer(input=input, num_filters=num_filters, filter_size=3, act='relu', stride=stride,
name=name+"_branch2a", data_format=data_format)
conv1 = self.conv_bn_layer(input=conv0, num_filters=num_filters, filter_size=3, act=None,
name=name+"_branch2b", data_format=data_format)
short = self.shortcut(input, num_filters, stride, is_first, name=name + "_branch1", data_format=data_format)
return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
def ResNet18():
model = ResNet(layers=18)
return model
def ResNet34():
model = ResNet(layers=34)
return model
def ResNet50():
model = ResNet(layers=50)
return model
def ResNet101():
model = ResNet(layers=101)
return model
def ResNet152():
model = ResNet(layers=152)
return model
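# --- Added usage sketch (illustration only, not part of the original script) ---
# Shows how the factory functions above are typically wired into a Paddle 1.x
# static-graph program; `_DemoArgs` is a stand-in for the argparse namespace the
# training script normally passes to net() (only data_format is consumed here).
# Under Paddle 2.x, call paddle.enable_static() before building the program.
class _DemoArgs(object):
    data_format = "NCHW"
def build_resnet50_program(class_dim=1000):
    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        image = fluid.data(
            name="image", shape=[-1, 3, 224, 224], dtype="float32")
        logits = ResNet50().net(image, _DemoArgs(), class_dim=class_dim)
    return main_prog, startup_prog, logits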
|
py
|
1a5e4f265ff8a534b596017fb1304da0160448b9
|
# -*- coding: utf-8 -*-
"""Here is a `foo_bar` helper example:
.. testcode::
from dp_tornado.engine.helper import Helper as dpHelper
class FooHelper(dpHelper):
def func1(self):
\"""
assert self.helper.foo.func1(10, 20) == None
\"""
return None
def func2(self, a):
\"""
assert self.helper.foo.func2(10) == 10
\"""
return a
def func3(self, a, b):
\"""
assert self.helper.foo.func3(10, 20) == 30
\"""
return a + b
File/Class Invoke rules
-----------------------
* */helper/__init__.py*, **DO NOT IMPLEMENT ANY CODE IN THIS FILE**
* */helper/blog/__init__.py*, ``BlogHelper`` > **helper.blog**
* */helper/blog/admin/__init__.py*, ``AdminHelper`` > **helper.blog.admin**
* */helper/blog/post.py*, ``PostHelper`` > **helper.blog.post**
* */helper/blog/view.py*, ``ViewHelper`` > **helper.blog.view**
* */helper/foo_bar.py*, ``FooBarHelper`` > **helper.foo_bar**
Method Invoke rules
-------------------
* */helper/foo.py*, ``def func1(self)``: **helper.foo.func1()**
* */helper/foo.py*, ``def func2(self, a)``: **helper.foo.func2(a)**
* */helper/foo.py*, ``def func3(self, a, b)``: **helper.foo.func3(a, b)**
"""
from .singleton import Singleton as dpSingleton
from .engine import Engine as dpEngine
from .loader import Loader as dpLoader
class Helper(dpEngine, dpLoader, dpSingleton):
pass
|
py
|
1a5e4f760ef55a2790fff69420ce44173f06ce4e
|
import time
import sys
import unittest
import numpy as np
from io import StringIO
from regmodel_for_testing import root
from coreali.registerio import RegIoNoHW
from coreali.regmodel import Selector
from coreali import RegisterModel
class TestUseCases(unittest.TestCase):
"""
Test common use cases
"""
def test_writeread(self):
"""
        Test that the write and read functions access the right locations
"""
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint8)
test_reg_desc.AnAddrmap.AnotherRegAt20.write(0x12345678)
test_reg_desc.AnotherAddrmap.AnotherRegAt20.write(0x87654321)
self.assertEqual(
test_reg_desc.AnAddrmap.AnotherRegAt20.read(), 0x12345678)
self.assertEqual(
test_reg_desc.AnAddrmap.AnotherRegAt20.VAL.read(), 0x12345678)
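        # 0x12345678 is stored little-endian, so the byte at base offset 0x20 is the LSB 0x78.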
self.assertEqual(test_reg_desc._rio.mem[0x20], 0x78)
self.assertEqual(
test_reg_desc.AnotherAddrmap.AnotherRegAt20.read(), 0x87654321)
self.assertEqual(
test_reg_desc.AnotherAddrmap.AnotherRegAt20.VAL.read(), 0x87654321)
self.assertEqual(test_reg_desc._rio.mem[0x120], 0x21)
test_reg_desc.AnotherAddrmap.ARegWithFields.FIELD13DOWNTO4.write(3)
self.assertEqual(
test_reg_desc.AnotherAddrmap.ARegWithFields.FIELD13DOWNTO4.read(), 3)
def test_arrays(self):
"""
        Test the accessibility of arrays through the __getitem__ or [] method
"""
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint8)
test_reg_desc._rio.verbose = False
# Elementwise access
test_reg_desc.AnAddrmap.ARepeatedReg[0].write(11)
test_reg_desc.AnAddrmap.ARepeatedReg[1].write(12)
test_reg_desc.AnAddrmap.ARepeatedReg[2].write(13)
test_reg_desc.AnotherAddrmap.ARepeatedReg[0].write(1)
test_reg_desc.AnotherAddrmap.ARepeatedReg[1].write(2)
test_reg_desc.AnotherAddrmap.ARepeatedReg[2].write(3)
self.assertEqual(test_reg_desc.AnAddrmap.ARepeatedReg[0].read(), 11)
self.assertEqual(test_reg_desc.AnAddrmap.ARepeatedReg[1].read(), 12)
self.assertEqual(test_reg_desc.AnAddrmap.ARepeatedReg[2].read(), 13)
self.assertEqual(
test_reg_desc.AnotherAddrmap.ARepeatedReg[0].read(), 1)
self.assertEqual(
test_reg_desc.AnotherAddrmap.ARepeatedReg[1].read(), 2)
self.assertEqual(
test_reg_desc.AnotherAddrmap.ARepeatedReg[2].read(), 3)
self.assertTrue(
np.array_equal(test_reg_desc.AnotherAddrmap.ARepeatedReg[0:3].read(), [1, 2, 3]))
self.assertTrue(
np.array_equal(test_reg_desc.AnAddrmap.ARepeatedReg[0:3].read(), [11, 12, 13]))
# Read and write back test
test_reg_desc.AnotherAddrmap.ARepeatedReg.write(
test_reg_desc.AnotherAddrmap.ARepeatedReg[0:3].read())
self.assertTrue(
np.array_equal(test_reg_desc.AnotherAddrmap.ARepeatedReg.read(), [1, 2, 3]))
# Write starting from index
test_reg_desc.AnotherAddrmap.ARepeatedReg[1:].write([4, 5])
self.assertTrue(
np.array_equal(test_reg_desc.AnotherAddrmap.ARepeatedReg.read(), [1, 4, 5]))
# Write and read a slice
test_reg_desc.AnotherAddrmap.TenRegs[4:7].write([4, 5, 6])
self.assertTrue(
np.array_equal(test_reg_desc.AnotherAddrmap.TenRegs.read(), [
0, 0, 0, 0, 4, 5, 6, 0, 0, 0]))
self.assertTrue(
np.array_equal(test_reg_desc.AnotherAddrmap.TenRegs[2:6].read(), [0, 0, 4, 5]))
test_reg_desc.AnotherAddrmap.TenRegs.write([0]*10)
test_reg_desc.AnotherAddrmap.TenRegs[2:7:2].write([2, 4, 6])
self.assertTrue(
np.array_equal(test_reg_desc.AnotherAddrmap.TenRegs.read(), [
0, 0, 2, 0, 4, 0, 6, 0, 0, 0]))
self.assertTrue(
np.array_equal(test_reg_desc.AnotherAddrmap.TenRegs[2:7:2].read(), [2, 4, 6]))
test_reg_desc.AnAddrmap.AnotherRegfile[0].AReg.write(1111)
test_reg_desc.AnAddrmap.AnotherRegfile[1].AReg.write(2222)
self.assertTrue(
np.array_equal(test_reg_desc.AnAddrmap.AnotherRegfile.AReg.read(), [1111, 2222]))
test_reg_desc.AnAddrmap.AnotherRegfile.AnotherReg.write([1, 2])
self.assertEqual(
test_reg_desc.AnAddrmap.AnotherRegfile[0].AnotherReg.read(), 1)
self.assertEqual(
test_reg_desc.AnAddrmap.AnotherRegfile[1].AnotherReg.read(), 2)
def test_arrays_of_array(self):
"""
        Test the accessibility of nested arrays (arrays of arrays) through the __getitem__ or [] method
"""
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint8)
test_reg_desc.AnAddrmap.ARegfile[0].ARegInARegFile[0].write(1)
test_reg_desc.AnAddrmap.ARegfile[0].ARegInARegFile[1].write(2)
test_reg_desc.AnAddrmap.ARegfile[0].ARegInARegFile[2].write(3)
test_reg_desc.AnAddrmap.ARegfile[0].ARegInARegFile[3].write(4)
test_reg_desc.AnAddrmap.ARegfile[1].ARegInARegFile[0].write(11)
test_reg_desc.AnAddrmap.ARegfile[1].ARegInARegFile[1].write(12)
test_reg_desc.AnAddrmap.ARegfile[1].ARegInARegFile[2].write(13)
test_reg_desc.AnAddrmap.ARegfile[1].ARegInARegFile[3].write(14)
self.assertTrue(
np.array_equal(test_reg_desc.AnAddrmap.ARegfile[0].ARegInARegFile.read(), [1, 2, 3, 4]))
self.assertTrue(
np.array_equal(test_reg_desc.AnAddrmap.ARegfile[1].ARegInARegFile.read(), [11, 12, 13, 14]))
def test_mem(self):
"""
Test the accessibility of memories and arrays of memories
"""
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint8)
self.assertTrue(
np.array_equal(test_reg_desc.Mem64x32.read(), [0]*64))
test_reg_desc.Mem64x32.write(0, list(range(0, 64, 1)))
self.assertTrue(
np.array_equal(list(range(0, 64, 1)),
test_reg_desc._rio.read_words(0x800, 4, 4, 64)))
self.assertTrue(
np.array_equal(test_reg_desc.Mem64x32.read(), list(range(0, 64, 1))))
self.assertTrue(
np.array_equal(test_reg_desc.Mem64x32.read(10, 40), list(range(10, 40, 1))))
test_reg_desc.ABlockWithMemory.AMemory.write(0, list(range(0, 128, 2)))
self.assertTrue(
np.array_equal(list(range(0, 128, 2)),
test_reg_desc._rio.read_words(0x500, 4, 4, 64)))
test_reg_desc.TwoMemories.write(
0, [list(range(100, 164, 1)), list(range(200, 264, 1))])
self.assertTrue(
np.array_equal(test_reg_desc.TwoMemories.read(), [list(range(100, 164, 1)), list(range(200, 264, 1))]))
def test_mem_write(self):
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint32)
test_reg_desc.TwoMemories.write(
0, [list(range(100, 164, 1)), list(range(200, 264, 1))])
test_reg_desc.TwoMemories.node.current_idx = [0]
self.assertEqual(test_reg_desc._rio.read_words(
test_reg_desc.TwoMemories.node.absolute_address, 4)[0], 100)
self.assertEqual(test_reg_desc._rio.read_words(
test_reg_desc.TwoMemories.node.absolute_address+10*4, 4)[0], 110)
test_reg_desc.TwoMemories.node.current_idx = [1]
self.assertEqual(test_reg_desc._rio.read_words(
test_reg_desc.TwoMemories.node.absolute_address, 4)[0], 200)
self.assertEqual(test_reg_desc._rio.read_words(
test_reg_desc.TwoMemories.node.absolute_address+10*4, 4)[0], 210)
def test_mem_read(self):
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint8)
test_reg_desc._rio.write_words(0, 4, 4, np.arange(
0, test_reg_desc.node.size-4, 4, dtype=np.uint64))
read_data = test_reg_desc.TwoMemories.read()
test_reg_desc.TwoMemories.node.current_idx = [0]
self.assertEqual(
test_reg_desc.TwoMemories.node.absolute_address, read_data[0][0])
self.assertEqual(
test_reg_desc.TwoMemories.node.absolute_address+6*4, read_data[0][6])
test_reg_desc.TwoMemories.node.current_idx = [1]
self.assertEqual(
test_reg_desc.TwoMemories.node.absolute_address, read_data[1][0])
self.assertEqual(
test_reg_desc.TwoMemories.node.absolute_address+6*4, read_data[1][6])
def test_tostr(self):
"""
Test that the tostr function generates the desired output
"""
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.array(
list(range(test_reg_desc.node.size)), np.uint8)
test_reg_desc._rio.verbose = False
test_reg_desc.AnAddrmap.AnotherRegAt20.write(0x12345678)
test_reg_desc.AnAddrmap.ARegWithFields.FIELD13DOWNTO4.write(3)
test_reg_desc.AnotherAddrmap.AnotherRegAt20.write(0x87654321)
test_reg_desc.AnotherAddrmap.ARegWithFields.FIELD0DOWNTO0.write(1)
test_reg_desc.Mem64x32.write(0, list(range(0, 20, 2)))
test_reg_desc.AnotherAddrmap.TenRegs[4:7].write([4, 5, 6])
test_reg_desc.TwoMemories[0].write(0, list(range(10, 120, 1)))
test_reg_desc.TwoMemories[1].write(0, list(range(10, 120, 2)))
result = str(test_reg_desc)
expected = """test_register_description:
AnAddrmap :
ARegWithFields : 50462768 = 0x03020030
FIELD0DOWNTO0 : 0 = 0x00000000
FIELD13DOWNTO4 : 3 = 0x00000003
ARepeatedReg : [117835012 185207048 252579084]
VAL : [117835012 185207048 252579084]
AnotherRegAt20 : 305419896 = 0x12345678
VAL : 305419896 = 0x12345678
TenRegs : [ 656811300 724183336 791555372 858927408 926299444 993671480 ...
VAL : [ 656811300 724183336 791555372 858927408 926299444 993671480 ...
ARegfile :
ARegInARegFile : [[1397903696 1465275732 1532647768 1600019804] ...
VAL : [[1397903696 1465275732 1532647768 1600019804] ...
AnotherRegfile :
AReg : [1936879984 2071624056]
VAL : [1936879984 2071624056]
AnotherReg : [2004252020 2138996092]
VAL : [2004252020 2138996092]
AnotherAddrmap :
ARegWithFields : 50462977 = 0x03020101
FIELD0DOWNTO0 : 1 = 0x00000001
FIELD13DOWNTO4 : 16 = 0x00000010
ARepeatedReg : [117835012 185207048 252579084]
VAL : [117835012 185207048 252579084]
AnotherRegAt20 : 2271560481 = 0x87654321
VAL : 2271560481 = 0x87654321
TenRegs : [ 656811300 724183336 791555372 858927408 4 5 ...
VAL : [ 656811300 724183336 791555372 858927408 4 5 ...
ARegfile :
ARegInARegFile : [[1397903696 1465275732 1532647768 1600019804] ...
VAL : [[1397903696 1465275732 1532647768 1600019804] ...
AnotherRegfile :
AReg : [1936879984 2071624056]
VAL : [1936879984 2071624056]
AnotherReg : [2004252020 2138996092]
VAL : [2004252020 2138996092]
ABlockWithMemory :
AReg : 50462976 = 0x03020100
VAL : 50462976 = 0x03020100
AMemory : [ 50462976 117835012 185207048 252579084 319951120 387323156 ...
HundredRegs : [ 50462976 117835012 185207048 252579084 319951120 387323156 ...
VAL : [ 256 1284 2312 3340 4368 5396 6424 7452 8480 9508 10536 11564 ...
Mem64x32 : [ 0 2 4 6 8 10 ...
TwoMemories : [[ 10 11 12 13 14 15 ...
AnAddrmapWith8bitRegs:
AReg0 : 0 = 0x00
VAL : 0 = 0x00
AReg1 : 1 = 0x01
FIELD3DOWNTO0 : 1 = 0x01
FIELD7DOWNTO4 : 0 = 0x00"""
if result != expected:
print(result)
print(expected)
self.assertEqual(result, expected)
def test_help(self):
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint8)
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
test_reg_desc.TwoMemories.help()
sys.stdout = old_stdout
result = mystdout.getvalue()
expected = """name: 2x 64x32 Memory
desc: This are two memories
mementries: 64
memwidth: 32
"""
if result != expected:
print(result)
self.assertEqual(result, expected)
def test_performance(self):
"""
Measure the performance and check if it is suitable
"""
t = time.time()
test_reg_desc = RegisterModel(root, RegIoNoHW())
elapsed = time.time() - t
print("Model creation time =" + str(elapsed) + "s")
self.assertLess(elapsed, 0.2)
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint32)
access_cnt = 0
ACCESSES = 10e3
t = time.time()
while access_cnt < ACCESSES:
test_reg_desc.AnAddrmap.AnotherRegAt20.write(36)
access_cnt += 1
test_reg_desc.AnAddrmap.ARegWithFields.FIELD0DOWNTO0.write(1)
access_cnt += 1
test_reg_desc.AnAddrmap.AnotherRegAt20.read()
access_cnt += 1
test_reg_desc.AnAddrmap.ARepeatedReg[1].write(12)
access_cnt += 1
test_reg_desc.AnAddrmap.ARepeatedReg[1:3].read()
access_cnt += 2
test_reg_desc.Mem64x32.read(10, 20)
access_cnt += 10
elapsed = time.time() - t
accesses_per_second = ACCESSES/elapsed
print("Accesses = " + str(int(accesses_per_second)) + "/s")
self.assertGreater(accesses_per_second, 20e3)
def test_selectable(self):
"""
Test that the selectable class is properly integrated
"""
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint8)
test_reg_desc._rio.verbose = False
selector = Selector()
test_reg_desc.AnotherAddrmap.ARepeatedReg[2]._construct_selector(
selector.selected)
self.assertEqual(selector.selected, [0, 0, 2])
test_reg_desc.AnotherAddrmap.ARepeatedReg[2]._set_current_idx(
selector.selected)
self.assertEqual(
test_reg_desc.AnotherAddrmap.ARepeatedReg.node.current_idx, [2])
self.assertEqual(
test_reg_desc.AnotherAddrmap.ARepeatedReg.node.parent.current_idx, [0])
self.assertEqual(
test_reg_desc.AnotherAddrmap.ARepeatedReg.node.parent.parent.current_idx, [0])
def test_selectable_with_slice(self):
"""
Test that the selectable class is properly integrated when slices are used
"""
test_reg_desc = RegisterModel(root, RegIoNoHW())
test_reg_desc._rio.mem = np.zeros([test_reg_desc.node.size], np.uint8)
test_reg_desc._rio.verbose = False
selector = Selector()
test_reg_desc.AnAddrmap.ARegfile.ARegInARegFile._construct_selector(
selector.selected)
self.assertEqual(selector.selected, [
0, 0, slice(0, 2, 1), slice(0, 4, 1)])
selector = Selector()
test_reg_desc.AnAddrmap.ARegfile[0].ARegInARegFile[0:3:2]._construct_selector(
selector.selected)
self.assertEqual(selector.selected, [0, 0, 0, slice(0, 3, 2)])
selector = Selector()
test_reg_desc.AnAddrmap.ARegfile[0].ARegInARegFile[2]._construct_selector(
selector.selected)
self.assertEqual(selector.selected, [0, 0, 0, 2])
selector = Selector()
test_reg_desc.AnotherAddrmap.TenRegs[2:7:2]._construct_selector(
selector.selected)
self.assertEqual(selector.selected, [0, 0, slice(2, 7, 2)])
if __name__ == '__main__':
unittest.main()
|
py
|
1a5e4fb608f8232c6ee1987b30c417a553e165a5
|
import paho.mqtt.client as mqtt
import configparser
import logging
import json
class Messaging:
""" This is a wrapper for the mqtt client. """
def __init__(self, config, subscription = None, on_message = None, clientId = None):
global on_connect
self.config = config
defaultHost = None
if (clientId):
self.client = mqtt.Client(clientId)
else:
self.client = mqtt.Client()
self.client.enable_logger()
self.client.on_connect = on_connect
self.client.tls_set_context()
self.client.tls_insecure_set(True)
if (subscription):
self.client.user_data_set(subscription)
if (on_message):
self.client.on_message = on_message
username = config.get('username', None)
password = config.get('password', None)
if username is not None:
self.client.username_pw_set(username, password)
port = int(config.get('port', '1883'))
host = config.get('host', defaultHost)
print("Host: ", host, "port: ", port)
if host is None:
raise Exception("Host must be defined in the config file or in the servers section.")
self.client.connect(host, port)
def publish(self, topic, payload, qos = 0, retain = False):
self.client.publish(topic, payload, qos, retain)
def subscribe(self, topic):
self.client.subscribe(topic)
def loop_forever(self):
self.client.loop_forever()
def loop_start(self):
self.client.loop_start()
def on_connect(client, userdata, flags, rc):
if (userdata):
client.subscribe(userdata)
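# Example usage (a minimal sketch, not part of the original module; the broker
# host, topic names and payload below are illustrative placeholders, while the
# config keys mirror the ones read in Messaging.__init__ above):
#
#     config = {'host': 'broker.example.com', 'port': '8883',
#               'username': 'user', 'password': 'secret'}
#
#     def handle(client, userdata, msg):
#         print(msg.topic, msg.payload)
#
#     messaging = Messaging(config, subscription='sensors/#', on_message=handle)
#     messaging.publish('sensors/ping', json.dumps({'ok': True}))
#     messaging.loop_forever()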
|
py
|
1a5e4ffc0ada22e54b96484c35262ce0526a94d6
|
from utils import *
import numpy as np
import h5py
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
def resize_images(image_list, im_size):
return_list = []
for im in image_list:
img = Image.open(im)
img = img.resize((im_size, im_size), Image.ANTIALIAS)
np_img = np.array(img)
return_list.append(np_img)
return return_list
def create_image_label_list(img_path, group, im_size, skip, all_labels):
label = all_labels['label'].loc[int(group)]
image_list = os.listdir(img_path + os.sep + group)
if len(image_list) < 24:
return [], []
image_list = sorted(image_list[:24:skip])
images = resize_images([img_path + os.sep + group + os.sep + i for i in image_list], im_size)
return images, label
def make_hdf5(img_path, im_size, skip, all_labels, desired_labels, fname='data_hdf5.h5'):
indices = list(all_labels[all_labels['label'].isin(desired_labels)].index)
hf = h5py.File(fname, 'w')
for group in tqdm(indices):
group = str(group)
images, label = create_image_label_list(img_path, group, im_size, skip, all_labels)
if not images:
print('{} excluded, because of the short length'.format(group))
continue
label_id = desired_labels.index(label)
hfgroup = hf.create_group(group)
hfgroup.create_dataset('images', data=images)
hfgroup.create_dataset('label', data=label)
hfgroup.create_dataset('label_id', data=label_id)
hf.close()
if __name__ == "__main__":
# read config.ini and use the settings
param = get_configs()
data_path = param['data_path']
img_path = param['img_path']
train_labels = pd.read_csv(param['csv_train'], names=['label'], sep=';')
val_labels = pd.read_csv(param['csv_val'], names=['label'], sep=';')
all_labels = pd.read_csv(param['csv_labels'], sep=';')
labels = param['labels']
fn_postfix = str(len(labels))
print('labels are {} (count: {})'.format(labels, fn_postfix))
train_fn = data_path + os.sep + 'train_hdf5' + fn_postfix + '.h5'
val_fn = data_path + os.sep + 'val_hdf5' + fn_postfix + '.h5'
maker_params = {'img_path': img_path, 'im_size': param['im_size'], 'skip': param['skip'], 'desired_labels': labels}
make_hdf5(all_labels=train_labels, fname=train_fn, **maker_params)
make_hdf5(all_labels=val_labels, fname=val_fn, **maker_params)
|
py
|
1a5e511f001d46c49fd74fda50080dcc52213e84
|
"""AWS Resource classes."""
from typing import Tuple, Type
from altimeter.aws.resource.resource_spec import AWSResourceSpec
from altimeter.aws.resource.account import AccountResourceSpec
from altimeter.aws.resource.awslambda.function import LambdaFunctionResourceSpec
from altimeter.aws.resource.dynamodb.dynamodb_table import DynamoDbTableResourceSpec
from altimeter.aws.resource.ec2.image import EC2ImageResourceSpec
from altimeter.aws.resource.ec2.instance import EC2InstanceResourceSpec
from altimeter.aws.resource.ec2.network_interface import EC2NetworkInterfaceResourceSpec
from altimeter.aws.resource.ec2.region import RegionResourceSpec
from altimeter.aws.resource.ec2.route_table import EC2RouteTableResourceSpec
from altimeter.aws.resource.ec2.transit_gateway_vpc_attachment import (
TransitGatewayVpcAttachmentResourceSpec,
)
from altimeter.aws.resource.ec2.security_group import SecurityGroupResourceSpec
from altimeter.aws.resource.ec2.snapshot import EBSSnapshotResourceSpec
from altimeter.aws.resource.ec2.subnet import SubnetResourceSpec
from altimeter.aws.resource.ec2.transit_gateway import TransitGatewayResourceSpec
from altimeter.aws.resource.ec2.volume import EBSVolumeResourceSpec
from altimeter.aws.resource.ec2.vpc import VPCResourceSpec
from altimeter.aws.resource.ec2.vpc_endpoint import VpcEndpointResourceSpec
from altimeter.aws.resource.elasticloadbalancing.load_balancer import LoadBalancerResourceSpec
from altimeter.aws.resource.elasticloadbalancing.target_group import TargetGroupResourceSpec
from altimeter.aws.resource.events.cloudwatchevents_rule import EventsRuleResourceSpec
from altimeter.aws.resource.iam.iam_saml_provider import IAMSAMLProviderResourceSpec
from altimeter.aws.resource.iam.instance_profile import InstanceProfileResourceSpec
from altimeter.aws.resource.iam.policy import IAMPolicyResourceSpec, IAMAWSManagedPolicyResourceSpec
from altimeter.aws.resource.iam.role import IAMRoleResourceSpec
from altimeter.aws.resource.iam.user import IAMUserResourceSpec
from altimeter.aws.resource.kms.key import KMSKeyResourceSpec
from altimeter.aws.resource.organizations.org import OrgResourceSpec
from altimeter.aws.resource.organizations.ou import OUResourceSpec
from altimeter.aws.resource.organizations.account import OrgsAccountResourceSpec
from altimeter.aws.resource.rds.instance import RDSInstanceResourceSpec
from altimeter.aws.resource.rds.snapshot import RDSSnapshotResourceSpec
from altimeter.aws.resource.s3.bucket import S3BucketResourceSpec
# To enable a resource to be scanned, add it here.
RESOURCE_SPEC_CLASSES: Tuple[Type[AWSResourceSpec], ...] = (
DynamoDbTableResourceSpec,
EBSSnapshotResourceSpec,
EBSVolumeResourceSpec,
EC2ImageResourceSpec,
EC2InstanceResourceSpec,
EC2NetworkInterfaceResourceSpec,
EC2RouteTableResourceSpec,
EventsRuleResourceSpec,
IAMAWSManagedPolicyResourceSpec,
IAMPolicyResourceSpec,
IAMRoleResourceSpec,
IAMSAMLProviderResourceSpec,
IAMUserResourceSpec,
InstanceProfileResourceSpec,
KMSKeyResourceSpec,
LambdaFunctionResourceSpec,
LoadBalancerResourceSpec,
RDSInstanceResourceSpec,
RDSSnapshotResourceSpec,
S3BucketResourceSpec,
SecurityGroupResourceSpec,
SubnetResourceSpec,
TargetGroupResourceSpec,
TransitGatewayResourceSpec,
TransitGatewayVpcAttachmentResourceSpec,
VPCResourceSpec,
VpcEndpointResourceSpec,
)
INFRA_RESOURCE_SPEC_CLASSES: Tuple[Type[AWSResourceSpec], ...] = (
AccountResourceSpec,
RegionResourceSpec,
)
ORG_RESOURCE_SPEC_CLASSES: Tuple[Type[AWSResourceSpec], ...] = (
OrgResourceSpec,
OrgsAccountResourceSpec,
OUResourceSpec,
)
|
py
|
1a5e51758ee6a9e25477e8c18e121a6f0e69eea2
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..model import Concatenate
def test_Concatenate_inputs():
input_map = dict(add_val=dict(argstr='--add %f',
),
args=dict(argstr='%s',
),
combine=dict(argstr='--combine',
),
concatenated_file=dict(argstr='--o %s',
genfile=True,
),
environ=dict(nohash=True,
usedefault=True,
),
gmean=dict(argstr='--gmean %d',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_files=dict(argstr='--i %s...',
mandatory=True,
),
keep_dtype=dict(argstr='--keep-datatype',
),
mask_file=dict(argstr='--mask %s',
),
max_bonfcor=dict(argstr='--max-bonfcor',
),
max_index=dict(argstr='--max-index',
),
mean_div_n=dict(argstr='--mean-div-n',
),
multiply_by=dict(argstr='--mul %f',
),
multiply_matrix_file=dict(argstr='--mtx %s',
),
paired_stats=dict(argstr='--paired-%s',
),
sign=dict(argstr='--%s',
),
sort=dict(argstr='--sort',
),
stats=dict(argstr='--%s',
),
subjects_dir=dict(),
terminal_output=dict(nohash=True,
),
vote=dict(argstr='--vote',
),
)
inputs = Concatenate.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Concatenate_outputs():
output_map = dict(concatenated_file=dict(),
)
outputs = Concatenate.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
py
|
1a5e520e42f56529133381c93cca81b7e91fe302
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: [email protected]
"""
Random methods
"""
import pytest
import os
from wpy.path import read_dict
from wpy.randoms import (
random_str
)
from lfsdb import FileStorage
from lfsdb.db import FileStorageError
from lfsdb.db.errors import FSQueryError
from lfsdb.db.cache import CacheTable
from lfsdb.db.client import FileTable
from lfsdb.sockets.db import SocketTable
root = '/tmp'
root = None
db_name = 'wpy_db'
table = 'wpy_table'
fs = FileStorage(root)
file_table = fs.get_db(db_name).get_table(table)
socket_table = SocketTable(db_name, table)
cache_table = CacheTable(db_name, table)
tables = [file_table, socket_table, cache_table]
table_root = os.path.join(fs.root, db_name, table)
def _origin_data(data):
for k in ('_id', '_update_time', "_create_time"):
data.pop(k, None)
return data
def _handle_table_test(func):
for table in tables:
table.drop()
func(table)
table.drop()
def test_insert():
_handle_table_test(_test_insert)
def _test_insert(db):
name = random_str(6)
doc = {
"name": name
}
# Check that the inserted data has been written to the file
_id = db.insert(doc)
if isinstance(db, FileTable):
path = os.path.join(table_root, _id)
data = read_dict(path)
data = _origin_data(data)
assert doc == data
data = db.find_by_id(_id)
data = _origin_data(data)
assert doc == data
doc['_id'] = _id
with pytest.raises(FileStorageError) as excinfo:
db.insert(doc)
assert str(excinfo) == '{}._id {} is exists'.format(table, _id)
db.drop()
assert not os.path.exists(table_root)
def test_find():
_handle_table_test(_test_find)
def _test_find(db):
name = random_str(6)
doc = { "name": name}
db.drop()
db.insert(doc)
db.insert(doc)
doc['age'] = 12
db.insert(doc)
# Query with an empty condition
docs = db.find()
assert len(docs) == 3
docs = db.find({ "name": name })
assert len(docs) == 3
docs = db.find({ "name": name, "age": 12 })
assert len(docs) == 1
doc = db.find_one({"age": 12}, {})
assert len(doc.keys()) == 5
doc = db.find_one({"age": 12}, {"name": 1})
assert len(doc.keys()) == 2
with pytest.raises(FSQueryError) as exe_info:
doc = db.find_one({"age": 12}, {"name": 1, "age": 0})
assert str(exe_info) == ('Projection cannot have a mix of inclusion'
' and exclusion.')
doc = db.find_one({"age": 12}, {"name": 1, "_id": 0})
assert len(doc.keys()) == 2
db.drop()
def test_update():
_handle_table_test(_test_update)
def _test_update(db):
# TODO: cache
name = random_str(6)
doc = { "name": name}
db.insert(doc)
_id = db.insert(doc)
insert_utime = db.find_by_id(_id).get("_update_time")
db.insert(doc)
count = db.update(doc, {"name": "wxnacy"})
assert count == 3
db.update({"_id": _id}, {"name": "wxn"})
data = db.find_by_id(_id)
update_utime = data.get("_update_time")
# Check that the update time has changed
assert insert_utime < update_utime
data = db.find_by_id(_id)
data = _origin_data(data)
assert { "name": "wxn" } == data
db.drop()
def test_delete():
_handle_table_test(_test_delete)
def _test_delete(db):
db.drop()
name = random_str(6)
doc = { "name": name}
db.insert(doc)
_id = db.insert(doc)
db.insert(doc)
assert db.delete({ "_id": _id }) == 1
docs = db.find()
assert len(docs) == 2
count = db.delete(doc)
assert count == 2
db.drop()
def test_sort():
_handle_table_test(_test_sort)
def _test_sort(db):
db.drop()
arr = [{"age": 5, "id": 2}, {"age": 5, "id": 5}, {"age": 3, "id": 4}]
for a in arr:
db.insert(a)
items = db.find(sorter = [('age', 1), ('id', -1)])
for item in items:
item.pop('_id', None)
item.pop('_create_time', None)
item.pop('_update_time', None)
assert items == [{"age": 3, "id": 4},{"age": 5, "id": 5}, {"age": 5, "id": 2}]
db.drop()
socket_table.close()
|
py
|
1a5e52aa4d0c6329087530b6c09a57c58f0d1cf8
|
#!/usr/bin/env python
import roshelper
import rospy
from geometry_msgs.msg import Vector3
from std_msgs.msg import Float32, Bool
node_name = "Input"
n = roshelper.Node(node_name, anonymous=False)
# A class for the manipulator arm, still needs actual servos,
# motors and stuff
@n.entry_point() #(exp_a=1, exp_b=1, exp_c=1)
class TenderBotInput(object):
button_state = False
last_button_state = False
# ctor, start service
def __init__(self): # (self, exp_a, exp_b, exp_c)
pass
def check_if_publish_button(self):
return self.button_state == self.last_button_state
# Publishes the end effector position
@n.publisher(node_name + "/button", Bool)
def publish_button(self):
msg = Bool()
msg.data = self.button_state
return msg
@n.main_loop(frequency=30)
def run(self):
if self.check_if_publish_button():
self.publish_button()
if __name__ == "__main__":
n.start(spin=True)
|
py
|
1a5e539614ca5e5378e02be2a310ca81dfddf7a4
|
"""
Django settings for WaterPump project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p)_k*g5ud&5c!y^(udupt^jln=)kl_)8&iqxlv4bj32m+e=m7!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'base',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'WaterPump.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'WaterPump.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
py
|
1a5e567c4a7b1a8810d819af5cde587b0d4ca1b6
|
__all__ = [
'RulebaseAgentBase',
'RulebaseAgentRandomAlpha',
'RulebaseAgentWrfWeepRandomAlpha',
'RulebaseAgentWrfWeepNoAction',
'RulebaseAgentWepegWeepRandomAlpha',
'RulebaseAgentGarnet',
]
from .rulebase_policy import *
from fice_nike.agent_base import AgentBase
class RulebaseAgentBase(AgentBase):
_policy = None
def __init__(self, *args):
super().__init__(*args)
def policy(self, frame_data):
data = {
'frame_data': frame_data,
'player': self.player,
}
return self._policy.update(data)
class RulebaseAgentRandomAlpha(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyRandomAlpha()
class RulebaseAgentWrfWeepRandomAlpha(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyWrfWeepRandomAlpha()
class RulebaseAgentWrfWeepNoAction(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyWrfWeepNoAction()
class RulebaseAgentWepegWeepRandomAlpha(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyWepegWeepRandomAlpha()
class RulebaseAgentGarnet(RulebaseAgentBase):
def __init__(self, *args):
super().__init__(*args)
self._policy = RulebasePolicyGarnet()
|
py
|
1a5e576c50b7b82e2de14cef7dbfdfe8a2daa8f3
|
# -*- coding: UTF-8 -*-
# Newer unittest features aren't built in for python 2.6
import sys
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest
else:
import unittest
import sc2reader
sc2reader.log_utils.log_to_console("INFO")
class TestSummaries(unittest.TestCase):
def test_a_WoL_s2gs(self):
summary = sc2reader.load_game_summary("test_s2gs/s2gs1.s2gs")
self.assertEqual(summary.players[0].resource_collection_rate, 1276)
self.assertEqual(summary.players[0].build_order[0].order, 'Probe')
self.assertEqual(summary.expansion, 'WoL')
def test_a_HotS_s2gs(self):
summary = sc2reader.load_game_summary("test_s2gs/hots1.s2gs")
self.assertEqual(summary.players[0].resource_collection_rate, 1599)
self.assertEqual(summary.players[0].build_order[0].order, 'SCV')
self.assertEqual(summary.expansion, 'HotS')
def test_another_HotS_s2gs(self):
summary = sc2reader.load_game_summary("test_s2gs/hots2.s2gs")
self.assertEqual(summary.players[0].enemies_destroyed, 14575)
self.assertEqual(summary.players[0].time_supply_capped, 50)
self.assertEqual(summary.players[0].idle_production_time, 4438)
self.assertEqual(summary.players[0].resources_spent, 25450)
self.assertEqual(summary.players[0].apm, 204)
self.assertEqual(summary.players[0].workers_active_graph.as_points()[8][1], 25)
self.assertEqual(summary.players[0].upgrade_spending_graph.as_points()[8][1], 300)
self.assertEqual(summary.expansion, 'HotS')
if __name__ == '__main__':
unittest.main()
|
py
|
1a5e5a62fdb4ba442700dc329d1d241426786a84
|
#!/usr/bin/python3
def uppercase(str):
for char in str:
if (ord(char) > 96 and ord(char) < 123):
char = (chr(ord(char) - 32))
print("{}".format(char), end='')
print("")
|
py
|
1a5e5a97ca775c83feba680ef7d64c748b64bc06
|
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
from setuptools.extension import Extension
from Cython.Build import cythonize
with open("README.md") as f:
readme = f.read()
with open("LICENSE.txt") as f:
license = f.read()
extensions = [
Extension(
"tenforty.ots_2020",
["tenforty/ots/ots_2020.pyx"],
libraries=[],
include_dirs=[],
),
Extension(
"tenforty.ots_2019",
["tenforty/ots/ots_2019.pyx"],
libraries=[],
include_dirs=[],
),
Extension(
"tenforty.ots_2018",
["tenforty/ots/ots_2018.pyx"],
libraries=[],
include_dirs=[],
),
Extension(
"tenforty.ots_2017",
["tenforty/ots/ots_2017.pyx"],
libraries=[],
include_dirs=[],
),
]
setup(
name="tenforty",
version="0.1.0",
description="Compute US federal taxes, and state taxes for some states.",
long_description=readme,
author="Mike Macpherson",
author_email="[email protected]",
url="https://github.com/mmacpherson/tenforty",
license=license,
packages=find_packages(exclude=("tests", "docs")),
# cmdclass=dict(build_ext=build_ext),
ext_modules=cythonize(extensions),
zip_safe=False,
)
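# Typical local build commands (a hedged note, not part of the original file;
# both assume Cython and a C compiler are available):
#     python setup.py build_ext --inplace
#     pip install -e .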
|
py
|
1a5e5ac16469363f4d3929318069254671544aac
|
# Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-x',
'--xml_file',
help='A Fast-RTPS XML configuration file',
required=False
)
parser.add_argument(
'-f',
'--demands_file',
help='Filename of the demands configuration file',
required=False,
default=None
)
parser.add_argument(
'-n',
'--number_of_samples',
help='The number of measurements to take for each payload',
required=False,
default='10000'
)
parser.add_argument(
'-s',
'--security',
action='store_true',
help='Enables security (Defaults: disable)',
required=False
)
parser.add_argument(
'-i',
'--interprocess',
action='store_true',
help='Publisher and subscribers in separate processes. Defaults:False',
required=False
)
parser.add_argument(
'-d',
'--data_sharing',
choices=['on', 'off'],
help='Explicitly enable/disable data sharing. (Defaults: Fast-DDS default settings)',
required=False
)
parser.add_argument(
'-l',
'--data_loans',
action='store_true',
help='Enable the use of the loan sample API (Defaults: disable)',
required=False
)
parser.add_argument(
'-r',
'--reliability',
action='store_true',
help='Run with RELIABLE reliability (Defaults: disable)',
required=False
)
parser.add_argument(
'--shared_memory',
choices=['on', 'off'],
help='Explicitly enable/disable shared memory transport. (Defaults: Fast-DDS default settings)',
required=False
)
# Parse arguments
args = parser.parse_args()
xml_file = args.xml_file
security = args.security
interprocess = args.interprocess
if security and not interprocess:
print('Intra-process delivery NOT supported with security')
exit(1) # Exit with error
# Check that samples is positive
if str.isdigit(args.number_of_samples) and int(args.number_of_samples) > 0:
samples = str(args.number_of_samples)
else:
print(
'"number_of_samples" must be a positive integer, NOT {}'.format(
args.number_of_samples
)
)
exit(1) # Exit with error
# Demands files options
demands_options = []
if args.demands_file:
if not os.path.isfile(args.demands_file):
print('Demands file "{}" is NOT a file'.format(args.demands_file))
exit(1) # Exit with error
else:
demands_options = [
'--file',
args.demands_file,
]
# XML options
filename_options = 'default'
xml_options = []
if xml_file:
if not os.path.isfile(xml_file):
print('XML file "{}" is NOT a file'.format(xml_file))
exit(1) # Exit with error
else:
xml_options = ['--xml', xml_file]
# Get QoS from XML
filename_options = xml_file.split('/')[-1].split('\\')[-1]
filename_options = filename_options.split('.')[-2].split('_')[1:]
filename_options = '_'.join(filename_options)
# Data sharing and loans options
# modify output file names
if args.data_sharing and 'on' == args.data_sharing and args.data_loans:
filename_options += '_data_loans_and_sharing'
elif args.data_sharing and 'on' == args.data_sharing:
filename_options += '_data_sharing'
elif args.data_loans:
filename_options += '_data_loans'
# add flags to the command line
data_options = []
if args.data_sharing:
if 'on' == args.data_sharing:
data_options += ['--data_sharing=on']
else:
data_options += ['--data_sharing=off']
if args.data_loans:
data_options += ['--data_loans']
reliability_options = []
if args.reliability:
reliability_options = ['--reliability=reliable']
else:
reliability_options = ['--reliability=besteffort']
if args.shared_memory:
if 'on' == args.shared_memory:
data_options += ['--shared_memory=on']
else:
data_options += ['--shared_memory=off']
# Environment variables
executable = os.environ.get('LATENCY_TEST_BIN')
certs_path = os.environ.get('CERTS_PATH')
# Check that executable exists
if executable:
if not os.path.isfile(executable):
print('LATENCY_TEST_BIN does NOT specify a file')
exit(1) # Exit with error
else:
print('LATENCY_TEST_BIN is NOT set')
exit(1) # Exit with error
# Security
security_options = []
if security is True:
if certs_path:
if os.path.isdir(certs_path):
security_options = ['--security=true', '--certs=' + certs_path]
else:
print('CERTS_PATH does NOT specify a directory')
exit(1) # Exit with error
else:
print('Cannot find CERTS_PATH environment variable')
exit(1) # Exit with error
# Domain
domain = str(os.getpid() % 230)
domain_options = ['--domain', domain]
if interprocess is True:
# Base of test command for publisher agent
pub_command = [
executable,
'publisher',
'--samples',
samples,
'--export_raw_data',
]
# Base of test command for subscriber agent
sub_command = [
executable,
'subscriber',
]
# Manage security
if security is True:
pub_command.append(
'./measurements_interprocess_{}_security.csv'.format(
filename_options
)
)
pub_command += security_options
sub_command += security_options
else:
pub_command.append(
'./measurements_interprocess_{}.csv'.format(
filename_options
)
)
pub_command += domain_options
pub_command += xml_options
pub_command += demands_options
pub_command += data_options
pub_command += reliability_options
sub_command += domain_options
sub_command += xml_options
sub_command += demands_options
sub_command += data_options
sub_command += reliability_options
print('Publisher command: {}'.format(
' '.join(element for element in pub_command)),
flush=True
)
print('Subscriber command: {}'.format(
' '.join(element for element in sub_command)),
flush=True
)
# Spawn processes
publisher = subprocess.Popen(pub_command)
subscriber = subprocess.Popen(sub_command)
# Wait until finish
subscriber.communicate()
publisher.communicate()
if subscriber.returncode != 0:
exit(subscriber.returncode)
elif publisher.returncode != 0:
exit(publisher.returncode)
else:
# Base of test command to execute
command = [
executable,
'both',
'--samples',
samples,
'--export_raw_data',
]
# Manage security
if security is True:
command.append(
'./measurements_intraprocess_{}_security.csv'.format(
filename_options
)
)
command += security_options
else:
command.append(
'./measurements_intraprocess_{}.csv'.format(
filename_options
)
)
command += domain_options
command += xml_options
command += demands_options
command += data_options
command += reliability_options
print('Executable command: {}'.format(
' '.join(element for element in command)),
flush=True
)
# Spawn process
both = subprocess.Popen(command)
# Wait until finish
both.communicate()
exit(both.returncode)
exit(0)
|
py
|
1a5e5ac6dc6c58b6a8fef43b2e586e7a51a44c47
|
import unittest
from bidfx.pricing._pixie.util.buffer_reads import scale_to_long
class TestScaleToLong(unittest.TestCase):
def test_scale_to_long(self):
self.assertEqual(scale_to_long(1, 1), "10")
self.assertEqual(scale_to_long(1, 2), "100")
self.assertEqual(scale_to_long(1, 3), "1000")
self.assertEqual(scale_to_long(1, 4), "10000")
self.assertEqual(scale_to_long(1, 5), "100000")
self.assertEqual(scale_to_long(1, 6), "1000000")
self.assertEqual(scale_to_long(56, 1), "560")
self.assertEqual(scale_to_long(123, 2), "12300")
self.assertEqual(scale_to_long(678, 3), "678000")
self.assertEqual(scale_to_long(55, 4), "550000")
|
py
|
1a5e5ba35b59db210fd21d3471257b56e78c998b
|
import torch
class SamplingResult(object):
def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, gt_polygons, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.pos_bboxes = bboxes[pos_inds]
self.neg_bboxes = bboxes[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
if gt_polygons is not None:
self.pos_gt_polygon = gt_polygons[self.pos_assigned_gt_inds, :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def bboxes(self):
return torch.cat([self.pos_bboxes, self.neg_bboxes])
|
py
|
1a5e5bcd20aee41ef93f0cbe66effacbbe80c531
|
import sys
import traceback
import ujson as json
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux.aws.sts import boto3_cached_conn
from consoleme.config import config
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.role_updater.schemas import RoleUpdaterRequest
log = config.get_logger()
stats = get_plugin_by_name(config.get("plugins.metrics"))()
async def update_role(event):
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"event": event,
"message": "Working on event",
}
log.debug(log_data)
if not isinstance(event, list):
raise Exception("The passed event must be a list.")
# Let's normalize all of the policies to JSON if they are not already
for d in event:
for i in d.get("inline_policies", []):
if i.get("policy_document") and isinstance(i.get("policy_document"), dict):
i["policy_document"] = json.dumps(
i["policy_document"], escape_forward_slashes=False
)
if d.get("assume_role_policy_document", {}):
if isinstance(
d.get("assume_role_policy_document", {}).get(
"assume_role_policy_document"
),
dict,
):
d["assume_role_policy_document"][
"assume_role_policy_document"
] = json.dumps(
d["assume_role_policy_document"]["assume_role_policy_document"],
escape_forward_slashes=False,
)
bad_validation = RoleUpdaterRequest().validate(event, many=True)
if bad_validation:
log_data["error"] = bad_validation
log.error(log_data)
return {"error_msg": "invalid schema passed", "detail_error": bad_validation}
event = RoleUpdaterRequest().load(event, many=True)
result = {"success": False}
for d in event:
arn = d["arn"]
aws_session_name = "roleupdater-" + d["requester"]
account_number = await parse_account_id_from_arn(arn)
role_name = await parse_role_name_from_arn(arn)
# TODO: Make configurable
client = boto3_cached_conn(
"iam",
account_number=account_number,
assume_role=config.get("policies.role_name", "ConsoleMe"),
session_name=aws_session_name,
)
inline_policies = d.get("inline_policies", [])
managed_policies = d.get("managed_policies", [])
assume_role_doc = d.get("assume_role_policy_document", {})
tags = d.get("tags", [])
if (
not inline_policies
and not managed_policies
and not assume_role_doc
and not tags
):
result["message"] = f"Invalid request. No response taken on event: {event}"
return result
try:
for policy in inline_policies:
await update_inline_policy(client, role_name, policy)
for policy in managed_policies:
await update_managed_policy(client, role_name, policy)
if assume_role_doc:
await update_assume_role_document(client, role_name, assume_role_doc)
for tag in tags:
await update_tags(client, role_name, tag)
except ClientError as ce:
result["message"] = ce.response["Error"]
result["Traceback"] = traceback.format_exc()
return result
result["success"] = True
return result
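# Illustrative event payload for update_role (a sketch inferred from the keys
# read above; the ARN, requester and policy contents are placeholders and the
# marshmallow schema may require additional fields):
# [
#     {
#         "arn": "arn:aws:iam::123456789012:role/example-role",
#         "requester": "someone",
#         "inline_policies": [
#             {
#                 "action": "attach",
#                 "policy_name": "example-inline",
#                 "policy_document": {"Version": "2012-10-17", "Statement": []},
#             }
#         ],
#         "managed_policies": [
#             {"action": "attach", "arn": "arn:aws:iam::aws:policy/ReadOnlyAccess"}
#         ],
#         "tags": [{"action": "add", "key": "team", "value": "infra"}],
#     }
# ]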
async def parse_account_id_from_arn(arn):
return arn.split(":")[4]
async def parse_role_name_from_arn(arn):
return arn.split("/")[-1]
async def update_inline_policy(client, role_name, policy):
log.debug(
{"message": "Updating inline policy", "role_name": role_name, "policy": policy}
)
if policy.get("action") == "attach":
response = await sync_to_async(client.put_role_policy)(
RoleName=role_name,
PolicyName=policy["policy_name"],
PolicyDocument=policy["policy_document"],
)
elif policy.get("action") == "detach":
response = await sync_to_async(client.delete_role_policy)(
RoleName=role_name, PolicyName=policy["policy_name"]
)
else:
raise Exception("Unable to update managed policy")
return response
async def update_managed_policy(client, role_name, policy):
log.debug(
{"message": "Updating managed policy", "role_name": role_name, "policy": policy}
)
if policy.get("action") == "attach":
response = await sync_to_async(client.attach_role_policy)(
PolicyArn=policy["arn"], RoleName=role_name
)
elif policy.get("action") == "detach":
response = await sync_to_async(client.detach_role_policy)(
PolicyArn=policy["arn"], RoleName=role_name
)
else:
raise Exception("Unable to update managed policy.")
return response
async def update_assume_role_document(client, role_name, assume_role_doc):
log.debug(
{
"message": "Updating assume role doc",
"role_name": role_name,
"assume_role_doc": assume_role_doc,
}
)
response = None
if assume_role_doc.get("action", "") in ["create", "update"]:
response = await sync_to_async(client.update_assume_role_policy)(
RoleName=role_name,
PolicyDocument=assume_role_doc["assume_role_policy_document"],
)
return response
# Log or report result?
async def update_tags(client, role_name, tag):
log.debug({"message": "Updating tag", "role_name": role_name, "tag": tag})
if tag.get("action") == "add":
response = await sync_to_async(client.tag_role)(
RoleName=role_name, Tags=[{"Key": tag["key"], "Value": tag["value"]}]
)
elif tag.get("action") == "remove":
response = await sync_to_async(client.untag_role)(
RoleName=role_name, TagKeys=[tag["key"]]
)
else:
raise Exception("Unable to update tags.")
return response
|
py
|
1a5e5db6434e4e1e5766403f236a8b1fadf1ca75
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modoboa_imap_migration', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='migration',
name='_password',
field=models.CharField(max_length=255),
),
]
|
py
|
1a5e5dcc01e5a32e285b2d2076cb3f92cf341084
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2020 Snowflake Computing Inc. All right reserved.
#
import glob
import gzip
import os
import sys
import time
from filecmp import cmp
from logging import getLogger
import mock
import pytest
import requests
from snowflake.connector.constants import UTF8
from snowflake.connector.file_transfer_agent import SnowflakeFileTransferAgent
from ..generate_test_files import generate_k_lines_of_n_files
try:
from parameters import (CONNECTION_PARAMETERS_ADMIN)
except ImportError:
CONNECTION_PARAMETERS_ADMIN = {}
logger = getLogger(__name__)
# Mark every test in this module as a gcp test
pytestmark = pytest.mark.gcp
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_put_get_with_gcp(tmpdir, conn_cnx, db_parameters):
"""[gcp] Puts and Gets a small text using gcp."""
# create a data file
fname = str(tmpdir.join('test_put_get_with_gcp_token.txt.gz'))
with gzip.open(fname, 'wb') as f:
original_contents = "123,test1\n456,test2\n"
f.write(original_contents.encode(UTF8))
tmp_dir = str(tmpdir.mkdir('test_put_get_with_gcp_token'))
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
with cnx.cursor() as csr:
csr.execute("rm @~/snow32806")
csr.execute(
"create or replace table snow32806 (a int, b string)")
try:
csr.execute(
"put file://{} @%snow32806 auto_compress=true parallel=30".format(
fname))
rec = csr.fetchone()
assert rec[6] == 'UPLOADED'
csr.execute("copy into snow32806")
csr.execute(
"copy into @~/snow32806 from snow32806 "
"file_format=( format_name='common.public.csv' "
"compression='gzip')")
csr.execute(
"get @~/snow32806 file://{} pattern='snow32806.*'".format(
tmp_dir))
rec = csr.fetchone()
assert rec[0].startswith(
'snow32806'), 'A file downloaded by GET'
assert rec[1] == 36, 'Return right file size'
assert rec[2] == 'DOWNLOADED', 'Return DOWNLOADED status'
assert rec[3] == '', 'Return no error message'
finally:
csr.execute("drop table snow32806")
csr.execute("rm @~/snow32806")
files = glob.glob(os.path.join(tmp_dir, 'snow32806*'))
with gzip.open(files[0], 'rb') as fd:
contents = fd.read().decode(UTF8)
assert original_contents == contents, (
'Output is different from the original file')
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_put_copy_many_files_gcp(tmpdir, conn_cnx, db_parameters):
"""[gcp] Puts and Copies many files."""
# generates N files
number_of_files = 10
number_of_lines = 1000
tmp_dir = generate_k_lines_of_n_files(number_of_lines, number_of_files, tmp_dir=str(tmpdir.mkdir('data')))
files = os.path.join(tmp_dir, 'file*')
def run(csr, sql):
sql = sql.format(
files=files,
name=db_parameters['name'])
return csr.execute(sql).fetchall()
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
with cnx.cursor() as csr:
run(csr, """
create or replace table {name} (
aa int,
dt date,
ts timestamp,
tsltz timestamp_ltz,
tsntz timestamp_ntz,
tstz timestamp_tz,
pct float,
ratio number(6,2))
""")
try:
all_recs = run(csr, "put file://{files} @%{name}")
assert all([rec[6] == 'UPLOADED' for rec in all_recs])
run(csr, "copy into {name}")
rows = sum([rec[0] for rec in run(csr, "select count(*) from "
"{name}")])
assert rows == number_of_files * number_of_lines, \
'Number of rows'
finally:
run(csr, "drop table if exists {name}")
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_put_copy_duplicated_files_gcp(tmpdir, conn_cnx,
db_parameters):
"""[gcp] Puts and Copies duplicated files."""
# generates N files
number_of_files = 5
number_of_lines = 100
tmp_dir = generate_k_lines_of_n_files(number_of_lines, number_of_files, tmp_dir=str(tmpdir.mkdir('data')))
files = os.path.join(tmp_dir, 'file*')
def run(csr, sql):
sql = sql.format(
files=files,
name=db_parameters['name'])
return csr.execute(sql).fetchall()
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
with cnx.cursor() as csr:
run(csr, """
create or replace table {name} (
aa int,
dt date,
ts timestamp,
tsltz timestamp_ltz,
tsntz timestamp_ntz,
tstz timestamp_tz,
pct float,
ratio number(6,2))
""")
try:
success_cnt = 0
skipped_cnt = 0
for rec in run(csr, "put file://{files} @%{name}"):
logger.info('rec=%s', rec)
if rec[6] == 'UPLOADED':
success_cnt += 1
elif rec[6] == 'SKIPPED':
skipped_cnt += 1
assert success_cnt == number_of_files, 'uploaded files'
assert skipped_cnt == 0, 'skipped files'
deleted_cnt = 0
run(csr, "rm @%{name}/file0")
deleted_cnt += 1
run(csr, "rm @%{name}/file1")
deleted_cnt += 1
run(csr, "rm @%{name}/file2")
deleted_cnt += 1
success_cnt = 0
skipped_cnt = 0
for rec in run(csr, "put file://{files} @%{name}"):
logger.info('rec=%s', rec)
if rec[6] == 'UPLOADED':
success_cnt += 1
elif rec[6] == 'SKIPPED':
skipped_cnt += 1
assert success_cnt == number_of_files, \
'uploaded files in the second time'
assert skipped_cnt == 0, \
'skipped files in the second time'
run(csr, "copy into {name}")
rows = 0
for rec in run(csr, "select count(*) from {name}"):
rows += rec[0]
assert rows == number_of_files * number_of_lines, \
'Number of rows'
finally:
run(csr, "drop table if exists {name}")
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_put_get_large_files_gcp(tmpdir, conn_cnx, db_parameters):
"""[gcp] Puts and Gets Large files."""
number_of_files = 3
number_of_lines = 200000
tmp_dir = generate_k_lines_of_n_files(number_of_lines, number_of_files, tmp_dir=str(tmpdir.mkdir('data')))
files = os.path.join(tmp_dir, 'file*')
output_dir = os.path.join(tmp_dir, 'output_dir')
os.makedirs(output_dir)
class cb(object):
def __init__(self, filename, filesize, **_):
pass
def __call__(self, bytes_amount):
pass
def run(cnx, sql):
return cnx.cursor().execute(
sql.format(
files=files,
dir=db_parameters['name'],
output_dir=output_dir),
_put_callback_output_stream=sys.stdout,
_get_callback_output_stream=sys.stdout,
_get_callback=cb,
_put_callback=cb).fetchall()
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
try:
all_recs = run(cnx, "PUT file://{files} @~/{dir}")
assert all([rec[6] == 'UPLOADED' for rec in all_recs])
for _ in range(60):
for _ in range(100):
all_recs = run(cnx, "LIST @~/{dir}")
if len(all_recs) == number_of_files:
break
# you may not get the files right after PUT command
# due to the nature of gcs blob, which synchronizes
# data eventually.
time.sleep(1)
else:
# wait for another second and retry.
# this could happen if the files are partially available
# but not all.
time.sleep(1)
continue
break # success
else:
pytest.fail(
'cannot list all files. Potentially '
'PUT command missed uploading Files: {}'.format(all_recs))
all_recs = run(cnx, "GET @~/{dir} file://{output_dir}")
assert len(all_recs) == number_of_files
assert all([rec[2] == 'DOWNLOADED' for rec in all_recs])
finally:
run(cnx, "RM @~/{dir}")
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_get_gcp_file_object_http_400_error(tmpdir, conn_cnx, db_parameters):
fname = str(tmpdir.join('test_put_get_with_gcp_token.txt.gz'))
with gzip.open(fname, 'wb') as f:
original_contents = "123,test1\n456,test2\n"
f.write(original_contents.encode(UTF8))
tmp_dir = str(tmpdir.mkdir('test_put_get_with_gcp_token'))
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password']) as cnx:
with cnx.cursor() as csr:
csr.execute("rm @~/snow32807")
csr.execute(
"create or replace table snow32807 (a int, b string)")
try:
from requests import put, get
def mocked_put(*args, **kwargs):
if mocked_put.counter == 0:
mocked_put.counter += 1
exc = requests.exceptions.HTTPError(response=requests.Response())
exc.response.status_code = 400
raise exc
else:
return put(*args, **kwargs)
mocked_put.counter = 0
def mocked_file_agent(*args, **kwargs):
agent = SnowflakeFileTransferAgent(*args, **kwargs)
agent._update_file_metas_with_presigned_url = mock.MagicMock(
wraps=agent._update_file_metas_with_presigned_url
)
mocked_file_agent.agent = agent
return agent
with mock.patch('snowflake.connector.cursor.SnowflakeFileTransferAgent',
side_effect=mocked_file_agent):
with mock.patch('requests.put', side_effect=mocked_put):
csr.execute(
"put file://{} @%snow32807 auto_compress=true parallel=30".format(
fname))
assert mocked_file_agent.agent._update_file_metas_with_presigned_url.call_count == 2
rec = csr.fetchone()
assert rec[6] == 'UPLOADED'
csr.execute("copy into snow32807")
csr.execute(
"copy into @~/snow32807 from snow32807 "
"file_format=( format_name='common.public.csv' "
"compression='gzip')")
def mocked_get(*args, **kwargs):
if mocked_get.counter == 0:
mocked_get.counter += 1
exc = requests.exceptions.HTTPError(response=requests.Response())
exc.response.status_code = 400
raise exc
else:
return get(*args, **kwargs)
mocked_get.counter = 0
def mocked_file_agent(*args, **kwargs):
agent = SnowflakeFileTransferAgent(*args, **kwargs)
agent._update_file_metas_with_presigned_url = mock.MagicMock(
wraps=agent._update_file_metas_with_presigned_url
)
mocked_file_agent.agent = agent
return agent
with mock.patch('snowflake.connector.cursor.SnowflakeFileTransferAgent',
side_effect=mocked_file_agent):
with mock.patch('requests.get', side_effect=mocked_get):
csr.execute(
"get @~/snow32807 file://{} pattern='snow32807.*'".format(
tmp_dir))
assert mocked_file_agent.agent._update_file_metas_with_presigned_url.call_count == 2
rec = csr.fetchone()
assert rec[0].startswith(
'snow32807'), 'A file downloaded by GET'
assert rec[1] == 36, 'Return right file size'
assert rec[2] == 'DOWNLOADED', 'Return DOWNLOADED status'
assert rec[3] == '', 'Return no error message'
finally:
csr.execute("drop table snow32807")
csr.execute("rm @~/snow32807")
files = glob.glob(os.path.join(tmp_dir, 'snow32807*'))
with gzip.open(files[0], 'rb') as fd:
contents = fd.read().decode(UTF8)
assert original_contents == contents, (
'Output is different from the original file')
@pytest.mark.skipif(
not CONNECTION_PARAMETERS_ADMIN,
reason="Snowflake admin account is not accessible."
)
def test_auto_compress_off_gcp(tmpdir, conn_cnx, db_parameters):
"""[gcp] Puts and Gets a small text using gcp with no auto compression."""
fname = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data', 'example.json'))
with conn_cnx(
user=db_parameters['user'],
account=db_parameters['account'],
password=db_parameters['password'],
) as cnx:
with cnx.cursor() as cursor:
try:
cursor.execute("create or replace stage teststage")
cursor.execute("put file://{} @teststage auto_compress=false".format(fname))
cursor.execute("get @teststage file://{}".format(str(tmpdir)))
downloaded_file = os.path.join(str(tmpdir), 'example.json')
assert cmp(fname, downloaded_file)
finally:
cursor.execute("drop stage teststage")
|
py
|
1a5e5e70ee6671e7fae6a81fde5ff029dbc8f57c
|
import sys
import os
MIN_VERSION=(3,10)
if sys.version_info < MIN_VERSION:
print("At least %s.%s version of Python is required to run this file" % MIN_VERSION)
sys.exit(1)
def getFileLines(filePath):
with open(filePath) as f:
return f.readlines()
def parseInstructions(fileLines):
horizontal = 0
depth = 0
for line in fileLines:
instruction = line.split()
match instruction[0]:
case "forward":
horizontal = horizontal + int(instruction[1])
case "up":
depth = depth - int(instruction[1])
case "down":
depth = depth + int(instruction[1])
return horizontal * depth
def parseAimInstructions(fileLines):
aim = 0
depth = 0
horizontal = 0
for line in fileLines:
instruction = line.split()
match instruction[0]:
case "forward":
horizontal = horizontal + int(instruction[1])
depth = depth + (aim * int(instruction[1]))
case "up":
aim = aim - int(instruction[1])
case "down":
aim = aim + int(instruction[1])
return horizontal * depth
def main():
if len(sys.argv) <= 1:
print("Please specify the file to read instrcutions from")
return 0
elif not os.path.isfile(sys.argv[1]):
print ("File path provided is not a file or does not exist")
return 1
else:
instructions = getFileLines(sys.argv[1])
print("First Answer:", parseInstructions(instructions))
print ("Second Answer:", parseAimInstructions(instructions))
return 0
if __name__ == '__main__':
sys.exit(main())
|
py
|
1a5e5e7ad9bfffc1834519ba586427d7cfae5d76
|
# coding: utf-8
from defaults import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# this key is only used for dev localhost testing, not for production
SECRET_KEY = 'm*n5u7jgkbp2b5f&*hp#o+e1e33s^6&730wlpb#-g536l^4es-'
## LTI Parameters
LTI_DEBUG = True
CONSUMER_KEY = "__consumer_key__" # can be any random python string with enough length for OAuth
LTI_SECRET = "__lti_secret__" # can be any random python string with enough length for OAuth
INSTALLED_APPS_LOCAL = (
'django_nose',
)
INSTALLED_APPS += INSTALLED_APPS_LOCAL
# Use nose to run all tests
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Tell nose to measure coverage on the 'lti' and 'psa' apps
NOSE_ARGS = [
'--with-coverage',
'--cover-package=lti,psa,ct,fsm',
'--cover-inclusive',
]
try:
from local_conf import *
except ImportError:
print '''You must provide a settings/local_conf.py file,
e.g. by copying the provided local_conf_example.py'''
raise
|
py
|
1a5e5eba31feda19a20b492fd4e9fcfba7507d52
|
from conch.analysis.formants import FormantTrackFunction
import librosa
from conch.analysis.segments import FileSegment, SignalSegment
def test_formants_praat(base_filenames):
for f in base_filenames:
wavpath = f + '.wav'
func = FormantTrackFunction(time_step=0.01,
window_length=0.025, num_formants=5, max_frequency=5500)
formants = func(wavpath)
sig, sr = librosa.load(wavpath)
formants2 = func(SignalSegment(sig, sr))
# Things are not exact...
# assert formants == formants2
|
py
|
1a5e5ed2d57d736b35c8209c28db7ca23905f00c
|
"""
Main loop for state example
"""
from bottle import Bottle
from drink import Half
if __name__ == '__main__':
bottle = Bottle()
bottle.drink()
bottle.fill()
bottle.fill()
bottle.fill()
bottle.drink()
bottle.drink()
bottle.drink()
bottle.fill()
bottle.drink()
bottle.change_state(Half)
bottle.drink()
bottle.drink()
|
py
|
1a5e5ee5ac2c9e65b01c6221d387a6f1f4ffbbd4
|
# Classic non-recursive binary search
def bin_search(array: list, find: int) -> int:
start, end = 0, len(array) - 1
while start <= end:
# Get center of result
center = int((start + end) / 2)
if find == array[center]:
return center
elif find > array[center]:
start = center + 1
else:
end = center - 1
return -2
# Problem: the first line contains an integer 1 <= n <= 10^5 and an array A[1..n] of n distinct
# natural numbers not exceeding 10^9, given in ascending order; the second line contains an
# integer 1 <= k <= 10^5 and k natural numbers b1...bk, each not exceeding 10^9.
# For each i from 1 to k, output an index 1 <= j <= n such that A[j] = b_i, or -1 if no such j exists.
# Input:
# 5 1 5 8 12 13
# 5 8 1 23 1 11
# Output
# 3 1 -1 1 -1
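# Note on the output convention: bin_search returns a 0-based index on a hit
# (e.g. bin_search([1, 5, 8, 12, 13], 8) == 2) and -2 on a miss; main() prints the
# result + 1, so hits become the required 1-based indices and misses become -1.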
def main():
array_len, *array = map(int, input().split())
find_len, *find_array = map(int, input().split())
for find in find_array:
print(bin_search(array, find) + 1, end=" ")
if __name__ == '__main__':
main()
|
py
|
1a5e5fb217baebfcedf4e50cad55013f61e8299e
|
from .finam_stock_data import get_data
|
py
|
1a5e6169902f933bf0891a76c15d4e404b581b5e
|
#!/usr/bin/env python3
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
import sys
from apt import Cache
from catkin_pkg.packages import find_packages
from ros_buildfarm.argument import add_argument_os_code_name
from ros_buildfarm.argument import add_argument_os_name
from ros_buildfarm.argument import add_argument_output_dir
from ros_buildfarm.argument import add_argument_package_selection_args
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.argument import add_argument_skip_rosdep_keys
from ros_buildfarm.colcon import locate_packages
from ros_buildfarm.common import get_binary_package_versions
from ros_buildfarm.common import Scope
from rosdep2 import create_default_installer_context
from rosdep2.catkin_support import get_catkin_view
from rosdep2.catkin_support import resolve_for_os
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Lists available binary packages and versions which are '
'needed to satisfy rosdep keys for ROS packages in the workspace')
# Positional
add_argument_rosdistro_name(parser)
add_argument_os_name(parser)
add_argument_os_code_name(parser)
add_argument_output_dir(parser)
add_argument_package_selection_args(parser)
add_argument_skip_rosdep_keys(parser)
parser.add_argument(
'--package-root',
nargs='+',
help='The path to the directory containing packages')
args = parser.parse_args(argv)
workspace_root = args.package_root[-1]
os.chdir(workspace_root)
with Scope('SUBSECTION', 'mark packages with IGNORE files'):
all_packages = locate_packages(workspace_root)
selected_packages = all_packages
if args.package_selection_args:
print(
'Using package selection arguments:',
args.package_selection_args)
selected_packages = locate_packages(
workspace_root, extra_args=args.package_selection_args)
to_ignore = all_packages.keys() - selected_packages.keys()
print('Ignoring %d packages' % len(to_ignore))
for package in sorted(to_ignore):
print('-', package)
package_root = all_packages[package]
Path(package_root, 'COLCON_IGNORE').touch()
print('There are %d packages which meet selection criteria' %
len(selected_packages))
with Scope('SUBSECTION', 'Enumerating packages needed to build'):
# find all of the underlay packages
underlay_pkgs = {}
all_underlay_pkg_names = set()
for package_root in args.package_root[0:-1]:
print("Crawling for packages in '%s'" % package_root)
underlay_pkgs.update(find_packages(package_root))
# Check for a colcon index for non-ROS package detection
colcon_index = os.path.join(package_root, 'colcon-core', 'packages')
try:
all_underlay_pkg_names.update(os.listdir(colcon_index))
except FileNotFoundError:
pass
underlay_pkg_names = [pkg.name for pkg in underlay_pkgs.values()]
print('Found the following ROS underlay packages:')
for pkg_name in sorted(underlay_pkg_names):
print(' -', pkg_name)
# get direct build dependencies
package_root = args.package_root[-1]
non_ros_package_paths = set(
d for d in selected_packages.values()
if not os.path.isfile(os.path.join(d, 'package.xml')))
print("Crawling for packages in '%s'" % package_root)
pkgs = find_packages(package_root, exclude_paths=non_ros_package_paths)
pkg_names = [pkg.name for pkg in pkgs.values()]
print('Found the following ROS packages:')
for pkg_name in sorted(pkg_names):
print(' -', pkg_name)
# get build dependencies and map them to binary packages
all_pkgs = set(pkgs.values()).union(underlay_pkgs.values())
for pkg in all_pkgs:
pkg.evaluate_conditions(os.environ)
for pkg in all_pkgs:
for group_depend in pkg.group_depends:
if group_depend.evaluated_condition is not False:
group_depend.extract_group_members(all_pkgs)
dependency_keys_build = get_dependencies(
all_pkgs, 'build', _get_build_and_recursive_run_dependencies,
pkgs.values())
dependency_keys_test = get_dependencies(
all_pkgs, 'run and test', _get_test_and_recursive_run_dependencies,
pkgs.values())
if args.skip_rosdep_keys:
dependency_keys_build.difference_update(args.skip_rosdep_keys)
dependency_keys_test.difference_update(args.skip_rosdep_keys)
# remove all non-ROS packages and packages which are present but
# specifically ignored
every_package_name = all_packages.keys() | all_underlay_pkg_names
dependency_keys_build -= every_package_name
dependency_keys_test -= every_package_name
context = initialize_resolver(
args.rosdistro_name, args.os_name, args.os_code_name)
os_pkg_names_build = resolve_names(dependency_keys_build, **context)
os_pkg_names_test = resolve_names(dependency_keys_test, **context)
os_pkg_names_test -= os_pkg_names_build
with Scope('SUBSECTION', 'Resolving packages versions using apt cache'):
apt_cache = Cache()
os_pkg_versions = get_binary_package_versions(
apt_cache, os_pkg_names_build | os_pkg_names_test)
with open(os.path.join(args.output_dir, 'install_list_build.txt'), 'w') as out_file:
for package in sorted(os_pkg_names_build):
out_file.write('# break docker cache %s=%s\n' % (package, os_pkg_versions[package]))
out_file.write('%s\n' % (package))
with open(os.path.join(args.output_dir, 'install_list_test.txt'), 'w') as out_file:
for package in sorted(os_pkg_names_test):
out_file.write('# break docker cache %s=%s\n' % (package, os_pkg_versions[package]))
out_file.write('%s\n' % (package))
def get_dependencies(pkgs, label, get_dependencies_callback, target_pkgs):
pkg_names = [pkg.name for pkg in pkgs]
depend_names = set([])
for pkg in target_pkgs:
depend_names.update(
[d for d in get_dependencies_callback(pkg, pkgs)
if d not in pkg_names])
print('Identified the following %s dependencies ' % label +
'(ignoring packages available from source):')
for depend_name in sorted(depend_names):
print(' -', depend_name)
return depend_names
def _get_build_and_recursive_run_dependencies(pkg, pkgs):
depends = [
d.name for d in pkg.build_depends + pkg.buildtool_depends
if d.evaluated_condition is not False]
# include recursive run dependencies on other pkgs in the workspace
# if pkg A in the workspace build depends on pkg B in the workspace
# then the recursive run dependencies of pkg B need to be installed
# in order to build the workspace
other_pkgs_by_names = \
dict([(p.name, p) for p in pkgs if p.name != pkg.name])
run_depends_in_pkgs = \
set([d for d in depends if d in other_pkgs_by_names])
while run_depends_in_pkgs:
# pick first element from sorted order to ensure deterministic results
pkg_name = sorted(run_depends_in_pkgs).pop(0)
pkg = other_pkgs_by_names[pkg_name]
other_pkgs_by_names.pop(pkg_name)
run_depends_in_pkgs.remove(pkg_name)
# append run dependencies
run_depends = [
d.name for d in pkg.build_export_depends +
pkg.buildtool_export_depends + pkg.exec_depends
if d.evaluated_condition is not False]
# append group dependencies
run_depends += [
member for group in pkg.group_depends for member in group.members
if group.evaluated_condition is not False]
depends += run_depends
# consider recursive dependencies
run_depends_in_pkgs.update(
[d for d in run_depends if d in other_pkgs_by_names])
return depends
def _get_test_and_recursive_run_dependencies(pkg, pkgs):
depends = [
d.name for d in pkg.build_export_depends +
pkg.buildtool_export_depends + pkg.exec_depends + pkg.test_depends
if d.evaluated_condition is not False]
# include recursive run dependencies on other pkgs in the workspace
# if pkg A in the workspace test depends on pkg B in the workspace
# then the recursive run dependencies of pkg B need to be installed
# in order to test the workspace
other_pkgs_by_names = \
dict([(p.name, p) for p in pkgs if p.name != pkg.name])
run_depends_in_pkgs = \
set([d for d in depends if d in other_pkgs_by_names])
while run_depends_in_pkgs:
# pick first element from sorted order to ensure deterministic results
pkg_name = sorted(run_depends_in_pkgs).pop(0)
pkg = other_pkgs_by_names[pkg_name]
other_pkgs_by_names.pop(pkg_name)
run_depends_in_pkgs.remove(pkg_name)
# append run dependencies
run_depends = [
d.name for d in pkg.build_export_depends +
pkg.buildtool_export_depends + pkg.exec_depends
if d.evaluated_condition is not False]
# append group dependencies
run_depends += [
member for group in pkg.group_depends for member in group.members
if group.evaluated_condition is not False]
depends += run_depends
# consider recursive dependencies
run_depends_in_pkgs.update(
[d for d in run_depends if d in other_pkgs_by_names])
return depends
def initialize_resolver(rosdistro_name, os_name, os_code_name):
# resolve rosdep keys into binary package names
ctx = create_default_installer_context()
try:
installer_key = ctx.get_default_os_installer_key(os_name)
except KeyError:
raise RuntimeError(
"Could not determine the rosdep installer for '%s'" % os_name)
installer = ctx.get_installer(installer_key)
view = get_catkin_view(rosdistro_name, os_name, os_code_name, update=False)
return {
'os_name': os_name,
'os_code_name': os_code_name,
'installer': installer,
'view': view,
}
def resolve_names(rosdep_keys, os_name, os_code_name, view, installer):
debian_pkg_names = set([])
for rosdep_key in sorted(rosdep_keys):
try:
resolved_names = resolve_for_os(
rosdep_key, view, installer, os_name, os_code_name)
except KeyError:
raise RuntimeError(
"Could not resolve the rosdep key '%s'" % rosdep_key)
debian_pkg_names.update(resolved_names)
print('Resolved the dependencies to the following binary packages:')
for debian_pkg_name in sorted(debian_pkg_names):
print(' -', debian_pkg_name)
return debian_pkg_names
if __name__ == '__main__':
sys.exit(main())
|
py
|
1a5e6189af88e7c22acf3dfe18717b2c00278f49
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['ConstantTrend'] , ['NoCycle'] , ['MLP'] );
|
py
|
1a5e624cb9ad440aa323a9169ff3037a6f932d07
|
"""Opens a Youtube video in a web browser every 2 hours."""
import time
import webbrowser
total_breaks = 3
break_time = 60 * 60 * 2 #2 hours
music = ["https://youtu.be/s4EmxvQSpfA?t=1m50s",
"https://youtu.be/OD3F7J2PeYU?t=10s",
"https://youtu.be/YQHsXMglC9A?t=2m20s"]
print "You are starting at " + time.ctime()
for i in music:
time.sleep(break_time)
print "It is now " + time.ctime()
print "Break time!"
webbrowser.open(i)
|
py
|
1a5e646ac081b28b93c4da28e909f894ce4e728f
|
from bcc import BPF
syscall_regex = "^[Ss]y[Ss]_"
class BpfProgram():
def __init__(self, text):
self._contents = text
self._bpf = None
self._probes = None
self._perf_buffer_size = 64 * 1024
def bpf_instance(self):
return self._bpf
def prepare(self):
assert self._bpf is None
self._bpf = BPF(text=self._contents)
def attach_probes(self):
self._attach_socket_probes()
self._attach_process_probes()
self._bpf.attach_tracepoint(tp="sched:sched_process_fork", fn_name="on_fork")
def detach_probes(self):
self._bpf.detach_tracepoint(tp="sched:sched_process_fork")
self._bpf.cleanup()
def filter_pid(self, pid):
assert isinstance(pid, int)
print 'Filtering events from PID: ' + str(pid)
self._contents = self._contents.replace(
'//PID_FILTER//', str(pid))
def open_event_buffer(self, name, handler):
self._bpf[name].open_perf_buffer(handler, page_cnt=self._perf_buffer_size)
def _attach_probes_set(self, probes):
for event, (entry, exit) in probes.items():
if event.startswith('re_'):
event = event[3:]
entry is not None and self._bpf.attach_kprobe(event_re=event, fn_name=entry)
exit is not None and self._bpf.attach_kretprobe(event_re=event, fn_name=exit)
else:
entry is not None and self._bpf.attach_kprobe(event=event, fn_name=entry)
exit is not None and self._bpf.attach_kretprobe(event=event, fn_name=exit)
def _attach_process_probes(self):
self._attach_probes_set(self.get_probes()['process'])
def _attach_socket_probes(self):
self._attach_probes_set(self.get_probes()['socket'])
def get_probes(self):
if self._probes is not None:
return self._probes
socket_probes = {}
socket_probes['tcp_connect'] = ('entry__tcp_connect', 'exit__tcp_connect')
socket_probes['inet_csk_accept'] = (None, 'exit__inet_csk_accept')
socket_probes['sock_sendmsg'] = ('entry__sock_sendmsg', 'exit__sock_sendmsg')
socket_probes['kernel_sendpage'] = ('entry__sock_sendmsg', 'exit__sock_sendmsg')
socket_probes['sock_recvmsg'] = ('entry__sock_recvmsg', 'exit__sock_recvmsg')
syscall_probes = {}
# Prefix with 're' to indicate it is a regex.
syscall_probes['re_' + syscall_regex + 'wait'] = (None, 'exit__sys_wait')
self._probes = {'socket': socket_probes, 'process': syscall_probes}
return self._probes
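# Usage sketch (the probe source file, PID, buffer name and handler are illustrative,
# not part of this module):
#     program = BpfProgram(text=open('probes.c').read())
#     program.filter_pid(1234)   # must precede prepare(), which compiles the text
#     program.prepare()
#     program.attach_probes()
#     program.open_event_buffer('events', handler)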
|
py
|
1a5e64f55c65ff07fdca3ea7ca9ac7a3efd0f599
|
"""
.. moduleauthor:: Fabian Ball <[email protected]>
"""
from __future__ import absolute_import
from unittest import TestCase
import pysaucy2
class TestDatastructures(TestCase):
def test_IntArray_success(self):
ia = pysaucy2.datastructures.IntArray(range(10))
self.assertEqual(len(ia), 10)
for i in range(10):
self.assertEqual(ia[i], i)
self.assertEqual(len(pysaucy2.datastructures.IntArray()), 0)
def test_IntArray_wrong_type(self):
with self.assertRaises(TypeError):
pysaucy2.datastructures.IntArray([1, 2, '3'])
# Implicit type conversion... We can live with that
ia = pysaucy2.datastructures.IntArray([1.1, 2])
self.assertEqual(ia[0], 1)
def test_IntArray_assignment(self):
ia = pysaucy2.datastructures.IntArray([1, 2])
with self.assertRaises(TypeError):
ia[0] = 2
with self.assertRaises(TypeError):
del ia[1]
|
py
|
1a5e65391739a0501e9cf75d2b700010b801af89
|
#!/usr/bin/env python3
import sys,os,re
import json
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
N = int(sys.argv[1]) if len(sys.argv) >= 2 else 30
W = sys.argv[2] if len(sys.argv) >= 3 else None
re_r = re.compile(r'(.+)\t(.+)\t(.+)\t(.+)\t(.+)\t(.+)')
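# What this filter does (invocation is illustrative): fed file names on stdin, e.g.
#     cat file_list.txt | ./scan.py 30 some_word
# it scans the first N lines of each file for 6-column tab-separated rows, appends the
# first column of every matching row to the output line, and prints that line when W is
# unset or one of the matched first columns equals W.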
for line in sys.stdin:
try:
fname = line.strip()
o = fname
f = False
with open(fname,'r') as fp:
w = []
for cnt, l in enumerate(fp):
if cnt > N: break
mo = re_r.search(l)
if mo != None:
c = '\t' if o != '' else ''
o = o + c + mo.group(1)
if W == None or mo.group(1) == W:
f = True
if f: print(o)
except StopIteration:
print('EOF')
|
py
|
1a5e6645c636a4ec000a54e2df49b7ae1be0db57
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main hook file that is called by Juju.
"""
import json
import httplib
import os
import time
import socket
import subprocess
import sys
import urlparse
from charmhelpers.core import hookenv, host
from kubernetes_installer import KubernetesInstaller
from path import path
from lib.registrator import Registrator
hooks = hookenv.Hooks()
@hooks.hook('api-relation-changed')
def api_relation_changed():
"""
On the relation to the api server, this function determines the appropriate
architecture and the configured version to copy the kubernetes binary files
from the kubernetes-master charm and installs it locally on this machine.
"""
hookenv.log('Starting api-relation-changed')
charm_dir = path(hookenv.charm_dir())
# Get the package architecture, rather than the from the kernel (uname -m).
arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
kubernetes_bin_dir = path('/opt/kubernetes/bin')
# Get the version of kubernetes to install.
version = subprocess.check_output(['relation-get', 'version']).strip()
print('Relation version: ', version)
if not version:
print('No version present in the relation.')
exit(0)
version_file = charm_dir / '.version'
if version_file.exists():
previous_version = version_file.text()
print('Previous version: ', previous_version)
if version == previous_version:
exit(0)
# Can not download binaries while the service is running, so stop it.
# TODO: Figure out a better way to handle upgraded kubernetes binaries.
for service in ('kubelet', 'proxy'):
if host.service_running(service):
host.service_stop(service)
command = ['relation-get', 'private-address']
# Get the kubernetes-master address.
server = subprocess.check_output(command).strip()
print('Kubernetes master private address: ', server)
installer = KubernetesInstaller(arch, version, server, kubernetes_bin_dir)
installer.download()
installer.install()
# Write the most recently installed version number to the file.
version_file.write_text(version)
relation_changed()
@hooks.hook('etcd-relation-changed',
'network-relation-changed')
def relation_changed():
"""Connect the parts and go :-)
"""
template_data = get_template_data()
# Check required keys
for k in ('etcd_servers', 'kubeapi_server'):
if not template_data.get(k):
print('Missing data for %s %s' % (k, template_data))
return
print('Running with\n%s' % template_data)
# Setup kubernetes supplemental group
setup_kubernetes_group()
# Register upstart managed services
for n in ('kubelet', 'proxy'):
if render_upstart(n, template_data) or not host.service_running(n):
print('Starting %s' % n)
host.service_restart(n)
# Register machine via api
print('Registering machine')
register_machine(template_data['kubeapi_server'])
# Save the marker (for restarts to detect prev install)
template_data.save()
def get_template_data():
rels = hookenv.relations()
template_data = hookenv.Config()
template_data.CONFIG_FILE_NAME = '.unit-state'
overlay_type = get_scoped_rel_attr('network', rels, 'overlay_type')
etcd_servers = get_rel_hosts('etcd', rels, ('hostname', 'port'))
api_servers = get_rel_hosts('api', rels, ('hostname', 'port'))
# kubernetes master isn't ha yet.
if api_servers:
api_info = api_servers.pop()
api_servers = 'http://%s:%s' % (api_info[0], api_info[1])
template_data['overlay_type'] = overlay_type
template_data['kubelet_bind_addr'] = _bind_addr(
hookenv.unit_private_ip())
template_data['proxy_bind_addr'] = _bind_addr(
hookenv.unit_get('public-address'))
template_data['kubeapi_server'] = api_servers
template_data['etcd_servers'] = ','.join([
'http://%s:%s' % (s[0], s[1]) for s in sorted(etcd_servers)])
template_data['identifier'] = os.environ['JUJU_UNIT_NAME'].replace(
'/', '-')
return _encode(template_data)
def _bind_addr(addr):
if addr.replace('.', '').isdigit():
return addr
try:
return socket.gethostbyname(addr)
except socket.error:
raise ValueError('Could not resolve private address')
def _encode(d):
for k, v in d.items():
if isinstance(v, unicode):
d[k] = v.encode('utf8')
return d
def get_scoped_rel_attr(rel_name, rels, attr):
private_ip = hookenv.unit_private_ip()
for r, data in rels.get(rel_name, {}).items():
for unit_id, unit_data in data.items():
if unit_data.get('private-address') != private_ip:
continue
if unit_data.get(attr):
return unit_data.get(attr)
def get_rel_hosts(rel_name, rels, keys=('private-address',)):
hosts = []
for r, data in rels.get(rel_name, {}).items():
for unit_id, unit_data in data.items():
if unit_id == hookenv.local_unit():
continue
values = [unit_data.get(k) for k in keys]
if not all(values):
continue
hosts.append(len(values) == 1 and values[0] or values)
return hosts
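# With keys=('hostname', 'port') this yields entries like ['10.0.0.2', '4001'] (values
# illustrative); with the default single key it yields a flat list of addresses, since a
# one-element values list is collapsed to values[0] above.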
def render_upstart(name, data):
tmpl_path = os.path.join(
os.environ.get('CHARM_DIR'), 'files', '%s.upstart.tmpl' % name)
with open(tmpl_path) as fh:
tmpl = fh.read()
rendered = tmpl % data
tgt_path = '/etc/init/%s.conf' % name
if os.path.exists(tgt_path):
with open(tgt_path) as fh:
contents = fh.read()
if contents == rendered:
return False
with open(tgt_path, 'w') as fh:
fh.write(rendered)
return True
def register_machine(apiserver, retry=False):
parsed = urlparse.urlparse(apiserver)
# identity = hookenv.local_unit().replace('/', '-')
private_address = hookenv.unit_private_ip()
with open('/proc/meminfo') as fh:
info = fh.readline()
mem = info.strip().split(':')[1].strip().split()[0]
cpus = os.sysconf('SC_NPROCESSORS_ONLN')
registration_request = Registrator()
registration_request.data['Kind'] = 'Minion'
registration_request.data['id'] = private_address
registration_request.data['name'] = private_address
registration_request.data['metadata']['name'] = private_address
registration_request.data['spec']['capacity']['mem'] = mem + ' K'
registration_request.data['spec']['capacity']['cpu'] = cpus
registration_request.data['spec']['externalID'] = private_address
registration_request.data['status']['hostIP'] = private_address
response, result = registration_request.register(parsed.hostname,
parsed.port,
'/api/v1beta3/nodes')
print(response)
try:
registration_request.command_succeeded(response, result)
except ValueError:
# This happens when we have already registered
# for now this is OK
pass
def setup_kubernetes_group():
output = subprocess.check_output(['groups', 'kubernetes'])
# TODO: check group exists
if 'docker' not in output:
subprocess.check_output(
['usermod', '-a', '-G', 'docker', 'kubernetes'])
if __name__ == '__main__':
hooks.execute(sys.argv)
|
py
|
1a5e672aed86a0cfe42f99e0175e2946e1de29bc
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import yaml
from mistral_lib import actions
from swiftclient import exceptions as swiftexceptions
from tripleo_common.actions import heat_capabilities
from tripleo_common.tests import base
MAPPING_YAML_CONTENTS = """topics:
- title: Fake Single Environment Group Configuration
description:
environment_groups:
- title:
description: Random fake string of text
environments:
- file: /path/to/network-isolation.json
title: Default Configuration
description:
- title: Fake Multiple Environment Group Configuration
description:
environment_groups:
- title: Random Fake 1
description: Random fake string of text
environments:
- file: /path/to/ceph-storage-env.yaml
title: Fake1
description: Random fake string of text
- title: Random Fake 2
description:
environments:
- file: /path/to/poc-custom-env.yaml
title: Fake2
description:
"""
MAPPING_JSON_CONTENTS = """{
"Fake Multiple Environment Group Configuration": {
"description": null,
"environment_groups": [
{
"description": "Random fake string of text",
"environments": [
{
"description": "Random fake string of text",
"enabled": false,
"file": "/path/to/ceph-storage-env.yaml",
"title": "Fake1"
}
],
"title": "Random Fake 1"
},
{
"description": null,
"environments": [
{
"description": null,
"enabled": false,
"file": "/path/to/poc-custom-env.yaml",
"title": "Fake2"
}
],
"title": "Random Fake 2"
}
],
"title": "Fake Multiple Environment Group Configuration"
},
"Fake Single Environment Group Configuration": {
"description": null,
"environment_groups": [
{
"description": "Random fake string of text",
"environments": [
{
"description": null,
"enabled": true,
"file": "/path/to/network-isolation.json",
"title": "Default Configuration"
}
],
"title": null
}
],
"title": "Fake Single Environment Group Configuration"
},
"Other": {
"description": null,
"environment_groups": [
{
"description": null,
"environments": [
{
"description": "Enable /path/to/environments/custom.yaml environment",
"enabled": false,
"file": "/path/to/environments/custom.yaml",
"title": "/path/to/environments/custom.yaml",
}
],
"title": "/path/to/environments/custom.yaml",
},
{
"description": null,
"environments": [
{
"description": "Enable /path/to/environments/custom2.yaml environment",
"enabled": false,
"file": "/path/to/environments/custom2.yaml",
"title": "/path/to/environments/custom2.yaml",
}
],
"title": "/path/to/environments/custom2.yaml",
}
],
"title": "Other"
}
}
"""
class GetCapabilitiesActionTest(base.TestCase):
def setUp(self):
super(GetCapabilitiesActionTest, self).setUp()
self.container_name = 'test-container'
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_yaml_error(self, get_obj_client_mock):
mock_ctx = mock.MagicMock()
# setup swift
swift = mock.MagicMock()
swift.get_object.return_value = mock.Mock(side_effect=ValueError)
get_obj_client_mock.return_value = swift
action = heat_capabilities.GetCapabilitiesAction(self.container_name)
expected = actions.Result(
data=None,
error="Error parsing capabilities-map.yaml.")
self.assertEqual(expected, action.run(mock_ctx))
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_env_missing(self, get_obj_client_mock):
mock_ctx = mock.MagicMock()
# setup swift
swift = mock.MagicMock()
swift.get_object.side_effect = (
({}, MAPPING_YAML_CONTENTS),
swiftexceptions.ClientException(self.container_name)
)
get_obj_client_mock.return_value = swift
action = heat_capabilities.GetCapabilitiesAction(self.container_name)
expected = actions.Result(
data=None,
error="Error retrieving environment for plan test-container: "
"test-container")
self.assertEqual(expected, action.run(mock_ctx))
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run(self, get_obj_client_mock):
mock_ctx = mock.MagicMock()
# setup swift
swift = mock.MagicMock()
mock_env = """
template: overcloud
environments:
- path: /path/to/network-isolation.json
"""
swift.get_object.side_effect = (
({}, MAPPING_YAML_CONTENTS),
({}, mock_env)
)
swift_files_data = ({
u'x-container-meta-usage-tripleo': u'plan',
u'content-length': u'54271', u'x-container-object-count': u'3',
u'accept-ranges': u'bytes', u'x-storage-policy': u'Policy-0',
u'date': u'Wed, 31 Aug 2016 16:04:37 GMT',
u'x-timestamp': u'1471025600.02126',
u'x-trans-id': u'txebb37f980dbc4e4f991dc-0057c70015',
u'x-container-bytes-used': u'970557',
u'content-type': u'application/json; charset=utf-8'}, [{
u'bytes': 808,
u'last_modified': u'2016-08-12T18:13:22.231760',
u'hash': u'2df2606ed8b866806b162ab3fa9a77ea',
u'name': 'all-nodes-validation.yaml',
u'content_type': u'application/octet-stream'
}, {
u'bytes': 1808,
u'last_modified': u'2016-08-13T18:13:22.231760',
u'hash': u'3df2606ed8b866806b162ab3fa9a77ea',
u'name': '/path/to/environments/custom.yaml',
u'content_type': u'application/octet-stream'
}, {
u'bytes': 2808,
u'last_modified': u'2016-07-13T18:13:22.231760',
u'hash': u'4df2606ed8b866806b162ab3fa9a77ea',
u'name': '/path/to/environments/custom2.yaml',
u'content_type': u'application/octet-stream'
}])
swift.get_container.return_value = swift_files_data
get_obj_client_mock.return_value = swift
action = heat_capabilities.GetCapabilitiesAction(self.container_name)
yaml_mapping = yaml.safe_load(MAPPING_JSON_CONTENTS)
self.assertEqual(yaml_mapping, action.run(mock_ctx))
class UpdateCapabilitiesActionTest(base.TestCase):
def setUp(self,):
super(UpdateCapabilitiesActionTest, self).setUp()
self.container_name = 'test-container'
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_delete')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run(self, get_object_client_mock, mock_cache):
mock_ctx = mock.MagicMock()
# setup swift
swift = mock.MagicMock()
mocked_env = """
name: test-container
environments:
- path: /path/to/overcloud-default-env.yaml
- path: /path/to/ceph-storage-env.yaml
"""
swift.get_object.return_value = ({}, mocked_env)
get_object_client_mock.return_value = swift
environments = {
'/path/to/ceph-storage-env.yaml': False,
'/path/to/network-isolation.json': False,
'/path/to/poc-custom-env.yaml': True
}
action = heat_capabilities.UpdateCapabilitiesAction(
environments, self.container_name)
self.assertEqual({
'name': 'test-container',
'environments': [
{'path': '/path/to/overcloud-default-env.yaml'},
{'path': '/path/to/poc-custom-env.yaml'}
]},
action.run(mock_ctx))
mock_cache.assert_called_once_with(
mock_ctx,
self.container_name,
"tripleo.parameters.get"
)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_delete')
@mock.patch(
'tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_purge_missing(self, get_object_client_mock, mock_cache):
mock_ctx = mock.MagicMock()
# setup swift
swift = mock.MagicMock()
mocked_env = """
name: test-container
environments:
- path: /path/to/overcloud-default-env.yaml
- path: /path/to/ceph-storage-env.yaml
"""
swift.get_object.return_value = ({}, mocked_env)
get_object_client_mock.return_value = swift
environments = {
'/path/to/overcloud-default-env.yaml': True,
'/path/to/network-isolation.json': False,
'/path/to/poc-custom-env.yaml': True
}
action = heat_capabilities.UpdateCapabilitiesAction(
environments, self.container_name, True)
self.assertEqual({
'name': 'test-container',
'environments': [
{'path': '/path/to/overcloud-default-env.yaml'},
{'path': '/path/to/poc-custom-env.yaml'}
]},
action.run(mock_ctx))
mock_cache.assert_called_once_with(
mock_ctx,
self.container_name,
"tripleo.parameters.get"
)
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_env_missing(self, get_obj_client_mock):
mock_ctx = mock.MagicMock()
# setup swift
swift = mock.MagicMock()
swift.get_object.side_effect = (
swiftexceptions.ClientException(self.container_name))
get_obj_client_mock.return_value = swift
action = heat_capabilities.UpdateCapabilitiesAction(
{}, self.container_name)
expected = actions.Result(
data=None,
error="Error retrieving environment for plan test-container: "
"test-container"
)
self.assertEqual(expected, action.run(mock_ctx))
|
py
|
1a5e679aa391c4056f238dfa6587c8e374a067fe
|
from pygeppetto.model import GeppettoModel
from pygeppetto.model.exceptions import GeppettoModelException
class QueryNotFoundException(GeppettoModelException): pass
def get_query(query_path, model: GeppettoModel):
data_source = None
for token in query_path.split('.'):
if data_source is None:
try:
return next(query for query in model.queries if query.id == token)
except StopIteration:
try:
data_source = next(ds for ds in model.dataSources if ds.id == token)
except StopIteration:
raise QueryNotFoundException("Query `{}` not found in model.".format(query_path))
else:
try:
return next(query for query in data_source.queries if query.id == token)
except StopIteration:
raise QueryNotFoundException("Query `{}` not found in model.".format(query_path))
else:
raise QueryNotFoundException("Query `{}` not found in model.".format(query_path))
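# Usage sketch (identifiers are illustrative):
#     query = get_query('myDataSource.myQuery', model)
# A bare id such as 'myQuery' is looked up among model.queries; a dotted path resolves the
# data source first and then the query inside it, raising QueryNotFoundException otherwise.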
|
py
|
1a5e67ba01bb5575537f43f8af5f2c7ab07bb082
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name(s) of the copyright holder(s) nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import torch
import torch.autograd
import numpy as np
from .binding import einsum
from ..common import normalize_subscript
class EinsumFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, equation, input_0, input_1=None):
equation, isBinary = normalize_subscript(equation)
if isBinary and input_1 is None:
raise RuntimeError('The subscript indicates two inputs, but only one was passed')
if not isBinary and input_1 is not None:
raise RuntimeError('The subscript indicates one input, but two were passed')
if input_1 is None:
input_1 = input_0.new_empty((1,))
output = einsum(equation, input_0, input_1, False, False)
if isBinary:
ctx.save_for_backward(input_0, input_1)
ctx.equation = equation
ctx.isBinary = isBinary
return output
@staticmethod
def backward(ctx, grad_output):
equation = ctx.equation
lhs, modeC = equation.split('->')
if ctx.isBinary:
input_0, input_1 = ctx.saved_tensors
conjugate = False
if torch.is_complex(input_0) or torch.is_complex(input_1):
conjugate = True
modeA, modeB = lhs.split(',')
d_input_0 = einsum(modeC + ',' + modeB + '->' + modeA, grad_output,
input_1, False, conjugate)
d_input_1 = einsum(modeA + ',' + modeC + '->' + modeB, input_0,
grad_output, conjugate, False)
return None, d_input_0, d_input_1
else:
dummy = grad_output.new_empty((1,))
d_input = einsum(modeC + '->' + lhs, grad_output, dummy, False, False)
return None, d_input
class Einsum(torch.nn.Module):
def __init__(self, equation):
super(Einsum, self).__init__()
self.equation = equation
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, input_0, input_1):
return EinsumFunction.apply(self.equation, input_0, input_1)
def _compute_target_tensor(in0, in1, target):
result = in0[:-1] + in1[:-1] + in1[-1] + in0[-1]
# remove duplicates
duplicates = set(in0) & set(in1)
for elem in duplicates:
result = result.replace(elem, '')
# reorder target modes like target
result = list(result)
for i in range(len(result)):
if result[i] not in target: continue
for j in range(i):
if result[j] not in target: continue
if target.index(result[j]) > target.index(result[i]):
result[i], result[j] = result[j], result[i]
return ''.join(result)
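# Example of the mode bookkeeping above (modes are illustrative):
#     _compute_target_tensor('ij', 'jk', 'il') -> 'ik'
# the contracted mode 'j' appears in both inputs and is dropped, and the surviving modes
# are reordered to follow the final target where possible.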
def EinsumGeneral(equation, *tensors, **kwargs):
tensors = list(tensors)
equation, isBinary = normalize_subscript(equation)
path = np.einsum_path(equation,
*[np.broadcast_to(np.nan, t.shape) for t in tensors],
**kwargs)
path = path[0][1:]
equation = equation.split('->')
eqs = equation[0].split(',')
target = equation[1]
for step in path:
if len(step) == 1:
result = EinsumFunction.apply(eqs[0] + '->' + target, tensors[0])
continue
assert step[0] < step[1]
in0 = tensors[step[0]]
in1 = tensors[step[1]]
tensors.pop(step[1])
tensors.pop(step[0])
tgt = _compute_target_tensor(eqs[step[0]], eqs[step[1]], target)
assert tgt != ""
eq = eqs[step[0]] + ',' + eqs[step[1]] + '->' + tgt
eqs.pop(step[1])
eqs.pop(step[0])
eqs.append(tgt)
result = EinsumFunction.apply(eq, in0, in1)
tensors.append(result)
return result
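# Usage sketch (shapes and names are illustrative); contractions are performed pairwise in
# the order chosen by np.einsum_path:
#     a, b, c = torch.randn(8, 16), torch.randn(16, 32), torch.randn(32, 4)
#     out = EinsumGeneral('ij,jk,kl->il', a, b, c)   # result has shape (8, 4)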
|
py
|
1a5e67d73f3bc14ea19d89a754b6a2f66c7333ab
|
# coding: utf-8
"""
fn
The open source serverless platform.
OpenAPI spec version: 0.2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .app import App
from .app_wrapper import AppWrapper
from .apps_wrapper import AppsWrapper
from .call import Call
from .call_wrapper import CallWrapper
from .calls_wrapper import CallsWrapper
from .error import Error
from .error_body import ErrorBody
from .log import Log
from .log_wrapper import LogWrapper
from .route import Route
from .route_wrapper import RouteWrapper
from .routes_wrapper import RoutesWrapper
from .version import Version
|
py
|
1a5e69b1d097c5200f3fbcceeb4b4a1e0e14cd23
|
print(len("hello"))
print(len([1,2,3,4]))
#lists
vegan_no_nos = ["pork","taco","everything","cowFarts"]
if "taco" in vegan_no_nos:
print("NOT SAFE")
else:
print("SAFE")
vegan_no_nos = ["pork","taco","everything","cowFarts"]
print(vegan_no_nos[-1])
print(vegan_no_nos[-2])
vegan_no_nos = ["pork","taco","everything","cowFarts"]
print(vegan_no_nos[0:2:1])
print(vegan_no_nos[2:])
print(vegan_no_nos[2::])
# slice assignment
colors = ["Red","Blue","Magenta","Mustard","Amethyst"]
colors[2:4] = ["Purple","Black"]
print(colors)
print(colors.count("Red"))
myNum = '3'
myNum = myNum.zfill(6)
print(myNum)
myLaugh = ".".join('LOOOOOOOL')
print(myLaugh)
myNewLaugh = myLaugh.replace("L","HOOOOO")
myNewNewLaugh = myNewLaugh.replace(".","HO")
print(myNewNewLaugh)
#dictionaries
person = {"first":"Henry","second":"Bob"}
random = {
"apple":"red",
"name":"butters",
"age":"6 months",
"breed": "Silkie"
}
stuff = {
True: 34,
100: "AWESOME",
}
print("apple" in random)
print("butters" in random)
print(random["breed"])
random["age"] = 12
print(random["age"])
print(random.get("NotAKey","haha"))
#set
languages = {'ruby', 'python', 'javascript'}
print(languages)
languages = {'ruby', 'python', 'javascript', 'ruby'}
print(languages)
numbers = [1,2,5,7,4,1,3,45,65,7,87,5,34,5,6,65,6,7,8,9,67,5,3,343,4,55,6,7,8,9,3,1,2,3,4,44,45,5,6,7,8,8,8,8,8,86,235,5456,7,7587]
print(set(numbers))
# these operators only work on sets, but the corresponding methods accept any iterable and turn it into a set
lemon = {"Sour","Yellow","Fruit","Bumpy"}
banana = {"Sweet","Yellow","Fruit","Smooth"}
print(lemon | banana) # union: items in either set
print(lemon | banana | {"Yucky","Smelly"}) # union of three sets
print(lemon & banana) # intersection: items in both sets
print(lemon - banana) # difference: in lemon but not banana
print(banana - lemon) # difference: in banana but not lemon
print(banana ^ lemon) # symmetric difference: items in exactly one set
#tuple
colors = ("red","yellow","green")
print(type(colors))
myTuple = tuple([1,2,3,4])
print(type(myTuple))
#comprehensions
nums = [1,2,3,4,5,6,7,8,9,10,11,12,13]
evens =[]
for num in nums:
if num %2 == 0:
evens.append(num)
print(evens)
evens = [num for num in nums if num % 2 == 0]
print(evens)
doubled = [num * 2 for num in nums]
print(doubled)
print([n*n for n in [2,4,6,8]])
print([char.upper() + '.' for char in 'lmafo'])
print([[0 for y in range(3)] for x in range(3)])
print({day:0 for day in "MTWRFSU"})
|
py
|
1a5e6a790a82a0b1de9a0377263cf62b30db49b6
|
# Authors: Sylvain Marie <[email protected]>
#
# Copyright (c) Schneider Electric Industries, 2019. All right reserved.
import sys
import pytest
@pytest.mark.skipif(sys.version_info < (3, 6), reason="member type hints not supported in python < 3.6")
def test_issue_51():
from ._test_py36 import test_issue_51
A = test_issue_51()
a = A()
|
py
|
1a5e6ae1221d64d1270bd4e797a9dbdb731b78d4
|
"""
=======================================
FDR correction on T-test on sensor data
=======================================
This example tests whether the evoked response significantly deviates from 0.
The multiple comparisons problem is addressed with
False Discovery Rate (FDR) correction.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD-3-Clause
# %%
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.stats import bonferroni_correction, fdr_correction
print(__doc__)
# %%
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)[:30]
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
# %%
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
X = epochs.get_data() # as 3D matrix
X = X[:, 0, :] # take only one channel to get a 2D array
# %%
# Compute statistic
T, pval = stats.ttest_1samp(X, 0)
alpha = 0.05
n_samples, n_tests = X.shape
threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha=alpha)
threshold_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
threshold_fdr = np.min(np.abs(T)[reject_fdr])
# %%
# Plot
times = 1e3 * epochs.times
plt.close('all')
plt.plot(times, T, 'k', label='T-stat')
xmin, xmax = plt.xlim()
plt.hlines(threshold_uncorrected, xmin, xmax, linestyle='--', colors='k',
label='p=0.05 (uncorrected)', linewidth=2)
plt.hlines(threshold_bonferroni, xmin, xmax, linestyle='--', colors='r',
label='p=0.05 (Bonferroni)', linewidth=2)
plt.hlines(threshold_fdr, xmin, xmax, linestyle='--', colors='b',
label='p=0.05 (FDR)', linewidth=2)
plt.legend()
plt.xlabel("Time (ms)")
plt.ylabel("T-stat")
plt.show()
|
py
|
1a5e6b601f63024e1895c6d4b10e95fe8686ab4e
|
"""Place and route utilities.
"""
from __future__ import absolute_import
import pickle
from six import iteritems
from collections import defaultdict
from rig.netlist import Net
from rig.place_and_route.constraints import (LocationConstraint,
RouteEndpointConstraint,
SameChipConstraint)
from nengo_spinnaker.builder import Model
from nengo_spinnaker.node_io import Ethernet
def create_network_netlist(network, n_steps, fp, dt=0.001):
"""Create a netlist of a network running for a number of steps, dump that
netlist to file.
"""
# Build the network, assuming EthernetIO
model = Model(dt)
node_io = Ethernet()
model.build(network, **node_io.builder_kwargs)
# Build the netlist
netlist = model.make_netlist(n_steps).as_rig_arguments()
pickle_netlist(netlist, fp)
def pickle_netlist(netlist_dict, fp, **kwargs):
"""Dump a pickle of a netlist to a file.
This function replaces all vertices with `object` instances so that
nengo-specific or project-specific dependencies are not included.
"""
# {old_vertex: new_vertex, ...}
new_vertices = defaultdict(object)
netlist_dict["vertices_resources"] = {
new_vertices[vertex]: resources
for (vertex, resources)
in iteritems(netlist_dict["vertices_resources"])
}
netlist_dict["nets"] = [
Net(new_vertices[net.source],
[new_vertices[sink] for sink in net.sinks],
net.weight)
for net in netlist_dict["nets"]
]
old_constraints = netlist_dict["constraints"]
netlist_dict["constraints"] = []
for constraint in old_constraints:
if isinstance(constraint, LocationConstraint):
netlist_dict["constraints"].append(
LocationConstraint(new_vertices[constraint.vertex],
constraint.location))
elif isinstance(constraint, RouteEndpointConstraint):
netlist_dict["constraints"].append(
RouteEndpointConstraint(new_vertices[constraint.vertex],
constraint.route))
elif isinstance(constraint, SameChipConstraint):
# Get the new vertices
vs = [new_vertices[v] for v in constraint.vertices]
netlist_dict["constraints"].append(SameChipConstraint(vs))
else:
netlist_dict["constraints"].append(constraint)
pickle.dump(netlist_dict, fp, **kwargs)
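# Usage sketch (file name and step count are illustrative):
#     with open('netlist.pkl', 'wb') as fp:
#         create_network_netlist(nengo_network, n_steps=1000, fp=fp)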
|
py
|
1a5e6cde2bad5a3f0c2ba693aa104ed1e81657b9
|
# -*- coding: utf-8 -*-
"""
chemspipy.api
~~~~~~~~~~~~~
Core API for interacting with ChemSpider web services.
:copyright: Copyright 2014 by Matt Swain.
:license: MIT, see LICENSE file for more details.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from base64 import b64decode
import logging
import sys
import warnings
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
import requests
import six
from . import __version__
from .errors import ChemSpiPyError, ChemSpiPyParseError, ChemSpiPyAuthError, ChemSpiPyServerError
from .errors import ChemSpiPyNotFoundError
from .objects import Compound, Spectrum
from .search import Results
log = logging.getLogger(__name__)
#: 2D coordinate dimensions
MOL2D = '2d'
#: 3D coordinate dimensions
MOL3D = '3d'
#: Both coordinate dimensions
BOTH = 'both'
#: Ascending sort direction
ASCENDING = 'ascending'
#: Descending sort direction
DESCENDING = 'descending'
#: CSID sort order
CSID = 'csid'
#: Mass defect sort order
MASS_DEFECT = 'mass_defect'
#: Molecular weight sort order
MOLECULAR_WEIGHT = 'molecular_weight'
#: Reference count sort order
REFERENCE_COUNT = 'reference_count'
#: Datasource count sort order
DATASOURCE_COUNT = 'datasource_count'
#: Pubmed count sort order
PUBMED_COUNT = 'pubmed_count'
#: RSC count sort order
RSC_COUNT = 'rsc_count'
#: Coordinate dimensions
DIMENSIONS = {
MOL2D: 'e2D',
MOL3D: 'e3D',
BOTH: 'eBoth'
}
#: Sort directions
DIRECTIONS = {
ASCENDING: 'eAscending',
DESCENDING: 'eDescending'
}
#: Sort orders
ORDERS = {
CSID: 'eCSID',
MASS_DEFECT: 'eMassDefect',
MOLECULAR_WEIGHT: 'eMolecularWeight',
REFERENCE_COUNT: 'eReferenceCount',
DATASOURCE_COUNT: 'eDataSourceCount',
PUBMED_COUNT: 'ePubMedCount',
RSC_COUNT: 'eRscCount'
}
#: API to python field mappings
FIELDS = {
'CSID': ('csid', int),
'csid': ('csid', int),
'MF': ('molecular_formula', six.text_type),
'SMILES': ('smiles', six.text_type),
'InChI': ('inchi', six.text_type),
'InChIKey': ('inchikey', six.text_type),
'AverageMass': ('average_mass', float),
'MolecularWeight': ('molecular_weight', float),
'MonoisotopicMass': ('monoisotopic_mass', float),
'NominalMass': ('nominal_mass', float),
'ALogP': ('alogp', float),
'XLogP': ('xlogp', float),
'CommonName': ('common_name', six.text_type),
'MOL2d': ('mol_2d', six.text_type),
'MOL3d': ('mol_3d', six.text_type),
'ReferenceCount': ('reference_count', int),
'DataSourceCount': ('datasource_count', int),
'PubMedCount': ('pubmed_count', int),
'RSCCount': ('rsc_count', int),
'ExternalReferences': ('external_references', list),
'ds_name': ('datasource_name', six.text_type),
'ds_url': ('datasource_url', six.text_type),
'ext_id': ('external_id', six.text_type),
'ext_url': ('external_url', six.text_type),
'Status': ('status', six.text_type),
'Count': ('count', int),
'Message': ('message', six.text_type),
'Elapsed': ('elapsed', six.text_type),
'spc_id': ('spectrum_id', int),
'spc_type': ('spectrum_type', six.text_type),
'file_name': ('file_name', six.text_type),
'comments': ('comments', six.text_type),
'original_url': ('original_url', six.text_type),
'submitted_date': ('submitted_date', six.text_type),
}
class BaseChemSpider(object):
def __init__(self, security_token=None, user_agent=None, api_url=None):
"""
:param string security_token: (Optional) Your ChemSpider security token.
:param string user_agent: (Optional) Identify your application to ChemSpider servers.
:param string api_url: (Optional) Alternative API server.
"""
log.debug('Initializing ChemSpider')
self.api_url = api_url if api_url else 'https://www.chemspider.com'
self.http = requests.session()
self.http.headers['User-Agent'] = user_agent if user_agent else 'ChemSpiPy/%s Python/%s ' % (__version__, sys.version.split()[0])
self.security_token = security_token
def request(self, api, endpoint, **params):
"""Construct API request and return the XML response.
:param string api: The specific ChemSpider API to call (MassSpec, Search, Spectra, InChI).
:param string endpoint: ChemSpider API endpoint.
:param params: (Optional) Parameters for the ChemSpider endpoint as keyword arguments.
:rtype: xml tree
"""
url = '%s/%s.asmx/%s' % (self.api_url, api, endpoint)
log.debug('Request: %s %s', url, params)
params['token'] = self.security_token
try:
response = self.http.post(url, data=params)
except requests.RequestException as e:
raise ChemSpiPyError(six.text_type(e))
if response.status_code == 500:
if 'Missing parameter: token.' in response.text:
raise ChemSpiPyAuthError('Endpoint requires a security token.')
elif 'Error converting data type nvarchar to uniqueidentifier' in response.text:
# Generally when supplying a security token with incorrect format
raise ChemSpiPyAuthError('Invalid security token. Did you copy the entire token?')
elif 'Unauthorized web service usage' in response.text:
# Fake/incorrect token (but in correct format)
raise ChemSpiPyAuthError(response.text)
elif 'Unable to get record details' in response.text:
# Generally when requesting a non-existent CSID
raise ChemSpiPyNotFoundError(response.text)
elif 'Unable to get records spectra' in response.text:
# No spectra for a CSID, shouldn't be an exception
return []
else:
raise ChemSpiPyServerError(response.text)
try:
tree = etree.fromstring(response.content)
except etree.ParseError as e:
raise ChemSpiPyParseError('Unable to parse XML response: %s' % e)
return tree
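# Usage sketch (the token value is a placeholder; the API/endpoint names are the ones used
# by the subclasses below):
#     cs = BaseChemSpider(security_token='YOUR-TOKEN')
#     tree = cs.request('MassSpecApi', 'GetDatabases')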
def construct_api_url(self, api, endpoint, **params):
"""Construct a Chemspider API url, encoded, with parameters as a GET querystring.
:param string api: The specific ChemSpider API to call (MassSpecAPI, Search, Spectra, InChI).
:param string endpoint: ChemSpider API endpoint.
:param params: (Optional) Parameters for the ChemSpider endpoint as keyword arguments.
:rtype: string
"""
querystring = []
for k, v in params.items():
querystring.append('%s=%s' % (k, six.moves.urllib.parse.quote_plus(six.text_type(v))))
if self.security_token:
querystring.append('token=%s' % self.security_token)
return '%s/%s.asmx/%s?%s' % (self.api_url, api, endpoint, '&'.join(querystring))
def xml_to_dict(t):
"""Convert a ChemSpider XML response to a python dict."""
d = {}
for child in t:
tag = child.tag.split('}')[1]
tag, rtype = FIELDS.get(tag, (tag, six.text_type))
if rtype == list:
d[tag] = [xml_to_dict(grandchild) for grandchild in child]
elif rtype == dict:
d[tag] = xml_to_dict(child)
elif child.text is not None:
d[tag] = rtype(child.text.strip())
return d
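# Sketch of the conversion (element content is illustrative): a namespaced response such as
#     <Info xmlns="ns"><CSID>12345</CSID><CommonName>Example</CommonName></Info>
# becomes {'csid': 12345, 'common_name': 'Example'}, with tag names mapped and values
# coerced through the FIELDS table above.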
class MassSpecApi(BaseChemSpider):
def get_databases(self):
"""Get the list of datasources in ChemSpider."""
response = self.request('MassSpecApi', 'GetDatabases')
return [el.text for el in response]
def get_extended_compound_info(self, csid):
"""Get extended record details for a CSID. Security token is required.
:param string|int csid: ChemSpider ID.
"""
response = self.request('MassSpecApi', 'GetExtendedCompoundInfo', csid=csid)
return xml_to_dict(response)
def get_extended_compound_info_list(self, csids):
"""Get extended record details for a list of CSIDs. Security token is required.
:param list[string|int] csids: ChemSpider IDs.
"""
response = self.request('MassSpecApi', 'GetExtendedCompoundInfoArray', csids=csids)
return [xml_to_dict(result) for result in response]
def get_extended_mol_compound_info_list(self, csids, mol_type=MOL2D, include_reference_counts=False,
include_external_references=False):
"""Get extended record details (including MOL) for a list of CSIDs.
A maximum of 250 CSIDs can be fetched per request. Security token is required.
:param list[string|int] csids: ChemSpider IDs.
:param string mol_type: :data:`~chemspipy.api.MOL2D`, :data:`~chemspipy.api.MOL3D` or
:data:`~chemspipy.api.BOTH`.
:param bool include_reference_counts: Whether to include reference counts.
:param bool include_external_references: Whether to include external references.
"""
response = self.request('MassSpecApi', 'GetExtendedMolCompoundInfoArray', csids=csids,
eMolType=DIMENSIONS.get(mol_type, mol_type),
includeReferenceCounts=include_reference_counts,
includeExternalReferences=include_external_references)
return [xml_to_dict(result) for result in response]
def get_record_mol(self, csid, calc3d=False):
"""Get ChemSpider record in MOL format. Security token is required.
:param string|int csid: ChemSpider ID.
:param bool calc3d: Whether 3D coordinates should be calculated before returning record data.
"""
response = self.request('MassSpecApi', 'GetRecordMol', csid=csid, calc3d=calc3d)
return response.text
def simple_search_by_formula(self, formula):
"""Search ChemSpider by molecular formula.
:param string formula: Molecular formula
:returns: A list of Compounds.
:rtype: list[:class:`~chemspipy.Compound`]
"""
warnings.warn("Use search_by_formula instead of simple_search_by_formula.", DeprecationWarning)
response = self.request('MassSpecApi', 'SearchByFormula2', formula=formula)
return [Compound(self, el.text) for el in response]
def simple_search_by_mass(self, mass, mass_range):
"""Search ChemSpider by mass +/- range.
:param float mass: The mass to search for.
:param float mass_range: The +/- mass range to allow.
:returns: A list of Compounds.
:rtype: list[:class:`~chemspipy.Compound`]
"""
warnings.warn("Use search_by_mass instead of simple_search_by_mass.", DeprecationWarning)
response = self.request('MassSpecApi', 'SearchByMass2', mass=mass, range=mass_range)
return [Compound(self, el.text) for el in response]
# def get_compressed_records_sdf(self, rid):
# """Get an SDF containing all the results from a search operation.
#
# A maximum of 10000 records can be fetched per request. Subscriber role security token is required.
#
# Warning: This doesn't work reliably.
#
# :param string rid: A transaction ID, returned by an asynchronous search method.
# :returns: SDF containing the requested records.
# :rtype: string
# """
# response = self.request('MassSpecApi', 'GetCompressedRecordsSdf', rid=rid, eComp='eGzip')
# if response.text:
# return zlib.decompress(b64decode(response.text.encode('utf-8')), 16+zlib.MAX_WBITS)
#
# def get_records_sdf(self, rid):
# """Get an SDF containing all the results from a search operation.
#
# A maximum of 10000 records can be fetched per request. Subscriber role security token is required.
#
# Warning: This doesn't work reliably.
#
# :param string rid: A transaction ID, returned by an asynchronous search method.
# :returns: SDF containing the requested records.
# :rtype: string
# """
# response = self.request('MassSpecApi', 'GetRecordsSdf', rid=rid)
# if response.text:
# return response.text.encode('utf-8')
class SearchApi(BaseChemSpider):
def async_simple_search(self, query):
"""Search ChemSpider with arbitrary query, returning results in order of the best match found.
This method returns a transaction ID which can be used with other methods to get search status and results.
Security token is required.
:param string query: Search query - a name, SMILES, InChI, InChIKey, CSID, etc.
:returns: Transaction ID.
:rtype: string
"""
response = self.request('Search', 'AsyncSimpleSearch', query=query)
return response.text
def async_simple_search_ordered(self, query, order=CSID, direction=ASCENDING):
"""Search ChemSpider with arbitrary query, returning results with a custom order.
This method returns a transaction ID which can be used with other methods to get search status and results.
Security token is required.
:param string query: Search query - a name, SMILES, InChI, InChIKey, CSID, etc.
:param string order: :data:`~chemspipy.api.CSID`, :data:`~chemspipy.api.MASS_DEFECT`,
:data:`~chemspipy.api.MOLECULAR_WEIGHT`, :data:`~chemspipy.api.REFERENCE_COUNT`,
:data:`~chemspipy.api.DATASOURCE_COUNT`, :data:`~chemspipy.api.PUBMED_COUNT` or
:data:`~chemspipy.api.RSC_COUNT`.
:param string direction: :data:`~chemspipy.api.ASCENDING` or :data:`~chemspipy.api.DESCENDING`.
:returns: Transaction ID.
:rtype: string
"""
response = self.request('Search', 'AsyncSimpleSearchOrdered', query=query, orderBy=ORDERS[order],
orderDirection=DIRECTIONS[direction])
return response.text
def get_async_search_status(self, rid):
"""Check the status of an asynchronous search operation.
Security token is required.
:param string rid: A transaction ID, returned by an asynchronous search method.
:returns: Unknown, Created, Scheduled, Processing, Suspended, PartialResultReady, ResultReady, Failed,
TooManyRecords
:rtype: string
"""
response = self.request('Search', 'GetAsyncSearchStatus', rid=rid)
return response.text
def get_async_search_status_and_count(self, rid):
"""Check the status of an asynchronous search operation. If ready, a count and message are also returned.
Security token is required.
:param string rid: A transaction ID, returned by an asynchronous search method.
:rtype: dict
"""
response = self.request('Search', 'GetAsyncSearchStatusAndCount', rid=rid)
return xml_to_dict(response)
def get_async_search_result(self, rid):
"""Get the results from a asynchronous search operation. Security token is required.
:param string rid: A transaction ID, returned by an asynchronous search method.
:returns: A list of Compounds.
:rtype: list[:class:`~chemspipy.Compound`]
"""
response = self.request('Search', 'GetAsyncSearchResult', rid=rid)
return [Compound(self, el.text) for el in response]
def get_async_search_result_part(self, rid, start=0, count=-1):
"""Get a slice of the results from a asynchronous search operation. Security token is required.
:param string rid: A transaction ID, returned by an asynchronous search method.
:param int start: The number of results to skip.
:param int count: The number of results to return. -1 returns all through to end.
:returns: A list of Compounds.
:rtype: list[:class:`~chemspipy.Compound`]
"""
response = self.request('Search', 'GetAsyncSearchResultPart', rid=rid, start=start, count=count)
return [Compound(self, el.text) for el in response]
def get_compound_info(self, csid):
"""Get SMILES, StdInChI and StdInChIKey for a given CSID. Security token is required.
:param string|int csid: ChemSpider ID.
:rtype: dict
"""
response = self.request('Search', 'GetCompoundInfo', csid=csid)
return xml_to_dict(response)
def get_compound_thumbnail(self, csid):
"""Get PNG image as binary data.
:param string|int csid: ChemSpider ID.
:rtype: bytes
"""
response = self.request('Search', 'GetCompoundThumbnail', id=csid)
return b64decode(response.text.encode('utf-8'))
def simple_search(self, query):
"""Search ChemSpider with arbitrary query.
A maximum of 100 results are returned. Security token is required.
:param string query: Search query - a name, SMILES, InChI, InChIKey, CSID, etc.
:returns: List of :class:`Compounds <chemspipy.Compound>`.
:rtype: list[:class:`~chemspipy.Compound`]
"""
response = self.request('Search', 'SimpleSearch', query=query)
return [Compound(self, el.text) for el in response]
class SpectraApi(BaseChemSpider):
def get_all_spectra_info(self):
"""Get full list of all spectra in ChemSpider. Subscriber role security token is required.
        :rtype: list[dict]
"""
response = self.request('Spectra', 'GetAllSpectraInfo')
return [xml_to_dict(result) for result in response]
def get_spectrum_info(self, spectrum_id):
"""Get information for a specific spectrum ID. Subscriber role security token is required.
:param string|int spectrum_id: spectrum ID.
:returns: Spectrum info.
:rtype: dict
"""
response = self.request('Spectra', 'GetSpectrumInfo', spc_id=spectrum_id)
return xml_to_dict(response)
def get_compound_spectra_info(self, csid):
"""Get information about all the spectra for a ChemSpider ID. Subscriber role security token is required.
:param string|int csid: ChemSpider ID.
:returns: List of spectrum info.
:rtype: list[dict]
"""
response = self.request('Spectra', 'GetCompoundSpectraInfo', csid=csid)
return [xml_to_dict(result) for result in response]
def get_spectra_info_list(self, csids):
"""Get information about all the spectra for a list of ChemSpider IDs.
:param list[string|int] csids: ChemSpider IDs.
:returns: List of spectrum info.
:rtype: list[dict]
"""
response = self.request('Spectra', 'GetSpectraInfoArray', csids=csids)
return [xml_to_dict(result) for result in response]
class InchiApi(BaseChemSpider):
def get_original_mol(self, csid):
"""Get original submitted MOL file. Security token is required.
:param string|int csid: ChemSpider ID.
"""
response = self.request('InChI', 'CSIDToMol', csid=csid)
return response.text
# TODO
# InChIKeyToCSID - inchi_key - csid
# InChIKeyToInChI - inchi_key - InChI
# InChIKeyToMol - inchi_key - Mol
# InChIToCSID - inchi - csid
# InChIToInChIKey - inchi - inchikey
# InChIToMol - inchi - mol
# InChIToSMILES - inchi - smiles
# IsValidInChIKey - inchi_key - bool
# MolToInChI - mol - inchi
    # MolToInChIKey - mol - inchikey
# ResolveInChIKey - inchi_key, out_format (MOL/SDF/SMILES/InChI) - list of strings
# SMILESToInChI - smiles - inchi
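    # A minimal sketch (not in the original module) of one of the TODO wrappers above,
    # written by analogy with get_original_mol(); the operation name and parameter come
    # from the TODO list, but the exact request signature is an assumption.
    def inchi_to_csid(self, inchi):
        """Hypothetical wrapper: convert an InChI string to a ChemSpider ID."""
        response = self.request('InChI', 'InChIToCSID', inchi=inchi)
        return response.text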
class CustomApi(BaseChemSpider):
def get_compound(self, csid):
"""Return a Compound object for a given ChemSpider ID. Security token is required.
:param string|int csid: ChemSpider ID.
:returns: The Compound with the specified ChemSpider ID.
:rtype: :class:`~chemspipy.Compound`
"""
return Compound(self, csid)
def get_compounds(self, csids):
"""Return a list of Compound objects, given a list ChemSpider IDs. Security token is required.
:param list[string|int] csids: List of ChemSpider IDs.
:returns: List of Compounds with the specified ChemSpider IDs.
:rtype: list[:class:`~chemspipy.Compound`]
"""
return [Compound(self, csid) for csid in csids]
def get_spectrum(self, spectrum_id):
"""Return a :class:`~chemspipy.Spectrum` object for a given spectrum ID. Subscriber role security token is required.
:param string|int spectrum_id: Spectrum ID.
:returns: The Spectrum with the specified spectrum ID.
:rtype: :class:`~chemspipy.Spectrum`
"""
return Spectrum(self, spectrum_id)
def get_spectra(self, spectrum_ids):
"""Return a :class:`~chemspipy.Spectrum` object for a given spectrum ID. Subscriber role security token is required.
:param list[string|int] spectrum_ids: List of spectrum IDs.
:returns: List of spectra with the specified spectrum IDs.
:rtype: list[:class:`~chemspipy.Spectrum`]
"""
return [Spectrum(self, spectrum_id) for spectrum_id in spectrum_ids]
def get_compound_spectra(self, csid):
"""Return :class:`~chemspipy.Spectrum` objects for all the spectra associated with a ChemSpider ID.
        :param string|int csid: ChemSpider ID.
:returns: List of spectra for the specified ChemSpider ID.
:rtype: list[:class:`~chemspipy.Spectrum`]
"""
return [Spectrum.from_info_dict(self, info) for info in self.get_spectra_info_list([csid])]
def get_all_spectra(self):
"""Return a full list of :class:`~chemspipy.Spectrum` objects for all spectra in ChemSpider.
Subscriber role security token is required.
:returns: Full list of spectra in ChemSpider.
:rtype: list[:class:`~chemspipy.Spectrum`]
"""
return [Spectrum.from_info_dict(self, info) for info in self.get_all_spectra_info()]
def search(self, query, order=None, direction=ASCENDING, raise_errors=False):
"""Search ChemSpider for the specified query and return the results. Security token is required.
:param string|int query: Search query.
:param string order: (Optional) :data:`~chemspipy.api.CSID`, :data:`~chemspipy.api.MASS_DEFECT`,
:data:`~chemspipy.api.MOLECULAR_WEIGHT`, :data:`~chemspipy.api.REFERENCE_COUNT`,
:data:`~chemspipy.api.DATASOURCE_COUNT`, :data:`~chemspipy.api.PUBMED_COUNT` or
:data:`~chemspipy.api.RSC_COUNT`.
:param string direction: (Optional) :data:`~chemspipy.api.ASCENDING` or :data:`~chemspipy.api.DESCENDING`.
:param bool raise_errors: If True, raise exceptions. If False, store on Results ``exception`` property.
:returns: Search Results list.
:rtype: Results
"""
if order and direction:
return Results(self, self.async_simple_search_ordered, (query, order, direction), raise_errors=raise_errors)
else:
return Results(self, self.async_simple_search, (query,), raise_errors=raise_errors)
# TODO: Wrappers for subscriber role asynchronous searches
class ChemSpider(CustomApi, MassSpecApi, SearchApi, SpectraApi, InchiApi):
"""Provides access to the ChemSpider API.
Usage::
>>> from chemspipy import ChemSpider
>>> cs = ChemSpider('<YOUR-SECURITY-TOKEN>')
"""
def __repr__(self):
return 'ChemSpider()'
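# Illustrative usage sketch (not part of the original module): it assumes a valid security
# token and network access to ChemSpider; the query string is an arbitrary example.
if __name__ == '__main__':
    cs = ChemSpider('<YOUR-SECURITY-TOKEN>')
    for compound in cs.search('benzene'):
        print(compound.csid)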
|
py
|
1a5e6ce1f756916bf01acf3bb2bd7a0e7712e500
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Rlwrap(AutotoolsPackage):
"""rlwrap is a 'readline wrapper', a small utility that uses the GNU
readline library to allow the editing of keyboard input for any command."""
homepage = "https://github.com/hanslub42/rlwrap"
url = "https://github.com/hanslub42/rlwrap/releases/download/v0.43/rlwrap-0.43.tar.gz"
version('0.43', sha256='8e86d0b7882d9b8a73d229897a90edc207b1ae7fa0899dca8ee01c31a93feb2f')
    depends_on('readline@4.3:')
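    # Illustrative usage (not part of the original package file); assumes a working Spack
    # installation with this package available:
    #     spack install rlwrap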
|
py
|
1a5e6dae5e5cc8933241fa27a27ab996e953bed4
|
# -*- coding: utf-8 -*-
"""CCXT: CryptoCurrency eXchange Trading Library (Async)"""
# -----------------------------------------------------------------------------
__version__ = '1.20.94'
# -----------------------------------------------------------------------------
from ccxt.async_support.base.exchange import Exchange # noqa: F401
from ccxt.base.decimal_to_precision import decimal_to_precision # noqa: F401
from ccxt.base.decimal_to_precision import TRUNCATE # noqa: F401
from ccxt.base.decimal_to_precision import ROUND # noqa: F401
from ccxt.base.decimal_to_precision import DECIMAL_PLACES # noqa: F401
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS # noqa: F401
from ccxt.base.decimal_to_precision import NO_PADDING # noqa: F401
from ccxt.base.decimal_to_precision import PAD_WITH_ZERO # noqa: F401
from ccxt.base import errors # noqa: F401
from ccxt.base.errors import BaseError # noqa: F401
from ccxt.base.errors import ExchangeError # noqa: F401
from ccxt.base.errors import NotSupported # noqa: F401
from ccxt.base.errors import AuthenticationError # noqa: F401
from ccxt.base.errors import PermissionDenied # noqa: F401
from ccxt.base.errors import AccountSuspended # noqa: F401
from ccxt.base.errors import InvalidNonce # noqa: F401
from ccxt.base.errors import InsufficientFunds # noqa: F401
from ccxt.base.errors import InvalidOrder # noqa: F401
from ccxt.base.errors import OrderNotFound # noqa: F401
from ccxt.base.errors import OrderNotCached # noqa: F401
from ccxt.base.errors import DuplicateOrderId # noqa: F401
from ccxt.base.errors import CancelPending # noqa: F401
from ccxt.base.errors import NetworkError # noqa: F401
from ccxt.base.errors import DDoSProtection # noqa: F401
from ccxt.base.errors import RateLimitExceeded # noqa: F401
from ccxt.base.errors import RequestTimeout # noqa: F401
from ccxt.base.errors import ExchangeNotAvailable # noqa: F401
from ccxt.base.errors import OnMaintenance # noqa: F401
from ccxt.base.errors import InvalidAddress # noqa: F401
from ccxt.base.errors import AddressPending # noqa: F401
from ccxt.base.errors import ArgumentsRequired # noqa: F401
from ccxt.base.errors import BadRequest # noqa: F401
from ccxt.base.errors import BadResponse # noqa: F401
from ccxt.base.errors import NullResponse # noqa: F401
from ccxt.base.errors import OrderImmediatelyFillable # noqa: F401
from ccxt.base.errors import OrderNotFillable # noqa: F401
from ccxt.async_support._1btcxe import _1btcxe # noqa: F401
from ccxt.async_support.acx import acx # noqa: F401
from ccxt.async_support.adara import adara # noqa: F401
from ccxt.async_support.allcoin import allcoin # noqa: F401
from ccxt.async_support.anxpro import anxpro # noqa: F401
from ccxt.async_support.bcex import bcex # noqa: F401
from ccxt.async_support.bequant import bequant # noqa: F401
from ccxt.async_support.bibox import bibox # noqa: F401
from ccxt.async_support.bigone import bigone # noqa: F401
from ccxt.async_support.binance import binance # noqa: F401
from ccxt.async_support.binanceje import binanceje # noqa: F401
from ccxt.async_support.binanceus import binanceus # noqa: F401
from ccxt.async_support.bit2c import bit2c # noqa: F401
from ccxt.async_support.bitbank import bitbank # noqa: F401
from ccxt.async_support.bitbay import bitbay # noqa: F401
from ccxt.async_support.bitfinex import bitfinex # noqa: F401
from ccxt.async_support.bitfinex2 import bitfinex2 # noqa: F401
from ccxt.async_support.bitflyer import bitflyer # noqa: F401
from ccxt.async_support.bitforex import bitforex # noqa: F401
from ccxt.async_support.bithumb import bithumb # noqa: F401
from ccxt.async_support.bitkk import bitkk # noqa: F401
from ccxt.async_support.bitlish import bitlish # noqa: F401
from ccxt.async_support.bitmart import bitmart # noqa: F401
from ccxt.async_support.bitmax import bitmax # noqa: F401
from ccxt.async_support.bitmex import bitmex # noqa: F401
from ccxt.async_support.bitso import bitso # noqa: F401
from ccxt.async_support.bitstamp import bitstamp # noqa: F401
from ccxt.async_support.bitstamp1 import bitstamp1 # noqa: F401
from ccxt.async_support.bittrex import bittrex # noqa: F401
from ccxt.async_support.bitz import bitz # noqa: F401
from ccxt.async_support.bl3p import bl3p # noqa: F401
from ccxt.async_support.bleutrade import bleutrade # noqa: F401
from ccxt.async_support.braziliex import braziliex # noqa: F401
from ccxt.async_support.btcalpha import btcalpha # noqa: F401
from ccxt.async_support.btcbox import btcbox # noqa: F401
from ccxt.async_support.btcchina import btcchina # noqa: F401
from ccxt.async_support.btcmarkets import btcmarkets # noqa: F401
from ccxt.async_support.btctradeim import btctradeim # noqa: F401
from ccxt.async_support.btctradeua import btctradeua # noqa: F401
from ccxt.async_support.btcturk import btcturk # noqa: F401
from ccxt.async_support.buda import buda # noqa: F401
from ccxt.async_support.bw import bw # noqa: F401
from ccxt.async_support.bytetrade import bytetrade # noqa: F401
from ccxt.async_support.cex import cex # noqa: F401
from ccxt.async_support.chilebit import chilebit # noqa: F401
from ccxt.async_support.cobinhood import cobinhood # noqa: F401
from ccxt.async_support.coinbase import coinbase # noqa: F401
from ccxt.async_support.coinbaseprime import coinbaseprime # noqa: F401
from ccxt.async_support.coinbasepro import coinbasepro # noqa: F401
from ccxt.async_support.coincheck import coincheck # noqa: F401
from ccxt.async_support.coinegg import coinegg # noqa: F401
from ccxt.async_support.coinex import coinex # noqa: F401
from ccxt.async_support.coinfalcon import coinfalcon # noqa: F401
from ccxt.async_support.coinfloor import coinfloor # noqa: F401
from ccxt.async_support.coingi import coingi # noqa: F401
from ccxt.async_support.coinmarketcap import coinmarketcap # noqa: F401
from ccxt.async_support.coinmate import coinmate # noqa: F401
from ccxt.async_support.coinone import coinone # noqa: F401
from ccxt.async_support.coinspot import coinspot # noqa: F401
from ccxt.async_support.coolcoin import coolcoin # noqa: F401
from ccxt.async_support.coss import coss # noqa: F401
from ccxt.async_support.crex24 import crex24 # noqa: F401
from ccxt.async_support.deribit import deribit # noqa: F401
from ccxt.async_support.digifinex import digifinex # noqa: F401
from ccxt.async_support.dsx import dsx # noqa: F401
from ccxt.async_support.exmo import exmo # noqa: F401
from ccxt.async_support.exx import exx # noqa: F401
from ccxt.async_support.fcoin import fcoin # noqa: F401
from ccxt.async_support.fcoinjp import fcoinjp # noqa: F401
from ccxt.async_support.flowbtc import flowbtc # noqa: F401
from ccxt.async_support.foxbit import foxbit # noqa: F401
from ccxt.async_support.ftx import ftx # noqa: F401
from ccxt.async_support.fybse import fybse # noqa: F401
from ccxt.async_support.gateio import gateio # noqa: F401
from ccxt.async_support.gemini import gemini # noqa: F401
from ccxt.async_support.hitbtc import hitbtc # noqa: F401
from ccxt.async_support.hitbtc2 import hitbtc2 # noqa: F401
from ccxt.async_support.huobipro import huobipro # noqa: F401
from ccxt.async_support.huobiru import huobiru # noqa: F401
from ccxt.async_support.ice3x import ice3x # noqa: F401
from ccxt.async_support.idex import idex # noqa: F401
from ccxt.async_support.independentreserve import independentreserve # noqa: F401
from ccxt.async_support.indodax import indodax # noqa: F401
from ccxt.async_support.itbit import itbit # noqa: F401
from ccxt.async_support.kkex import kkex # noqa: F401
from ccxt.async_support.kraken import kraken # noqa: F401
from ccxt.async_support.kucoin import kucoin # noqa: F401
from ccxt.async_support.kuna import kuna # noqa: F401
from ccxt.async_support.lakebtc import lakebtc # noqa: F401
from ccxt.async_support.latoken import latoken # noqa: F401
from ccxt.async_support.lbank import lbank # noqa: F401
from ccxt.async_support.liquid import liquid # noqa: F401
from ccxt.async_support.livecoin import livecoin # noqa: F401
from ccxt.async_support.luno import luno # noqa: F401
from ccxt.async_support.lykke import lykke # noqa: F401
from ccxt.async_support.mandala import mandala # noqa: F401
from ccxt.async_support.mercado import mercado # noqa: F401
from ccxt.async_support.mixcoins import mixcoins # noqa: F401
from ccxt.async_support.oceanex import oceanex # noqa: F401
from ccxt.async_support.okcoincny import okcoincny # noqa: F401
from ccxt.async_support.okcoinusd import okcoinusd # noqa: F401
from ccxt.async_support.okex import okex # noqa: F401
from ccxt.async_support.okex3 import okex3 # noqa: F401
from ccxt.async_support.paymium import paymium # noqa: F401
from ccxt.async_support.poloniex import poloniex # noqa: F401
from ccxt.async_support.rightbtc import rightbtc # noqa: F401
from ccxt.async_support.southxchange import southxchange # noqa: F401
from ccxt.async_support.stex import stex # noqa: F401
from ccxt.async_support.stronghold import stronghold # noqa: F401
from ccxt.async_support.surbitcoin import surbitcoin # noqa: F401
from ccxt.async_support.theocean import theocean # noqa: F401
from ccxt.async_support.therock import therock # noqa: F401
from ccxt.async_support.tidebit import tidebit # noqa: F401
from ccxt.async_support.tidex import tidex # noqa: F401
from ccxt.async_support.timex import timex # noqa: F401
from ccxt.async_support.upbit import upbit # noqa: F401
from ccxt.async_support.vaultoro import vaultoro # noqa: F401
from ccxt.async_support.vbtc import vbtc # noqa: F401
from ccxt.async_support.virwox import virwox # noqa: F401
from ccxt.async_support.whitebit import whitebit # noqa: F401
from ccxt.async_support.xbtce import xbtce # noqa: F401
from ccxt.async_support.yobit import yobit # noqa: F401
from ccxt.async_support.zaif import zaif # noqa: F401
from ccxt.async_support.zb import zb # noqa: F401
exchanges = [
'_1btcxe',
'acx',
'adara',
'allcoin',
'anxpro',
'bcex',
'bequant',
'bibox',
'bigone',
'binance',
'binanceje',
'binanceus',
'bit2c',
'bitbank',
'bitbay',
'bitfinex',
'bitfinex2',
'bitflyer',
'bitforex',
'bithumb',
'bitkk',
'bitlish',
'bitmart',
'bitmax',
'bitmex',
'bitso',
'bitstamp',
'bitstamp1',
'bittrex',
'bitz',
'bl3p',
'bleutrade',
'braziliex',
'btcalpha',
'btcbox',
'btcchina',
'btcmarkets',
'btctradeim',
'btctradeua',
'btcturk',
'buda',
'bw',
'bytetrade',
'cex',
'chilebit',
'cobinhood',
'coinbase',
'coinbaseprime',
'coinbasepro',
'coincheck',
'coinegg',
'coinex',
'coinfalcon',
'coinfloor',
'coingi',
'coinmarketcap',
'coinmate',
'coinone',
'coinspot',
'coolcoin',
'coss',
'crex24',
'deribit',
'digifinex',
'dsx',
'exmo',
'exx',
'fcoin',
'fcoinjp',
'flowbtc',
'foxbit',
'ftx',
'fybse',
'gateio',
'gemini',
'hitbtc',
'hitbtc2',
'huobipro',
'huobiru',
'ice3x',
'idex',
'independentreserve',
'indodax',
'itbit',
'kkex',
'kraken',
'kucoin',
'kuna',
'lakebtc',
'latoken',
'lbank',
'liquid',
'livecoin',
'luno',
'lykke',
'mandala',
'mercado',
'mixcoins',
'oceanex',
'okcoincny',
'okcoinusd',
'okex',
'okex3',
'paymium',
'poloniex',
'rightbtc',
'southxchange',
'stex',
'stronghold',
'surbitcoin',
'theocean',
'therock',
'tidebit',
'tidex',
'timex',
'upbit',
'vaultoro',
'vbtc',
'virwox',
'whitebit',
'xbtce',
'yobit',
'zaif',
'zb',
]
base = [
'Exchange',
'exchanges',
'decimal_to_precision',
]
__all__ = base + errors.__all__ + exchanges
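# Illustrative usage sketch (not part of the original module): instantiating one of the async
# exchange classes imported above and fetching a ticker. The exchange ('binance') and the
# symbol ('BTC/USDT') are arbitrary examples; network access is assumed.
if __name__ == '__main__':  # pragma: no cover
    import asyncio
    async def _demo():
        exchange = binance()
        try:
            print(await exchange.fetch_ticker('BTC/USDT'))
        finally:
            await exchange.close()  # release the underlying aiohttp session
    asyncio.run(_demo())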
|
py
|
1a5e6eb878c07a08caaec8bb3086a529730cfcb1
|
############################################################################
## Copyright 2015-2021 Google LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
############################################################################
import gdb
import lldb
def get_basic_type(t):
# This is not equivalent to what gdb does. For example, if you have:
#
# typedef int* pint;
# typedef pint* ppint;
#
# and run `gdb.types.get_basic_type(gdb.lookup_type("ppint"))`, gdb won't give
# you `int **`. It will stop stripping typedefs at `pint *`, which is
# technically not a typedef, but a pointer.
#
# lldb, however, will give you `int **` even if you try to just strip
# qualifiers with `SBType.GetUnqualifiedType`. So I'm not sure we can get the
# exact behavior of "remove qualifiers and strip layers of typedefs only until
# you find a pointer type".
#
# This will do the trick for basic cases like "get the underlying unqualified
# type in order to match a RegexpCollectionPrettyPrinter".
return gdb.Type(t.sbtype().GetUnqualifiedType().GetCanonicalType())
def _sbtype_has_field(sbtype, field_name):
"""Recursive helper to have has_field search up the inheritance hierarchy."""
for f in sbtype.fields:
if f.name == field_name:
return True
for b in sbtype.bases:
if _sbtype_has_field(b.type, field_name):
return True
for b in sbtype.vbases:
if _sbtype_has_field(b.type, field_name):
return True
return False
def has_field(t, field_name):
return _sbtype_has_field(t.sbtype(), field_name)
def make_enum_dict(t):
"""Returns a dict {'enum_value_name': enum_value...}."""
return {field.name: field.enumval for field in t.fields()}
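# Illustrative sketch (not part of the original module): how these helpers might be combined
# inside a pretty-printer lookup predicate. The member name 'impl_' is a made-up example.
def _example_uses_helpers(t):
    """Return True if the fully-stripped type has an 'impl_' member (hypothetical check)."""
    return has_field(get_basic_type(t), 'impl_')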
|
py
|
1a5e6edad9b28f22e14a22225b7d4f06a026b679
|
"""Unit tests for contextlib.py, and other context managers."""
import io
import sys
import tempfile
import threading
import unittest
from contextlib import * # Tests __all__
from test import support
class TestAbstractContextManager(unittest.TestCase):
def test_enter(self):
class DefaultEnter(AbstractContextManager):
def __exit__(self, *args):
super().__exit__(*args)
manager = DefaultEnter()
self.assertIs(manager.__enter__(), manager)
def test_exit_is_abstract(self):
class MissingExit(AbstractContextManager):
pass
with self.assertRaises(TypeError):
MissingExit()
def test_structural_subclassing(self):
class ManagerFromScratch:
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
return None
self.assertTrue(issubclass(ManagerFromScratch, AbstractContextManager))
class DefaultEnter(AbstractContextManager):
def __exit__(self, *args):
super().__exit__(*args)
self.assertTrue(issubclass(DefaultEnter, AbstractContextManager))
class NoEnter(ManagerFromScratch):
__enter__ = None
self.assertFalse(issubclass(NoEnter, AbstractContextManager))
class NoExit(ManagerFromScratch):
__exit__ = None
self.assertFalse(issubclass(NoExit, AbstractContextManager))
class ContextManagerTestCase(unittest.TestCase):
def test_contextmanager_plain(self):
state = []
@contextmanager
def woohoo():
state.append(1)
yield 42
state.append(999)
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_finally(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
finally:
state.append(999)
with self.assertRaises(ZeroDivisionError):
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError()
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_no_reraise(self):
@contextmanager
def whee():
yield
ctx = whee()
ctx.__enter__()
# Calling __exit__ should not result in an exception
self.assertFalse(ctx.__exit__(TypeError, TypeError("foo"), None))
def test_contextmanager_trap_yield_after_throw(self):
@contextmanager
def whoo():
try:
yield
except:
yield
ctx = whoo()
ctx.__enter__()
self.assertRaises(
RuntimeError, ctx.__exit__, TypeError, TypeError("foo"), None
)
def test_contextmanager_except(self):
state = []
@contextmanager
def woohoo():
state.append(1)
try:
yield 42
except ZeroDivisionError as e:
state.append(e.args[0])
self.assertEqual(state, [1, 42, 999])
with woohoo() as x:
self.assertEqual(state, [1])
self.assertEqual(x, 42)
state.append(x)
raise ZeroDivisionError(999)
self.assertEqual(state, [1, 42, 999])
def test_contextmanager_except_stopiter(self):
stop_exc = StopIteration('spam')
@contextmanager
def woohoo():
yield
try:
with self.assertWarnsRegex(DeprecationWarning,
"StopIteration"):
with woohoo():
raise stop_exc
except Exception as ex:
self.assertIs(ex, stop_exc)
else:
self.fail('StopIteration was suppressed')
def test_contextmanager_except_pep479(self):
code = """\
from __future__ import generator_stop
from contextlib import contextmanager
@contextmanager
def woohoo():
yield
"""
locals = {}
exec(code, locals, locals)
woohoo = locals['woohoo']
stop_exc = StopIteration('spam')
try:
with woohoo():
raise stop_exc
except Exception as ex:
self.assertIs(ex, stop_exc)
else:
self.fail('StopIteration was suppressed')
def test_contextmanager_do_not_unchain_non_stopiteration_exceptions(self):
@contextmanager
def test_issue29692():
try:
yield
except Exception as exc:
raise RuntimeError('issue29692:Chained') from exc
try:
with test_issue29692():
raise ZeroDivisionError
except Exception as ex:
self.assertIs(type(ex), RuntimeError)
self.assertEqual(ex.args[0], 'issue29692:Chained')
self.assertIsInstance(ex.__cause__, ZeroDivisionError)
try:
with test_issue29692():
raise StopIteration('issue29692:Unchained')
except Exception as ex:
self.assertIs(type(ex), StopIteration)
self.assertEqual(ex.args[0], 'issue29692:Unchained')
self.assertIsNone(ex.__cause__)
def _create_contextmanager_attribs(self):
def attribs(**kw):
def decorate(func):
for k,v in kw.items():
setattr(func,k,v)
return func
return decorate
@contextmanager
@attribs(foo='bar')
def baz(spam):
"""Whee!"""
return baz
def test_contextmanager_attribs(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__name__,'baz')
self.assertEqual(baz.foo, 'bar')
@support.requires_docstrings
def test_contextmanager_doc_attrib(self):
baz = self._create_contextmanager_attribs()
self.assertEqual(baz.__doc__, "Whee!")
@support.requires_docstrings
def test_instance_docstring_given_cm_docstring(self):
baz = self._create_contextmanager_attribs()(None)
self.assertEqual(baz.__doc__, "Whee!")
def test_keywords(self):
# Ensure no keyword arguments are inhibited
@contextmanager
def woohoo(self, func, args, kwds):
yield (self, func, args, kwds)
with woohoo(self=11, func=22, args=33, kwds=44) as target:
self.assertEqual(target, (11, 22, 33, 44))
class ClosingTestCase(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = closing.__doc__
obj = closing(None)
self.assertEqual(obj.__doc__, cm_docstring)
def test_closing(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with closing(x) as y:
self.assertEqual(x, y)
self.assertEqual(state, [1])
def test_closing_error(self):
state = []
class C:
def close(self):
state.append(1)
x = C()
self.assertEqual(state, [])
with self.assertRaises(ZeroDivisionError):
with closing(x) as y:
self.assertEqual(x, y)
1 / 0
self.assertEqual(state, [1])
class NullcontextTestCase(unittest.TestCase):
def test_nullcontext(self):
class C:
pass
c = C()
with nullcontext(c) as c_in:
self.assertIs(c_in, c)
class FileContextTestCase(unittest.TestCase):
def testWithOpen(self):
tfn = tempfile.mktemp()
try:
f = None
with open(tfn, "w") as f:
self.assertFalse(f.closed)
f.write("Booh\n")
self.assertTrue(f.closed)
f = None
with self.assertRaises(ZeroDivisionError):
with open(tfn, "r") as f:
self.assertFalse(f.closed)
self.assertEqual(f.read(), "Booh\n")
1 / 0
self.assertTrue(f.closed)
finally:
support.unlink(tfn)
class LockContextTestCase(unittest.TestCase):
def boilerPlate(self, lock, locked):
self.assertFalse(locked())
with lock:
self.assertTrue(locked())
self.assertFalse(locked())
with self.assertRaises(ZeroDivisionError):
with lock:
self.assertTrue(locked())
1 / 0
self.assertFalse(locked())
def testWithLock(self):
lock = threading.Lock()
self.boilerPlate(lock, lock.locked)
def testWithRLock(self):
lock = threading.RLock()
self.boilerPlate(lock, lock._is_owned)
def testWithCondition(self):
lock = threading.Condition()
def locked():
return lock._is_owned()
self.boilerPlate(lock, locked)
def testWithSemaphore(self):
lock = threading.Semaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
def testWithBoundedSemaphore(self):
lock = threading.BoundedSemaphore()
def locked():
if lock.acquire(False):
lock.release()
return False
else:
return True
self.boilerPlate(lock, locked)
class mycontext(ContextDecorator):
"""Example decoration-compatible context manager for testing"""
started = False
exc = None
catch = False
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
return self.catch
class TestContextDecorator(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = mycontext.__doc__
obj = mycontext()
self.assertEqual(obj.__doc__, cm_docstring)
def test_contextdecorator(self):
context = mycontext()
with context as result:
self.assertIs(result, context)
self.assertTrue(context.started)
self.assertEqual(context.exc, (None, None, None))
def test_contextdecorator_with_exception(self):
context = mycontext()
with self.assertRaisesRegex(NameError, 'foo'):
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
context = mycontext()
context.catch = True
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorator(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_decorator_with_exception(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
raise NameError('foo')
with self.assertRaisesRegex(NameError, 'foo'):
test()
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorating_method(self):
context = mycontext()
class Test(object):
@context
def method(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
# these tests are for argument passing when used as a decorator
test = Test()
test.method(1, 2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
self.assertEqual(test.c, None)
test = Test()
test.method('a', 'b', 'c')
self.assertEqual(test.a, 'a')
self.assertEqual(test.b, 'b')
self.assertEqual(test.c, 'c')
test = Test()
test.method(a=1, b=2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
def test_typo_enter(self):
class mycontext(ContextDecorator):
def __unter__(self):
pass
def __exit__(self, *exc):
pass
with self.assertRaises(AttributeError):
with mycontext():
pass
def test_typo_exit(self):
class mycontext(ContextDecorator):
def __enter__(self):
pass
def __uxit__(self, *exc):
pass
with self.assertRaises(AttributeError):
with mycontext():
pass
def test_contextdecorator_as_mixin(self):
class somecontext(object):
started = False
exc = None
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
class mycontext(somecontext, ContextDecorator):
pass
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_contextmanager_as_decorator(self):
@contextmanager
def woohoo(y):
state.append(y)
yield
state.append(999)
state = []
@woohoo(1)
def test(x):
self.assertEqual(state, [1])
state.append(x)
test('something')
self.assertEqual(state, [1, 'something', 999])
# Issue #11647: Ensure the decorated function is 'reusable'
state = []
test('something else')
self.assertEqual(state, [1, 'something else', 999])
class TestExitStack(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = ExitStack.__doc__
obj = ExitStack()
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_resources(self):
with ExitStack():
pass
def test_callback(self):
expected = [
((), {}),
((1,), {}),
((1,2), {}),
((), dict(example=1)),
((1,), dict(example=1)),
((1,2), dict(example=1)),
]
result = []
def _exit(*args, **kwds):
"""Test metadata propagation"""
result.append((args, kwds))
with ExitStack() as stack:
for args, kwds in reversed(expected):
if args and kwds:
f = stack.callback(_exit, *args, **kwds)
elif args:
f = stack.callback(_exit, *args)
elif kwds:
f = stack.callback(_exit, **kwds)
else:
f = stack.callback(_exit)
self.assertIs(f, _exit)
for wrapper in stack._exit_callbacks:
self.assertIs(wrapper.__wrapped__, _exit)
self.assertNotEqual(wrapper.__name__, _exit.__name__)
self.assertIsNone(wrapper.__doc__, _exit.__doc__)
self.assertEqual(result, expected)
def test_push(self):
exc_raised = ZeroDivisionError
def _expect_exc(exc_type, exc, exc_tb):
self.assertIs(exc_type, exc_raised)
def _suppress_exc(*exc_details):
return True
def _expect_ok(exc_type, exc, exc_tb):
self.assertIsNone(exc_type)
self.assertIsNone(exc)
self.assertIsNone(exc_tb)
class ExitCM(object):
def __init__(self, check_exc):
self.check_exc = check_exc
def __enter__(self):
self.fail("Should not be called!")
def __exit__(self, *exc_details):
self.check_exc(*exc_details)
with ExitStack() as stack:
stack.push(_expect_ok)
self.assertIs(stack._exit_callbacks[-1], _expect_ok)
cm = ExitCM(_expect_ok)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
stack.push(_suppress_exc)
self.assertIs(stack._exit_callbacks[-1], _suppress_exc)
cm = ExitCM(_expect_exc)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
stack.push(_expect_exc)
self.assertIs(stack._exit_callbacks[-1], _expect_exc)
stack.push(_expect_exc)
self.assertIs(stack._exit_callbacks[-1], _expect_exc)
1/0
def test_enter_context(self):
class TestCM(object):
def __enter__(self):
result.append(1)
def __exit__(self, *exc_details):
result.append(3)
result = []
cm = TestCM()
with ExitStack() as stack:
@stack.callback # Registered first => cleaned up last
def _exit():
result.append(4)
self.assertIsNotNone(_exit)
stack.enter_context(cm)
self.assertIs(stack._exit_callbacks[-1].__self__, cm)
result.append(2)
self.assertEqual(result, [1, 2, 3, 4])
def test_close(self):
result = []
with ExitStack() as stack:
@stack.callback
def _exit():
result.append(1)
self.assertIsNotNone(_exit)
stack.close()
result.append(2)
self.assertEqual(result, [1, 2])
def test_pop_all(self):
result = []
with ExitStack() as stack:
@stack.callback
def _exit():
result.append(3)
self.assertIsNotNone(_exit)
new_stack = stack.pop_all()
result.append(1)
result.append(2)
new_stack.close()
self.assertEqual(result, [1, 2, 3])
def test_exit_raise(self):
with self.assertRaises(ZeroDivisionError):
with ExitStack() as stack:
stack.push(lambda *exc: False)
1/0
def test_exit_suppress(self):
with ExitStack() as stack:
stack.push(lambda *exc: True)
1/0
def test_exit_exception_chaining_reference(self):
# Sanity check to make sure that ExitStack chaining matches
# actual nested with statements
class RaiseExc:
def __init__(self, exc):
self.exc = exc
def __enter__(self):
return self
def __exit__(self, *exc_details):
raise self.exc
class RaiseExcWithContext:
def __init__(self, outer, inner):
self.outer = outer
self.inner = inner
def __enter__(self):
return self
def __exit__(self, *exc_details):
try:
raise self.inner
except:
raise self.outer
class SuppressExc:
def __enter__(self):
return self
def __exit__(self, *exc_details):
type(self).saved_details = exc_details
return True
try:
with RaiseExc(IndexError):
with RaiseExcWithContext(KeyError, AttributeError):
with SuppressExc():
with RaiseExc(ValueError):
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = SuppressExc.saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
def test_exit_exception_chaining(self):
# Ensure exception chaining matches the reference behaviour
def raise_exc(exc):
raise exc
saved_details = None
def suppress_exc(*exc_details):
nonlocal saved_details
saved_details = exc_details
return True
try:
with ExitStack() as stack:
stack.callback(raise_exc, IndexError)
stack.callback(raise_exc, KeyError)
stack.callback(raise_exc, AttributeError)
stack.push(suppress_exc)
stack.callback(raise_exc, ValueError)
1 / 0
except IndexError as exc:
self.assertIsInstance(exc.__context__, KeyError)
self.assertIsInstance(exc.__context__.__context__, AttributeError)
# Inner exceptions were suppressed
self.assertIsNone(exc.__context__.__context__.__context__)
else:
self.fail("Expected IndexError, but no exception was raised")
# Check the inner exceptions
inner_exc = saved_details[1]
self.assertIsInstance(inner_exc, ValueError)
self.assertIsInstance(inner_exc.__context__, ZeroDivisionError)
def test_exit_exception_non_suppressing(self):
# http://bugs.python.org/issue19092
def raise_exc(exc):
raise exc
def suppress_exc(*exc_details):
return True
try:
with ExitStack() as stack:
stack.callback(lambda: None)
stack.callback(raise_exc, IndexError)
except Exception as exc:
self.assertIsInstance(exc, IndexError)
else:
self.fail("Expected IndexError, but no exception was raised")
try:
with ExitStack() as stack:
stack.callback(raise_exc, KeyError)
stack.push(suppress_exc)
stack.callback(raise_exc, IndexError)
except Exception as exc:
self.assertIsInstance(exc, KeyError)
else:
self.fail("Expected KeyError, but no exception was raised")
def test_exit_exception_with_correct_context(self):
# http://bugs.python.org/issue20317
@contextmanager
def gets_the_context_right(exc):
try:
yield
finally:
raise exc
exc1 = Exception(1)
exc2 = Exception(2)
exc3 = Exception(3)
exc4 = Exception(4)
# The contextmanager already fixes the context, so prior to the
# fix, ExitStack would try to fix it *again* and get into an
# infinite self-referential loop
try:
with ExitStack() as stack:
stack.enter_context(gets_the_context_right(exc4))
stack.enter_context(gets_the_context_right(exc3))
stack.enter_context(gets_the_context_right(exc2))
raise exc1
except Exception as exc:
self.assertIs(exc, exc4)
self.assertIs(exc.__context__, exc3)
self.assertIs(exc.__context__.__context__, exc2)
self.assertIs(exc.__context__.__context__.__context__, exc1)
self.assertIsNone(
exc.__context__.__context__.__context__.__context__)
def test_exit_exception_with_existing_context(self):
# Addresses a lack of test coverage discovered after checking in a
# fix for issue 20317 that still contained debugging code.
def raise_nested(inner_exc, outer_exc):
try:
raise inner_exc
finally:
raise outer_exc
exc1 = Exception(1)
exc2 = Exception(2)
exc3 = Exception(3)
exc4 = Exception(4)
exc5 = Exception(5)
try:
with ExitStack() as stack:
stack.callback(raise_nested, exc4, exc5)
stack.callback(raise_nested, exc2, exc3)
raise exc1
except Exception as exc:
self.assertIs(exc, exc5)
self.assertIs(exc.__context__, exc4)
self.assertIs(exc.__context__.__context__, exc3)
self.assertIs(exc.__context__.__context__.__context__, exc2)
self.assertIs(
exc.__context__.__context__.__context__.__context__, exc1)
self.assertIsNone(
exc.__context__.__context__.__context__.__context__.__context__)
def test_body_exception_suppress(self):
def suppress_exc(*exc_details):
return True
try:
with ExitStack() as stack:
stack.push(suppress_exc)
1/0
except IndexError as exc:
self.fail("Expected no exception, got IndexError")
def test_exit_exception_chaining_suppress(self):
with ExitStack() as stack:
stack.push(lambda *exc: True)
stack.push(lambda *exc: 1/0)
stack.push(lambda *exc: {}[1])
def test_excessive_nesting(self):
# The original implementation would die with RecursionError here
with ExitStack() as stack:
for i in range(10000):
stack.callback(int)
def test_instance_bypass(self):
class Example(object): pass
cm = Example()
cm.__exit__ = object()
stack = ExitStack()
self.assertRaises(AttributeError, stack.enter_context, cm)
stack.push(cm)
self.assertIs(stack._exit_callbacks[-1], cm)
def test_dont_reraise_RuntimeError(self):
# https://bugs.python.org/issue27122
class UniqueException(Exception): pass
class UniqueRuntimeError(RuntimeError): pass
@contextmanager
def second():
try:
yield 1
except Exception as exc:
raise UniqueException("new exception") from exc
@contextmanager
def first():
try:
yield 1
except Exception as exc:
raise exc
# The UniqueRuntimeError should be caught by second()'s exception
# handler which chain raised a new UniqueException.
with self.assertRaises(UniqueException) as err_ctx:
with ExitStack() as es_ctx:
es_ctx.enter_context(second())
es_ctx.enter_context(first())
raise UniqueRuntimeError("please no infinite loop.")
exc = err_ctx.exception
self.assertIsInstance(exc, UniqueException)
self.assertIsInstance(exc.__context__, UniqueRuntimeError)
self.assertIsNone(exc.__context__.__context__)
self.assertIsNone(exc.__context__.__cause__)
self.assertIs(exc.__cause__, exc.__context__)
class TestRedirectStream:
redirect_stream = None
orig_stream = None
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = self.redirect_stream.__doc__
obj = self.redirect_stream(None)
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_redirect_in_init(self):
orig_stdout = getattr(sys, self.orig_stream)
self.redirect_stream(None)
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
def test_redirect_to_string_io(self):
f = io.StringIO()
msg = "Consider an API like help(), which prints directly to stdout"
orig_stdout = getattr(sys, self.orig_stream)
with self.redirect_stream(f):
print(msg, file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue().strip()
self.assertEqual(s, msg)
def test_enter_result_is_target(self):
f = io.StringIO()
with self.redirect_stream(f) as enter_result:
self.assertIs(enter_result, f)
def test_cm_is_reusable(self):
f = io.StringIO()
write_to_f = self.redirect_stream(f)
orig_stdout = getattr(sys, self.orig_stream)
with write_to_f:
print("Hello", end=" ", file=getattr(sys, self.orig_stream))
with write_to_f:
print("World!", file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue()
self.assertEqual(s, "Hello World!\n")
def test_cm_is_reentrant(self):
f = io.StringIO()
write_to_f = self.redirect_stream(f)
orig_stdout = getattr(sys, self.orig_stream)
with write_to_f:
print("Hello", end=" ", file=getattr(sys, self.orig_stream))
with write_to_f:
print("World!", file=getattr(sys, self.orig_stream))
self.assertIs(getattr(sys, self.orig_stream), orig_stdout)
s = f.getvalue()
self.assertEqual(s, "Hello World!\n")
class TestRedirectStdout(TestRedirectStream, unittest.TestCase):
redirect_stream = redirect_stdout
orig_stream = "stdout"
class TestRedirectStderr(TestRedirectStream, unittest.TestCase):
redirect_stream = redirect_stderr
orig_stream = "stderr"
class TestSuppress(unittest.TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = suppress.__doc__
obj = suppress()
self.assertEqual(obj.__doc__, cm_docstring)
def test_no_result_from_enter(self):
with suppress(ValueError) as enter_result:
self.assertIsNone(enter_result)
def test_no_exception(self):
with suppress(ValueError):
self.assertEqual(pow(2, 5), 32)
def test_exact_exception(self):
with suppress(TypeError):
len(5)
def test_exception_hierarchy(self):
with suppress(LookupError):
'Hello'[50]
def test_other_exception(self):
with self.assertRaises(ZeroDivisionError):
with suppress(TypeError):
1/0
def test_no_args(self):
with self.assertRaises(ZeroDivisionError):
with suppress():
1/0
def test_multiple_exception_args(self):
with suppress(ZeroDivisionError, TypeError):
1/0
with suppress(ZeroDivisionError, TypeError):
len(5)
def test_cm_is_reentrant(self):
ignore_exceptions = suppress(Exception)
with ignore_exceptions:
pass
with ignore_exceptions:
len(5)
with ignore_exceptions:
with ignore_exceptions: # Check nested usage
len(5)
outer_continued = True
1/0
self.assertTrue(outer_continued)
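# Illustrative sketch (not part of the original test suite) of the ExitStack pattern the
# tests above exercise: managing a variable number of context managers in one with-block.
# The file paths are placeholders supplied by the caller.
def _read_all(paths):
    with ExitStack() as stack:
        files = [stack.enter_context(open(p)) for p in paths]
        return [f.read() for f in files]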
if __name__ == "__main__":
unittest.main()
|
py
|
1a5e6ef34882d55f754a98000a97ec9f68dedb60
|
import json
from django.views import View
from schema import Schema, Regex, And, Or, Use, Optional
from apps.loon_base_view import LoonBaseView
from service.account.account_base_service import account_base_service_ins
from service.format_response import api_response
from service.permission.manage_permission import manage_permission_check
from service.workflow.workflow_base_service import workflow_base_service_ins
from service.workflow.workflow_custom_field_service import workflow_custom_field_service_ins
from service.workflow.workflow_custom_notice_service import workflow_custom_notice_service_ins
from service.workflow.workflow_runscript_service import workflow_run_script_service_ins
from service.workflow.workflow_state_service import workflow_state_service_ins
from service.workflow.workflow_transition_service import workflow_transition_service_ins
class WorkflowView(LoonBaseView):
post_schema = Schema({
'name': And(str, lambda n: n != '', error='name is needed'),
Optional('description'): str,
str: object
})
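    # Illustrative note (not part of the original view): LoonBaseView is assumed to validate
    # the request body against post_schema, so a payload such as
    #     {"name": "leave request", "description": "optional text"}
    # is accepted, while an empty "name" fails with the 'name is needed' error defined above.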
def get(self, request, *args, **kwargs):
"""
        Get the workflow list
:param request:
:param args:
:param kwargs:
:return:
"""
request_data = request.GET
name = request_data.get('name', '')
per_page = int(request_data.get('per_page', 10))
page = int(request_data.get('page', 1))
        from_admin = int(request_data.get('from_admin', 0))  # only list workflows the requester has admin permission for
username = request.META.get('HTTP_USERNAME')
app_name = request.META.get('HTTP_APPNAME')
flag, result = account_base_service_ins.app_workflow_permission_list(app_name)
if not flag:
return api_response(-1, result, {})
if not result.get('workflow_id_list'):
data = dict(value=[], per_page=per_page, page=page, total=0)
code, msg, = 0, ''
return api_response(code, msg, data)
permission_workflow_id_list = result.get('workflow_id_list')
flag, result = workflow_base_service_ins.get_workflow_list(name, page, per_page, permission_workflow_id_list, username, from_admin)
if flag is not False:
paginator_info = result.get('paginator_info')
data = dict(value=result.get('workflow_result_restful_list'), per_page=paginator_info.get('per_page'),
page=paginator_info.get('page'), total=paginator_info.get('total'))
code, msg, = 0, ''
else:
code, data, msg = -1, '', result
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def post(self, request, *args, **kwargs):
"""
        Create a new workflow
:param request:
:param args:
:param kwargs:
:return:
"""
json_str = request.body.decode('utf-8')
if not json_str:
            return api_response(-1, 'post parameter is empty', {})
request_data_dict = json.loads(json_str)
name = request_data_dict.get('name', '')
description = request_data_dict.get('description', '')
notices = request_data_dict.get('notices', '')
view_permission_check = request_data_dict.get('view_permission_check', 1)
limit_expression = request_data_dict.get('limit_expression', '')
display_form_str = request_data_dict.get('display_form_str', '')
workflow_admin = request_data_dict.get('workflow_admin', '')
creator = request.META.get('HTTP_USERNAME', '')
flag, result = workflow_base_service_ins.add_workflow(name, description, notices, view_permission_check, limit_expression,
display_form_str, creator, workflow_admin)
if flag is False:
code, msg, data = -1, result, {}
else:
code, msg, data = 0, '', {'workflow_id': result.get('workflow_id')}
return api_response(code, msg, data)
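# Illustrative client-side sketch (not part of the original module): calling the list
# endpoint above. This file only shows that 'username' and 'appname' request headers are
# read, so the URL path, host and any signing details below are assumptions.
#     import requests
#     resp = requests.get('http://loon.example.com/api/v1.0/workflows',
#                         params={'page': 1, 'per_page': 10},
#                         headers={'username': 'admin', 'appname': 'ops'})
#     print(resp.json())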
class WorkflowUserAdminView(LoonBaseView):
def get(self, request, *args, **kwargs):
"""
        Get the workflows administered by the user
:param request:
:param args:
:param kwargs:
:return:
"""
username = request.META.get('HTTP_USERNAME')
flag, result = workflow_base_service_ins.get_workflow_manage_list(username)
if flag is False:
return api_response(-1, result, {})
return api_response(0, '', result.get('workflow_list'))
class WorkflowInitView(LoonBaseView):
def get(self, request, *args, **kwargs):
"""
        Get the workflow's initial state info, including state details and allowed transitions
:param request:
:param args:
:param kwargs:
:return:
"""
workflow_id = kwargs.get('workflow_id')
username = request.META.get('HTTP_USERNAME')
app_name = request.META.get('HTTP_APPNAME')
        # check whether the app has permission for this workflow
app_permission, msg = account_base_service_ins.app_workflow_permission_check(app_name, workflow_id)
if not app_permission:
return api_response(-1, 'APP:{} have no permission to get this workflow info'.format(app_name), '')
if not (workflow_id and username):
            return api_response(-1, 'username is required', '')
flag, state_result = workflow_state_service_ins.get_workflow_init_state(workflow_id)
if flag is not False:
code, msg, data = 0, '', state_result
else:
code, msg, data = -1, state_result, ''
return api_response(code, msg, data)
class WorkflowDetailView(LoonBaseView):
patch_schema = Schema({
'name': And(str, lambda n: n != '', error='name is needed'),
Optional('description'): str,
str: object
})
@manage_permission_check('workflow_admin')
def get(self, request, *args, **kwargs):
"""
        Get workflow details
:param request:
:param args:
:param kwargs:
:return:
"""
workflow_id = kwargs.get('workflow_id')
app_name = request.META.get('HTTP_APPNAME')
        # check whether the app has permission for this workflow
app_permission, msg = account_base_service_ins.app_workflow_permission_check(app_name, workflow_id)
if not app_permission:
return api_response(-1, 'APP:{} have no permission to get this workflow info'.format(app_name), '')
flag, workflow_result = workflow_base_service_ins.get_by_id(workflow_id)
if flag is False:
code, msg, data = -1, workflow_result, {}
else:
data = dict(name=workflow_result.name, description=workflow_result.description,
notices=workflow_result.notices, view_permission_check=workflow_result.view_permission_check,
limit_expression=workflow_result.limit_expression,
display_form_str=workflow_result.display_form_str, creator=workflow_result.creator,
gmt_created=str(workflow_result.gmt_created)[:19])
code = 0
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def patch(self, request, *args, **kwargs):
"""
        Modify a workflow
:param request:
:param args:
:param kwargs:
:return:
"""
json_str = request.body.decode('utf-8')
if not json_str:
            return api_response(-1, 'post parameter is empty', {})
request_data_dict = json.loads(json_str)
app_name = request.META.get('HTTP_APPNAME')
workflow_id = kwargs.get('workflow_id')
from service.account.account_base_service import AccountBaseService
        # check whether the app has permission for this workflow
app_permission, msg = AccountBaseService.app_workflow_permission_check(app_name, workflow_id)
if not app_permission:
return api_response(-1, 'APP:{} have no permission to get this workflow info'.format(app_name), '')
name = request_data_dict.get('name', '')
description = request_data_dict.get('description', '')
notices = request_data_dict.get('notices', '')
view_permission_check = request_data_dict.get('view_permission_check', 1)
limit_expression = request_data_dict.get('limit_expression', '')
display_form_str = request_data_dict.get('display_form_str', '')
workflow_admin = request_data_dict.get('workflow_admin', '')
flag, result = workflow_base_service_ins.edit_workflow(
workflow_id, name, description, notices, view_permission_check, limit_expression, display_form_str,
workflow_admin)
if flag is False:
code, msg, data = -1, result, {}
else:
code, msg, data = 0, '', {}
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def delete(self, request, *args, **kwargs):
"""
        Delete a workflow
:param request:
:param args:
:param kwargs:
:return:
"""
app_name = request.META.get('HTTP_APPNAME')
workflow_id = kwargs.get('workflow_id')
        # check whether the app has permission for this workflow
app_permission, msg = account_base_service_ins.app_workflow_permission_check(app_name, workflow_id)
if not app_permission:
return api_response(-1, 'APP:{} have no permission to get this workflow info'.format(app_name), '')
flag, result = workflow_base_service_ins.delete_workflow(workflow_id)
if flag is False:
            code, msg, data = -1, result, {}
else:
code, msg, data = 0, '', {}
return api_response(code, msg, data)
class WorkflowTransitionView(LoonBaseView):
post_schema = Schema({
'name': And(str, lambda n: n != '', error='name is needed'),
'transition_type_id': And(int, error='transition_type_id is needed'),
'source_state_id': And(int, lambda n: n != 0, error='source_state_id is needed'),
'attribute_type_id': And(int, lambda n: n != 0, error='attribute_type_id is needed'),
Optional('alert_enable'): int,
Optional('field_require_check'): int,
Optional('alert_text'): str,
Optional('destination_state_id'): int,
Optional('timer'): int,
Optional('condition_expression'): str,
})
@manage_permission_check('workflow_admin')
def get(self, request, *args, **kwargs):
"""
        Get the workflow's transitions
:param request:
:param args:
:param kwargs:
:return:
"""
workflow_id = kwargs.get('workflow_id')
request_data = request.GET
per_page = int(request_data.get('per_page', 10)) if request_data.get('per_page', 10) else 10
page = int(request_data.get('page', 1)) if request_data.get('page', 1) else 1
query_value = request_data.get('search_value', '')
        # if not username:
        #     return api_response(-1, 'username is required', '')
flag, result = workflow_transition_service_ins.get_transitions_serialize_by_workflow_id(workflow_id, per_page, page, query_value)
if flag is not False:
paginator_info = result.get('paginator_info')
data = dict(value=result.get('workflow_transitions_restful_list'), per_page=paginator_info.get('per_page'),
page=paginator_info.get('page'), total=paginator_info.get('total'))
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def post(self, request, *args, **kwargs):
"""
        Add a transition
:param request:
:param args:
:param kwargs:
:return:
"""
json_str = request.body.decode('utf-8')
if not json_str:
            return api_response(-1, 'post parameter is empty', {})
request_data_dict = json.loads(json_str)
workflow_id = kwargs.get('workflow_id')
username = request.user.username
name = request_data_dict.get('name', '')
transition_type_id = int(request_data_dict.get('transition_type_id', 0))
timer = int(request_data_dict.get('timer', 0))
source_state_id = int(request_data_dict.get('source_state_id', 0))
destination_state_id = int(request_data_dict.get('destination_state_id', 0))
condition_expression = request_data_dict.get('condition_expression', '')
attribute_type_id = int(request_data_dict.get('attribute_type_id', 0))
field_require_check = int(request_data_dict.get('field_require_check', 0))
alert_enable = int(request_data_dict.get('alert_enable', 0))
alert_text = request_data_dict.get('alert_text', '')
flag, result = workflow_transition_service_ins.add_workflow_transition(workflow_id, name, transition_type_id, timer, source_state_id,
destination_state_id, condition_expression, attribute_type_id,
field_require_check, alert_enable, alert_text, username)
if flag is not False:
data = dict(value=dict(transition_id=result.get('transition_id')))
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
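# Illustrative note (not part of the original view): a POST body for the transition
# endpoint above might look like the following (the IDs are made-up examples):
#     {"name": "submit", "transition_type_id": 1, "source_state_id": 2,
#      "destination_state_id": 3, "attribute_type_id": 1, "field_require_check": 1}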
class WorkflowTransitionDetailView(LoonBaseView):
patch_schema = Schema({
'name': And(str, lambda n: n != '', error='name is needed'),
'transition_type_id': And(int, error='transition_type_id is needed'),
'source_state_id': And(int, lambda n: n != 0, error='source_state_id is needed'),
'attribute_type_id': And(int, lambda n: n != 0, error='attribute_type_id is needed'),
Optional('alert_enable'): int,
Optional('field_require_check'): int,
Optional('alert_text'): str,
Optional('destination_state_id'): int,
Optional('timer'): int,
Optional('condition_expression'): str,
})
@manage_permission_check('workflow_admin')
def patch(self, request, *args, **kwargs):
"""
edit a transition
:param request:
:param args:
:param kwargs:
:return:
"""
json_str = request.body.decode('utf-8')
if not json_str:
return api_response(-1, 'post parameters are empty', {})
request_data_dict = json.loads(json_str)
workflow_id = kwargs.get('workflow_id')
app_name = request.META.get('HTTP_APPNAME')
username = request.user.username
name = request_data_dict.get('name', '')
transition_type_id = int(request_data_dict.get('transition_type_id', 0))
timer = int(request_data_dict.get('timer', 0))
source_state_id = int(request_data_dict.get('source_state_id', 0))
destination_state_id = int(request_data_dict.get('destination_state_id', 0))
condition_expression = request_data_dict.get('condition_expression', '')
attribute_type_id = int(request_data_dict.get('attribute_type_id', 0))
field_require_check = int(request_data_dict.get('field_require_check', 0))
alert_enable = int(request_data_dict.get('alert_enable', 0))
alert_text = request_data_dict.get('alert_text', '')
transition_id = kwargs.get('transition_id')
flag, result = workflow_transition_service_ins.edit_workflow_transition(transition_id, workflow_id, name,
transition_type_id, timer, source_state_id,
destination_state_id, condition_expression,
attribute_type_id, field_require_check,
alert_enable, alert_text)
if flag is not False:
data = {}
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def delete(self, request, *args, **kwargs):
"""
delete a transition
:param request:
:param args:
:param kwargs:
:return:
"""
transition_id = kwargs.get('transition_id')
flag, result = workflow_transition_service_ins.del_workflow_transition(transition_id)
if flag is not False:
data = {}
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
class StateView(LoonBaseView):
def get(self, request, *args, **kwargs):
"""
get state detail
:param request:
:param args:
:param kwargs:
:return:
"""
state_id = kwargs.get('state_id')
request_data = request.GET
username = request.META.get('HTTP_USERNAME')
if not username:
return api_response(-1, 'please provide username', '')
flag, state_info_dict = workflow_state_service_ins.get_restful_state_info_by_id(state_id)
if flag is not False:
code, data, msg = 0, state_info_dict, ''
else:
code, data, msg = -1, {}, state_info_dict
return api_response(code, msg, data)
class WorkflowStateView(LoonBaseView):
post_schema = Schema({
'name': And(str, lambda n: n != '', error='name is needed'),
'order_id': And(int, error='order_id is needed'),
'type_id': And(int, error='type_id is needed'),
'participant_type_id': int,
'distribute_type_id': And(int, lambda n: n != 0, error='distribute_type_id is needed'),
Optional('remember_last_man_enable'): int,
Optional('state_field_str'): str,
Optional('label'): str,
str: object
})
def get(self, request, *args, **kwargs):
"""
get the state list of a workflow
:param request:
:param args:
:param kwargs:
:return:
"""
workflow_id = kwargs.get('workflow_id')
request_data = request.GET
# username = request_data.get('username', '')  # permission control based on username may be added later
username = request.META.get('HTTP_USERNAME')
search_value = request_data.get('search_value', '')
per_page = int(request_data.get('per_page', 10)) if request_data.get('per_page', 10) else 10
page = int(request_data.get('page', 1)) if request_data.get('page', 1) else 1
# if not username:
# return api_response(-1, 'please provide username', '')
flag, result = workflow_state_service_ins.get_workflow_states_serialize(workflow_id, per_page, page, search_value)
if flag is not False:
paginator_info = result.get('paginator_info')
data = dict(value=result.get('workflow_states_restful_list'), per_page=paginator_info.get('per_page'),
page=paginator_info.get('page'), total=paginator_info.get('total'))
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def post(self, request, *args, **kwargs):
"""
add a state
:param request:
:param args:
:param kwargs:
:return:
"""
json_str = request.body.decode('utf-8')
if not json_str:
return api_response(-1, 'post parameters are empty', {})
request_data_dict = json.loads(json_str)
workflow_data = {}
app_name = request.META.get('HTTP_APPNAME')
username = request.META.get('HTTP_USERNAME')
name = request_data_dict.get('name', '')
is_hidden = request_data_dict.get('is_hidden', 0)
order_id = int(request_data_dict.get('order_id', 0))
type_id = int(request_data_dict.get('type_id', 0))
remember_last_man_enable = int(request_data_dict.get('remember_last_man_enable', 0))
enable_retreat = int(request_data_dict.get('enable_retreat', 0))
participant_type_id = int(request_data_dict.get('participant_type_id', 0))
participant = request_data_dict.get('participant', '')
distribute_type_id = int(request_data_dict.get('distribute_type_id', 1))
state_field_str = request_data_dict.get('state_field_str', '')
label = request_data_dict.get('label', '')
workflow_id = kwargs.get('workflow_id')
flag, result = workflow_state_service_ins.add_workflow_state(
workflow_id, name, is_hidden, order_id, type_id, remember_last_man_enable, participant_type_id,
participant, distribute_type_id, state_field_str, label, username, enable_retreat)
if flag is False:
code, msg, data = -1, result, {}
else:
code, msg, data = 0, '', {'state_id': result.get('workflow_state_id')}
return api_response(code, msg, data)
class WorkflowStateDetailView(LoonBaseView):
patch_schema = Schema({
'name': And(str, lambda n: n != '', error='name is needed'),
'order_id': And(int, error='order_id is needed'),
'type_id': And(int, error='type_id is needed'),
'participant_type_id': int,
'distribute_type_id': And(int, lambda n: n != 0, error='distribute_type_id is needed'),
Optional('remember_last_man_enable'): int,
Optional('state_field_str'): str,
Optional('label'): str,
str: object
})
@manage_permission_check('workflow_admin')
def patch(self, request, *args, **kwargs):
"""
edit a state
:param request:
:param args:
:param kwargs:
:return:
"""
json_str = request.body.decode('utf-8')
if not json_str:
return api_response(-1, 'post parameters are empty', {})
request_data_dict = json.loads(json_str)
workflow_data = {}
app_name = request.META.get('HTTP_APPNAME')
username = request.META.get('HTTP_USERNAME')
name = request_data_dict.get('name', '')
is_hidden = request_data_dict.get('is_hidden', 0)
order_id = int(request_data_dict.get('order_id', 0))
type_id = int(request_data_dict.get('type_id', 0))
remember_last_man_enable = int(request_data_dict.get('remember_last_man_enable', 0))
enable_retreat = int(request_data_dict.get('enable_retreat', 0))
participant_type_id = int(request_data_dict.get('participant_type_id', 0))
participant = request_data_dict.get('participant', '')
distribute_type_id = int(request_data_dict.get('distribute_type_id', 1))
state_field_str = request_data_dict.get('state_field_str', '')
label = request_data_dict.get('label', '')
workflow_id = kwargs.get('workflow_id')
state_id = kwargs.get('state_id')
flag, result = workflow_state_service_ins.edit_workflow_state(
state_id, workflow_id, name, is_hidden, order_id, type_id, remember_last_man_enable, participant_type_id,
participant, distribute_type_id, state_field_str, label, enable_retreat)
if flag is False:
code, msg, data = -1, result, {}
else:
code, msg, data = 0, '', {}
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def delete(self, request, *args, **kwargs):
"""
delete a state
:param request:
:param args:
:param kwargs:
:return:
"""
app_name = request.META.get('HTTP_APPNAME')
state_id = kwargs.get('state_id')
flag, result = workflow_state_service_ins.del_workflow_state(state_id)
if flag is False:
code, msg, data = -1, result, {}
else:
code, msg, data = 0, '', {}
return api_response(code, msg, data)
class WorkflowRunScriptView(LoonBaseView):
@manage_permission_check('workflow_admin')
def get(self, request, *args, **kwargs):
"""
get the list of workflow run scripts
:param request:
:param args:
:param kwargs:
:return:
"""
request_data = request.GET
username = request.META.get('HTTP_USERNAME')
if not username:
username = request.user.username
search_value = request_data.get('search_value', '')
per_page = int(request_data.get('per_page', 10)) if request_data.get('per_page', 10) else 10
page = int(request_data.get('page', 1)) if request_data.get('page', 1) else 1
if not username:
return api_response(-1, 'please provide username', '')
flag, result = workflow_run_script_service_ins.get_run_script_list(search_value, page, per_page)
if flag is not False:
paginator_info = result.get('paginator_info')
data = dict(value=result.get('run_script_result_restful_list'), per_page=paginator_info.get('per_page'), page=paginator_info.get('page'),
total=paginator_info.get('total'))
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def post(self, request, *args, **kwargs):
"""
add a script
:param request:
:param args:
:param kwargs:
:return:
"""
file_obj = request.FILES.get('file')
if file_obj:  # handle the uploaded attachment file
import os
import uuid
from django.conf import settings
script_file_name = "workflow_script/{}.py".format(str(uuid.uuid1()))
upload_file = os.path.join(settings.MEDIA_ROOT, script_file_name)
with open(upload_file, 'wb') as new_file:
for chunk in file_obj.chunks():
new_file.write(chunk)
script_name = request.POST.get('script_name', '')
script_desc = request.POST.get('script_desc', '')
is_active = request.POST.get('is_active', '0')
flag, result = workflow_run_script_service_ins.add_run_script(script_name, script_file_name, script_desc, is_active, request.user.username)
if flag is not False:
data, code, msg = dict(script_id=result.get('script_id')), 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
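# For reference (informational comment): this endpoint expects multipart/form-data
# with an optional "file" part plus the form fields "script_name", "script_desc" and
# "is_active"; the uploaded file is stored under MEDIA_ROOT/workflow_script/ with a
# uuid-based name before the record is created. A hedged client sketch, where the URL
# is a placeholder rather than the project's real route:
#
#   import requests
#   requests.post("http://<host>/<run-script-endpoint>",
#                 files={"file": open("my_script.py", "rb")},
#                 data={"script_name": "demo", "script_desc": "demo", "is_active": "1"})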
class WorkflowRunScriptDetailView(LoonBaseView):
@manage_permission_check('workflow_admin')
def post(self, request, *args, **kwargs):
"""
update script
(PATCH was planned originally, but a non-JSON submission cannot be parsed when a file is uploaded, so POST is used instead)
:param request:
:param args:
:param kwargs:
:return:
"""
file_obj = request.FILES.get('file')
if file_obj:  # handle the uploaded attachment file
import os
import uuid
from django.conf import settings
script_file_name = "workflow_script/{}.py".format(str(uuid.uuid1()))
upload_file = os.path.join(settings.MEDIA_ROOT, script_file_name)
with open(upload_file, 'wb') as new_file:
for chunk in file_obj.chunks():
new_file.write(chunk)
else:
script_file_name = None
run_script_id = kwargs.get('run_script_id')
script_name = request.POST.get('script_name', '')
script_desc = request.POST.get('script_desc', '')
is_active = request.POST.get('is_active', '0')
flag, result = workflow_run_script_service_ins.edit_run_script(run_script_id, script_name, script_file_name, script_desc, is_active)
if flag is not False:
code, msg, data = 0, '', {}
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def delete(self, request, *args, **kwargs):
"""
delete a script; this operation only flags the record and does not delete the underlying script file
:param request:
:param args:
:param kwargs:
:return:
"""
run_script_id = kwargs.get('run_script_id')
result, msg = workflow_run_script_service_ins.del_run_script(run_script_id)
if result is not False:
code, msg, data = 0, '', {}
else:
code, data = -1, {}
return api_response(code, msg, data)
class WorkflowCustomNoticeView(LoonBaseView):
post_schema = Schema({
'name': And(str, lambda n: n != '', error='name is needed'),
'hook_url': And(str, lambda n: n != '', error='hook_url is needed'),
'hook_token': And(str, lambda n: n != '', error='hook_token is needed'),
Optional('description'): str,
})
@manage_permission_check('admin')
def get(self, request, *args, **kwargs):
"""
get workflow custom notice list
:param request:
:param args:
:param kwargs:
:return:
"""
request_data = request.GET
# username = request_data.get('username', '')  # permission control based on username may be added later
username = request.META.get('HTTP_USERNAME')
if not username:
username = request.user.username
search_value = request_data.get('search_value', '')
per_page = int(request_data.get('per_page', 10)) if request_data.get('per_page', 10) else 10
page = int(request_data.get('page', 1)) if request_data.get('page', 1) else 1
if not username:
return api_response(-1, 'please provide username', '')
result, msg = workflow_custom_notice_service_ins.get_notice_list(search_value, page, per_page)
if result is not False:
data = dict(value=result, per_page=msg['per_page'], page=msg['page'], total=msg['total'])
code, msg, = 0, ''
else:
code, data = -1, {}
return api_response(code, msg, data)
@manage_permission_check('admin')
def post(self, request, *args, **kwargs):
"""
add a notice record
:param request:
:param args:
:param kwargs:
:return:
"""
json_str = request.body.decode('utf-8')
if not json_str:
return api_response(-1, 'post parameters are empty', {})
request_data_dict = json.loads(json_str)
name = request_data_dict.get('name', '')
description = request_data_dict.get('description', '')
hook_url = request_data_dict.get('hook_url', '')
hook_token = request_data_dict.get('hook_token', '')
creator = request.user.username
flag, result = account_base_service_ins.admin_permission_check(creator)
if flag is False:
return api_response(-1, result, {})
result, msg = workflow_custom_notice_service_ins.add_custom_notice(name, description, hook_url, hook_token, creator)
if result is not False:
data = {}
code, msg, = 0, ''
else:
code, data = -1, {}
return api_response(code, msg, data)
class WorkflowCustomNoticeDetailView(LoonBaseView):
patch_schema = Schema({
'name': And(str, lambda n: n != '', error='name is needed'),
'hook_url': And(str, lambda n: n != '', error='hook_url is needed'),
'hook_token': And(str, lambda n: n != '', error='hook_token is needed'),
Optional('description'): str,
})
@manage_permission_check('admin')
def patch(self, request, *args, **kwargs):
"""
update a notice
:param request:
:param args:
:param kwargs:
:return:
"""
notice_id = kwargs.get('notice_id')
json_str = request.body.decode('utf-8')
if not json_str:
return api_response(-1, 'post parameters are empty', {})
request_data_dict = json.loads(json_str)
name = request_data_dict.get('name', '')
description = request_data_dict.get('description', '')
hook_url = request_data_dict.get('hook_url', '')
hook_token = request_data_dict.get('hook_token', '')
creator = request.user.username
flag, result = account_base_service_ins.admin_permission_check(creator)
if flag is False:
return api_response(-1, result, {})
result, msg = workflow_custom_notice_service_ins.update_custom_notice(notice_id, name, description, hook_url,
hook_token)
if result is not False:
data = {}
code, msg, = 0, ''
else:
code, data = -1, {}
return api_response(code, msg, data)
@manage_permission_check('admin')
def delete(self, request, *args, **kwargs):
"""
delete a custom notice
:param request:
:param args:
:param kwargs:
:return:
"""
notice_id = kwargs.get('notice_id')
result, msg = workflow_custom_notice_service_ins.del_custom_notice(notice_id)
if result is not False:
code, msg, data = 0, '', {}
else:
code, data = -1, {}
return api_response(code, msg, data)
class WorkflowCustomFieldView(LoonBaseView):
post_schema = Schema({
'field_key': And(str, lambda n: n != '', error='field_key is needed'),
'field_name': And(str, lambda n: n != '', error='field_name is needed'),
'field_type_id': And(int, lambda n: n != 0, error='field_type_id is needed and should be a number'),
'order_id': And(int, error='order_id is needed and should be a number'),
Optional('description'): str,
Optional('label'): str,
Optional('field_template'): str,
Optional('default_value'): str,
Optional('boolean_field_display'): str,
Optional('field_choice'): str,
})
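# Illustrative example of a request body that satisfies post_schema above; all
# values are placeholders:
# {
#     "field_key": "city",
#     "field_name": "City",
#     "field_type_id": 5,
#     "order_id": 10,
#     "description": "city of the applicant"
# }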
def get(self, request, *args, **kwargs):
"""
get the custom field list of a workflow
:param request:
:param args:
:param kwargs:
:return:
"""
request_data = request.GET
# username = request_data.get('username', '')  # permission control based on username may be added later
username = request.META.get('HTTP_USERNAME')
if not username:
username = request.user.username
search_value = request_data.get('search_value', '')
per_page = int(request_data.get('per_page', 10)) if request_data.get('per_page', 10) else 10
page = int(request_data.get('page', 1)) if request_data.get('page', 1) else 1
if not username:
return api_response(-1, 'please provide username', '')
flag, result = workflow_custom_field_service_ins.get_workflow_custom_field_list(kwargs.get('workflow_id'), search_value, page, per_page)
if flag is not False:
paginator_info = result.get('paginator_info')
data = dict(value=result.get('workflow_custom_field_result_restful_list'),
per_page=paginator_info.get('per_page'), page=paginator_info.get('page'),
total=paginator_info.get('total'))
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def post(self, request, *args, **kwargs):
"""
add a workflow custom field
:param request:
:param args:
:param kwargs:
:return:
"""
app_name = request.META.get('HTTP_APPNAME')
username = request.META.get('HTTP_USERNAME')
workflow_id = kwargs.get('workflow_id')
# check whether the app has permission for this workflow
app_permission, msg = account_base_service_ins.app_workflow_permission_check(app_name, workflow_id)
if not app_permission:
return api_response(-1, 'APP:{} has no permission to get this workflow info'.format(app_name), '')
json_str = request.body.decode('utf-8')
if not json_str:
return api_response(-1, 'post parameters are empty', {})
request_data_dict = json.loads(json_str)
field_key = request_data_dict.get('field_key', '')
field_name = request_data_dict.get('field_name', '')
field_type_id = request_data_dict.get('field_type_id', '')
order_id = int(request_data_dict.get('order_id', 0))
label = request_data_dict.get('label', '')
description = request_data_dict.get('description', '')
field_template = request_data_dict.get('field_template', '')
default_value = request_data_dict.get('default_value', '')
boolean_field_display = request_data_dict.get('boolean_field_display', '')
field_choice = request_data_dict.get('field_choice', '')
flag, result = workflow_custom_field_service_ins.add_record(workflow_id, field_type_id, field_key, field_name, order_id,
default_value, description, field_template,
boolean_field_display, field_choice, label, username)
if flag is not False:
data = dict(value={'custom_field_id': result.get('custom_field_id')})
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
class WorkflowCustomFieldDetailView(LoonBaseView):
patch_schema = Schema({
'field_key': And(str, lambda n: n != '', error='field_key is needed'),
'field_name': And(str, lambda n: n != '', error='field_name is needed'),
'field_type_id': And(int, lambda n: n != 0, error='field_type_id is needed and should be a number'),
'order_id': And(int, error='order_id is needed and should be a number'),
Optional('description'): str,
Optional('label'): str,
Optional('field_template'): str,
Optional('default_value'): str,
Optional('boolean_field_display'): str,
Optional('field_choice'): str,
})
@manage_permission_check('workflow_admin')
def patch(self, request, *args, **kwargs):
"""
update a custom field
:param request:
:param args:
:param kwargs:
:return:
"""
custom_field_id = kwargs.get('custom_field_id')
app_name = request.META.get('HTTP_APPNAME')
username = request.META.get('HTTP_USERNAME')
workflow_id = kwargs.get('workflow_id')
# check whether the app has permission for this workflow
app_permission, msg = account_base_service_ins.app_workflow_permission_check(app_name, workflow_id)
if not app_permission:
return api_response(-1, 'APP:{} has no permission to get this workflow info'.format(app_name), '')
json_str = request.body.decode('utf-8')
if not json_str:
return api_response(-1, 'post parameters are empty', {})
request_data_dict = json.loads(json_str)
field_key = request_data_dict.get('field_key', '')
field_name = request_data_dict.get('field_name', '')
field_type_id = request_data_dict.get('field_type_id', '')
order_id = int(request_data_dict.get('order_id', 0))
label = request_data_dict.get('label', '')
description = request_data_dict.get('description', '')
field_template = request_data_dict.get('field_template', '')
default_value = request_data_dict.get('default_value', '')
boolean_field_display = request_data_dict.get('boolean_field_display', '')
field_choice = request_data_dict.get('field_choice', '')
result, msg = workflow_custom_field_service_ins.edit_record(custom_field_id, workflow_id, field_type_id, field_key, field_name, order_id,
default_value, description, field_template,
boolean_field_display, field_choice, label)
if result is not False:
code, msg, data = 0, '', {}
else:
code, data = -1, {}
return api_response(code, msg, data)
@manage_permission_check('workflow_admin')
def delete(self, request, *args, **kwargs):
"""ๅ ้ค่ฎฐๅฝ"""
app_name = request.META.get('HTTP_APPNAME')
username = request.META.get('HTTP_USERNAME')
workflow_id = kwargs.get('workflow_id')
custom_field_id = kwargs.get('custom_field_id')
# check whether the app has permission for this workflow
app_permission, msg = account_base_service_ins.app_workflow_permission_check(app_name, workflow_id)
if not app_permission:
return api_response(-1, 'APP:{} has no permission to get this workflow info'.format(app_name), '')
flag, result = workflow_custom_field_service_ins.delete_record(custom_field_id)
if flag is not False:
data = dict(value={'custom_field_id': result})
code, msg, = 0, ''
else:
code, data, msg = -1, {}, result
return api_response(code, msg, data)
|
py
|
1a5e703298f959f0eeb1525b8961d2f59a0d4d19
|
# -*- coding: utf-8 -*-
"""Main Controller"""
from tg import expose, url, request, redirect
from eventstreamexamples.lib.base import BaseController
from eventstreamexamples.controllers.error import ErrorController
from eventstream import EventstreamController
from geventeventstream import GeventEventstreamController
__all__ = ['RootController']
class RootController(BaseController):
"""
The root controller for the eventstreamexamples application.
All the other controllers and WSGI applications should be mounted on this
controller. For example::
panel = ControlPanelController()
another_app = AnotherWSGIApplication()
Keep in mind that WSGI applications shouldn't be mounted directly: They
must be wrapped around with :class:`tg.controllers.WSGIAppController`.
"""
error = ErrorController()
eventstream = EventstreamController()
geventeventstream = GeventEventstreamController()
@expose()
def index(self):
return """
<h1><a href="/eventstream">EventStream example</a></h1>
<p>For this, the web app can be run with <b>paster serve --reload development.ini</b>.</p>
<h1><a href="/geventeventstream">EventStream example with gevent</a></h1>
<p>For this, the web app must be run with <b>paster serve development-gunicorn.ini</b>. <strike>reload</strike> gives errors.</p>
"""
|
py
|
1a5e704ed9f7932870f03993d0f323e434538719
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019 Machine Learning Group of the University of Oldenburg.
# Licensed under the Academic Free License version 3.0
import torch as to
import tvo
import numpy as np
from typing import Dict
from tvo.variational._set_redundant_lpj_to_low_CPU import set_redundant_lpj_to_low_CPU
def _unique_ind(x: to.Tensor) -> to.Tensor:
"""Find indices of unique rows in tensor.
:param x: torch tensor
:returns: indices of unique rows in tensor.
"""
n = x.shape[0]
unique_rows, inverse_ind = to.unique(x, sorted=False, return_inverse=True, dim=0)
n_unique = unique_rows.shape[0]
perm = to.arange(n, device=inverse_ind.device)
# make sure reverse_ind relative to old_states come last...
inverse_ind, perm = inverse_ind.flip([0]), perm.flip([0])
# ...so the indices that are written last in each position are the ones for old_states
uniq_ind = inverse_ind.new_empty(n_unique).scatter_(0, inverse_ind, perm)
return uniq_ind
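# Usage sketch (illustrative): for x = to.tensor([[0, 1], [1, 1], [0, 1]]) rows 0 and
# 2 are identical, so _unique_ind(x) returns one index per unique row; because
# inverse_ind and perm are flipped before the scatter, the index kept for a duplicated
# row is its first occurrence in x (here the indices 0 and 1, in some order).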
def _set_redundant_lpj_to_low_GPU(new_states: to.Tensor, new_lpj: to.Tensor, old_states: to.Tensor):
"""Find redundant states in new_states w.r.t. old_states and set
corresponding lpj to low.
:param new_states: set of new variational states (batch_size, newS, H)
:param new_lpj: corresponding log-pseudo-joints (batch_size, newS)
:param old_states: (batch_size, S, H)
"""
N, S, H = old_states.shape
newS = new_states.shape[1]
# old_states must come first for np.unique to discard redundant new_states
old_and_new = to.cat((old_states, new_states), dim=1)
for n in range(N):
uniq_idx = _unique_ind(old_and_new[n])
# indexes of states in new_states[n] that are not in old_states[n]
new_uniq_idx = uniq_idx[uniq_idx >= S] - S
# BoolTensor in pytorch>=1.2, ByteTensor otherwise
bool_or_byte = (to.empty(0) < 0).dtype
mask = to.ones(newS, dtype=bool_or_byte, device=new_lpj.device)
# indexes of all non-unique states in new_states (complementary of new_uniq_idx)
mask[new_uniq_idx.to(device=new_lpj.device)] = 0
# set lpj of redundant states to an arbitrary low value
new_lpj[n][mask] = -1e20
# set_redundant_lpj_to_low is a performance hotspot. when running on CPU, we use a cython
# function that runs on numpy arrays, when running on GPU, we stick to torch tensors
def set_redundant_lpj_to_low(new_states: to.Tensor, new_lpj: to.Tensor, old_states: to.Tensor):
if tvo.get_device().type == "cpu":
set_redundant_lpj_to_low_CPU(new_states.numpy(), new_lpj.numpy(), old_states.numpy())
else:
_set_redundant_lpj_to_low_GPU(new_states, new_lpj, old_states)
def generate_unique_states(
n_states: int, H: int, crowdedness: float = 1.0, device: to.device = None
) -> to.Tensor:
"""Generate a torch tensor containing random and unique binary vectors.
:param n_states: number of unique vectors to be generated
:param H: size of binary vector
:param crowdedness: average crowdedness per state
:param device: torch.device of output Tensor. Defaults to tvo.get_device()
Requires that n_states <= 2**H. Return has shape (n_states, H).
"""
if device is None:
device = tvo.get_device()
assert n_states <= 2**H, "n_states must not exceed 2**H"
n_samples = max(n_states // 2, 1)
s_set = {tuple(s) for s in np.random.binomial(1, p=crowdedness / H, size=(n_samples, H))}
while len(s_set) < n_states:
s_set.update(
{tuple(s) for s in np.random.binomial(1, p=crowdedness / H, size=(n_samples, H))}
)
while len(s_set) > n_states:
s_set.pop()
return to.from_numpy(np.array(tuple(s for s in s_set), dtype=int)).to(
dtype=to.uint8, device=device
)
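# Usage sketch (illustrative): draw 4 distinct binary vectors of length H=8 with on
# average one active unit each; the result has shape (4, 8) and dtype uint8.
#
#   states = generate_unique_states(n_states=4, H=8, crowdedness=1.0, device=to.device("cpu"))
#   assert states.shape == (4, 8) and states.dtype == to.uint8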
def update_states_for_batch(
new_states: to.Tensor,
new_lpj: to.Tensor,
idx: to.Tensor,
all_states: to.Tensor,
all_lpj: to.Tensor,
sort_by_lpj: Dict[str, to.Tensor] = {},
) -> int:
"""Perform substitution of old and new states (and lpj, ...)
according to TVO criterion.
:param new_states: set of new variational states (idx.size, newS, H)
:param new_lpj: corresponding log-pseudo-joints (idx.size, newS)
:param idx: indices of the datapoints that compose the batch within the dataset
:param all_states: set of all variational states (N, S, H)
:param all_lpj: corresponding log-pseudo-joints (N, S)
:param sort_by_lpj: optional list of tensors with shape (n,s,...) that will be
sorted by all_lpj, the same way all_lpj and all_states are sorted.
S is the number of variational states memorized for each of the N
data-points. idx contains the ordered list of indices for which the
new_states have been evaluated (i.e. the states in new_states[0] are to
be put into all_states[idx[0]]). all_states[n] is updated to contain the
set of variational states with the best log-pseudo-joints.
"""
# TODO Find out why lpj precision decreases for states without substitutions
# (difference on the order of 1e-15).
S = all_states.shape[1]
batch_size, newS, H = new_states.shape
old_states = all_states[idx]
old_lpj = all_lpj[idx]
assert old_states.shape == (batch_size, S, H)
assert old_lpj.shape == (batch_size, S)
conc_states = to.cat((old_states, new_states), dim=1)
conc_lpj = to.cat((old_lpj, new_lpj), dim=1) # (batch_size, S+newS)
# is (batch_size, S)
sorted_idx = to.flip(to.topk(conc_lpj, k=S, dim=1, largest=True, sorted=True)[1], [1])
flattened_sorted_idx = sorted_idx.flatten()
idx_n = idx.repeat(S, 1).t().flatten()
idx_s = to.arange(S, device=all_states.device).repeat(batch_size)
idx_sc = to.arange(batch_size, device=all_states.device).repeat(S, 1).t().flatten()
all_states[idx_n, idx_s] = conc_states[idx_sc, flattened_sorted_idx]
all_lpj[idx_n, idx_s] = conc_lpj[idx_sc, flattened_sorted_idx]
for t in sort_by_lpj.values():
idx_n_ = to.arange(batch_size).repeat(S, 1).t().flatten()
t[idx_n_, idx_s] = t[idx_n_, flattened_sorted_idx]
return (sorted_idx >= old_states.shape[1]).sum().item() # nsubs
def lpj2pjc(lpj: to.Tensor):
"""Shift log-pseudo-joint and convert log- to actual probability
:param lpj: log-pseudo-joint tensor
:returns: probability tensor
"""
up_lpg_bound = 0.0
shft = up_lpg_bound - lpj.max(dim=1, keepdim=True)[0]
tmp = to.exp(lpj + shft)
return tmp.div_(tmp.sum(dim=1, keepdim=True))
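# Numerical sketch (illustrative): subtracting the per-row maximum before
# exponentiating is the usual log-sum-exp stabilisation, so lpj2pjc is a row-wise
# softmax and each output row sums to one.
#
#   _pjc = lpj2pjc(to.tensor([[0.0, -1.0], [2.0, 2.0]]))
#   assert to.allclose(_pjc.sum(dim=1), to.ones(2))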
def _mean_post_einsum(g: to.Tensor, lpj: to.Tensor) -> to.Tensor:
"""Compute expectation value of g(s) w.r.t truncated variational distribution q(s).
:param g: Values of g(s) with shape (N,S,...).
:param lpj: Log-pseudo-joint with shape (N,S).
:returns: tensor with shape (N,...).
"""
return to.einsum("ns...,ns->n...", (g, lpj2pjc(lpj)))
def _mean_post_mul(g: to.Tensor, lpj: to.Tensor) -> to.Tensor:
"""Compute expectation value of g(s) w.r.t truncated variational distribution q(s).
:param g: Values of g(s) with shape (N,S,...).
:param lpj: Log-pseudo-joint with shape (N,S).
:returns: tensor with shape (N,...).
"""
# reshape lpj from (N,S) to (N,S,1,...), to match dimensionality of g
lpj = lpj.view(*lpj.shape, *(1 for _ in range(g.ndimension() - 2)))
return lpj2pjc(lpj).mul(g).sum(dim=1)
def mean_posterior(g: to.Tensor, lpj: to.Tensor) -> to.Tensor:
"""Compute expectation value of g(s) w.r.t truncated variational distribution q(s).
:param g: Values of g(s) with shape (N,S,...).
:param lpj: Log-pseudo-joint with shape (N,S).
:returns: tensor with shape (N,...).
"""
if tvo.get_device().type == "cpu":
means = _mean_post_einsum(g, lpj)
else:
means = _mean_post_mul(g, lpj)
assert means.shape == (g.shape[0], *g.shape[2:])
assert not to.isnan(means).any() and not to.isinf(means).any()
return means
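# Shape sketch (illustrative): for g of shape (N, S, D) and lpj of shape (N, S),
# mean_posterior returns an (N, D) tensor, i.e. g averaged over the S variational
# states with weights lpj2pjc(lpj).
#
#   assert mean_posterior(to.randn(4, 3, 2), to.randn(4, 3)).shape == (4, 2)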
|
py
|
1a5e705cc49231e666006787e08df72625f495fb
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Helveticum Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP 9 soft forks.
Connect to a single node.
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 2 block and save coinbases for later use
mine 141 blocks to transition from DEFINED to STARTED
mine 100 blocks signalling readiness and 44 not in order to fail to change state this period
mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN)
mine a further 143 blocks (LOCKED_IN)
test that enforcement has not triggered (which triggers ACTIVE)
test that enforcement has triggered
"""
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP
from io import BytesIO
import shutil
import time
import itertools
class BIP9SoftForksTest(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1']]
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
tx.nVersion = 2
return tx
def sign_transaction(self, node, tx):
signresult = node.signrawtransaction(bytes_to_hex_str(tx.serialize()))
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = version
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
return test_blocks
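# Note on the block version values used throughout the tests (informational comment,
# not part of the original script): 0x20000001 carries the BIP9 version-bits prefix
# (top bits 001) with bit 0 set and therefore counts as "signalling ready", while a
# plain version 4 block (0x00000004) lacks the prefix and counts as "not signalling".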
def get_bip9_status(self, key):
info = self.nodes[0].getblockchaininfo()
return info['bip9_softforks'][key]
def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno):
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
# generate some coins for later
self.coinbase_blocks = self.nodes[0].generate(2)
self.height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = int(time.time())
assert_equal(self.get_bip9_status(bipName)['status'], 'defined')
assert_equal(self.get_bip9_status(bipName)['since'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert_equal(tmpl['version'], 0x20000000)
# Test 1
# Advance from DEFINED to STARTED
test_blocks = self.generate_blocks(141, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 1-A
# check stats after max number of "signalling not" blocks such that LOCKED_IN still possible this period
test_blocks = self.generate_blocks(36, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(10, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 46)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
# Test 1-B
# check stats after one additional "signalling not" block -- LOCKED_IN no longer possible this period
test_blocks = self.generate_blocks(1, 4, test_blocks) # 0x00000004 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 47)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 10)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], False)
# Test 1-C
# finish period with "ready" blocks, but soft fork will still fail to advance to LOCKED_IN
test_blocks = self.generate_blocks(97, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# Test 2
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 1
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
assert_equal(self.get_bip9_status(bipName)['since'], 144)
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 0)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 0)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
assert_equal(tmpl['vbavailable'][bipName], bitno)
assert_equal(tmpl['vbrequired'], 0)
assert(tmpl['version'] & activated_version)
# Test 3
# 108 out of 144 signal bit 1 to achieve LOCKED_IN
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(57, activated_version) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 4, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False)
# check counting stats and "possible" flag before last block of this period achieves LOCKED_IN...
assert_equal(self.get_bip9_status(bipName)['statistics']['elapsed'], 143)
assert_equal(self.get_bip9_status(bipName)['statistics']['count'], 107)
assert_equal(self.get_bip9_status(bipName)['statistics']['possible'], True)
assert_equal(self.get_bip9_status(bipName)['status'], 'started')
# ...continue with Test 3
test_blocks = self.generate_blocks(1, activated_version) # 0x20000001 (signalling ready)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 4
# 143 more blocks (waiting period-1); signalling no longer matters once LOCKED_IN
test_blocks = self.generate_blocks(143, 4)
yield TestInstance(test_blocks, sync_every_block=False)
assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in')
assert_equal(self.get_bip9_status(bipName)['since'], 576)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName not in tmpl['rules'])
# Test 5
# Check that the new rule is enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = activated_version
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
self.height += 1
yield TestInstance([[block, True]])
assert_equal(self.get_bip9_status(bipName)['status'], 'active')
assert_equal(self.get_bip9_status(bipName)['since'], 720)
tmpl = self.nodes[0].getblocktemplate({})
assert(bipName in tmpl['rules'])
assert(bipName not in tmpl['vbavailable'])
assert_equal(tmpl['vbrequired'], 0)
assert(not (tmpl['version'] & (1 << bitno)))
# Test 6
# Check that the new sequence lock rules are enforced
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
invalidate(spendtx)
spendtx = self.sign_transaction(self.nodes[0], spendtx)
spendtx.rehash()
invalidatePostSignature(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(self.height), self.last_block_time + 1)
block.nVersion = 5
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
# Restart all
self.test.clear_all_connections()
self.stop_nodes()
shutil.rmtree(self.options.tmpdir + "/node0")
self.setup_chain()
self.setup_network()
self.test.add_all_connections(self.nodes)
NetworkThread().start()
self.test.test_nodes[0].wait_for_verack()
def get_tests(self):
for test in itertools.chain(
self.test_BIP('csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.mtp_invalidate, self.donothing, 0),
self.test_BIP('csv', 0x20000001, self.donothing, self.csv_invalidate, 0)
):
yield test
def donothing(self, tx):
return
def csv_invalidate(self, tx):
"""Modify the signature in vin 0 of the tx to fail CSV
Prepends -1 CSV DROP in the scriptSig itself.
"""
tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] +
list(CScript(tx.vin[0].scriptSig)))
def sequence_lock_invalidate(self, tx):
"""Modify the nSequence to make it fails once sequence lock rule is
activated (high timespan).
"""
tx.vin[0].nSequence = 0x00FFFFFF
tx.nLockTime = 0
def mtp_invalidate(self, tx):
"""Modify the nLockTime to make it fails once MTP rule is activated."""
# Disable Sequence lock, Activate nLockTime
tx.vin[0].nSequence = 0x90FFFFFF
tx.nLockTime = self.last_block_time
if __name__ == '__main__':
BIP9SoftForksTest().main()
|
py
|
1a5e707c81030e863d2dce0afaabf879060b3cf1
|
import subprocess
import pkg_resources
import json
class funnel:
"""
The funnel subdeployer to manage funnel deployment via Docker
"""
def __init__(self):
"""
Constructor for the funnel subdeployer
Determines paths to coordinate deployment
"""
self.pkgName = __name__
funnelPath = '.'
self.funnelDir = pkg_resources.resource_filename(self.pkgName, funnelPath)
def route(self, args):
"""
The entry-point method for the subdeployer
Coordinates the deployment scheme based on the arguments
Configures and deploys the software container holding funnel
Parameters:
argparse.Namespace args - command-line arguments object
"""
# deploy funnel if selected
if args.funnel:
# configure the funnel setup based on args
self.config(args)
# run the Docker container
self.deployDocker(args.funnelImageName,
args.funnelContainerName,
args.funnelPort)
def deployDocker(self, funnelImageName, funnelContainerName, funnelPort):
"""
Deploy the funnel server via docker
Parameters:
string funnelImageName
string funnelContainerName
string funnelPort
Returns: None
"""
build = ["docker", "build", "-t", funnelImageName, self.funnelDir]
subprocess.call(build)
# We must allow Funnel to call Docker
# from inside one of Docker's container
# Hence we bind one of docker's sockets into its own container
run = ["docker", "run",
"-v", "/var/run/docker.sock:/var/run/docker.sock",
"-p", funnelPort + ":3002",
"--name", funnelContainerName, funnelImageName]
subprocess.Popen(run)
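# For reference (informational comment): the two subprocess calls above amount to
# "docker build -t <funnelImageName> <funnelDir>" followed by
# "docker run -v /var/run/docker.sock:/var/run/docker.sock -p <funnelPort>:3002 --name <funnelContainerName> <funnelImageName>";
# bind-mounting the Docker socket is what lets Funnel start sibling containers through
# the host's Docker daemon.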
def config(self, args):
"""
Writes the keycloak.json file for the funnel client
Parameters:
argparse.Namespace args - An object containing the command-line
arguments as attributes
Returns: None
"""
fileName = self.funnelDir + "/funnel-node/node-client/keycloak.json"
authUrl = "http://" + args.keycloakIP + ":" + args.keycloakPort + "/auth"
redirectList = [ "http://" + args.funnelIP + ":" + args.funnelPort + "/oidc_callback" ]
secretDict = { "secret" : args.funnelSecret }
keycloakData = { "realm" : args.realmName,
"auth-server-url": authUrl,
"resource" : args.funnelID,
"redirect_uris" : redirectList,
"credentials" : secretDict }
jsonData = json.dumps(keycloakData, indent=1)
fileHandle = open(fileName, "w")
fileHandle.write(jsonData)
fileHandle.close()
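# Sketch of the keycloak.json written above, with placeholder values standing in for
# the command-line arguments (illustrative only):
# {
#  "realm": "<realmName>",
#  "auth-server-url": "http://<keycloakIP>:<keycloakPort>/auth",
#  "resource": "<funnelID>",
#  "redirect_uris": ["http://<funnelIP>:<funnelPort>/oidc_callback"],
#  "credentials": {"secret": "<funnelSecret>"}
# }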
|
py
|
1a5e70c6367843274331ba5b3417743c1081b26d
|
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import random
import numpy as np
from config import global_config as cfg
from reader import CamRest676Reader, get_glove_matrix
from reader import KvretReader
from network import FSDM, cuda_
from torch.optim import Adam
from torch.autograd import Variable
from reader import pad_sequences
import argparse, time
from metric import CamRestEvaluator, KvretEvaluator
import logging
class Model:
def __init__(self, dataset):
reader_dict = {
'camrest': CamRest676Reader,
'kvret': KvretReader,
}
model_dict = {
'FSDM': FSDM
}
evaluator_dict = {
'camrest': CamRestEvaluator,
'kvret': KvretEvaluator,
}
self.reader = reader_dict[dataset]()
self.m = model_dict[cfg.m](embed_size=cfg.embedding_size,
hidden_size=cfg.hidden_size,
vocab_size=cfg.vocab_size,
layer_num=cfg.layer_num,
dropout_rate=cfg.dropout_rate,
z_length=cfg.z_length,
max_ts=cfg.max_ts,
beam_search=cfg.beam_search,
beam_size=cfg.beam_size,
eos_token_idx=self.reader.vocab.encode('EOS_M'),
vocab=self.reader.vocab,
teacher_force=cfg.teacher_force,
degree_size=cfg.degree_size,
num_head=cfg.num_head,
separate_enc=cfg.separate_enc)
self.EV = evaluator_dict[dataset] # evaluator class
if cfg.cuda:
self.m = self.m.cuda()
self.base_epoch = -1
def _to_onehot(self, encoded):
_np = np.zeros((cfg.vocab_size, 1))
for idx in encoded:
_np[idx] = 1.
return _np
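# Sketch (illustrative): with cfg.vocab_size = 8 and encoded = [2, 5], _to_onehot
# returns an (8, 1) numpy array with ones at rows 2 and 5 and zeros elsewhere,
# i.e. a multi-hot indicator over the vocabulary.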
def _convert_batch(self, py_batch, prev_z_py=None):
kw_ret = {}
requested_7_np = np.stack(py_batch['requested_7'], axis=0).transpose()
requested_7_np = requested_7_np[:, :, np.newaxis] # 7, batchsize, 1
response_7_np = np.stack(py_batch['response_7'], axis=0).transpose()
response_7_np = response_7_np[:, :, np.newaxis] # 7, batchsize, 1
requestable_key = py_batch['requestable_key'] # (batchsize, 7) keys
requestable_slot = py_batch['requestable_slot'] # (batchsize, 7) slots
requestable_key_np = pad_sequences(requestable_key, len(requestable_key[0]), padding='post',
truncating='post').transpose((1, 0))
requestable_slot_np = pad_sequences(requestable_slot, len(requestable_slot[0]), padding='post',
truncating='post').transpose((1, 0))
kw_ret['requestable_key_np'] = requestable_key_np
kw_ret['requestable_slot_np'] = requestable_slot_np
kw_ret['requestable_key'] = cuda_(Variable(torch.from_numpy(requestable_key_np).long()))
kw_ret['requestable_slot'] = cuda_(Variable(torch.from_numpy(requestable_slot_np).long()))
kw_ret['requested_7'] = cuda_(Variable(torch.from_numpy(requested_7_np).float()))
kw_ret['response_7'] = cuda_(Variable(torch.from_numpy(response_7_np).float()))
u_input_py = py_batch['user']
u_len_py = py_batch['u_len']
if cfg.prev_z_method == 'concat' and prev_z_py is not None:
for i in range(len(u_input_py)):
eob = self.reader.vocab.encode('EOS_Z2')
if eob in prev_z_py[i] and prev_z_py[i].index(eob) != len(prev_z_py[i]) - 1:
idx = prev_z_py[i].index(eob)
u_input_py[i] = prev_z_py[i][:idx + 1] + u_input_py[i]
else:
u_input_py[i] = prev_z_py[i] + u_input_py[i]
u_len_py[i] = len(u_input_py[i])
for j, word in enumerate(prev_z_py[i]):
if word >= cfg.vocab_size:
prev_z_py[i][j] = 2 # unk
elif cfg.prev_z_method == 'separate' and prev_z_py is not None:
for i in range(len(prev_z_py)):
eob = self.reader.vocab.encode('EOS_Z2')
if eob in prev_z_py[i] and prev_z_py[i].index(eob) != len(prev_z_py[i]) - 1:
idx = prev_z_py[i].index(eob)
prev_z_py[i] = prev_z_py[i][:idx + 1]
for j, word in enumerate(prev_z_py[i]):
if word >= cfg.vocab_size:
prev_z_py[i][j] = 2 # unk
prev_z_input_np = pad_sequences(prev_z_py, cfg.max_ts, padding='post', truncating='pre').transpose((1, 0))
prev_z_len = np.array([len(_) for _ in prev_z_py])
prev_z_input = cuda_(Variable(torch.from_numpy(prev_z_input_np).long()))
kw_ret['prev_z_len'] = prev_z_len
kw_ret['prev_z_input'] = prev_z_input
kw_ret['prev_z_input_np'] = prev_z_input_np
degree_input_np = np.array(py_batch['degree'])
u_input_np = pad_sequences(u_input_py, cfg.max_ts, padding='post', truncating='pre').transpose((1, 0))
m_input_np = pad_sequences(py_batch['response'], cfg.max_ts, padding='post', truncating='post').transpose(
(1, 0))
r_input_np = pad_sequences(py_batch['requested'], cfg.req_length, padding='post', truncating='post').transpose(
(1, 0)) # (seqlen, batchsize)
k_input_np = pad_sequences(py_batch['constraint_key'], len(py_batch['constraint_key'][0]), padding='post',
truncating='post').transpose(
(1, 0))
flat_constraint_value = []
num_k = k_input_np.shape[0]
for b in py_batch['constraint_value']:
for k in b:
flat_constraint_value.append(k)
flat_i_input_np = pad_sequences(flat_constraint_value, cfg.inf_length, padding='post', truncating='post')
i_input_np = []
i_k_input_np = []
for idx, k in enumerate(flat_i_input_np):
i_k_input_np.append(k)
if (idx + 1) % num_k == 0:
i_input_np.append(np.asarray(i_k_input_np))
i_k_input_np = []
i_input_np = np.asarray(i_input_np) # (batchsize, key_size, seqlen)
u_len = np.array(u_len_py)
m_len = np.array(py_batch['m_len'])
degree_input = cuda_(Variable(torch.from_numpy(degree_input_np).float()))
u_input = cuda_(Variable(torch.from_numpy(u_input_np).long()))
m_input = cuda_(Variable(torch.from_numpy(m_input_np).long()))
r_input = cuda_(Variable(torch.from_numpy(r_input_np).long()))
k_input = cuda_(Variable(torch.from_numpy(k_input_np).long()))
i_input = cuda_(Variable(torch.from_numpy(i_input_np).long()))
i_input = i_input.permute(1, 2, 0)
z_input = []
for k_i_input in i_input:
z_input.append(k_i_input)
z_input = torch.cat(z_input, dim=0)
z_input_np = z_input.cpu().data.numpy()
kw_ret['z_input_np'] = z_input_np
return u_input, u_input_np, z_input, m_input, m_input_np, u_len, m_len, \
degree_input, k_input, i_input, r_input, kw_ret, py_batch['constraint_eos']
def _test_convert_batch(self, py_batch, prev_z_py=None, prev_m_py=None): # ???not easy to write
kw_ret = {}
requested_7_np = np.stack(py_batch['requested_7'], axis=0).transpose()
requested_7_np = requested_7_np[:, :, np.newaxis] # 7, batchsize, 1
response_7_np = np.stack(py_batch['response_7'], axis=0).transpose()
response_7_np = response_7_np[:, :, np.newaxis] # 7, batchsize, 1
requestable_key = py_batch['requestable_key'] # (batchsize, 7) keys
requestable_slot = py_batch['requestable_slot'] # (batchsize, 7) slots
requestable_key_np = pad_sequences(requestable_key, len(requestable_key[0]), padding='post',
truncating='pre').transpose((1, 0))
requestable_slot_np = pad_sequences(requestable_slot, len(requestable_slot[0]), padding='post',
truncating='pre').transpose((1, 0))
kw_ret['requestable_key_np'] = requestable_key_np
kw_ret['requestable_slot_np'] = requestable_slot_np
kw_ret['requestable_key'] = cuda_(Variable(torch.from_numpy(requestable_key_np).long()))
kw_ret['requestable_slot'] = cuda_(Variable(torch.from_numpy(requestable_slot_np).long()))
kw_ret['requested_7'] = cuda_(Variable(torch.from_numpy(requested_7_np).float()))
kw_ret['response_7'] = cuda_(Variable(torch.from_numpy(response_7_np).float()))
u_input_py = py_batch['user']
u_len_py = py_batch['u_len']
eom = self.reader.vocab.encode('EOS_M')
if prev_m_py is not None:
fix_u_input_py = []
for b, m in zip(u_input_py, prev_m_py):
if eom in b:
idx = b.index(eom)
b = b[idx + 1:]
if eom in m:
idx = m.index(eom)
m = m[:idx + 1]
m = [self.reader.vocab.encode('<unk>') if w >= cfg.vocab_size else w for w in m]
fix_u_input_py.append(m + b)
else:
fix_u_input_py.append(b)
u_input_py = fix_u_input_py
u_len_py = [len(b) for b in fix_u_input_py]
if cfg.prev_z_method == 'concat' and prev_z_py is not None:
for i in range(len(u_input_py)):
eob = self.reader.vocab.encode('EOS_Z2')
if eob in prev_z_py[i] and prev_z_py[i].index(eob) != len(prev_z_py[i]) - 1:
idx = prev_z_py[i].index(eob)
u_input_py[i] = prev_z_py[i][:idx + 1] + u_input_py[i]
else:
u_input_py[i] = prev_z_py[i] + u_input_py[i]
u_len_py[i] = len(u_input_py[i])
for j, word in enumerate(prev_z_py[i]):
if word >= cfg.vocab_size:
prev_z_py[i][j] = 2 # unk
elif cfg.prev_z_method == 'separate' and prev_z_py is not None:
for i in range(len(prev_z_py)):
eob = self.reader.vocab.encode('EOS_Z2')
if eob in prev_z_py[i] and prev_z_py[i].index(eob) != len(prev_z_py[i]) - 1:
idx = prev_z_py[i].index(eob)
prev_z_py[i] = prev_z_py[i][:idx + 1]
for j, word in enumerate(prev_z_py[i]):
if word >= cfg.vocab_size:
prev_z_py[i][j] = 2 # unk
prev_z_input_np = pad_sequences(prev_z_py, cfg.max_ts, padding='post', truncating='pre').transpose((1, 0))
prev_z_len = np.array([len(_) for _ in prev_z_py])
prev_z_input = cuda_(Variable(torch.from_numpy(prev_z_input_np).long()))
kw_ret['prev_z_len'] = prev_z_len
kw_ret['prev_z_input'] = prev_z_input
kw_ret['prev_z_input_np'] = prev_z_input_np
degree_input_np = np.array(py_batch['degree'])
u_input_np = pad_sequences(u_input_py, cfg.max_ts, padding='post', truncating='pre').transpose((1, 0))
m_input_np = pad_sequences(py_batch['response'], cfg.max_ts, padding='post', truncating='post').transpose(
(1, 0))
r_input_np = pad_sequences(py_batch['requested'], cfg.req_length, padding='post', truncating='post').transpose(
(1, 0)) # (seqlen, batchsize)
k_input_np = pad_sequences(py_batch['constraint_key'], len(py_batch['constraint_key'][0]), padding='post',
truncating='post').transpose(
(1, 0))
flat_constraint_value = []
num_k = k_input_np.shape[0]
for b in py_batch['constraint_value']:
for k in b:
flat_constraint_value.append(k)
inf_length = max([len(l) for l in flat_constraint_value])
print(inf_length)
flat_i_input_np = pad_sequences(flat_constraint_value, cfg.inf_length, padding='post', truncating='post')
i_input_np = []
i_k_input_np = []
for idx, k in enumerate(flat_i_input_np):
i_k_input_np.append(k)
if (idx + 1) % num_k == 0:
i_input_np.append(np.asarray(i_k_input_np))
i_k_input_np = []
i_input_np = np.asarray(i_input_np) # (batchsize, key_size, seqlen)
u_len = np.array(u_len_py)
m_len = np.array(py_batch['m_len'])
degree_input = cuda_(Variable(torch.from_numpy(degree_input_np).float()))
u_input = cuda_(Variable(torch.from_numpy(u_input_np).long()))
m_input = cuda_(Variable(torch.from_numpy(m_input_np).long()))
r_input = cuda_(Variable(torch.from_numpy(r_input_np).long()))
k_input = cuda_(Variable(torch.from_numpy(k_input_np).long()))
i_input = cuda_(Variable(torch.from_numpy(i_input_np).long()))
i_input = i_input.permute(1, 2, 0)
z_input = []
for k_i_input in i_input:
z_input.append(k_i_input)
z_input = torch.cat(z_input, dim=0)
z_input_np = z_input.cpu().data.numpy()
kw_ret['z_input_np'] = z_input_np
if 'database' in py_batch.keys():
database = py_batch['database']
else:
database = None
return u_input, u_input_np, z_input, m_input, m_input_np, u_len, m_len, \
degree_input, k_input, i_input, r_input, kw_ret, database, py_batch['constraint_eos']
def train(self):
lr = cfg.lr
prev_min_loss = 0.
early_stop_count = cfg.early_stop_count
train_time = 0
for epoch in range(cfg.epoch_num):
loss_weights = [1., 1., 1., 1.]
sw = time.time()
if epoch <= self.base_epoch:
continue
self.training_adjust(epoch)
self.m.self_adjust(epoch)
sup_loss = 0
sup_cnt = 0
data_iterator = self.reader.mini_batch_iterator('train')
optim = Adam(lr=lr, params=filter(lambda x: x.requires_grad, self.m.parameters()), weight_decay=1e-5)
for iter_num, dial_batch in enumerate(data_iterator):
turn_states = {}
prev_z = None
for turn_num, turn_batch in enumerate(dial_batch):
if cfg.truncated:
logging.debug('iter %d turn %d' % (iter_num, turn_num))
optim.zero_grad()
u_input, u_input_np, z_input, m_input, m_input_np, u_len, \
m_len, degree_input, k_input, i_input, r_input, kw_ret, constraint_eos \
= self._convert_batch(turn_batch, prev_z)
loss, pr_loss, m_loss, turn_states, req_loss, res_loss = self.m(u_input=u_input, z_input=z_input,
m_input=m_input,
degree_input=degree_input,
u_input_np=u_input_np,
m_input_np=m_input_np,
turn_states=turn_states,
u_len=u_len, m_len=m_len,
k_input=k_input,
i_input=i_input,
r_input=r_input,
loss_weights=loss_weights,
mode='train', **kw_ret)
loss.backward(retain_graph=turn_num != len(dial_batch) - 1)
grad = torch.nn.utils.clip_grad_norm_(self.m.parameters(), 10.0)
optim.step()
sup_loss += loss.cpu().item()
sup_cnt += 1
logging.debug(
'loss:{} pr_loss:{} req_loss:{} res_loss:{} m_loss:{} grad:{}'.format(loss.cpu().item(),
pr_loss.cpu().item(),
req_loss.cpu().item(),
res_loss.cpu().item(),
m_loss.cpu().item(),
grad))
prev_z = turn_batch['bspan']
epoch_sup_loss = sup_loss / (sup_cnt + 1e-8)
train_time += time.time() - sw
            logging.info('Training time: {}'.format(train_time))
logging.info('avg training loss in epoch %d sup:%f' % (epoch, epoch_sup_loss))
valid_sup_loss, valid_unsup_loss = self.validate()
logging.info('validation loss in epoch %d sup:%f unsup:%f' % (epoch, valid_sup_loss, valid_unsup_loss))
logging.info('time for epoch %d: %f' % (epoch, time.time() - sw))
valid_loss = valid_sup_loss + valid_unsup_loss
metrics = self.eval(data='dev')
valid_metrics = metrics[-1] + metrics[-2] + metrics[-3]
logging.info('valid metric %f ' % (valid_metrics))
if valid_metrics >= prev_min_loss:
self.save_model(epoch)
prev_min_loss = valid_metrics
early_stop_count = cfg.early_stop_count
else:
early_stop_count -= 1
lr *= cfg.lr_decay
if not early_stop_count:
break
logging.info('early stop countdown %d, learning rate %f' % (early_stop_count, lr))
def eval(self, data='test'):
self.m.eval()
self.reader.result_file = None
data_iterator = self.reader.mini_batch_iterator(data)
mode = 'test' if not cfg.pretrain else 'pretrain_test'
for batch_num, dial_batch in enumerate(data_iterator):
turn_states = {}
prev_z = None
for turn_num, turn_batch in enumerate(dial_batch):
u_input, u_input_np, z_input, m_input, m_input_np, u_len, \
m_len, degree_input, k_input, i_input, r_input, kw_ret, constraint_eos \
= self._convert_batch(turn_batch, prev_z)
m_idx, z_idx, turn_states = self.m(u_input=u_input, z_input=z_input,
m_input=m_input,
degree_input=degree_input,
u_input_np=u_input_np,
m_input_np=m_input_np,
turn_states=turn_states,
u_len=u_len, m_len=m_len,
k_input=k_input,
i_input=i_input,
r_input=r_input,
mode='test', **kw_ret)
self.reader.wrap_result(turn_batch, m_idx, z_idx, prev_z=prev_z)
prev_z = z_idx
        if self.reader.result_file is not None:
self.reader.result_file.close()
ev = self.EV(result_path=cfg.result_path, data=data)
res = ev.run_metrics()
self.m.train()
return res
def validate(self, loss_weights=[1., 1., 1., 1.], data='dev'):
self.m.eval()
data_iterator = self.reader.mini_batch_iterator(data)
sup_loss, unsup_loss = 0, 0
sup_cnt, unsup_cnt = 0, 0
for dial_batch in data_iterator:
turn_states = {}
for turn_num, turn_batch in enumerate(dial_batch):
u_input, u_input_np, z_input, m_input, m_input_np, u_len, \
m_len, degree_input, k_input, i_input, r_input, kw_ret, constraint_eos \
= self._convert_batch(turn_batch)
loss, pr_loss, m_loss, turn_states, req_loss, res_loss = self.m(u_input=u_input, z_input=z_input,
m_input=m_input,
degree_input=degree_input,
u_input_np=u_input_np,
m_input_np=m_input_np,
turn_states=turn_states,
u_len=u_len, m_len=m_len,
k_input=k_input,
i_input=i_input,
r_input=r_input,
loss_weights=loss_weights,
mode='train', **kw_ret)
sup_loss += loss.cpu().item()
sup_cnt += 1
logging.debug(
'loss:{} pr_loss:{} req_loss:{} res_loss:{} m_loss:{}'.format(loss.cpu().item(), pr_loss.cpu().item(),
req_loss.cpu().item(),
res_loss.cpu().item(),
m_loss.cpu().item()))
sup_loss /= (sup_cnt + 1e-8)
unsup_loss /= (unsup_cnt + 1e-8)
self.m.train()
return sup_loss, unsup_loss
def save_model(self, epoch, path=None):
if not path:
path = cfg.model_path
all_state = {'lstd': self.m.state_dict(),
'config': cfg.__dict__,
'epoch': epoch}
torch.save(all_state, path)
def load_model(self, path=None):
if not path:
path = cfg.model_path
all_state = torch.load(path)
self.m.load_state_dict(all_state['lstd'])
self.base_epoch = all_state.get('epoch', 0)
def training_adjust(self, epoch):
return
def freeze_module(self, module):
for param in module.parameters():
param.requires_grad = False
def unfreeze_module(self, module):
for param in module.parameters():
param.requires_grad = True
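    # Copy pretrained GloVe vectors into every embedding layer of the model;
    # whether they remain trainable is controlled by cfg.emb_trainable.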
def load_glove_embedding(self):
initial_arr = self.m.u_encoder.embedding.weight.data.cpu().numpy()
embedding_arr = torch.from_numpy(get_glove_matrix(self.reader.vocab, initial_arr))
self.m.u_encoder.embedding.weight.data.copy_(embedding_arr)
self.m.u_encoder.embedding.weight.requires_grad = cfg.emb_trainable
if cfg.separate_enc:
self.m.z_encoder.embedding.weight.data.copy_(embedding_arr)
self.m.z_encoder.embedding.weight.requires_grad = cfg.emb_trainable
for i in range(cfg.num_head):
self.m.z_decoders[i].emb.weight.data.copy_(embedding_arr)
self.m.z_decoders[i].emb.weight.requires_grad = cfg.emb_trainable
self.m.req_classifiers.emb.weight.data.copy_(embedding_arr)
self.m.req_classifiers.emb.weight.requires_grad = cfg.emb_trainable
self.m.res_classifiers.emb.weight.data.copy_(embedding_arr)
self.m.res_classifiers.emb.weight.requires_grad = cfg.emb_trainable
self.m.m_decoder.emb.weight.data.copy_(embedding_arr)
self.m.m_decoder.emb.weight.requires_grad = cfg.emb_trainable
def count_params(self):
module_parameters = filter(lambda p: p.requires_grad, self.m.parameters())
param_cnt = sum([np.prod(p.size()) for p in module_parameters if p.requires_grad == True])
print('total trainable params: %d' % param_cnt)
print(self.m)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-mode')
parser.add_argument('-data')
parser.add_argument('-cfg', nargs='*')
args = parser.parse_args()
cfg.init_handler(args.data)
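    # Apply '-cfg key=value' overrides, casting each value to the type of the existing
    # cfg attribute (for booleans, anything other than 'False' counts as True).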
if args.cfg:
for pair in args.cfg:
k, v = tuple(pair.split('='))
dtype = type(getattr(cfg, k))
if dtype == type(None):
raise ValueError()
if dtype is bool:
v = False if v == 'False' else True
else:
v = dtype(v)
setattr(cfg, k, v)
logging.debug(str(cfg))
if cfg.cuda:
torch.cuda.set_device(cfg.cuda_device)
logging.debug('Device: {}'.format(torch.cuda.current_device()))
cfg.mode = args.mode
torch.manual_seed(cfg.seed)
torch.cuda.manual_seed(cfg.seed)
random.seed(cfg.seed)
np.random.seed(cfg.seed)
m = Model(args.data.split('-')[-1])
m.count_params()
if args.mode == 'train':
m.load_glove_embedding()
m.m.beam_search = False
m.train()
elif args.mode == 'adjust':
m.load_model()
m.train()
m.load_model()
m.eval()
elif args.mode == 'test':
m.load_model()
m.eval(data='test')
if __name__ == '__main__':
main()
|
py
|
1a5e7161e486a9ee1d4cb61deef74c7397167be3
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Bill.knesset_proposal'
db.delete_column('laws_bill', 'knesset_proposal_id')
# Removing M2M table for field proposals on 'Bill'
db.delete_table('laws_bill_proposals')
def backwards(self, orm):
# Adding field 'Bill.knesset_proposal'
db.add_column('laws_bill', 'knesset_proposal', self.gf('django.db.models.fields.related.ForeignKey')(related_name='bills', null=True, to=orm['laws.KnessetProposal'], blank=True), keep_default=False)
# Adding M2M table for field proposals on 'Bill'
db.create_table('laws_bill_proposals', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('bill', models.ForeignKey(orm['laws.bill'], null=False)),
('privateproposal', models.ForeignKey(orm['laws.privateproposal'], null=False))
))
db.create_unique('laws_bill_proposals', ['bill_id', 'privateproposal_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committees'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'committees.committeemeeting': {
'Meta': {'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'laws.bill': {
'Meta': {'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': "orm['laws.Vote']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.Law']"}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.knessetproposal': {
'Meta': {'object_name': 'KnessetProposal'},
'bill': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'knesset_proposal'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Bill']"}),
'booklet_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'committee': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['committees.Committee']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'originals': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'knesset_proposals'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.PrivateProposal']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_knessetproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.membervotingstatistics': {
'Meta': {'object_name': 'MemberVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Member']"})
},
'laws.partyvotingstatistics': {
'Meta': {'object_name': 'PartyVotingStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'voting_statistics'", 'unique': 'True', 'to': "orm['mks.Party']"})
},
'laws.privateproposal': {
'Meta': {'object_name': 'PrivateProposal'},
'bill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proposals'", 'null': 'True', 'to': "orm['laws.Bill']"}),
'committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'knesset_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'to': "orm['laws.Law']"}),
'proposal_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_proposed'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'laws_privateproposal_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"})
},
'laws.vote': {
'Meta': {'object_name': 'Vote'},
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'mks.member': {
'Meta': {'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'planet.blog': {
'Meta': {'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['laws']
|
py
|
1a5e722b9d9f7ed626b8f1a56c3a33f7e86b0bb6
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 9 05:43:47 2021
@author: wq
"""
import os
from detectron2 import model_zoo
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode
import numpy as np
import cv2
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data.datasets import register_coco_instances
from numpy import mat
# The section below is the part you need to edit yourself: the four corner
# coordinates of the workspace, calibrated for your own robot's configuration.
rob_cor_1 = (0.337180175851907, -0.7709528989764918)
rob_cor_2 = (-0.3383507457068013, -0.7918474781347146)
rob_cor_3 = (0.3435026039288244, -0.3769407945516401)
rob_cor_4 = (-0.3350733477311105, -0.3822064940321181)
################################################################################
def get_metadata():
path_to_train_image = './trained_cnn/UR5_sim_coco/train'
path_to_train_json = './trained_cnn/UR5_sim_coco/annotations/train.json'
register_coco_instances(
'train', {}, path_to_train_json, path_to_train_image)
coco_val_metadata = MetadataCatalog.get('train')
return coco_val_metadata
def get_predictor():
cfg = get_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.merge_from_file(model_zoo.get_config_file(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
# Let training initialize from model zoo
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "./trained_cnn/model_final.pth")
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 4 # only has one class (ballon).
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold
predictor = DefaultPredictor(cfg)
return predictor
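# Rebuild an OpenCV image from a flat, interleaved RGB byte list: each channel is
# reshaped to the given resolution, merged in BGR order and flipped vertically.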
def brg2rgb(image_rgb, resolution):
image_rgb_r = [image_rgb[i] for i in range(0, len(image_rgb), 3)]
image_rgb_r = np.array(image_rgb_r)
image_rgb_r = image_rgb_r.reshape(resolution[1], resolution[0])
image_rgb_r = image_rgb_r.astype(np.uint8)
image_rgb_g = [image_rgb[i] for i in range(1, len(image_rgb), 3)]
image_rgb_g = np.array(image_rgb_g)
image_rgb_g = image_rgb_g.reshape(resolution[1], resolution[0])
image_rgb_g = image_rgb_g.astype(np.uint8)
image_rgb_b = [image_rgb[i] for i in range(2, len(image_rgb), 3)]
image_rgb_b = np.array(image_rgb_b)
image_rgb_b = image_rgb_b.reshape(resolution[1], resolution[0])
image_rgb_b = image_rgb_b.astype(np.uint8)
result_rgb = cv2.merge([image_rgb_b, image_rgb_g, image_rgb_r])
result_rgb = cv2.flip(result_rgb, 0)
return result_rgb
def visulization(result_rgb, metadata, outputs):
v = Visualizer(result_rgb[:, :, ::-1],metadata=metadata, scale=0.5, instance_mode=ColorMode.IMAGE_BW)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2.namedWindow("prediction", 0)
    cv2.resizeWindow("prediction", 1024, 512)
    cv2.moveWindow("prediction", 0, 0)
    cv2.imshow("prediction", out.get_image()[:, :, ::-1])
cv2.waitKey(0)
cv2.destroyAllWindows()
from copy import copy
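# Sort the detected marker centres into the four calibration corners: split them
# into the two leftmost and two rightmost boxes by x, then order each pair by y.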
def loc_label(outputs):
boxes = outputs["instances"].pred_boxes
center_pos = boxes.get_centers()
result_pos = center_pos.numpy().tolist()
sorted_list = []
result_pocy = copy(result_pos)
for i in range(len(result_pos)):
resmin = result_pocy[0]
for j in range(len(result_pocy)):
if resmin[0] > result_pocy[j][0]:
resmin = result_pocy[j]
sorted_list.append(resmin)
result_pocy.remove(resmin)
label1_3 = [sorted_list[0],sorted_list[1]]
label2_4 = [sorted_list[-1],sorted_list[-2]]
if label1_3[0][1] < label1_3[1][1]:
label1 = label1_3[0]
label3 = label1_3[1]
else:
label1 = label1_3[1]
label3 = label1_3[0]
if label2_4[0][1] < label2_4[1][1]:
label2 = label2_4[0]
label4 = label2_4[1]
else:
label2 = label2_4[1]
label4 = label2_4[0]
return [label1, label2, label3, label4]
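# Map an object's pixel coordinates into the robot frame by linearly interpolating
# between the pixel and robot positions of the corner markers, averaging the two estimates.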
def cal_obj_pos(obj_rgb_coor,label_coordinate):
rgb_cor_1 = label_coordinate[0]
rgb_cor_2 = label_coordinate[1]
rgb_cor_3 = label_coordinate[2]
rgb_cor_4 = label_coordinate[3]
dy_rob_1 = rob_cor_3[1] - rob_cor_1[1]
dy_rob_2 = rob_cor_4[1] - rob_cor_2[1]
dx_rob_1 = rob_cor_2[0] - rob_cor_1[0]
dx_rob_2 = rob_cor_4[0] - rob_cor_3[0]
dy_rgb_1 = rgb_cor_3[1] - rgb_cor_1[1]
dy_rgb_2 = rgb_cor_4[1] - rgb_cor_2[1]
dx_rgb_1 = rgb_cor_2[0] - rgb_cor_1[0]
dx_rgb_2 = rgb_cor_4[0] - rgb_cor_3[0]
obj_x_1 = (((obj_rgb_coor[0] - rgb_cor_1[0]) / dx_rgb_1) * dx_rob_1) + rob_cor_1[0]
obj_x_2 = (((obj_rgb_coor[0] - rgb_cor_2[0]) / dx_rgb_2) * dx_rob_2) + rob_cor_2[0]
obj_x = (obj_x_1 + obj_x_2) / 2
# print('x coordinate in the robot coordinate system is: ', obj_x)
obj_y_1 = (((obj_rgb_coor[1] - rgb_cor_1[1]) / dy_rgb_1) * dy_rob_1) + rob_cor_1[1]
obj_y_2 = (((obj_rgb_coor[1] - rgb_cor_2[1]) / dy_rgb_2) * dy_rob_2) + rob_cor_2[1]
obj_y = (obj_y_1 + obj_y_2) / 2
# print('y coordinate in the robot coordinate system is: ', obj_y)
return (obj_x, obj_y)
def get_all_objects_coordinate(cubiod_coor,sphere_coor,label_coordinate):
cub_coor = []
sph_coor = []
for cub in cubiod_coor:
cub_coor.append(cal_obj_pos(cub,label_coordinate))
for sph in sphere_coor:
sph_coor.append(cal_obj_pos(sph,label_coordinate))
return cub_coor, sph_coor
def list2mat(list):
m1 = [list[0],list[1],list[2],list[3]]
m2 = [list[4],list[5],list[6],list[7]]
m3 = [list[8],list[9],list[10],list[11]]
m4 = [list[12],list[13],list[14],list[15]]
matrix = mat([m1,m2,m3,m4])
return matrix
def mat2list(matrix):
lis = [matrix[0,0],matrix[0,1],matrix[0,2],matrix[0,3],\
matrix[1,0],matrix[1,1],matrix[1,2],matrix[1,3],\
matrix[2,0],matrix[2,1],matrix[2,2],matrix[2,3],\
matrix[3,0],matrix[3,1],matrix[3,2],matrix[3,3]]
return lis
|
py
|
1a5e7351b9dae3b16c6600b68a9eb769921c68f7
|
#!/usr/bin/python
import execnet, execnet.gateway, execnet.multi
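# Extend execnet's SSH gateway so that extra ssh options (config file, port,
# identity file, batch mode) taken from an XSpec can be passed on the command line.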
class SshPortGateway(execnet.gateway.SshGateway):
def __init__(self, sshaddress, id, remotepython = None,
ssh_config=None,
ssh_port=None,
ssh_identity=None,
ssh_batchmode=None):
self.remoteaddress = sshaddress
if not remotepython: remotepython = "python"
args = ['ssh', '-C' ]
if ssh_config: args.extend(['-F', ssh_config])
if ssh_port: args.extend(['-p', ssh_port])
if ssh_identity: args.extend(['-i', ssh_identity])
if ssh_batchmode: args.extend(["-o", "BatchMode yes"])
remotecmd = '%s -c "%s"' % (remotepython, execnet.gateway.popen_bootstrapline)
args.extend([sshaddress, remotecmd])
execnet.gateway.PopenCmdGateway.__init__(self, args, id=id)
def makeportgateway(self, spec):
spec = execnet.XSpec(spec)
self.allocate_id(spec)
gw = SshPortGateway(spec.ssh,
remotepython=spec.python,
ssh_config=spec.ssh_config,
id=spec.id,
ssh_port=spec.ssh_port,
ssh_identity=spec.ssh_identity,
ssh_batchmode=spec.ssh_batchmode)
gw.spec = spec
self._register(gw)
# TODO add spec.{chdir,nice,env}
return gw
execnet.multi.Group.makeportgateway = makeportgateway
execnet.makeportgateway = execnet.multi.default_group.makeportgateway
|
py
|
1a5e74257d50db9764e733f68ff9fb8cf9203c07
|
import numpy as np
import pyanitools as pyt
from pyNeuroChem import cachegenerator as cg
import sys
import os
import hdnntools as hdn
import matplotlib.pyplot as plt
import matplotlib as mpl
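# Return the index of the 1/S-wide bin that v (expected in (0, 1]) falls into;
# used below to randomly assign conformations to one of the N folds.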
def interval(v,S):
ps = 0.0
ds = 1.0 / float(S)
for s in range(S):
if v > ps and v <= ps+ds:
return s
ps = ps + ds
#wkdir = '/scratch/Research/force_train_testing/'
#saef = wkdir + "sae_6-31gd.dat"
wkdir = '/nh/nest/u/jsmith/Research/gutzwiller_research/train_all/gutz_model-5/'
saef = wkdir + "sae.dat"
#wkdir = '/scratch/Research/datasets/iso17/train_test/'
#saef = wkdir + "sae_6-31gd.dat"
#data_root = '/scratch/Research/GDB-11-AL-wB97x631gd/'
data_root = '/auto/nest/nest/u/jsmith/scratch/Research/gutzwiller_research/h5files/'
#data_root = '/scratch/Research/datasets/iso17/'
h5files = [#'/home/jujuman/Research/Cluster_AL/waterclusters1.h5',
#data_root + 'gutzwiller1-U2-rs1.5.h5',
#data_root + 'gutzwiller1-U4-rs1.5.h5',
#data_root + 'gutzwiller1-U6-rs1.5.h5',
#data_root + 'gutzwiller1-U8-rs1.5.h5',
#data_root + 'gutzwiller1-U10-rs1.5.h5',
data_root + 'gutzwiller1-U12-rs1.5.h5',
]
store_dir = wkdir + "cache-data-"
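# Build N train/validation cache pairs (one per fold); 10% of each qualifying
# data group is held out into testset.h5 further down.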
N = 5
for i in range(N):
if not os.path.exists(store_dir + str(i)):
os.mkdir(store_dir + str(i))
if os.path.exists(wkdir + 'testset.h5'):
os.remove(wkdir + 'testset.h5')
cachet = [cg('_train', saef, store_dir + str(r) + '/',False) for r in range(N)]
cachev = [cg('_valid', saef, store_dir + str(r) + '/',False) for r in range(N)]
testh5 = pyt.datapacker(wkdir + 'testset.h5')
Nd = np.zeros(N,dtype=np.int32)
Nbf = 0
for f,fn in enumerate(h5files):
    print('Processing file(' + str(f+1) + ' of ' + str(len(h5files)) + '):', fn)
adl = pyt.anidataloader(fn)
To = adl.size()
Ndc = 0
Fmt = []
Emt = []
for c, data in enumerate(adl):
#if c == 2 or c == 2 or c == 2:
# Get test store name
#Pn = fn.split('/')[-1].split('.')[0] + data['path']
Pn = data['path']+'_'+str(f).zfill(6)+'_'+str(c).zfill(6)
#print(Pn)
# Progress indicator
sys.stdout.write("\r%d%% %s" % (int(100*c/float(To)), Pn))
sys.stdout.flush()
#print(data.keys())
# Extract the data
X = data['coordinates']
E = data['energies']
F = -data['forces']
S = data['species']
Fmt.append(np.max(np.linalg.norm(F,axis=2),axis=1))
Emt.append(E)
Mv = np.max(np.linalg.norm(F,axis=2),axis=1)
#print(Mv.shape,X.shape)
index = np.where(Mv > 10000000.5)[0]
indexk = np.where(Mv <= 10000000.5)[0]
#if index.size > 0:
#print(Mv[index])
#hdn.writexyzfile(bddir+'mols_'+str(c).zfill(3)+'_'+str(f).zfill(3)+'.xyz',X[index],S)
Nbf += index.size
#if data['path'] == '/dimer7/grp_0':
# print(data['path'])
# print(E)
# print(F)
# CLear forces
X = X[indexk]
F = F[indexk]
E = E[indexk]
#exit(0)
#print(" MAX FORCE:", F.max(), S)
'''
print('meanforce:',F.flatten().mean())
print("FORCE:",F)
print(np.max(F.reshape(E.size,F.shape[1]*F.shape[2]),axis=1))
print("MAX FORCE:", F.max(),S)
if F.max() > 0.0:
print(np.mean(F.reshape(E.size,F.shape[1]*F.shape[2]),axis=1).shape, E.size)
plt.hist(np.max(np.abs(F).reshape(E.size,F.shape[1]*F.shape[2]),axis=1),bins=100)
plt.show()
plt.scatter(np.max(np.abs(F).reshape(E.size,F.shape[1]*F.shape[2]),axis=1), E)
plt.show()
'''
#Ru = np.random.uniform(0.0, 1.0, E.shape[0])
#nidx = np.where(Ru < fn[0])
#X = X[nidx]
#F = F[nidx]
#E = E[nidx]
Ndc += E.size
#for i in range(E.size):
# X[i] = X[0]
# F[i] = F[0]
# E[i] = E[0]
if (set(S).issubset(['C', 'N', 'O', 'H', 'F', 'S', 'Cl'])):
Si = int(E.shape[0]*0.9)
X_te = X[Si:]
E_te = E[Si:]
F_te = F[Si:]
testh5.store_data(Pn, coordinates=X_te, forces=F_te, energies=E_te, species=list(S))
X = X[0:Si]
E = E[0:Si]
F = F[0:Si]
# Random mask
R = np.random.uniform(0.0, 1.0, E.shape[0])
idx = np.array([interval(r,N) for r in R])
# Build random split lists
split = []
for j in range(N):
split.append([i for i, s in enumerate(idx) if s == j])
nd = len([i for i, s in enumerate(idx) if s == j])
Nd[j] = Nd[j] + nd
# Store data
for i,t,v in zip(range(N), cachet, cachev):
## Store training data
X_t = np.array(np.concatenate([X[s] for j, s in enumerate(split) if j != i]), order='C', dtype=np.float32)
F_t = np.array(np.concatenate([F[s] for j, s in enumerate(split) if j != i]), order='C', dtype=np.float32)
E_t = np.array(np.concatenate([E[s] for j, s in enumerate(split) if j != i]), order='C', dtype=np.float64)
if E_t.shape[0] != 0:
t.insertdata(X_t, F_t, E_t, list(S))
## Store Validation
if len(split[i]) > 0:
X_v = np.array(X[split[i]], order='C', dtype=np.float32)
F_v = np.array(F[split[i]], order='C', dtype=np.float32)
E_v = np.array(E[split[i]], order='C', dtype=np.float64)
if E_v.shape[0] != 0:
v.insertdata(X_v, F_v, E_v, list(S))
sys.stdout.write("\r%d%%" % int(100))
print(" Data Kept: ", Ndc, 'High Force: ', Nbf)
sys.stdout.flush()
print("")
# Print some stats
print('Data count:',Nd)
print('Data split:',100.0*Nd/np.sum(Nd),'%')
# Save train and valid meta file and cleanup testh5
for t,v in zip(cachet, cachev):
t.makemetadata()
v.makemetadata()
testh5.cleanup()
|
py
|
1a5e74c765963b4e17c2fb4533effaca7b33086e
|
import pytest
from ckan.plugins import toolkit
from ckanext.ytp_recommendation.logic.action import create, get
from ckanext.ytp_recommendation.model import Recommendation
from ckanext.ytp_recommendation.tests import factories as ytp_factories
@pytest.mark.usefixtures('clean_db', 'clean_recommendation_table')
class TestGetActions(object):
def test_get_user_can_make_recommendation_w_userobj(self, app):
package = ytp_factories.get_or_create_package_object()
user = ytp_factories.get_or_create_user_object()
with app.flask_app.test_request_context('/'):
with app.flask_app.app_context():
toolkit.request.environ['REMOTE_ADDR'] = ytp_factories.get_ip_address()
toolkit.c.userobj = user
result = get.get_user_can_make_recommendation({}, {'package_id': package.id})
assert result
ytp_factories.create_and_get_recommendation(
user_id=user.id,
package_id=package.id,
ip=ytp_factories.get_ip_address())
result = get.get_user_can_make_recommendation({}, {'package_id': package.id})
assert not result
def test_get_user_count_for_package(self):
package = ytp_factories.get_or_create_package_object()
user = ytp_factories.get_or_create_user_object()
ip = ytp_factories.get_ip_address()
data_dict = {'package_id': package.id}
assert get.get_recommendation_count_for_package({}, data_dict) == 0
ytp_factories.create_and_get_recommendation(package_id=package.id, ip=ip, user_id=user.id)
assert get.get_recommendation_count_for_package({}, data_dict) == 1
@pytest.mark.usefixtures('clean_db', 'clean_recommendation_table')
class TestCreateActions(object):
    def test_create_recommendation_w_userobj(self, app):
package = ytp_factories.get_or_create_package_object()
user = ytp_factories.get_or_create_user_object()
data_dict = {'package_id': package.id}
with app.flask_app.test_request_context('/'):
with app.flask_app.app_context():
toolkit.request.environ['REMOTE_ADDR'] = ytp_factories.get_ip_address()
toolkit.c.userobj = user
recommendation_count = len(Recommendation.get_package_recommendations(package.id))
assert recommendation_count == 0
recommendation_count = create.create_recommendation({}, data_dict)
assert recommendation_count == 1
|
py
|
1a5e7534a7d0a5e972b8917fa926593aec5686c0
|
import json
from pyramid.security import (
remember,
forget,
NO_PERMISSION_REQUIRED,
)
from pyramid.httpexceptions import (
HTTPFound,
HTTPForbidden,
HTTPBadRequest,
)
from pyramid.view import (
view_config,
forbidden_view_config,
render_view,
)
from pyramid.response import Response
from requests_oauthlib import OAuth2Session
from sqlalchemy.orm.exc import NoResultFound
from whoahqa.models import (
DBSession,
UserProfile,
OnaUser,
)
def check_post_csrf(func):
def inner(context, request):
if request.method == "POST":
if request.session.get_csrf_token()\
!= request.POST.get('csrf_token'):
return HTTPBadRequest("Bad csrf token")
# fall through if not POST or token is valid
return func.__call__(context, request)
return inner
@forbidden_view_config()
def forbidden(context, request):
# if not authenticated, show login screen with unauthorized status code
if not request.user:
return Response(
render_view(
context, request, 'login', secure=False), status=401)
# otherwise, raise HTTPForbidden
return HTTPForbidden()
@view_config(route_name='auth',
match_param='action=login',
permission=NO_PERMISSION_REQUIRED,
renderer='login.jinja2')
@view_config(name='login',
context=HTTPForbidden,
permission=NO_PERMISSION_REQUIRED,
renderer='password_login.jinja2',
decorator=check_post_csrf)
def login(request):
return {}
@view_config(route_name='auth',
match_param='action=sign-in',
permission=NO_PERMISSION_REQUIRED,
renderer='password_login.jinja2',
decorator=check_post_csrf)
def password_login(context, request):
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
try:
user_profile = UserProfile.get(UserProfile.username == username)
except NoResultFound:
pass
else:
if user_profile.check_password(password):
headers = remember(request, user_profile.user_id)
return HTTPFound(request.route_url('default'), headers=headers)
# we're still here set the error message
request.session.flash(
u"Invalid username or password", 'error')
return {}
@view_config(
route_name='logout',
permission=NO_PERMISSION_REQUIRED)
def logout(request):
headers = forget(request)
if request.user.ona_user is None:
return HTTPFound(
request.route_url('auth', action='sign-in'), headers=headers)
return HTTPFound(
request.route_url('auth', action='login'), headers=headers)
@view_config(
route_name='auth',
match_param='action=authorize',
permission=NO_PERMISSION_REQUIRED)
def oauth_authorize(request):
client_id = request.registry.settings['oauth_client_id']
authorization_endpoint = "{base_url}{path}".format(
base_url=request.registry.settings['oauth_base_url'],
path=request.registry.settings['oauth_authorization_path'])
redirect_uri = request.route_url('auth', action='callback')
session = OAuth2Session(
client_id,
scope=['read', 'groups'],
redirect_uri=redirect_uri)
authorization_url, state = session.authorization_url(
authorization_endpoint)
# State is used to prevent CSRF, keep this for later.
request.session['oauth_state'] = state
return HTTPFound(authorization_url)
@view_config(
route_name='auth',
match_param='action=callback',
permission=NO_PERMISSION_REQUIRED)
def oauth_callback(request):
# check if we have `error` in our params, meaning user canceled
if 'error' in request.GET:
# redirect to login page with an alert
request.session.flash(
u"You must select authorize to continue", 'error')
return HTTPFound(request.route_url('auth', action='login'))
# TODO: validate the `oauth_state` session
base_url = request.registry.settings['oauth_base_url']
state = request.GET.get('state')
client_id = request.registry.settings['oauth_client_id']
client_secret = request.registry.settings['oauth_secret']
token_url = "{base_url}{path}".format(
base_url=base_url,
path=request.registry.settings['oauth_token_path'])
redirect_uri = request.route_url('auth', action='callback')
session = OAuth2Session(
client_id,
state=state,
redirect_uri=redirect_uri)
code = request.GET.get('code')
token = session.fetch_token(
token_url,
client_secret=client_secret,
code=code)
# retrieve username and store in db if it doesnt exist yet
user_api_url = "{base_url}{path}".format(
base_url=base_url,
path=request.registry.settings['oauth_user_api_path'])
response = session.request('GET', user_api_url)
try:
user_data = json.loads(response.text)
except ValueError:
# couldn't decode json
pass
else:
refresh_token = token['refresh_token']
try:
ona_user = OnaUser.get_or_create_from_api_data(
user_data, refresh_token)
except ValueError:
pass
else:
request.session['oauth_token'] = json.dumps(token)
            # flush to get the auto-increment id
DBSession.flush()
user_id = ona_user.user.id
# login user
headers = remember(request, user_id)
# TODO: redirect to `came_from` url
return HTTPFound(request.route_url('default'), headers=headers)
request.session.flash(
u"Failed to login, please try again", 'error')
return HTTPFound(
request.route_url('auth', action='login'))
|
py
|
1a5e75b7a627a3d3ba8bfe0e945809cb13328438
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram ShippingOption."""
from telegram import TelegramObject
class ShippingOption(TelegramObject):
"""This object represents one shipping option.
Attributes:
id (:obj:`str`): Shipping option identifier.
title (:obj:`str`): Option title.
prices (List[:class:`telegram.LabeledPrice`]): List of price portions.
Args:
id (:obj:`str`): Shipping option identifier.
title (:obj:`str`): Option title.
prices (List[:class:`telegram.LabeledPrice`]): List of price portions.
**kwargs (:obj:`dict`): Arbitrary keyword arguments.
"""
def __init__(self, id, title, prices, **kwargs):
self.id = id
self.title = title
self.prices = prices
self._id_attrs = (self.id,)
def to_dict(self):
data = super(ShippingOption, self).to_dict()
data['prices'] = [p.to_dict() for p in self.prices]
return data
|
py
|
1a5e75edf0ab2b24a898b3c1759a3f7ab4a1b14a
|
#!/usr/bin/env python
import urllib
import sys
import smtplib
from xml.dom import minidom
import pdb
import smtplib
from email.mime.text import MIMEText
address = 'http://bart.gov/dev/eta/bart_eta.xml'
xmldoc = minidom.parse(urllib.urlopen(address))
"""
tags = []
station = 0
stationInfo = []
etaDests = []
name = []
abbr = []
date = []
time = []
"""
#print xmldoc.toprettyxml()
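# whichTags collects every element with the given tag name from the parsed feed;
# whichStation then returns the child nodes of the station at index statN.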
def whichTags(need):
global tags
tags = xmldoc.getElementsByTagName(need)
#print tags[0].toxml()
return tags
def whichStation(statN):
#pdb.set_trace()
station = statN
#print taggedxml
#print taggedxml[station].toxml()
stationInfo = taggedxml[station].childNodes
#print stationInfo
return stationInfo
def dataFields():
global name, abbr, date, time, etaDests
name = statInf[1].toxml()
abbr = statInf[3].toxml()
date = statInf[5].toxml()
time = statInf[7].toxml()
etaDests = statInf[9:]
#cleanEta(3)
def cleanEta(a):
for n in range(1,a):
del etaDests[a]
def displayETAs():
dataFields()
print name
print abbr
print date
print time
i = 0
for childNodes in etaDests:
print etaDests[i].toxml()
i = i+1
def emailETAs(emailReq):
#use this http://docs.python.org/library/email-examples.html#email-examples
smtpuser = '[email protected]'
smtppass = 'thebartisawesome'#change this password!
fromaddr = 'Service Account <[email protected]>'
toaddrs = emailReq.split()
bccaddrs = '[email protected]'
subject = "your next train from " + name
msg = ""
#dataFields()
msg += name+abbr+date+time
msgAddon = ""
i = 0
for childNodes in etaDests:
msgAddon += etaDests[i].toxml()
i = i + 1
msg = msg+msgAddon
server = smtplib.SMTP('smtp.gmail.com', 587)
server.set_debuglevel(1)
#
# Start the conversation with EHLO
#
server.ehlo()
#
# Request STARTTLS
#
server.starttls()
#
# And say EHLO again
#
server.ehlo()
#
    # Login to the server with SMTP AUTH now that we're TLS'd and the client is identified
    #
server.login(smtpuser, smtppass)
#
# Finally, send the mail!
#
server.sendmail(fromaddr, toaddrs, msg)
try:
server.quit()
except:
pass
"""
fp = open(textfile, 'rb')
msg = MIMEText(fp.read())
fp.close()
# me == the sender's email address
# you == the recipient's email address
msg['Subject'] = 'The contents of %s' % textfile
msg['From'] = me
msg['To'] = you
# Send the message via our own SMTP server, but don't include the
# envelope header.
s = smtplib.SMTP()
s.sendmail(me, [you], msg.as_string())
s.quit()
"""
def userIn():
#print list of stations(fxn to convert 4 letter to station)
#iwant = (raw_input("station (ex. 'dbrk'): "))
#Connor N. contributer
iwant = 'dbrk'
statNum = ["12th", "16th","19th","24th","ashb", "balb","bayf", "cast", "civc", "cols", "colm", "conc", "daly", "dbrk", "dubl", "deln", "plza", "embr", "frmt", "ftvl", "glen", "hayw", "lafy", "lake", "mcar", "mlbr", "mont", "nbrk", "ncon", "orin", "pitt", "phil", "powl", "rich", "rock", "sbrn", "sfia", "sanl", "shay", "ssan", "ucty", "wcrk", "woak"]
n = statNum.index(iwant)
return n
def main():
"""
Here is how it works:
Send an email to [email protected]
with a subject line "nextBart dbrk"
server recieves email
email to a gmail account
gmail account only forwards properly formatted emails
postfix recieves emails
http://flurdy.com/docs/postfix/
uses python smtplib to parse email subject line
redirect subject line argument "dbrk"
to python script as follows:
bartETA.py dbrk
this calls previous script
to send text from method displayETA()
as email to the email sender
"""
# the main code goes here
global taggedxml,statInf
taggedxml = whichTags('station')
statInf = whichStation(userIn()) #userIn() for testing
displayETAs()
#emailETAs('[email protected]')
if __name__=="__main__":
main()
|
py
|
1a5e770d984de69563c2d6abba5db8847f35c943
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WeightLog.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py
|
1a5e7766243cdb6e4559bf5ca57414205a7aa5a1
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import models
from bitcoinrpc.authproxy import JSONRPCException
import misc
import re
from misc import printdbg
import time
# mixin for GovObj composed classes like proposal and superblock, etc.
class GovernanceClass(object):
only_masternode_can_submit = False
# lazy
@property
def go(self):
return self.governance_object
# pass thru to GovernanceObject#vote
def vote(self, kauricoind, signal, outcome):
return self.go.vote(kauricoind, signal, outcome)
# pass thru to GovernanceObject#voted_on
def voted_on(self, **kwargs):
return self.go.voted_on(**kwargs)
def vote_validity(self, kauricoind):
if self.is_valid():
printdbg("Voting valid! %s: %d" % (self.__class__.__name__, self.id))
self.vote(kauricoind, models.VoteSignals.valid, models.VoteOutcomes.yes)
else:
printdbg("Voting INVALID! %s: %d" % (self.__class__.__name__, self.id))
self.vote(kauricoind, models.VoteSignals.valid, models.VoteOutcomes.no)
def get_submit_command(self):
object_fee_tx = self.go.object_fee_tx
import kauricoinlib
obj_data = kauricoinlib.SHIM_serialise_for_kauricoind(self.serialise())
cmd = ['gobject', 'submit', '0', '1', str(int(time.time())), obj_data, object_fee_tx]
return cmd
def list(self):
dikt = {
"DataHex": self.serialise(),
"Hash": self.object_hash,
"CollateralHash": self.go.object_fee_tx,
"AbsoluteYesCount": self.go.absolute_yes_count,
"YesCount": self.go.yes_count,
"NoCount": self.go.no_count,
"AbstainCount": self.go.abstain_count,
}
# return a dict similar to kauricoind "gobject list" output
return {self.object_hash: dikt}
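    # Note: this second definition overrides get_submit_command() above; the
    # collateral fee tx is only appended for objects a regular node may submit.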
def get_submit_command(self):
import kauricoinlib
obj_data = kauricoinlib.SHIM_serialise_for_kauricoind(self.serialise())
# new objects won't have parent_hash, revision, etc...
cmd = ['gobject', 'submit', '0', '1', str(int(time.time())), obj_data]
# some objects don't have a collateral tx to submit
if not self.only_masternode_can_submit:
            cmd.append(self.go.object_fee_tx)
return cmd
def submit(self, kauricoind):
# don't attempt to submit a superblock unless a masternode
# note: will probably re-factor this, this has code smell
if (self.only_masternode_can_submit and not kauricoind.is_masternode()):
print("Not a masternode. Only masternodes may submit these objects")
return
try:
object_hash = kauricoind.rpc_command(*self.get_submit_command())
printdbg("Submitted: [%s]" % object_hash)
except JSONRPCException as e:
print("Unable to submit: %s" % e.message)
def serialise(self):
import inflection
import binascii
import simplejson
# 'proposal', 'superblock', etc.
name = self._meta.name
obj_type = inflection.singularize(name)
return binascii.hexlify(simplejson.dumps((obj_type, self.get_dict()), sort_keys=True).encode('utf-8')).decode('utf-8')
def kauricoind_serialise(self):
import kauricoinlib
return kauricoinlib.SHIM_serialise_for_kauricoind(self.serialise())
@classmethod
def serialisable_fields(self):
# Python is so not very elegant...
pk_column = self._meta.primary_key.db_column
fk_columns = [fk.db_column for fk in self._meta.rel.values()]
do_not_use = [pk_column]
do_not_use.extend(fk_columns)
do_not_use.append('object_hash')
fields_to_serialise = list(self._meta.columns.keys())
for field in do_not_use:
if field in fields_to_serialise:
fields_to_serialise.remove(field)
return fields_to_serialise
def get_dict(self):
dikt = {}
for field_name in self.serialisable_fields():
dikt[field_name] = getattr(self, field_name)
return dikt
|
py
|
1a5e777a05027a712d308788865b15f36f639536
|
"""
Expression class
Basic handling for microarray and rna-seq and realtime PCR like data
"""
import sys, os, csv, string, math, collections
from operator import itemgetter
import numpy
from numpy import array, arange, meshgrid, zeros, linspace, mean, object_, std # This use of array here is not good.
from . import config
from .flags import *
from .draw import draw
from .genelist import genelist
from .progress import progressbar
from .errors import AssertionError, ArgumentError, ExpressionNonUniqueConditionNameError
from .utils import qdeepcopy
class base_expression(genelist):
def __init__(self, filename=None, loadable_list=None, format=None, expn=None, silent:bool=False, **kargs):
"""
See the documentation in the expression class.
This is the underlying base expression object and is not designed for direct usage.
"""
'''
if not loadable_list:
# these are only required if not loading a list
assert expn, "'expn' argument cannot be empty"
assert filename, "no filename to load"
assert format, "required argument 'format' is missing"
assert os.path.exists(os.path.realpath(filename)), "'%s' not found" % filename
else:
# probably should put some more sanity checking in here.
assert loadable_list[0], "the list to load does not appear to be a proper list"
'''
if "cv_err" in kargs or "err_up" in kargs or "err_dn" in kargs:
raise NotImplementedError("Whoops! I haven't finished expression class - cv_err, err_up and err_dn are not implemented")
valig_args = ["cond_names", "name", "force_tsv", "nan_value"]
for k in kargs:
if k not in valig_args:
raise ArgumentError(self.__init__, k)
genelist.__init__(self)
self.filename = filename
self._conditions = [] # Provide a dummy conditions temporarily
self.name = "None"
if "name" in kargs and kargs["name"]:
self.name = kargs["name"]
elif filename:
self.name = "".join(self.filename.split(".")[:-1])
if not loadable_list and not expn:
config.log.info("expression: made an empty expression object")
return()
if loadable_list:
self.load_list(loadable_list, expn, **kargs)
else:
# This is a placeholder at the moment,
# I reload the expn and err values back into the format
# When you redo this, remember to also redo load_list()
newf = format
newf["conditions"] = {"code": expn}
if "err" in kargs and kargs["err"]:
newf["err"] = {"code": kargs["err"]}
elif "cv_err" in kargs and kargs["cv_err"]:
newf["cv_err"] = kargs["cv_err"]
if "force_tsv" in kargs and kargs["force_tsv"]:
newf["force_tsv"] = True
format = newf
self.loadCSV(filename=filename, format=format) # no need for error checking here - it's in genelist now.
if "cond_names" in kargs and kargs["cond_names"]:
self._conditions = kargs["cond_names"]
else:
# re-open the file and try to guess the conditions
# reopen the file to get the condition headers.
oh = open(filename, "rU")
if "force_tsv" in format and format["force_tsv"]:
reader = csv.reader(oh, dialect=csv.excel_tab)
elif "dialect" in format:
reader = csv.reader(oh, dialect=format["dialect"])
else:
reader = csv.reader(oh)
do = False
self._conditions = []
for index, column in enumerate(reader):
if "skiptill" in kargs:
if kargs["skiptill"] in column:
do = True
elif "skiplines" in kargs:
if index == kargs["skiplines"]:
do = True
else:
do = True # do anyway
if do:
names = eval("{0}".format(format["conditions"]["code"])) # yay, more nice happy arbitrary code execution.
if names:
self._conditions = [str(k) for k in names]
break
oh.close()
if not silent:
config.log.info("expression: I found the following conditions:")
config.log.info("\n".join(["%s\t%s" % (n, i) for n, i in enumerate(self._conditions)]))
# coerce the conditions errs etc to floats
nans = set(('nan', 'Nan', 'NaN'))
for idx, i in enumerate(self):
try:
# Nan policy:
if True in [t in nans for t in i["conditions"]]:
                    config.log.warning("line {0} contains NaN, filling with 0".format(idx))
newc = []
for c in i['conditions']:
if c in nans:
newc.append(0.0) # nan policy
else:
newc.append(c)
i['conditions'] = newc
i["conditions"] = [float(str(t).replace(",", "")) for t in i["conditions"]] # because somebody once sent me a file with ',' for thousands!
except ValueError:
                config.log.warning("line %s contains missing data (%s), filling with 0" % (idx, i["conditions"]))
i["conditions"] = [0 for t in self._conditions] # Use conditions as the example I had here was also missing all of the other values.
# These will bomb on missing data...
if "err" in i:
i["err"] = [float(t) for t in i["err"]]
if "cv_err" in i:
i["cv_err"] = [float(t) for t in i["cv_err"]]
self.__check_condition_names_are_unique()
self._optimiseData()
if not silent:
config.log.info("expression: loaded %s items, %s conditions" % (len(self), len(self.getConditionNames())))
def __check_condition_names_are_unique(self):
"""
Bit of a gotcha this one, but expression objects must have unique condition names
or lots of things break. Here, check the condition names are unique.
"""
if len(self._conditions) > len(set(self._conditions)):
raise ExpressionNonUniqueConditionNameError(self._conditions)
return(False)
def __repr__(self):
return("glbase.expression")
def _load_numpy_back_into_linearData(self):
"""
For routines that make a change in self.numpy_array_all_data
this must be called after to propagate the changes back into linearData
"""
for i, row in enumerate(self.numpy_array_all_data):
self.linearData[i]["conditions"] = list(row)
self._optimiseData()
def _optimiseData(self):
"""
(Override)
(Internal)
Add expression optimisations
"""
genelist._optimiseData(self) # do the parent optimise.
# generate a serialised version of the array conditions.
self.numpy_array_all_data = numpy.array([i["conditions"] for i in self.linearData])
# could be done with dict comp:
data = {}
for index, name in enumerate(self._conditions):
if not name in data:
data[name] = self.numpy_array_all_data[:,index]
self.serialisedArrayDataDict = data
# list;
self.serialisedArrayDataList = [self.serialisedArrayDataDict[key] for key in self._conditions]
#self.serialisedArrayDataList = all_array_data # This consumes massive amounts of memory.
# presumably something downstream is doing something nasty.
return(True)
def saveCSV(self, filename=None, interleave_errors=True, no_header=False, no_col1_header=False, **kargs):
"""
A CSV version of saveTSV(), see saveTSV() for syntax
"""
self.saveTSV(filename=filename, tsv=False, interleave_errors=interleave_errors, no_header=no_header, no_col1_header=no_col1_header, **kargs)
config.log.info("saveCSV(): Saved '%s'" % filename)
def saveTSV(self, filename=None, tsv=True, interleave_errors=True, no_header=False, no_col1_header=False, **kargs):
"""
(Override)
**Purpose**
Save the microarray data as a tsv file
This is a little different from the normal genelist.saveTSV()
as I want to make certain that the condition data is written in a sensible manner at
the end of the TSV.
I also need to deal with grid like structures etc.
As a general warning, use expression.save() in preference to this.
This save is not guaranteed to survive reloading into glbase, and is particularly
troublesome in the case of expression objects. Indeed, the default guesser when loading
a genelist object will incorrectly load an expression object with error values
and will probably bodge any other arrangement too.
**Arguments**
filename
The filename (with a valid path) to save the file to.
interleave_errors (Optional, default=True)
By default the errors are interleaved so that the sample data will be arranged:
Sample1 Err1 Sample2 Err2
if interleave_errors=False then:
Sample1 Sample2 Err1 Err2
no_col1_header (Optional, default=False)
In case you want a table like this:
A B C D
W 1 2 3 4
X 2 2 2 2
Y 2 2 2 2
Z 2 2 2 2
i.e. the top left column label is empty.
**Returns**
returns None
"""
self._save_TSV_CSV(filename=filename, tsv=tsv, interleave_errors=interleave_errors, no_header=no_header, no_col1_header=no_col1_header, **kargs)
config.log.info("saveTSV(): Saved '%s'" % filename)
def _save_TSV_CSV(self, filename=None, tsv=True, interleave_errors=True, no_header=False, no_col1_header=False, **kargs):
"""
Internal unified saveCSV/TSV for expression objects
"""
valid_args = ["filename", "tsv", "key_order", "no_header"]
for k in kargs:
if k not in valid_args:
raise ArgumentError(self.saveCSV, k)
assert filename, "you must specify a filename"
oh = open(os.path.realpath(filename), "w")
if tsv:
writer = csv.writer(oh, dialect=csv.excel_tab)
else:
writer = csv.writer(oh)
array_data_keys = ("conditions", "err", "cv_err")
write_keys = []
if "key_order" in kargs:
write_keys = kargs["key_order"]
# now add in any missing keys to the right side of the list:
for item in list(self.keys()):
if item not in write_keys and item not in array_data_keys: # But omit the array_data_keys
write_keys.append(item)
else:
# just select them all:
write_keys = [k for k in list(self.keys()) if not k in array_data_keys]
if "err" in list(self.keys()):
if interleave_errors:
conds = ["mean_%s" % c for c in self.getConditionNames()]
errs = ["err_%s" % c for c in self.getConditionNames()]
paired = [val for pair in zip(conds, errs) for val in pair]
if not no_header:
title_row = [k for k in write_keys if k in list(self.keys())]
writer.writerow(title_row + paired)
for data in self.linearData:
line = [data[k] for k in write_keys if k in data]
interleaved_data = [val for pair in zip(data["conditions"], data["err"]) for val in pair] # I never understand how these work, but what the hell.
writer.writerow(line + interleaved_data)# conditions go last.
oh.close()
else:
if not no_header:
title_row = [k for k in write_keys if k in list(self.keys())]
writer.writerow(title_row + self.getConditionNames() + ["err_%s" % i for i in self.getConditionNames()])
for data in self.linearData:
line = [data[k] for k in write_keys if k in data]
writer.writerow(line + data["conditions"] + data["err"])# conditions go last.
oh.close()
else: # no error, very easy:
if not no_header:
title_row = [k for k in write_keys if k in list(self.keys())]
if no_col1_header:
title_row[0] = ""
writer.writerow(title_row + self.getConditionNames())
for data in self.linearData:
line = [data[k] for k in write_keys if k in data]
writer.writerow(line + data["conditions"])# conditions go last.
oh.close()
return(None)
def sort(self, key, reverse=False):
"""
This is slightly different from the vanilla genelist's sort - you can pass it the name of
a condition. Take care to make sure the condition name is not also a valid list key.
The algorithm searches the genelist before searching the array for your particular condition.
Also take care with this one: It is one of the few in-place list
modifiers.
**Arguments**
key
must be a valid key in the genelist or the name of an array condition.
reverse (Optional, default=False)
By default the list is sorted smallest to largest.
reverse = True sorts largest to smallest.
**Result**
returns True if successful.
returns False if not valid.
"""
assert (key in self.linearData[0]) or key in self._conditions, "'%s' search key not found in list or array data" % key
if key in self.linearData[0]:
return(genelist.sort(self, key, reverse=reverse)) # use the parents sort.
else:
if key in self._conditions:
name_index = self._conditions.index(key)
self.linearData = sorted(self.linearData, key=lambda x: x["conditions"][name_index]) # the original sort() was overridden.
if reverse:
self.linearData.reverse()
self._optimiseData()
return(True)
return(False)
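# A usage sketch for sort() (the object name, key and condition below are
# hypothetical examples, not names guaranteed to exist in your data):
#   expn.sort("name")                # sorts by a genelist key
#   expn.sort("day2", reverse=True)  # sorts by the values of the condition "day2"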
def load_list(self, list_to_load, expn=None, name=False, cond_names=None, nan_value=0):
"""
**Purpose**
You've generated your own [{ ... }, { ...}] like list
(A list of dicts) and you want to either reload it into
a genelist-like object or load it into an empty genelist.
This is the method to do that officially.
This method should be used with care. Some sanity
checking is done. But not very much.
This load_list is modified for expression-like genelists.
(eg. expression()). Here you can load keys into conditions based on
their key names.
**Arguments**
list_to_load
must be a list of dicts.
expn (optional)
A list of key names to construct the expression data from
If not specified then it assumes your list already has a correctly formatted
"conditions" key.
**Returns**
None. This is one of the few IN PLACE methods and returns
None.
"""
assert list_to_load[0], "list_to_load does not appear to be a valid list"
__nan_warnings = False
nans = frozenset(["Inf", "-Inf", "NA", "Nan", "NaN"])
if expn:
assert isinstance(expn, list), "'expn' must be a list of keys"
# Bodge in a new "conditions" key:
newl = []
for i in list_to_load:
new = i.copy()
nl = [i[k] for k in expn]
# test for Inf, -Inf, NA, NaN, etc.
if True in [ti in nans for ti in nl]: # woah! Nan here.
t = []
for item in nl:
if item in nans:
t.append(nan_value)
else:
t.append(item)
nl = t
if not __nan_warnings:
__nan_warnings = True
config.log.warning("Expression list contains 'not a number' values, setting them to <nan_value=%s>" % nan_value)
new["conditions"] = nl
for k in expn:
del new[k]
newl.append(new)
self._conditions = expn
else:
newl = list_to_load
if cond_names: # user sent the conditions names. Hope they are in the same order
assert len(cond_names) == len(newl[0]["conditions"]), "cond_names is not the same length as the number of conditions"
self._conditions = cond_names
else:
# conditions can get lost in a loadable list. fill in a dummy one
if len(self._conditions) != len(newl[0]["conditions"]):
self._conditions = ["cond_%s" % i for i in range(len(newl[0]["conditions"]))]
# Now call parent with new list
genelist.load_list(self, newl, name)
def from_pandas(self, pandas_data_frame, condition_names=None):
"""
**Purpose**
Convert a pandas dataFrame to a genelist
NOTE: This is an IN PLACE method that will REPLACE any existing data
in the expression object
**Arguments**
pandas_data_frame (Required)
The pandas data frame to convert
condition_names (Required)
A list of Column names from the Pandas frame to use as expression data
**Result**
None
The object is populated by the Pandas object
"""
assert condition_names, 'You must specify condition_names'
assert isinstance(condition_names, list), 'condition_names must be a list of column names'
if len(self) > 0:
config.log.warning('expression.from_pandas() will overwrite the existing data in the expression')
newl = []
key_names = pandas_data_frame.columns
for index, row in pandas_data_frame.iterrows():
newitem = {}
# load normal keys:
for k, item in zip(key_names, row):
if k not in condition_names:
newitem[k] = item
# load conditions, in-order:
dict_items = dict(zip(key_names, row))
newitem['conditions'] = [dict_items[z] for z in condition_names]
newl.append(newitem)
self._conditions = condition_names
self.linearData = newl
self._optimiseData()
config.log.info("expression.from_pandas() imported dataFrame")
def getConditionNames(self):
"""
returns a list of the condition headers
"""
return(list(self._conditions))
def setConditionNames(self, new_cond_names):
"""
rename the conditions names for the expression data
THIS IS AN IN-PLACE method and returns None
"""
assert len(new_cond_names) == len(self._conditions), "setConditionNames(): new and old condition names are different lengths (%s vs. %s)" % (len(new_cond_names), len(self._conditions))
self._conditions = list(new_cond_names)
self.__check_condition_names_are_unique()
self._optimiseData()
return(self._conditions)
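# Minimal usage sketch for building expression data (assumes this base class is
# exposed through glbase's public expression class and that pandas is installed;
# the variable and column names below are illustrative, not part of the code above):
#   import pandas
#   df = pandas.DataFrame({"name": ["geneA", "geneB"], "ctrl": [1.0, 2.0], "treat": [3.5, 0.5]})
#   expr = expression()                                      # empty expression object
#   expr.from_pandas(df, condition_names=["ctrl", "treat"])
#   print(expr.getConditionNames())                          # ['ctrl', 'treat']
#   # or, from a list of dicts, pulling the named keys into "conditions":
#   expr.load_list([{"name": "geneA", "ctrl": 1.0, "treat": 3.5}], expn=["ctrl", "treat"])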
|
py
|
1a5e78a6c733120957593a2e54cf88b96951638d
|
""" philoseismos: engineering seismologist's toolbox.
author: Ivan Dubrovin
e-mail: [email protected] """
import setuptools
with open('README.md') as f:
long_description = f.read()
setuptools.setup(
name='philoseismos',
version='0.0.32_alpha',
author="Ivan Dubrovin",
author_email="[email protected]",
description="Engineering seismologist's toolbox",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/iod-ine/philoseismos",
packages=setuptools.find_packages(),
install_requires=[
'numpy',
'pandas',
'scipy',
'matplotlib',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Natural Language :: English",
],
python_requires='>=3.6',
)
|
py
|
1a5e78bee39ce25cacac400c631c6472f140501c
|
import uuid
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
class MyUserManager(BaseUserManager):
def _create_user(self, email, password, first_name, last_name, is_staff, is_superuser, **extra_fields):
"""
Create and save a User with the given email, password and name.
:param email: string
:param password: string
:param first_name: string
:param last_name: string
:param is_staff: boolean
:param is_superuser: boolean
:param extra_fields:
:return: User
"""
now = timezone.now()
email = self.normalize_email(email)
user = self.model(email=email,
first_name=first_name,
last_name=last_name,
is_staff=is_staff,
is_active=True,
is_superuser=is_superuser,
last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, first_name, last_name, password, **extra_fields):
"""
Create and save a User with the given email, password and name.
:param email: string
:param first_name: string
:param last_name: string
:param password: string
:param extra_fields:
:return: User
"""
return self._create_user(email, password, first_name, last_name, is_staff=False, is_superuser=False,
**extra_fields)
def create_superuser(self, email, first_name='', last_name='', password=None, **extra_fields):
"""
Create a super user.
:param email: string
:param first_name: string
:param last_name: string
:param password: string
:param extra_fields:
:return: User
"""
return self._create_user(email, password, first_name, last_name, is_staff=True, is_superuser=True,
**extra_fields)
class User(AbstractBaseUser):
"""
Model that represents a user.
To be active, the user must register and confirm their email.
"""
GENDER_MALE = 'M'
GENDER_FEMALE = 'F'
GENDER_CHOICES = (
(GENDER_MALE, 'Male'),
(GENDER_FEMALE, 'Female')
)
# we want primary key to be called id so need to ignore pylint
# What is the purpose of this field?
# id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) # pylint: disable=invalid-name
first_name = models.CharField(_('First name'), max_length=50)
last_name = models.CharField(_('Last name'), max_length=50)
email = models.EmailField(_('Email'), unique=True)
position = models.CharField(_('Position'), max_length=300, default='Participant')
image = models.ImageField(_("Photo"), default='')
gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=GENDER_MALE)
confirmed_email = models.BooleanField(default=False)
is_staff = models.BooleanField(_('staff status'), default=False)
is_superuser = models.BooleanField(_('superuser status'), default=False)
is_active = models.BooleanField(_('active'), default=True)
date_joined = models.DateTimeField(_('Date joined'), auto_now_add=True)
date_updated = models.DateTimeField(_('date updated'), auto_now=True)
activation_key = models.UUIDField(unique=True, default=uuid.uuid4) # email
USERNAME_FIELD = 'email'
objects = MyUserManager()
def has_perm(self, perm, obj=None):
return self.is_superuser
def has_module_perms(self, app_label):
return self.is_superuser
def __str__(self):
"""
Unicode representation for a user model.
:return: string
"""
return self.email
def get_full_name(self):
"""
Return the first_name plus the last_name, with a space in between.
:return: string
"""
return "{0} {1}".format(self.first_name, self.last_name)
def get_short_name(self):
"""
Return the first_name.
:return: string
"""
return self.first_name
def activation_expired(self):
"""
Check if user's activation has expired.
:return: boolean
"""
return self.date_joined + timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS) < timezone.now()
def confirm_email(self):
"""
Confirm email.
:return: boolean
"""
if not self.activation_expired() and not self.confirmed_email:
self.confirmed_email = True
self.save()
return True
return False
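# Usage sketch (illustrative values only; the email, names and password below are
# hypothetical and not taken from this project):
#   user = User.objects.create_user(email="[email protected]", first_name="Jane",
#                                   last_name="Doe", password="change-me")
#   user.confirm_email()   # True while the activation window is still open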
SOCIAL_CHOICES = (
('icon icon-github', "GitHub"),
('icon icon-vk', "VKontakte"),
('icon icon-instagram', "Instagram"),
('icon icon-twitter', "Twitter"),
)
class SocialNetwork(models.Model):
name = models.CharField(_("Name"), max_length=300, choices=SOCIAL_CHOICES)
url = models.URLField(_("Link"), max_length=200)
user = models.ForeignKey(User, default=0, related_name='social_networks')
def __str__(self):
return self.url
|
py
|
1a5e79f82e38a52ede3792bb849a6d0d35823449
|
import typing
from collections import OrderedDict
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from rest_framework.viewsets import ModelViewSet
from pydis_site.apps.api.models.bot.infraction import Infraction
from pydis_site.apps.api.models.bot.metricity import Metricity, NotFoundError
from pydis_site.apps.api.models.bot.user import User
from pydis_site.apps.api.serializers import UserSerializer
class UserListPagination(PageNumberPagination):
"""Custom pagination class for the User Model."""
page_size = 2500
page_size_query_param = "page_size"
def get_next_page_number(self) -> typing.Optional[int]:
"""Get the next page number."""
if not self.page.has_next():
return None
page_number = self.page.next_page_number()
return page_number
def get_previous_page_number(self) -> typing.Optional[int]:
"""Get the previous page number."""
if not self.page.has_previous():
return None
page_number = self.page.previous_page_number()
return page_number
def get_paginated_response(self, data: list) -> Response:
"""Override method to send modified response."""
return Response(OrderedDict([
('count', self.page.paginator.count),
('next_page_no', self.get_next_page_number()),
('previous_page_no', self.get_previous_page_number()),
('results', data)
]))
class UserViewSet(ModelViewSet):
"""
View providing CRUD operations on Discord users through the bot.
## Routes
### GET /bot/users
Returns all users currently known with pagination.
#### Response format
>>> {
... 'count': 95000,
... 'next_page_no': "2",
... 'previous_page_no': None,
... 'results': [
... {
... 'id': 409107086526644234,
... 'name': "Python",
... 'discriminator': 4329,
... 'roles': [
... 352427296948486144,
... 270988689419665409,
... 277546923144249364,
... 458226699344019457
... ],
... 'in_guild': True
... },
... ]
... }
#### Optional Query Parameters
- page_size: number of Users in one page, defaults to 10,000
- page: page number
#### Status codes
- 200: returned on success
### GET /bot/users/<snowflake:int>
Gets a single user by ID.
#### Response format
>>> {
... 'id': 409107086526644234,
... 'name': "Python",
... 'discriminator': 4329,
... 'roles': [
... 352427296948486144,
... 270988689419665409,
... 277546923144249364,
... 458226699344019457
... ],
... 'in_guild': True
... }
#### Status codes
- 200: returned on success
- 404: if a user with the given `snowflake` could not be found
### GET /bot/users/<snowflake:int>/metricity_data
Gets metricity data for a single user by ID.
#### Response format
>>> {
... "joined_at": "2020-10-06T21:54:23.540766",
... "total_messages": 2,
... "voice_banned": False,
... "activity_blocks": 1
...}
#### Status codes
- 200: returned on success
- 404: if a user with the given `snowflake` could not be found
### GET /bot/users/<snowflake:int>/metricity_review_data
Gets metricity data for a single user's review by ID.
#### Response format
>>> {
... 'joined_at': '2020-08-26T08:09:43.507000',
... 'top_channel_activity': [['off-topic', 15],
... ['talent-pool', 4],
... ['defcon', 2]],
... 'total_messages': 22
... }
#### Status codes
- 200: returned on success
- 404: if a user with the given `snowflake` could not be found
### POST /bot/users
Adds a single or multiple new users.
The roles attached to the user(s) must be roles known by the site.
Users that already exist in the database will be skipped.
#### Request body
>>> {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... }
Alternatively, request users can be POSTed as a list of above objects,
in which case multiple users will be created at once. In this case,
the response is an empty list.
#### Status codes
- 201: returned on success
- 400: if one of the given roles does not exist, or one of the given fields is invalid
- 400: if multiple user objects with the same id are given
### PUT /bot/users/<snowflake:int>
Update the user with the given `snowflake`.
All fields in the request body are required.
#### Request body
>>> {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... }
#### Status codes
- 200: returned on success
- 400: if the request body was invalid, see response body for details
- 404: if the user with the given `snowflake` could not be found
### PATCH /bot/users/<snowflake:int>
Update the user with the given `snowflake`.
All fields in the request body are optional.
#### Request body
>>> {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... }
#### Status codes
- 200: returned on success
- 400: if the request body was invalid, see response body for details
- 404: if the user with the given `snowflake` could not be found
### BULK PATCH /bot/users/bulk_patch
Update users with the given `ids` and `details`.
`id` field and at least one other field is mandatory.
#### Request body
>>> [
... {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... },
... {
... 'id': int,
... 'name': str,
... 'discriminator': int,
... 'roles': List[int],
... 'in_guild': bool
... },
... ]
#### Status codes
- 200: returned on success
- 400: if the request body was invalid, see response body for details
- 400: if multiple user objects with the same id are given
- 404: if the user with the given id does not exist
### DELETE /bot/users/<snowflake:int>
Deletes the user with the given `snowflake`.
#### Status codes
- 204: returned on success
- 404: if a user with the given `snowflake` does not exist
"""
serializer_class = UserSerializer
queryset = User.objects.all().order_by("id")
pagination_class = UserListPagination
def get_serializer(self, *args, **kwargs) -> ModelSerializer:
"""Set Serializer many attribute to True if request body contains a list."""
if isinstance(kwargs.get('data', {}), list):
kwargs['many'] = True
return super().get_serializer(*args, **kwargs)
@action(detail=False, methods=["PATCH"], name='user-bulk-patch')
def bulk_patch(self, request: Request) -> Response:
"""Update multiple User objects in a single request."""
serializer = self.get_serializer(
instance=self.get_queryset(),
data=request.data,
many=True,
partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
@action(detail=True)
def metricity_data(self, request: Request, pk: str = None) -> Response:
"""Request handler for metricity_data endpoint."""
user = self.get_object()
try:
Infraction.objects.get(user__id=user.id, active=True, type="voice_ban")
except ObjectDoesNotExist:
voice_banned = False
else:
voice_banned = True
with Metricity() as metricity:
try:
data = metricity.user(user.id)
data["total_messages"] = metricity.total_messages(user.id)
data["activity_blocks"] = metricity.total_message_blocks(user.id)
data["voice_banned"] = voice_banned
return Response(data, status=status.HTTP_200_OK)
except NotFoundError:
return Response(dict(detail="User not found in metricity"),
status=status.HTTP_404_NOT_FOUND)
@action(detail=True)
def metricity_review_data(self, request: Request, pk: str = None) -> Response:
"""Request handler for metricity_review_data endpoint."""
user = self.get_object()
with Metricity() as metricity:
try:
data = metricity.user(user.id)
data["total_messages"] = metricity.total_messages(user.id)
data["top_channel_activity"] = metricity.top_channel_activity(user.id)
return Response(data, status=status.HTTP_200_OK)
except NotFoundError:
return Response(dict(detail="User not found in metricity"),
status=status.HTTP_404_NOT_FOUND)
|
py
|
1a5e7a7db33d9cb8b0743d9b21a3797dd1d41943
|
from .MediaPlayer import MediaPlayer
from .PlayLists import PlayLists, PlaylistOrdering
from .Player import Player
from .Root import Root
from .TrackList import TrackList
from .common import available_players, PyMPRISException
__version__ = '1.4'
__description__ = 'Library to control media players using MPRIS2 interfaces'
requires = [
# pympris depends on dbus-python,
# but dbus-python can't be installed automaticaly
# 'dbus-python>=1.0'
]
README = """pympris is a Python library used
to control media players using MPRIS2 interfaces.
Usage
=====
::
import gobject
import dbus
from dbus.mainloop.glib import DBusGMainLoop
import pympris
dbus_loop = DBusGMainLoop()
bus = dbus.SessionBus(mainloop=dbus_loop)
# get unique ids for all available players
players_ids = list(pympris.available_players())
mp = pympris.MediaPlayer(players_ids[1], bus)
# mp.root implements org.mpris.MediaPlayer2 interface
# mp.player implements org.mpris.MediaPlayer2.Player
# mp.track_list implements org.mpris.MediaPlayer2.TrackList
# mp.playlists implements org.mpris.MediaPlayer2.Playlists
# print player Identity
print mp.root.Identity
if mp.root.CanRaise:
mp.root.Raise()
if mp.player.CanPlay and mp.player.CanPause:
mp.player.PlayPause()
mp.player.Volume = mp.player.Volume*2
if mp.player.CanGoNext:
mp.player.Next()
tracks = mp.track_list.Tracks
for track_id in tracks:
print track_id
if len(tracks) > 1:
mp.track_list.RemoveTrack(tracks[-1])
mp.track_list.GoTo(tracks[0])
n = mp.playlists.PlaylistCount
ordering = pympris.PlaylistOrdering.LastPlayDate
playlists = mp.playlists.GetPlaylists(0, n, ordering, reversed=False)
pl_id, pl_name, pl_icon = playlists[-2]
mp.playlists.ActivatePlaylist(pl_id)
# setup signal handlers
def seeked(x):
print(x)
def PlaylistChanged(arg):
print "PlaylistChanged", arg
def TrackMetadataChanged(track_id, metadata):
print "TrackMetadataChanged", track_id, metadata
def TrackListReplaced(tracks, current_track):
print "TrackListReplaced", tracks, current_track
def TrackAdded(metadata, after_track):
print "TrackAdded", metadata, after_track
def TrackRemoved(track_id):
print "TrackRemoved", track_id
mp.player.register_signal_handler('Seeked', seeked)
mp.playlists.register_signal_handler('PlaylistChanged', PlaylistChanged)
mp.track_list.register_signal_handler('TrackMetadataChanged',
TrackMetadataChanged)
mp.track_list.register_signal_handler('TrackListReplaced', TrackListReplaced)
mp.track_list.register_signal_handler('TrackAdded', TrackAdded)
mp.track_list.register_signal_handler('TrackRemoved', TrackRemoved)
loop = gobject.MainLoop()
loop.run()
"""
|
py
|
1a5e7b36acafcc5d5d0a7bfae04fd8363c3234ea
|
# computing the standard deviation manually with numpy
import numpy as np
a = [3, 4, 7, 3, 2, 4, 1]
a = np.array(a)
# the numpy operations on a are vectorized, so they are applied to every element at once
std = np.sqrt(np.sum((a - np.mean(a)) ** 2) / a.shape[0])  # divide by n inside the sqrt to get the population standard deviation
print(std)
# if I want to create an ndarray directly (note: np.ndarray allocates uninitialized memory, so the values are arbitrary rather than random samples)
x = np.ndarray((3, 2), dtype='float32')
print(x)
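# Cross-check (a small sketch): numpy's built-in np.std should agree with the manual
# population standard deviation computed above (np.std uses ddof=0 by default).
print(np.std(a))           # population standard deviation, same as the formula above
print(np.std(a, ddof=1))   # sample standard deviation, divides by (n - 1) instead of n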
|
py
|
1a5e7c2ae136dab0520b7e6b5b8b6a1282c80213
|
# STDLIB
import logging
import pathlib
import subprocess
import sys
logger = logging.getLogger()
package_dir = "pct_python_default_test"
cli_filename = "pct_python_default_test_cli.py"
path_cli_command = pathlib.Path(__file__).resolve().parent.parent / package_dir / cli_filename
def call_cli_command(commandline_args: str = "") -> bool:
command = " ".join([sys.executable, str(path_cli_command), commandline_args])
try:
subprocess.run(command, shell=True, check=True)
except subprocess.CalledProcessError:
return False
return True
def test_cli_commands() -> None:
# due to a bug in python 3.8.1 with setup.py test on travis we need to cancel the click tests there !
if sys.version_info < (3, 8, 1) or sys.version_info >= (3, 8, 2):
assert not call_cli_command("--unknown_option")
assert call_cli_command("--version")
assert call_cli_command("-h")
assert call_cli_command("info")
assert call_cli_command("--traceback info")
|
py
|
1a5e7ecb85a0edd6fd7233be12ecbae74b412eb2
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import datetime
import six
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow import configuration
from airflow.utils.log.LoggingMixin import LoggingMixin
from airflow.utils.state import State
from qds_sdk.qubole import Qubole
from qds_sdk.commands import Command, HiveCommand, PrestoCommand, HadoopCommand, \
PigCommand, ShellCommand, SparkCommand, DbTapQueryCommand, DbExportCommand, \
DbImportCommand
COMMAND_CLASSES = {
"hivecmd": HiveCommand,
"prestocmd": PrestoCommand,
"hadoopcmd": HadoopCommand,
"shellcmd": ShellCommand,
"pigcmd": PigCommand,
"sparkcmd": SparkCommand,
"dbtapquerycmd": DbTapQueryCommand,
"dbexportcmd": DbExportCommand,
"dbimportcmd": DbImportCommand
}
HYPHEN_ARGS = ['cluster_label', 'app_id', 'note_id']
POSITIONAL_ARGS = ['sub_command', 'parameters']
COMMAND_ARGS = {
"hivecmd": ['query', 'script_location', 'macros', 'tags', 'sample_size',
'cluster_label', 'name'],
'prestocmd': ['query', 'script_location', 'macros', 'tags', 'cluster_label', 'name'],
'hadoopcmd': ['sub_command', 'tags', 'cluster_label', 'name'],
'shellcmd': ['script', 'script_location', 'files', 'archives', 'parameters', 'tags',
'cluster_label', 'name'],
'pigcmd': ['script', 'script_location', 'parameters', 'tags', 'cluster_label',
'name'],
'dbtapquerycmd': ['db_tap_id', 'query', 'macros', 'tags', 'name'],
'sparkcmd': ['program', 'cmdline', 'sql', 'script_location', 'macros', 'tags',
'cluster_label', 'language', 'app_id', 'name', 'arguments', 'note_id',
'user_program_arguments'],
'dbexportcmd': ['mode', 'hive_table', 'partition_spec', 'dbtap_id', 'db_table',
'db_update_mode', 'db_update_keys', 'export_dir',
'fields_terminated_by', 'tags', 'name'],
'dbimportcmd': ['mode', 'hive_table', 'dbtap_id', 'db_table', 'where_clause',
'parallelism', 'extract_query', 'boundary_query', 'split_column',
'tags', 'name']
}
class QuboleHook(BaseHook, LoggingMixin):
def __init__(self, *args, **kwargs):
conn = self.get_connection(kwargs['qubole_conn_id'])
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.task_id = kwargs['task_id']
self.dag_id = kwargs['dag'].dag_id
self.kwargs = kwargs
self.cls = COMMAND_CLASSES[self.kwargs['command_type']]
self.cmd = None
@staticmethod
def handle_failure_retry(context):
ti = context['ti']
cmd_id = ti.xcom_pull(key='qbol_cmd_id', task_ids=ti.task_id)
if cmd_id is not None:
cmd = Command.find(cmd_id)
if cmd is not None:
log = LoggingMixin().logger
if cmd.status == 'done':
log.info('Command ID: %s has been succeeded, hence marking this '
'TI as Success.', cmd_id)
ti.state = State.SUCCESS
elif cmd.status == 'running':
log.info('Cancelling the Qubole Command Id: %s', cmd_id)
cmd.cancel()
def execute(self, context):
args = self.cls.parse(self.create_cmd_args(context))
self.cmd = self.cls.create(**args)
context['task_instance'].xcom_push(key='qbol_cmd_id', value=self.cmd.id)
self.logger.info(
"Qubole command created with Id: %s and Status: %s",
self.cmd.id, self.cmd.status
)
while not Command.is_done(self.cmd.status):
time.sleep(Qubole.poll_interval)
self.cmd = self.cls.find(self.cmd.id)
self.logger.info("Command Id: %s and Status: %s", self.cmd.id, self.cmd.status)
if 'fetch_logs' in self.kwargs and self.kwargs['fetch_logs'] is True:
self.logger.info("Logs for Command Id: %s \n%s", self.cmd.id, self.cmd.get_log())
if self.cmd.status != 'done':
raise AirflowException('Command Id: {0} failed with Status: {1}'.format(
self.cmd.id, self.cmd.status))
def kill(self, ti):
"""
Kill (cancel) a Qubole command
:param ti: Task Instance of the dag, used to determine the Qubole command id
:return: response from Qubole
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.logger.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the dag, used to determine the Qubole command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(configuration.get('core', 'BASE_LOG_FOLDER'))
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
configuration.mkdir_p(resultpath)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name
def get_log(self, ti):
"""
Get Logs of a command from Qubole
:param ti: Task Instance of the dag, used to determine the Qubole command id
:return: command log as text
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_log_id(self.cls, cmd_id)
def get_jobs_id(self, ti):
"""
Get jobs associated with a Qubole command
:param ti: Task Instance of the dag, used to determine the Qubole command id
:return: Job information associated with the command
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_jobs_id(self.cls, cmd_id)
def create_cmd_args(self, context):
args = []
cmd_type = self.kwargs['command_type']
inplace_args = None
tags = set([self.dag_id, self.task_id, context['run_id']])
for k,v in self.kwargs.items():
if k in COMMAND_ARGS[cmd_type]:
if k in HYPHEN_ARGS:
args.append("--{0}={1}".format(k.replace('_', '-'),v))
elif k in POSITIONAL_ARGS:
inplace_args = v
elif k == 'tags':
if isinstance(v, six.string_types):
tags.add(v)
elif isinstance(v, (list, tuple)):
for val in v:
tags.add(val)
else:
args.append("--{0}={1}".format(k,v))
if k == 'notify' and v is True:
args.append("--notify")
args.append("--tags={0}".format(','.join(filter(None,tags))))
if inplace_args is not None:
if cmd_type == 'hadoopcmd':
args += inplace_args.split(' ', 1)
else:
args += inplace_args.split(' ')
return args
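# Illustrative sketch of create_cmd_args() output (task values below are hypothetical,
# not taken from a real DAG): for command_type='hivecmd' with
#   kwargs = {'query': 'show tables', 'cluster_label': 'default', 'tags': 'etl', 'notify': True}
# the generated argument list looks roughly like
#   ['--query=show tables', '--cluster-label=default', '--notify',
#    '--tags=etl,<dag_id>,<task_id>,<run_id>']
# i.e. HYPHEN_ARGS are hyphenated, POSITIONAL_ARGS are appended verbatim, and the
# dag/task/run identifiers are always merged into --tags (tag order is not guaranteed).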
|
py
|
1a5e7ff73548186fdd3d47c0249e1cb88d2c3b56
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import asyncio
import contextvars
import json
import logging
import random
import sys
import time
import types
from collections import Counter, OrderedDict
from copy import deepcopy
from enum import Enum
from functools import total_ordering
from os.path import commonprefix
import ijson
from esrally import exceptions, track
# Mapping from operation type to specific runner
__RUNNERS = {}
def register_default_runners():
register_runner(track.OperationType.Bulk, BulkIndex(), async_runner=True)
register_runner(track.OperationType.ForceMerge, ForceMerge(), async_runner=True)
register_runner(track.OperationType.IndexStats, Retry(IndicesStats()), async_runner=True)
register_runner(track.OperationType.NodeStats, NodeStats(), async_runner=True)
register_runner(track.OperationType.Search, Query(), async_runner=True)
register_runner(track.OperationType.RawRequest, RawRequest(), async_runner=True)
register_runner(track.OperationType.Composite, Composite(), async_runner=True)
register_runner(track.OperationType.SubmitAsyncSearch, SubmitAsyncSearch(), async_runner=True)
register_runner(track.OperationType.GetAsyncSearch, Retry(GetAsyncSearch(), retry_until_success=True), async_runner=True)
register_runner(track.OperationType.DeleteAsyncSearch, DeleteAsyncSearch(), async_runner=True)
# This is an administrative operation but there is no need for a retry here as we don't issue a request
register_runner(track.OperationType.Sleep, Sleep(), async_runner=True)
# these requests should not be retried as they are not idempotent
register_runner(track.OperationType.CreateSnapshot, CreateSnapshot(), async_runner=True)
register_runner(track.OperationType.RestoreSnapshot, RestoreSnapshot(), async_runner=True)
# We treat the following as administrative commands and thus already start to wrap them in a retry.
register_runner(track.OperationType.ClusterHealth, Retry(ClusterHealth()), async_runner=True)
register_runner(track.OperationType.PutPipeline, Retry(PutPipeline()), async_runner=True)
register_runner(track.OperationType.Refresh, Retry(Refresh()), async_runner=True)
register_runner(track.OperationType.CreateIndex, Retry(CreateIndex()), async_runner=True)
register_runner(track.OperationType.DeleteIndex, Retry(DeleteIndex()), async_runner=True)
register_runner(track.OperationType.CreateComponentTemplate, Retry(CreateComponentTemplate()), async_runner=True)
register_runner(track.OperationType.DeleteComponentTemplate, Retry(DeleteComponentTemplate()), async_runner=True)
register_runner(track.OperationType.CreateComposableTemplate, Retry(CreateComposableTemplate()), async_runner=True)
register_runner(track.OperationType.DeleteComposableTemplate, Retry(DeleteComposableTemplate()), async_runner=True)
register_runner(track.OperationType.CreateDataStream, Retry(CreateDataStream()), async_runner=True)
register_runner(track.OperationType.DeleteDataStream, Retry(DeleteDataStream()), async_runner=True)
register_runner(track.OperationType.CreateIndexTemplate, Retry(CreateIndexTemplate()), async_runner=True)
register_runner(track.OperationType.DeleteIndexTemplate, Retry(DeleteIndexTemplate()), async_runner=True)
register_runner(track.OperationType.ShrinkIndex, Retry(ShrinkIndex()), async_runner=True)
register_runner(track.OperationType.CreateMlDatafeed, Retry(CreateMlDatafeed()), async_runner=True)
register_runner(track.OperationType.DeleteMlDatafeed, Retry(DeleteMlDatafeed()), async_runner=True)
register_runner(track.OperationType.StartMlDatafeed, Retry(StartMlDatafeed()), async_runner=True)
register_runner(track.OperationType.StopMlDatafeed, Retry(StopMlDatafeed()), async_runner=True)
register_runner(track.OperationType.CreateMlJob, Retry(CreateMlJob()), async_runner=True)
register_runner(track.OperationType.DeleteMlJob, Retry(DeleteMlJob()), async_runner=True)
register_runner(track.OperationType.OpenMlJob, Retry(OpenMlJob()), async_runner=True)
register_runner(track.OperationType.CloseMlJob, Retry(CloseMlJob()), async_runner=True)
register_runner(track.OperationType.DeleteSnapshotRepository, Retry(DeleteSnapshotRepository()), async_runner=True)
register_runner(track.OperationType.CreateSnapshotRepository, Retry(CreateSnapshotRepository()), async_runner=True)
register_runner(track.OperationType.WaitForSnapshotCreate, Retry(WaitForSnapshotCreate()), async_runner=True)
register_runner(track.OperationType.WaitForRecovery, Retry(IndicesRecovery()), async_runner=True)
register_runner(track.OperationType.PutSettings, Retry(PutSettings()), async_runner=True)
register_runner(track.OperationType.CreateTransform, Retry(CreateTransform()), async_runner=True)
register_runner(track.OperationType.StartTransform, Retry(StartTransform()), async_runner=True)
register_runner(track.OperationType.WaitForTransform, Retry(WaitForTransform()), async_runner=True)
register_runner(track.OperationType.DeleteTransform, Retry(DeleteTransform()), async_runner=True)
def runner_for(operation_type):
try:
return __RUNNERS[operation_type]
except KeyError:
raise exceptions.RallyError("No runner available for operation type [%s]" % operation_type)
def enable_assertions(enabled):
"""
Changes whether assertions are enabled. The status changes for all tasks that are executed after this call.
:param enabled: ``True`` to enable assertions, ``False`` to disable them.
"""
AssertingRunner.assertions_enabled = enabled
def register_runner(operation_type, runner, **kwargs):
logger = logging.getLogger(__name__)
async_runner = kwargs.get("async_runner", False)
if isinstance(operation_type, track.OperationType):
operation_type = operation_type.to_hyphenated_string()
if not async_runner:
raise exceptions.RallyAssertionError(
"Runner [{}] must be implemented as async runner and registered with async_runner=True.".format(str(runner)))
if getattr(runner, "multi_cluster", False):
if "__aenter__" in dir(runner) and "__aexit__" in dir(runner):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner object [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _multi_cluster_runner(runner, str(runner), context_manager_enabled=True)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering context-manager capable runner object [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _multi_cluster_runner(runner, str(runner))
# we'd rather use callable() but this will erroneously also classify a class as callable...
elif isinstance(runner, types.FunctionType):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner function [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _single_cluster_runner(runner, runner.__name__)
elif "__aenter__" in dir(runner) and "__aexit__" in dir(runner):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering context-manager capable runner object [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _single_cluster_runner(runner, str(runner), context_manager_enabled=True)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Registering runner object [%s] for [%s].", str(runner), str(operation_type))
cluster_aware_runner = _single_cluster_runner(runner, str(runner))
__RUNNERS[operation_type] = _with_completion(_with_assertions(cluster_aware_runner))
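# Sketch of registering a custom runner (the operation name and function below are
# hypothetical, not part of Rally itself; only async runners are accepted, as enforced above):
#   async def refresh_then_report(es, params):
#       await es.indices.refresh(index=params["index"])
#       return {"weight": 1, "unit": "ops", "success": True}
#   register_runner("refresh-then-report", refresh_then_report, async_runner=True)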
# Only intended for unit-testing!
def remove_runner(operation_type):
del __RUNNERS[operation_type]
class Runner:
"""
Base class for all operations against Elasticsearch.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger = logging.getLogger(__name__)
async def __aenter__(self):
return self
async def __call__(self, es, params):
"""
Runs the actual method that should be benchmarked.
:param args: All arguments that are needed to call this method.
:return: A pair of (int, String). The first component indicates the "weight" of this call. It is typically 1 but for bulk operations
it should be the actual bulk size. The second component is the "unit" of weight which should be "ops" (short for
"operations") by default. If applicable, the unit should always be in plural form. It is used in metrics records
for throughput and reports. A value will then be shown as e.g. "111 ops/s".
"""
raise NotImplementedError("abstract operation")
async def __aexit__(self, exc_type, exc_val, exc_tb):
return False
def _default_kw_params(self, params):
# map of API kwargs to Rally config parameters
kw_dict = {
"body": "body",
"headers": "headers",
"index": "index",
"opaque_id": "opaque-id",
"params": "request-params",
"request_timeout": "request-timeout",
}
full_result = {k: params.get(v) for (k, v) in kw_dict.items()}
# filter Nones
return dict(filter(lambda kv: kv[1] is not None, full_result.items()))
def _transport_request_params(self, params):
request_params = params.get("request-params", {})
request_timeout = params.get("request-timeout")
if request_timeout is not None:
request_params["request_timeout"] = request_timeout
headers = params.get("headers") or {}
opaque_id = params.get("opaque-id")
if opaque_id is not None:
headers.update({"x-opaque-id": opaque_id})
return request_params, headers
class Delegator:
"""
Mixin to unify delegate handling
"""
def __init__(self, delegate, *args, **kwargs):
super().__init__(*args, **kwargs)
self.delegate = delegate
def unwrap(runner):
"""
Unwraps all delegators until the actual runner.
:param runner: An arbitrarily nested chain of delegators around a runner.
:return: The innermost runner.
"""
delegate = getattr(runner, "delegate", None)
if delegate:
return unwrap(delegate)
else:
return runner
def _single_cluster_runner(runnable, name, context_manager_enabled=False):
# only pass the default ES client
return MultiClientRunner(runnable, name, lambda es: es["default"], context_manager_enabled)
def _multi_cluster_runner(runnable, name, context_manager_enabled=False):
# pass all ES clients
return MultiClientRunner(runnable, name, lambda es: es, context_manager_enabled)
def _with_assertions(delegate):
return AssertingRunner(delegate)
def _with_completion(delegate):
unwrapped_runner = unwrap(delegate)
if hasattr(unwrapped_runner, "completed") and hasattr(unwrapped_runner, "percent_completed"):
return WithCompletion(delegate, unwrapped_runner)
else:
return NoCompletion(delegate)
class NoCompletion(Runner, Delegator):
def __init__(self, delegate):
super().__init__(delegate=delegate)
@property
def completed(self):
return None
@property
def percent_completed(self):
return None
async def __call__(self, *args):
return await self.delegate(*args)
def __repr__(self, *args, **kwargs):
return repr(self.delegate)
async def __aenter__(self):
await self.delegate.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
class WithCompletion(Runner, Delegator):
def __init__(self, delegate, progressable):
super().__init__(delegate=delegate)
self.progressable = progressable
@property
def completed(self):
return self.progressable.completed
@property
def percent_completed(self):
return self.progressable.percent_completed
async def __call__(self, *args):
return await self.delegate(*args)
def __repr__(self, *args, **kwargs):
return repr(self.delegate)
async def __aenter__(self):
await self.delegate.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
class MultiClientRunner(Runner, Delegator):
def __init__(self, runnable, name, client_extractor, context_manager_enabled=False):
super().__init__(delegate=runnable)
self.name = name
self.client_extractor = client_extractor
self.context_manager_enabled = context_manager_enabled
async def __call__(self, *args):
return await self.delegate(self.client_extractor(args[0]), *args[1:])
def __repr__(self, *args, **kwargs):
if self.context_manager_enabled:
return "user-defined context-manager enabled runner for [%s]" % self.name
else:
return "user-defined runner for [%s]" % self.name
async def __aenter__(self):
if self.context_manager_enabled:
await self.delegate.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.context_manager_enabled:
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
else:
return False
class AssertingRunner(Runner, Delegator):
assertions_enabled = False
def __init__(self, delegate):
super().__init__(delegate=delegate)
self.predicates = {
">": self.greater_than,
">=": self.greater_than_or_equal,
"<": self.smaller_than,
"<=": self.smaller_than_or_equal,
"==": self.equal,
}
def greater_than(self, expected, actual):
return actual > expected
def greater_than_or_equal(self, expected, actual):
return actual >= expected
def smaller_than(self, expected, actual):
return actual < expected
def smaller_than_or_equal(self, expected, actual):
return actual <= expected
def equal(self, expected, actual):
return actual == expected
def check_assertion(self, op_name, assertion, properties):
path = assertion["property"]
predicate_name = assertion["condition"]
expected_value = assertion["value"]
actual_value = properties
for k in path.split("."):
actual_value = actual_value[k]
predicate = self.predicates[predicate_name]
success = predicate(expected_value, actual_value)
if not success:
if op_name:
msg = f"Expected [{path}] in [{op_name}] to be {predicate_name} [{expected_value}] but was [{actual_value}]."
else:
msg = f"Expected [{path}] to be {predicate_name} [{expected_value}] but was [{actual_value}]."
raise exceptions.RallyTaskAssertionError(msg)
async def __call__(self, *args):
params = args[1]
return_value = await self.delegate(*args)
if AssertingRunner.assertions_enabled and "assertions" in params:
op_name = params.get("name")
if isinstance(return_value, dict):
for assertion in params["assertions"]:
self.check_assertion(op_name, assertion, return_value)
else:
self.logger.debug("Skipping assertion check in [%s] as [%s] does not return a dict.",
op_name, repr(self.delegate))
return return_value
def __repr__(self, *args, **kwargs):
return repr(self.delegate)
async def __aenter__(self):
await self.delegate.__aenter__()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
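# Sketch of the assertion structure consumed by check_assertion() above (the property,
# condition and value are illustrative):
#   params = {"name": "my-search-task",
#             "assertions": [{"property": "hits", "condition": ">", "value": 0}]}
# With assertions enabled, the runner walks the dotted "property" path through the
# dict returned by the delegate runner and applies the named predicate to it.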
def mandatory(params, key, op):
try:
return params[key]
except KeyError:
raise exceptions.DataError(
f"Parameter source for operation '{str(op)}' did not provide the mandatory parameter '{key}'. "
f"Add it to your parameter source and try again.")
# TODO: remove and use https://docs.python.org/3/library/stdtypes.html#str.removeprefix
# once Python 3.9 becomes the minimum version
def remove_prefix(string, prefix):
if string.startswith(prefix):
return string[len(prefix):]
return string
def escape(v):
"""
Escapes values so they can be used as query parameters
:param v: The raw value. May be None.
:return: The escaped value.
"""
if v is None:
return None
elif isinstance(v, bool):
return str(v).lower()
else:
return str(v)
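# Illustrative behaviour of escape() (a sketch of the contract described above):
#   escape(None) -> None
#   escape(True) -> "true"   # booleans are lower-cased so they are valid query parameters
#   escape(10)   -> "10"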
class BulkIndex(Runner):
"""
Bulk indexes the given documents.
"""
async def __call__(self, es, params):
"""
Runs one bulk indexing operation.
:param es: The Elasticsearch client.
:param params: A hash with all parameters. See below for details.
:return: A hash with meta data for this bulk operation. See below for details.
It expects a parameter dict with the following mandatory keys:
* ``body``: containing all documents for the current bulk request.
* ``bulk-size``: An indication of the bulk size denoted in ``unit``.
* ``unit``: The name of the unit in which the bulk size is provided.
* ``action_metadata_present``: if ``True``, assume that an action and metadata line is present (meaning only half of the lines
contain actual documents to index)
* ``index``: The name of the affected index in case ``action_metadata_present`` is ``False``.
* ``type``: The name of the affected type in case ``action_metadata_present`` is ``False``.
The following keys are optional:
* ``pipeline``: If present, runs the specified ingest pipeline for this bulk.
* ``detailed-results``: If ``True``, the runner will analyze the response and add detailed meta-data. Defaults to ``False``. Note
that this has a very significant impact on performance and will very likely cause a bottleneck in the benchmark driver so please
be very cautious enabling this feature. Our own measurements have shown a median overhead of several thousand times (execution time
is in the single digit microsecond range when this feature is disabled and in the single digit millisecond range when this feature
is enabled; numbers based on a bulk size of 500 elements and no errors). For details please refer to the respective benchmarks
in ``benchmarks/driver``.
* ``request-timeout``: a non-negative float indicating the client-side timeout for the operation. If not present, defaults to
``None`` and potentially falls back to the global timeout setting.
"""
detailed_results = params.get("detailed-results", False)
api_kwargs = self._default_kw_params(params)
bulk_params = {}
if "pipeline" in params:
bulk_params["pipeline"] = params["pipeline"]
with_action_metadata = mandatory(params, "action-metadata-present", self)
bulk_size = mandatory(params, "bulk-size", self)
unit = mandatory(params, "unit", self)
# parse responses lazily in the standard case - responses might be large thus parsing skews results and if no
# errors have occurred we only need a small amount of information from the potentially large response.
if not detailed_results:
es.return_raw_response()
if with_action_metadata:
api_kwargs.pop("index", None)
# only half of the lines are documents
response = await es.bulk(params=bulk_params, **api_kwargs)
else:
response = await es.bulk(doc_type=params.get("type"), params=bulk_params, **api_kwargs)
stats = self.detailed_stats(params, response) if detailed_results else self.simple_stats(bulk_size, unit, response)
meta_data = {
"index": params.get("index"),
"weight": bulk_size,
"unit": unit,
}
meta_data.update(stats)
if not stats["success"]:
meta_data["error-type"] = "bulk"
return meta_data
def detailed_stats(self, params, response):
ops = {}
shards_histogram = OrderedDict()
bulk_error_count = 0
bulk_success_count = 0
error_details = set()
bulk_request_size_bytes = 0
total_document_size_bytes = 0
with_action_metadata = mandatory(params, "action-metadata-present", self)
if isinstance(params["body"], str):
bulk_lines = params["body"].split("\n")
elif isinstance(params["body"], list):
bulk_lines = params["body"]
else:
raise exceptions.DataError("bulk body is neither string nor list")
for line_number, data in enumerate(bulk_lines):
line_size = len(data.encode('utf-8'))
if with_action_metadata:
if line_number % 2 == 1:
total_document_size_bytes += line_size
else:
total_document_size_bytes += line_size
bulk_request_size_bytes += line_size
for item in response["items"]:
# there is only one (top-level) item
op, data = next(iter(item.items()))
if op not in ops:
ops[op] = Counter()
ops[op]["item-count"] += 1
if "result" in data:
ops[op][data["result"]] += 1
if "_shards" in data:
s = data["_shards"]
sk = "%d-%d-%d" % (s["total"], s["successful"], s["failed"])
if sk not in shards_histogram:
shards_histogram[sk] = {
"item-count": 0,
"shards": s
}
shards_histogram[sk]["item-count"] += 1
if data["status"] > 299 or ("_shards" in data and data["_shards"]["failed"] > 0):
bulk_error_count += 1
self.extract_error_details(error_details, data)
else:
bulk_success_count += 1
stats = {
"took": response.get("took"),
"success": bulk_error_count == 0,
"success-count": bulk_success_count,
"error-count": bulk_error_count,
"ops": ops,
"shards_histogram": list(shards_histogram.values()),
"bulk-request-size-bytes": bulk_request_size_bytes,
"total-document-size-bytes": total_document_size_bytes
}
if bulk_error_count > 0:
stats["error-type"] = "bulk"
stats["error-description"] = self.error_description(error_details)
if "ingest_took" in response:
stats["ingest_took"] = response["ingest_took"]
return stats
def simple_stats(self, bulk_size, unit, response):
bulk_success_count = bulk_size if unit == "docs" else None
bulk_error_count = 0
error_details = set()
# parse lazily on the fast path
props = parse(response, ["errors", "took"])
if props.get("errors", False):
# determine success count regardless of unit because we need to iterate through all items anyway
bulk_success_count = 0
# Reparse fully in case of errors - this will be slower
parsed_response = json.loads(response.getvalue())
for item in parsed_response["items"]:
data = next(iter(item.values()))
if data["status"] > 299 or ('_shards' in data and data["_shards"]["failed"] > 0):
bulk_error_count += 1
self.extract_error_details(error_details, data)
else:
bulk_success_count += 1
stats = {
"took": props.get("took"),
"success": bulk_error_count == 0,
"success-count": bulk_success_count,
"error-count": bulk_error_count
}
if bulk_error_count > 0:
stats["error-type"] = "bulk"
stats["error-description"] = self.error_description(error_details)
return stats
def extract_error_details(self, error_details, data):
error_data = data.get("error", {})
error_reason = error_data.get("reason") if isinstance(error_data, dict) else str(error_data)
if error_data:
error_details.add((data["status"], error_reason))
else:
error_details.add((data["status"], None))
def error_description(self, error_details):
error_description = ""
for status, reason in error_details:
if reason:
error_description += "HTTP status: %s, message: %s" % (str(status), reason)
else:
error_description += "HTTP status: %s" % str(status)
return error_description
def __repr__(self, *args, **kwargs):
return "bulk-index"
class ForceMerge(Runner):
"""
Runs a force merge operation against Elasticsearch.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
max_num_segments = params.get("max-num-segments")
mode = params.get("mode")
merge_params = self._default_kw_params(params)
if max_num_segments:
merge_params["max_num_segments"] = max_num_segments
if mode == "polling":
complete = False
try:
await es.indices.forcemerge(**merge_params)
complete = True
except elasticsearch.ConnectionTimeout:
pass
while not complete:
await asyncio.sleep(params.get("poll-period"))
tasks = await es.tasks.list(params={"actions": "indices:admin/forcemerge"})
if len(tasks["nodes"]) == 0:
# empty nodes response indicates no tasks
complete = True
else:
await es.indices.forcemerge(**merge_params)
def __repr__(self, *args, **kwargs):
return "force-merge"
class IndicesStats(Runner):
"""
Gather index stats for all indices.
"""
def _get(self, v, path):
if v is None:
return None
elif len(path) == 1:
return v.get(path[0])
else:
return self._get(v.get(path[0]), path[1:])
def _safe_string(self, v):
return str(v) if v is not None else None
async def __call__(self, es, params):
api_kwargs = self._default_kw_params(params)
index = api_kwargs.pop("index", "_all")
condition = params.get("condition")
response = await es.indices.stats(index=index, metric="_all", **api_kwargs)
if condition:
path = mandatory(condition, "path", repr(self))
expected_value = mandatory(condition, "expected-value", repr(self))
actual_value = self._get(response, path.split("."))
return {
"weight": 1,
"unit": "ops",
"condition": {
"path": path,
# avoid mapping issues in the ES metrics store by always rendering values as strings
"actual-value": self._safe_string(actual_value),
"expected-value": self._safe_string(expected_value)
},
# currently we only support "==" as a predicate but that might change in the future
"success": actual_value == expected_value
}
else:
return {
"weight": 1,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "indices-stats"
class NodeStats(Runner):
"""
Gather node stats for all nodes.
"""
async def __call__(self, es, params):
request_timeout = params.get("request-timeout")
await es.nodes.stats(metric="_all", request_timeout=request_timeout)
def __repr__(self, *args, **kwargs):
return "node-stats"
def parse(text, props, lists=None):
"""
    Selectively parses the provided text as JSON, extracting only the properties provided in ``props``. If ``lists`` is
    specified, this function also determines whether the provided lists are empty (the respective value will be ``True``) or
    contain elements (the respective value will be ``False``).
:param text: A text to parse.
:param props: A mandatory list of property paths (separated by a dot character) for which to extract values.
:param lists: An optional list of property paths to JSON lists in the provided text.
:return: A dict containing all properties and lists that have been found in the provided text.
"""
text.seek(0)
parser = ijson.parse(text)
parsed = {}
parsed_lists = {}
current_list = None
expect_end_array = False
try:
for prefix, event, value in parser:
if expect_end_array:
# True if the list is empty, False otherwise
parsed_lists[current_list] = event == "end_array"
expect_end_array = False
if prefix in props:
parsed[prefix] = value
elif lists is not None and prefix in lists and event == "start_array":
current_list = prefix
expect_end_array = True
# found all necessary properties
if len(parsed) == len(props) and (lists is None or len(parsed_lists) == len(lists)):
break
except ijson.IncompleteJSONError:
# did not find all properties
pass
parsed.update(parsed_lists)
return parsed
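# Example of the lazy parser above (illustrative; assumes a file-like input that
# ijson can consume, e.g. ``io.StringIO``):
#
#   import io
#   parse(io.StringIO('{"took": 5, "errors": false, "items": []}'),
#         props=["took", "errors"], lists=["items"])
#   # -> {"took": 5, "errors": False, "items": True}   # True means the list is empty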
class Query(Runner):
"""
Runs a request body search against Elasticsearch.
It expects at least the following keys in the `params` hash:
* `index`: The index or indices against which to issue the query.
* `type`: See `index`
* `cache`: True iff the request cache should be used.
* `body`: Query body
The following parameters are optional:
* `detailed-results` (default: ``False``): Records more detailed meta-data about queries. As it analyzes the
corresponding response in more detail, this might incur additional
overhead which can skew measurement results. This flag is ineffective
for scroll queries (detailed meta-data are always returned).
* ``request-timeout``: a non-negative float indicating the client-side timeout for the operation. If not present,
defaults to ``None`` and potentially falls back to the global timeout setting.
If the following parameters are present in addition, a scroll query will be issued:
    * `pages`: Maximum number of pages to retrieve for this scroll. If the scroll query yields fewer results than the specified
      number of pages, we terminate earlier.
* `results-per-page`: Number of results to retrieve per page.
"""
async def __call__(self, es, params):
if "pages" in params and "results-per-page" in params:
return await self.scroll_query(es, params)
else:
return await self.request_body_query(es, params)
async def request_body_query(self, es, params):
request_params, headers = self._transport_request_params(params)
# Mandatory to ensure it is always provided. This is especially important when this runner is used in a
# composite context where there is no actual parameter source and the entire request structure must be provided
# by the composite's parameter source.
index = mandatory(params, "index", self)
body = mandatory(params, "body", self)
doc_type = params.get("type")
detailed_results = params.get("detailed-results", False)
encoding_header = self._query_headers(params)
if encoding_header is not None:
headers.update(encoding_header)
cache = params.get("cache")
if cache is not None:
request_params["request_cache"] = str(cache).lower()
if not bool(headers):
# counter-intuitive but preserves prior behavior
headers = None
# disable eager response parsing - responses might be huge thus skewing results
es.return_raw_response()
r = await self._raw_search(es, doc_type, index, body, request_params, headers=headers)
if detailed_results:
props = parse(r, ["hits.total", "hits.total.value", "hits.total.relation", "timed_out", "took"])
hits_total = props.get("hits.total.value", props.get("hits.total", 0))
hits_relation = props.get("hits.total.relation", "eq")
timed_out = props.get("timed_out", False)
took = props.get("took", 0)
return {
"weight": 1,
"unit": "ops",
"success": True,
"hits": hits_total,
"hits_relation": hits_relation,
"timed_out": timed_out,
"took": took
}
else:
return {
"weight": 1,
"unit": "ops",
"success": True
}
async def scroll_query(self, es, params):
request_params, headers = self._transport_request_params(params)
hits = 0
hits_relation = None
retrieved_pages = 0
timed_out = False
took = 0
        # explicitly convert to int so that an invalid value raises an error right away
total_pages = sys.maxsize if params["pages"] == "all" else int(params["pages"])
size = params.get("results-per-page")
encoding_header = self._query_headers(params)
if encoding_header is not None:
headers.update(encoding_header)
scroll_id = None
cache = params.get("cache")
if cache is not None:
request_params["request_cache"] = str(cache).lower()
if not bool(headers):
# counter-intuitive but preserves prior behavior
headers = None
# disable eager response parsing - responses might be huge thus skewing results
es.return_raw_response()
try:
for page in range(total_pages):
if page == 0:
# Mandatory to ensure it is always provided. This is especially important when this runner is used
# in a composite context where there is no actual parameter source and the entire request structure
# must be provided by the composite's parameter source.
index = mandatory(params, "index", self)
body = mandatory(params, "body", self)
sort = "_doc"
scroll = "10s"
doc_type = params.get("type")
params = request_params.copy()
params["sort"] = sort
params["scroll"] = scroll
params["size"] = size
r = await self._raw_search(es, doc_type, index, body, params, headers=headers)
props = parse(r,
["_scroll_id", "hits.total", "hits.total.value", "hits.total.relation", "timed_out", "took"],
["hits.hits"])
scroll_id = props.get("_scroll_id")
hits = props.get("hits.total.value", props.get("hits.total", 0))
hits_relation = props.get("hits.total.relation", "eq")
timed_out = props.get("timed_out", False)
took = props.get("took", 0)
all_results_collected = (size is not None and hits < size) or hits == 0
else:
r = await es.transport.perform_request("GET", "/_search/scroll",
body={"scroll_id": scroll_id, "scroll": "10s"},
params=request_params,
headers=headers)
props = parse(r, ["hits.total", "hits.total.value", "hits.total.relation", "timed_out", "took"], ["hits.hits"])
timed_out = timed_out or props.get("timed_out", False)
took += props.get("took", 0)
                    # parse() reports True for "hits.hits" when the list is empty, i.e. all results have been collected
                    all_results_collected = props.get("hits.hits", False)
retrieved_pages += 1
if all_results_collected:
break
finally:
if scroll_id:
# noinspection PyBroadException
try:
await es.clear_scroll(body={"scroll_id": [scroll_id]})
except BaseException:
self.logger.exception("Could not clear scroll [%s]. This will lead to excessive resource usage in "
"Elasticsearch and will skew your benchmark results.", scroll_id)
return {
"weight": retrieved_pages,
"pages": retrieved_pages,
"hits": hits,
"hits_relation": hits_relation,
"unit": "pages",
"timed_out": timed_out,
"took": took
}
async def _raw_search(self, es, doc_type, index, body, params, headers=None):
components = []
if index:
components.append(index)
if doc_type:
components.append(doc_type)
components.append("_search")
path = "/".join(components)
return await es.transport.perform_request("GET", "/" + path, params=params, body=body, headers=headers)
def _query_headers(self, params):
# reduces overhead due to decompression of very large responses
if params.get("response-compression-enabled", True):
return None
else:
return {"Accept-Encoding": "identity"}
def __repr__(self, *args, **kwargs):
return "query"
class ClusterHealth(Runner):
"""
Get cluster health
"""
async def __call__(self, es, params):
@total_ordering
class ClusterHealthStatus(Enum):
UNKNOWN = 0
RED = 1
YELLOW = 2
GREEN = 3
def __lt__(self, other):
if self.__class__ is other.__class__:
# pylint: disable=comparison-with-callable
return self.value < other.value
return NotImplemented
def status(v):
try:
return ClusterHealthStatus[v.upper()]
except (KeyError, AttributeError):
return ClusterHealthStatus.UNKNOWN
request_params = params.get("request-params", {})
api_kw_params = self._default_kw_params(params)
# by default, Elasticsearch will not wait and thus we treat this as success
expected_cluster_status = request_params.get("wait_for_status", str(ClusterHealthStatus.UNKNOWN))
# newer ES versions >= 5.0
if "wait_for_no_relocating_shards" in request_params:
expected_relocating_shards = 0
else:
# older ES versions
# either the user has defined something or we're good with any count of relocating shards.
expected_relocating_shards = int(request_params.get("wait_for_relocating_shards", sys.maxsize))
result = await es.cluster.health(**api_kw_params)
cluster_status = result["status"]
relocating_shards = result["relocating_shards"]
return {
"weight": 1,
"unit": "ops",
"success": status(cluster_status) >= status(expected_cluster_status) and relocating_shards <= expected_relocating_shards,
"cluster-status": cluster_status,
"relocating-shards": relocating_shards
}
def __repr__(self, *args, **kwargs):
return "cluster-health"
class PutPipeline(Runner):
"""
Execute the `put pipeline API <https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html>`_. Note that this
API is only available from Elasticsearch 5.0 onwards.
"""
async def __call__(self, es, params):
await es.ingest.put_pipeline(id=mandatory(params, "id", self),
body=mandatory(params, "body", self),
master_timeout=params.get("master-timeout"),
timeout=params.get("timeout"),
)
def __repr__(self, *args, **kwargs):
return "put-pipeline"
class Refresh(Runner):
"""
Execute the `refresh API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html>`_.
"""
async def __call__(self, es, params):
await es.indices.refresh(index=params.get("index", "_all"))
def __repr__(self, *args, **kwargs):
return "refresh"
class CreateIndex(Runner):
"""
Execute the `create index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-index.html>`_.
"""
async def __call__(self, es, params):
indices = mandatory(params, "indices", self)
api_params = self._default_kw_params(params)
        # remove index and body from the keyword args; they are passed explicitly per index below
for term in ["index", "body"]:
api_params.pop(term, None)
for index, body in indices:
await es.indices.create(index=index, body=body, **api_params)
return {
"weight": len(indices),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-index"
class CreateDataStream(Runner):
"""
Execute the `create data stream API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-create-data-stream.html>`_.
"""
async def __call__(self, es, params):
data_streams = mandatory(params, "data-streams", self)
request_params = mandatory(params, "request-params", self)
for data_stream in data_streams:
await es.indices.create_data_stream(data_stream, params=request_params)
return {
"weight": len(data_streams),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-data-stream"
class DeleteIndex(Runner):
"""
Execute the `delete index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html>`_.
"""
async def __call__(self, es, params):
ops = 0
indices = mandatory(params, "indices", self)
only_if_exists = params.get("only-if-exists", False)
request_params = params.get("request-params", {})
for index_name in indices:
if not only_if_exists:
await es.indices.delete(index=index_name, params=request_params)
ops += 1
elif only_if_exists and await es.indices.exists(index=index_name):
self.logger.info("Index [%s] already exists. Deleting it.", index_name)
await es.indices.delete(index=index_name, params=request_params)
ops += 1
return {
"weight": ops,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-index"
class DeleteDataStream(Runner):
"""
Execute the `delete data stream API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-data-stream.html>`_.
"""
async def __call__(self, es, params):
ops = 0
data_streams = mandatory(params, "data-streams", self)
only_if_exists = mandatory(params, "only-if-exists", self)
request_params = mandatory(params, "request-params", self)
for data_stream in data_streams:
if not only_if_exists:
await es.indices.delete_data_stream(data_stream, ignore=[404], params=request_params)
ops += 1
elif only_if_exists and await es.indices.exists(index=data_stream):
self.logger.info("Data stream [%s] already exists. Deleting it.", data_stream)
await es.indices.delete_data_stream(data_stream, params=request_params)
ops += 1
return {
"weight": ops,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-data-stream"
class CreateComponentTemplate(Runner):
"""
Execute the `PUT component template API
<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-component-template.html>`_.
"""
async def __call__(self, es, params):
templates = mandatory(params, "templates", self)
request_params = mandatory(params, "request-params", self)
for template, body in templates:
await es.cluster.put_component_template(name=template, body=body,
params=request_params)
return {
"weight": len(templates),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-component-template"
class DeleteComponentTemplate(Runner):
"""
Execute the `DELETE component template API
<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-component-template.html>`_.
"""
async def __call__(self, es, params):
template_names = mandatory(params, "templates", self)
only_if_exists = mandatory(params, "only-if-exists", self)
request_params = mandatory(params, "request-params", self)
async def _exists(name):
# pylint: disable=import-outside-toplevel
from elasticsearch.client import _make_path
# currently not supported by client and hence custom request
return await es.transport.perform_request(
"HEAD", _make_path("_component_template", name)
)
ops_count = 0
for template_name in template_names:
if not only_if_exists:
await es.cluster.delete_component_template(name=template_name, params=request_params, ignore=[404])
ops_count += 1
elif only_if_exists and await _exists(template_name):
self.logger.info("Component Index template [%s] already exists. Deleting it.", template_name)
await es.cluster.delete_component_template(name=template_name, params=request_params)
ops_count += 1
return {
"weight": ops_count,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-component-template"
class CreateComposableTemplate(Runner):
"""
Execute the `PUT index template API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-put-template.html>`_.
"""
async def __call__(self, es, params):
templates = mandatory(params, "templates", self)
request_params = mandatory(params, "request-params", self)
for template, body in templates:
await es.cluster.put_index_template(name=template, body=body, params=request_params)
return {
"weight": len(templates),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-composable-template"
class DeleteComposableTemplate(Runner):
"""
    Execute the `delete index template API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-template.html>`_.
"""
async def __call__(self, es, params):
templates = mandatory(params, "templates", self)
only_if_exists = mandatory(params, "only-if-exists", self)
request_params = mandatory(params, "request-params", self)
ops_count = 0
for template_name, delete_matching_indices, index_pattern in templates:
if not only_if_exists:
await es.indices.delete_index_template(name=template_name, params=request_params, ignore=[404])
ops_count += 1
elif only_if_exists and await es.indices.exists_template(template_name):
self.logger.info("Composable Index template [%s] already exists. Deleting it.", template_name)
await es.indices.delete_index_template(name=template_name, params=request_params)
ops_count += 1
# ensure that we do not provide an empty index pattern by accident
if delete_matching_indices and index_pattern:
await es.indices.delete(index=index_pattern)
ops_count += 1
return {
"weight": ops_count,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-composable-template"
class CreateIndexTemplate(Runner):
"""
Execute the `PUT index template API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html>`_.
"""
async def __call__(self, es, params):
templates = mandatory(params, "templates", self)
request_params = params.get("request-params", {})
for template, body in templates:
await es.indices.put_template(name=template,
body=body,
params=request_params)
return {
"weight": len(templates),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "create-index-template"
class DeleteIndexTemplate(Runner):
"""
Execute the `delete index template API
<https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#delete>`_.
"""
async def __call__(self, es, params):
template_names = mandatory(params, "templates", self)
only_if_exists = params.get("only-if-exists", False)
request_params = params.get("request-params", {})
ops_count = 0
for template_name, delete_matching_indices, index_pattern in template_names:
if not only_if_exists:
await es.indices.delete_template(name=template_name, params=request_params)
ops_count += 1
elif only_if_exists and await es.indices.exists_template(template_name):
self.logger.info("Index template [%s] already exists. Deleting it.", template_name)
await es.indices.delete_template(name=template_name, params=request_params)
ops_count += 1
# ensure that we do not provide an empty index pattern by accident
if delete_matching_indices and index_pattern:
await es.indices.delete(index=index_pattern)
ops_count += 1
return {
"weight": ops_count,
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "delete-index-template"
class ShrinkIndex(Runner):
"""
Execute the `shrink index API <https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html>`_.
This is a high-level runner that actually executes multiple low-level operations under the hood.
"""
def __init__(self):
super().__init__()
self.cluster_health = Retry(ClusterHealth())
async def _wait_for(self, es, idx, description):
# wait a little bit before the first check
await asyncio.sleep(3)
result = await self.cluster_health(es, params={
"index": idx,
"retries": sys.maxsize,
"request-params": {
"wait_for_no_relocating_shards": "true"
}
})
if not result["success"]:
raise exceptions.RallyAssertionError("Failed to wait for [{}].".format(description))
async def __call__(self, es, params):
source_index = mandatory(params, "source-index", self)
source_indices_get = await es.indices.get(source_index)
source_indices = list(source_indices_get.keys())
source_indices_stem = commonprefix(source_indices)
target_index = mandatory(params, "target-index", self)
# we need to inject additional settings so we better copy the body
target_body = deepcopy(mandatory(params, "target-body", self))
shrink_node = params.get("shrink-node")
# Choose a random data node if none is specified
if shrink_node:
node_names = [shrink_node]
else:
node_names = []
# choose a random data node
node_info = await es.nodes.info()
for node in node_info["nodes"].values():
if "data" in node["roles"]:
node_names.append(node["name"])
if not node_names:
raise exceptions.RallyAssertionError("Could not choose a suitable shrink-node automatically. Specify it explicitly.")
for source_index in source_indices:
shrink_node = random.choice(node_names)
self.logger.info("Using [%s] as shrink node.", shrink_node)
self.logger.info("Preparing [%s] for shrinking.", source_index)
# prepare index for shrinking
await es.indices.put_settings(index=source_index,
body={
"settings": {
"index.routing.allocation.require._name": shrink_node,
"index.blocks.write": "true"
}
},
preserve_existing=True)
self.logger.info("Waiting for relocation to finish for index [%s] ...", source_index)
await self._wait_for(es, source_index, f"shard relocation for index [{source_index}]")
self.logger.info("Shrinking [%s] to [%s].", source_index, target_index)
if "settings" not in target_body:
target_body["settings"] = {}
target_body["settings"]["index.routing.allocation.require._name"] = None
target_body["settings"]["index.blocks.write"] = None
# kick off the shrink operation
index_suffix = remove_prefix(source_index, source_indices_stem)
final_target_index = target_index if len(index_suffix) == 0 else target_index+index_suffix
await es.indices.shrink(index=source_index, target=final_target_index, body=target_body)
self.logger.info("Waiting for shrink to finish for index [%s] ...", source_index)
await self._wait_for(es, final_target_index, f"shrink for index [{final_target_index}]")
self.logger.info("Shrinking [%s] to [%s] has finished.", source_index, final_target_index)
# ops_count is not really important for this operation...
return {
"weight": len(source_indices),
"unit": "ops",
"success": True
}
def __repr__(self, *args, **kwargs):
return "shrink-index"
class CreateMlDatafeed(Runner):
"""
Execute the `create datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
datafeed_id = mandatory(params, "datafeed-id", self)
body = mandatory(params, "body", self)
try:
await es.xpack.ml.put_datafeed(datafeed_id=datafeed_id, body=body)
except elasticsearch.TransportError as e:
# fallback to old path
if e.status_code == 400:
await es.transport.perform_request(
"PUT",
f"/_xpack/ml/datafeeds/{datafeed_id}",
body=body,
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "create-ml-datafeed"
class DeleteMlDatafeed(Runner):
"""
Execute the `delete datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
datafeed_id = mandatory(params, "datafeed-id", self)
force = params.get("force", False)
try:
# we don't want to fail if a datafeed does not exist, thus we ignore 404s.
await es.xpack.ml.delete_datafeed(datafeed_id=datafeed_id, force=force, ignore=[404])
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"DELETE",
f"/_xpack/ml/datafeeds/{datafeed_id}",
params={
"force": escape(force),
"ignore": 404
},
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "delete-ml-datafeed"
class StartMlDatafeed(Runner):
"""
Execute the `start datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
datafeed_id = mandatory(params, "datafeed-id", self)
body = params.get("body")
start = params.get("start")
end = params.get("end")
timeout = params.get("timeout")
try:
await es.xpack.ml.start_datafeed(datafeed_id=datafeed_id, body=body, start=start, end=end, timeout=timeout)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"POST",
f"/_xpack/ml/datafeeds/{datafeed_id}/_start",
body=body,
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "start-ml-datafeed"
class StopMlDatafeed(Runner):
"""
Execute the `stop datafeed API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
datafeed_id = mandatory(params, "datafeed-id", self)
force = params.get("force", False)
timeout = params.get("timeout")
try:
await es.xpack.ml.stop_datafeed(datafeed_id=datafeed_id, force=force, timeout=timeout)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
request_params = {
"force": escape(force),
}
if timeout:
request_params["timeout"] = escape(timeout)
await es.transport.perform_request(
"POST",
f"/_xpack/ml/datafeeds/{datafeed_id}/_stop",
params=request_params
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "stop-ml-datafeed"
class CreateMlJob(Runner):
"""
Execute the `create job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
job_id = mandatory(params, "job-id", self)
body = mandatory(params, "body", self)
try:
await es.xpack.ml.put_job(job_id=job_id, body=body)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"PUT",
f"/_xpack/ml/anomaly_detectors/{job_id}",
body=body,
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "create-ml-job"
class DeleteMlJob(Runner):
"""
Execute the `delete job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-job.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
job_id = mandatory(params, "job-id", self)
force = params.get("force", False)
# we don't want to fail if a job does not exist, thus we ignore 404s.
try:
await es.xpack.ml.delete_job(job_id=job_id, force=force, ignore=[404])
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"DELETE",
f"/_xpack/ml/anomaly_detectors/{job_id}",
params={
"force": escape(force),
"ignore": 404
},
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "delete-ml-job"
class OpenMlJob(Runner):
"""
Execute the `open job API <https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-open-job.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
job_id = mandatory(params, "job-id", self)
try:
await es.xpack.ml.open_job(job_id=job_id)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
await es.transport.perform_request(
"POST",
f"/_xpack/ml/anomaly_detectors/{job_id}/_open",
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "open-ml-job"
class CloseMlJob(Runner):
"""
Execute the `close job API <http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-close-job.html>`_.
"""
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
job_id = mandatory(params, "job-id", self)
force = params.get("force", False)
timeout = params.get("timeout")
try:
await es.xpack.ml.close_job(job_id=job_id, force=force, timeout=timeout)
except elasticsearch.TransportError as e:
# fallback to old path (ES < 7)
if e.status_code == 400:
request_params = {
"force": escape(force),
}
if timeout:
request_params["timeout"] = escape(timeout)
await es.transport.perform_request(
"POST",
f"/_xpack/ml/anomaly_detectors/{job_id}/_close",
params=request_params,
)
else:
raise e
def __repr__(self, *args, **kwargs):
return "close-ml-job"
class RawRequest(Runner):
async def __call__(self, es, params):
request_params, headers = self._transport_request_params(params)
if "ignore" in params:
request_params["ignore"] = params["ignore"]
path = mandatory(params, "path", self)
if not path.startswith("/"):
self.logger.error("RawRequest failed. Path parameter: [%s] must begin with a '/'.", path)
raise exceptions.RallyAssertionError(f"RawRequest [{path}] failed. Path parameter must begin with a '/'.")
if not bool(headers):
            # counter-intuitive, but preserves prior behavior
headers = None
await es.transport.perform_request(method=params.get("method", "GET"),
url=path,
headers=headers,
body=params.get("body"),
params=request_params)
def __repr__(self, *args, **kwargs):
return "raw-request"
class Sleep(Runner):
"""
    Sleeps for the specified duration without issuing any request.
"""
async def __call__(self, es, params):
es.on_request_start()
try:
await asyncio.sleep(mandatory(params, "duration", "sleep"))
finally:
es.on_request_end()
def __repr__(self, *args, **kwargs):
return "sleep"
class DeleteSnapshotRepository(Runner):
"""
Deletes a snapshot repository
"""
async def __call__(self, es, params):
await es.snapshot.delete_repository(repository=mandatory(params, "repository", repr(self)))
def __repr__(self, *args, **kwargs):
return "delete-snapshot-repository"
class CreateSnapshotRepository(Runner):
"""
Creates a new snapshot repository
"""
async def __call__(self, es, params):
request_params = params.get("request-params", {})
await es.snapshot.create_repository(repository=mandatory(params, "repository", repr(self)),
body=mandatory(params, "body", repr(self)),
params=request_params)
def __repr__(self, *args, **kwargs):
return "create-snapshot-repository"
class CreateSnapshot(Runner):
"""
    Creates a new snapshot in an already registered repository
"""
async def __call__(self, es, params):
wait_for_completion = params.get("wait-for-completion", False)
repository = mandatory(params, "repository", repr(self))
snapshot = mandatory(params, "snapshot", repr(self))
# just assert, gets set in _default_kw_params
mandatory(params, "body", repr(self))
api_kwargs = self._default_kw_params(params)
await es.snapshot.create(repository=repository,
snapshot=snapshot,
wait_for_completion=wait_for_completion,
**api_kwargs)
def __repr__(self, *args, **kwargs):
return "create-snapshot"
class WaitForSnapshotCreate(Runner):
async def __call__(self, es, params):
repository = mandatory(params, "repository", repr(self))
snapshot = mandatory(params, "snapshot", repr(self))
wait_period = params.get("completion-recheck-wait-period", 1)
snapshot_done = False
stats = {}
while not snapshot_done:
response = await es.snapshot.status(repository=repository,
snapshot=snapshot,
ignore_unavailable=True)
if "snapshots" in response:
response_state = response["snapshots"][0]["state"]
# Possible states:
# https://www.elastic.co/guide/en/elasticsearch/reference/current/get-snapshot-status-api.html#get-snapshot-status-api-response-body
if response_state == "FAILED":
self.logger.error("Snapshot [%s] failed. Response:\n%s", snapshot, json.dumps(response, indent=2))
raise exceptions.RallyAssertionError(f"Snapshot [{snapshot}] failed. Please check logs.")
snapshot_done = response_state == "SUCCESS"
stats = response["snapshots"][0]["stats"]
if not snapshot_done:
await asyncio.sleep(wait_period)
size = stats["total"]["size_in_bytes"]
file_count = stats["total"]["file_count"]
start_time_in_millis = stats["start_time_in_millis"]
duration_in_millis = stats["time_in_millis"]
duration_in_seconds = duration_in_millis / 1000
return {
"weight": size,
"unit": "byte",
"success": True,
"throughput": size / duration_in_seconds,
"start_time_millis": start_time_in_millis,
"stop_time_millis": start_time_in_millis + duration_in_millis,
"duration": duration_in_millis,
"file_count": file_count
}
def __repr__(self, *args, **kwargs):
return "wait-for-snapshot-create"
class RestoreSnapshot(Runner):
"""
Restores a snapshot from an already registered repository
"""
async def __call__(self, es, params):
api_kwargs = self._default_kw_params(params)
await es.snapshot.restore(repository=mandatory(params, "repository", repr(self)),
snapshot=mandatory(params, "snapshot", repr(self)),
wait_for_completion=params.get("wait-for-completion", False),
**api_kwargs)
def __repr__(self, *args, **kwargs):
return "restore-snapshot"
class IndicesRecovery(Runner):
async def __call__(self, es, params):
index = mandatory(params, "index", repr(self))
wait_period = params.get("completion-recheck-wait-period", 1)
all_shards_done = False
total_recovered = 0
total_start_millis = sys.maxsize
total_end_millis = 0
# wait until recovery is done
# The nesting level is ok here given the structure of the API response
# pylint: disable=too-many-nested-blocks
while not all_shards_done:
response = await es.indices.recovery(index=index)
            # This might happen if we call the API before the next recovery is scheduled.
if not response:
self.logger.debug("Empty index recovery response for [%s].", index)
else:
# check whether all shards are done
all_shards_done = True
total_recovered = 0
total_start_millis = sys.maxsize
total_end_millis = 0
for _, idx_data in response.items():
for _, shard_data in idx_data.items():
for shard in shard_data:
current_shard_done = shard["stage"] == "DONE"
all_shards_done = all_shards_done and current_shard_done
if current_shard_done:
total_start_millis = min(total_start_millis, shard["start_time_in_millis"])
total_end_millis = max(total_end_millis, shard["stop_time_in_millis"])
idx_size = shard["index"]["size"]
total_recovered += idx_size["recovered_in_bytes"]
self.logger.debug("All shards done for [%s]: [%s].", index, all_shards_done)
if not all_shards_done:
await asyncio.sleep(wait_period)
response_time_in_seconds = (total_end_millis - total_start_millis) / 1000
return {
"weight": total_recovered,
"unit": "byte",
"success": True,
"throughput": total_recovered / response_time_in_seconds,
"start_time_millis": total_start_millis,
"stop_time_millis": total_end_millis
}
def __repr__(self, *args, **kwargs):
return "wait-for-recovery"
class PutSettings(Runner):
"""
Updates cluster settings with the
    `cluster settings API <http://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html>`_.
"""
async def __call__(self, es, params):
await es.cluster.put_settings(body=mandatory(params, "body", repr(self)))
def __repr__(self, *args, **kwargs):
return "put-settings"
class CreateTransform(Runner):
"""
    Execute the `create transform API <https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html>`_.
"""
async def __call__(self, es, params):
transform_id = mandatory(params, "transform-id", self)
body = mandatory(params, "body", self)
defer_validation = params.get("defer-validation", False)
await es.transform.put_transform(transform_id=transform_id, body=body, defer_validation=defer_validation)
def __repr__(self, *args, **kwargs):
return "create-transform"
class StartTransform(Runner):
"""
    Execute the `start transform API
    <https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html>`_.
"""
async def __call__(self, es, params):
transform_id = mandatory(params, "transform-id", self)
timeout = params.get("timeout")
await es.transform.start_transform(transform_id=transform_id, timeout=timeout)
def __repr__(self, *args, **kwargs):
return "start-transform"
class WaitForTransform(Runner):
"""
Wait for the transform until it reaches a certain checkpoint.
"""
def __init__(self):
super().__init__()
self._completed = False
self._percent_completed = 0.0
self._start_time = None
self._last_documents_processed = 0
self._last_processing_time = 0
@property
def completed(self):
return self._completed
@property
def percent_completed(self):
return self._percent_completed
async def __call__(self, es, params):
"""
        Stop the transform, wait until it has finished, and return stats.
:param es: The Elasticsearch client.
:param params: A hash with all parameters. See below for details.
:return: A hash with stats from the run.
        In contrast to the `stop transform API
        <https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html>`_, this command will wait
until the transform is stopped and a checkpoint has been reached.
It expects a parameter dict with the following mandatory keys:
        * ``transform-id``: the id of the transform to wait for; the transform must have been created upfront.
The following keys are optional:
* ``force``: forcefully stop a transform, default false
* ``wait-for-checkpoint``: whether to wait until all data has been processed till the next checkpoint, default true
* ``wait-for-completion``: whether to block until the transform has stopped, default true
* ``transform-timeout``: overall runtime timeout of the transform in seconds, default 3600 (1h)
* ``poll-interval``: how often transform stats are polled, used to set progress and check the state, default 0.5.
"""
transform_id = mandatory(params, "transform-id", self)
force = params.get("force", False)
timeout = params.get("timeout")
wait_for_completion = params.get("wait-for-completion", True)
wait_for_checkpoint = params.get("wait-for-checkpoint", True)
transform_timeout = params.get("transform-timeout", 60.0 * 60.0)
poll_interval = params.get("poll-interval", 0.5)
if not self._start_time:
self._start_time = time.monotonic()
await es.transform.stop_transform(transform_id=transform_id,
force=force,
timeout=timeout,
wait_for_completion=False,
wait_for_checkpoint=wait_for_checkpoint)
while True:
stats_response = await es.transform.get_transform_stats(transform_id=transform_id)
state = stats_response["transforms"][0].get("state")
transform_stats = stats_response["transforms"][0].get("stats", {})
if (time.monotonic() - self._start_time) > transform_timeout:
raise exceptions.RallyAssertionError(
f"Transform [{transform_id}] timed out after [{transform_timeout}] seconds. "
"Please consider increasing the timeout in the track.")
if state == "failed":
failure_reason = stats_response["transforms"][0].get("reason", "unknown")
raise exceptions.RallyAssertionError(
f"Transform [{transform_id}] failed with [{failure_reason}].")
elif state == "stopped" or wait_for_completion is False:
self._completed = True
self._percent_completed = 1.0
else:
self._percent_completed = stats_response["transforms"][0].get("checkpointing", {}).get("next", {}).get(
"checkpoint_progress", {}).get("percent_complete", 0.0) / 100.0
documents_processed = transform_stats.get("documents_processed", 0)
processing_time = transform_stats.get("search_time_in_ms", 0)
processing_time += transform_stats.get("processing_time_in_ms", 0)
processing_time += transform_stats.get("index_time_in_ms", 0)
documents_processed_delta = documents_processed - self._last_documents_processed
processing_time_delta = processing_time - self._last_processing_time
# only report if we have enough data or transform has completed
if self._completed or (documents_processed_delta > 5000 and processing_time_delta > 500):
stats = {
"transform-id": transform_id,
"weight": transform_stats.get("documents_processed", 0),
"unit": "docs",
"success": True
}
throughput = 0
if self._completed:
# take the overall throughput
if processing_time > 0:
throughput = documents_processed / processing_time * 1000
elif processing_time_delta > 0:
throughput = documents_processed_delta / processing_time_delta * 1000
stats["throughput"] = throughput
self._last_documents_processed = documents_processed
self._last_processing_time = processing_time
return stats
else:
                # sleep for a while so that stats are not polled too often
await asyncio.sleep(poll_interval)
def __repr__(self, *args, **kwargs):
return "wait-for-transform"
class DeleteTransform(Runner):
"""
    Execute the `delete transform API
    <https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html>`_.
"""
async def __call__(self, es, params):
transform_id = mandatory(params, "transform-id", self)
force = params.get("force", False)
# we don't want to fail if a job does not exist, thus we ignore 404s.
await es.transform.delete_transform(transform_id=transform_id, force=force, ignore=[404])
def __repr__(self, *args, **kwargs):
return "delete-transform"
class SubmitAsyncSearch(Runner):
async def __call__(self, es, params):
request_params = params.get("request-params", {})
response = await es.async_search.submit(body=mandatory(params, "body", self),
index=params.get("index"),
params=request_params)
op_name = mandatory(params, "name", self)
# id may be None if the operation has already returned
search_id = response.get("id")
CompositeContext.put(op_name, search_id)
def __repr__(self, *args, **kwargs):
return "submit-async-search"
def async_search_ids(op_names):
subjects = [op_names] if isinstance(op_names, str) else op_names
for subject in subjects:
subject_id = CompositeContext.get(subject)
# skip empty ids, searches have already completed
if subject_id:
yield subject_id, subject
class GetAsyncSearch(Runner):
async def __call__(self, es, params):
success = True
searches = mandatory(params, "retrieve-results-for", self)
request_params = params.get("request-params", {})
stats = {}
for search_id, search in async_search_ids(searches):
response = await es.async_search.get(id=search_id,
params=request_params)
is_running = response["is_running"]
success = success and not is_running
if not is_running:
stats[search] = {
"hits": response["response"]["hits"]["total"]["value"],
"hits_relation": response["response"]["hits"]["total"]["relation"],
"timed_out": response["response"]["timed_out"],
"took": response["response"]["took"]
}
return {
# only count completed searches - there is one key per search id in `stats`
"weight": len(stats),
"unit": "ops",
"success": success,
"stats": stats
}
def __repr__(self, *args, **kwargs):
return "get-async-search"
class DeleteAsyncSearch(Runner):
async def __call__(self, es, params):
searches = mandatory(params, "delete-results-for", self)
for search_id, search in async_search_ids(searches):
await es.async_search.delete(id=search_id)
CompositeContext.remove(search)
def __repr__(self, *args, **kwargs):
return "delete-async-search"
class CompositeContext:
ctx = contextvars.ContextVar("composite_context")
def __init__(self):
self.token = None
async def __aenter__(self):
self.token = CompositeContext.ctx.set({})
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
CompositeContext.ctx.reset(self.token)
return False
@staticmethod
def put(key, value):
CompositeContext._ctx()[key] = value
@staticmethod
def get(key):
try:
return CompositeContext._ctx()[key]
except KeyError:
raise KeyError(f"Unknown property [{key}]. Currently recognized "
f"properties are [{', '.join(CompositeContext._ctx().keys())}].") from None
@staticmethod
def remove(key):
try:
CompositeContext._ctx().pop(key)
except KeyError:
raise KeyError(f"Unknown property [{key}]. Currently recognized "
f"properties are [{', '.join(CompositeContext._ctx().keys())}].") from None
@staticmethod
def _ctx():
try:
return CompositeContext.ctx.get()
except LookupError:
raise exceptions.RallyAssertionError("This operation is only allowed inside a composite operation.") from None
class Composite(Runner):
"""
Executes a complex request structure which is measured by Rally as one composite operation.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.supported_op_types = [
"raw-request",
"sleep",
"search",
"submit-async-search",
"get-async-search",
"delete-async-search"
]
async def run_stream(self, es, stream, connection_limit):
streams = []
try:
for item in stream:
if "stream" in item:
streams.append(asyncio.create_task(self.run_stream(es, item["stream"], connection_limit)))
elif "operation-type" in item:
# consume all prior streams first
if streams:
await asyncio.gather(*streams)
streams = []
op_type = item["operation-type"]
if op_type not in self.supported_op_types:
raise exceptions.RallyAssertionError(
f"Unsupported operation-type [{op_type}]. Use one of [{', '.join(self.supported_op_types)}].")
runner = runner_for(op_type)
async with connection_limit:
async with runner:
await runner({"default": es}, item)
else:
raise exceptions.RallyAssertionError("Requests structure must contain [stream] or [operation-type].")
except BaseException:
# stop all already created tasks in case of exceptions
for s in streams:
if not s.done():
s.cancel()
raise
# complete any outstanding streams
if streams:
await asyncio.gather(*streams)
async def __call__(self, es, params):
requests = mandatory(params, "requests", self)
max_connections = params.get("max-connections", sys.maxsize)
async with CompositeContext():
await self.run_stream(es, requests, asyncio.BoundedSemaphore(max_connections))
def __repr__(self, *args, **kwargs):
return "composite"
# TODO: Allow to use this from (selected) regular runners and add user documentation.
# TODO: It would maybe be interesting to add meta-data on how many retries there were.
class Retry(Runner, Delegator):
"""
This runner can be used as a wrapper around regular runners to retry operations.
It defines the following parameters:
* ``retries`` (optional, default 0): The number of times the operation is retried.
* ``retry-until-success`` (optional, default False): Retries until the delegate returns a success. This will also
forcibly set ``retry-on-error`` to ``True``.
* ``retry-wait-period`` (optional, default 0.5): The time in seconds to wait after an error.
* ``retry-on-timeout`` (optional, default True): Whether to retry on connection timeout.
* ``retry-on-error`` (optional, default False): Whether to retry on failure (i.e. the delegate
returns ``success == False``)
"""
def __init__(self, delegate, retry_until_success=False):
super().__init__(delegate=delegate)
self.retry_until_success = retry_until_success
async def __aenter__(self):
await self.delegate.__aenter__()
return self
async def __call__(self, es, params):
# pylint: disable=import-outside-toplevel
import elasticsearch
import socket
retry_until_success = params.get("retry-until-success", self.retry_until_success)
if retry_until_success:
max_attempts = sys.maxsize
retry_on_error = True
else:
max_attempts = params.get("retries", 0) + 1
retry_on_error = params.get("retry-on-error", False)
sleep_time = params.get("retry-wait-period", 0.5)
retry_on_timeout = params.get("retry-on-timeout", True)
for attempt in range(max_attempts):
last_attempt = attempt + 1 == max_attempts
try:
return_value = await self.delegate(es, params)
if last_attempt or not retry_on_error:
return return_value
# we can determine success if and only if the runner returns a dict. Otherwise, we have to assume it was fine.
elif isinstance(return_value, dict):
if return_value.get("success", True):
self.logger.debug("%s has returned successfully", repr(self.delegate))
return return_value
else:
self.logger.debug("%s has returned with an error: %s.", repr(self.delegate), return_value)
await asyncio.sleep(sleep_time)
else:
return return_value
except (socket.timeout, elasticsearch.exceptions.ConnectionError):
if last_attempt or not retry_on_timeout:
raise
else:
await asyncio.sleep(sleep_time)
except elasticsearch.exceptions.TransportError as e:
if last_attempt or not retry_on_timeout:
raise e
elif e.status_code == 408:
self.logger.debug("%s has timed out.", repr(self.delegate))
await asyncio.sleep(sleep_time)
else:
raise e
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.delegate.__aexit__(exc_type, exc_val, exc_tb)
def __repr__(self, *args, **kwargs):
return "retryable %s" % repr(self.delegate)
|
py
|
1a5e8124dfc5dcab3fed388bcce2f92e2ac9c92e
|
"""
Support for Insteon fans via local hub control.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/fan.insteon_local/
"""
import logging
from datetime import timedelta
from homeassistant.components.fan import (
ATTR_SPEED, SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH,
SUPPORT_SET_SPEED, FanEntity)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant import util
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['insteon_local']
DOMAIN = 'fan'
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
SUPPORT_INSTEON_LOCAL = SUPPORT_SET_SPEED
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Insteon local fan platform."""
insteonhub = hass.data['insteon_local']
if discovery_info is None:
return
linked = discovery_info['linked']
device_list = []
for device_id in linked:
if (linked[device_id]['cat_type'] == 'dimmer' and
linked[device_id]['sku'] == '2475F'):
device = insteonhub.fan(device_id)
device_list.append(
InsteonLocalFanDevice(device)
)
add_devices(device_list)
class InsteonLocalFanDevice(FanEntity):
"""An abstract Class for an Insteon node."""
def __init__(self, node):
"""Initialize the device."""
self.node = node
self._speed = SPEED_OFF
@property
def name(self):
"""Return the name of the node."""
return self.node.device_id
@property
def unique_id(self):
"""Return the ID of this Insteon node."""
return self.node.device_id
@property
def speed(self) -> str:
"""Return the current speed."""
return self._speed
@property
def speed_list(self: ToggleEntity) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Update state of the fan."""
resp = self.node.status()
if 'cmd2' in resp:
if resp['cmd2'] == '00':
self._speed = SPEED_OFF
elif resp['cmd2'] == '55':
self._speed = SPEED_LOW
elif resp['cmd2'] == 'AA':
self._speed = SPEED_MEDIUM
elif resp['cmd2'] == 'FF':
self._speed = SPEED_HIGH
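        # Note: the cmd2 hex levels handled above (00 / 55 / AA / FF) map to the four
        # fan speeds off / low / medium / high reported by the local hub.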
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_INSTEON_LOCAL
def turn_on(self: ToggleEntity, speed: str = None, **kwargs) -> None:
"""Turn device on."""
if speed is None:
speed = kwargs.get(ATTR_SPEED, SPEED_MEDIUM)
self.set_speed(speed)
def turn_off(self: ToggleEntity, **kwargs) -> None:
"""Turn device off."""
self.node.off()
def set_speed(self: ToggleEntity, speed: str) -> None:
"""Set the speed of the fan."""
if self.node.on(speed):
self._speed = speed
|
py
|
1a5e82269a963d2e149acac722ca2de2f1d1b517
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, wildcard-import
"""A global module storing everything needed to interpret or compile a Relay program."""
from .base import register_relay_node, RelayNode
from .._ffi import base as _base
from . import _make
from . import _module
from . import expr as _expr
from . import ty as _ty
@register_relay_node
class Module(RelayNode):
"""The global Relay module containing collection of functions.
Each global function is identified by an unique tvm.relay.GlobalVar.
tvm.relay.GlobalVar and Module is necessary in order to enable
recursions in function to avoid cyclic reference in the function.x
Parameters
----------
functions: Optional[dict].
Map of global var to Function
"""
def __init__(self, functions=None, type_definitions=None):
if functions is None:
functions = {}
elif isinstance(functions, dict):
mapped_funcs = {}
for k, v in functions.items():
if isinstance(k, _base.string_types):
k = _expr.GlobalVar(k)
if not isinstance(k, _expr.GlobalVar):
raise TypeError("Expect functions to be Dict[GlobalVar, Function]")
mapped_funcs[k] = v
functions = mapped_funcs
if type_definitions is None:
type_definitions = {}
elif isinstance(type_definitions, dict):
mapped_type_defs = {}
for k, v in type_definitions.items():
if isinstance(k, _base.string_types):
k = _ty.GlobalTypeVar(k)
if not isinstance(k, _ty.GlobalTypeVar):
raise TypeError("Expect type_definitions to be Dict[GlobalTypeVar, Type]")
mapped_type_defs[k] = v
type_definitions = mapped_type_defs
self.__init_handle_by_constructor__(_make.Module, functions, type_definitions)
def __setitem__(self, var, val):
"""Add a mapping to the module.
Parameters
---------
var: GlobalVar
The global variable.
val: Union[Function, Type]
The value.
"""
return self._add(var, val)
def _add(self, var, val, update=False):
if isinstance(val, _expr.Expr):
if isinstance(var, _base.string_types):
var = _expr.GlobalVar(var)
# TODO(@jroesch): Port this logic to C++.
if not isinstance(val, _expr.Function):
                if isinstance(val, _expr.GlobalVar):
                    # ir_pass is not imported at the top of this module; import it lazily
                    # here so that the call below resolves (and to avoid a circular import).
                    from . import ir_pass
                    val = ir_pass.eta_expand(val, self)
else:
val = _expr.Function([], val)
_make.Module_Add(self, var, val, update)
else:
assert isinstance(val, _ty.Type)
if isinstance(var, _base.string_types):
var = _ty.GlobalTypeVar(var)
_module.Module_AddDef(self, var, val)
def __getitem__(self, var):
"""Lookup a global definition by name or by variable.
Parameters
----------
var: Union[String, GlobalVar, GlobalTypeVar]
The name or global variable.
Returns
-------
val: Union[Function, Type]
The definition referenced by :code:`var` (either a function or type).
"""
if isinstance(var, _base.string_types):
return _module.Module_Lookup_str(self, var)
elif isinstance(var, _expr.GlobalVar):
return _module.Module_Lookup(self, var)
else:
return _module.Module_LookupDef(self, var)
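    # Usage sketch (illustrative; the function names are made up):
    #
    #   mod = Module()
    #   mod["main"] = main_fn          # a str key is wrapped into a GlobalVar
    #   main = mod["main"]             # lookup by name, GlobalVar or GlobalTypeVar
    #   mod.update({"helper": helper_fn})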
def update(self, other):
"""Insert functions in another Module to current one.
Parameters
----------
other: Module
The module to merge into the current Module.
"""
if isinstance(other, dict):
other = Module(other)
return _module.Module_Update(self, other)
def get_global_var(self, name):
"""Get a global variable in the function by name.
Parameters
----------
name: str
The name of the global variable.
Returns
-------
global_var: GlobalVar
The global variable mapped to :code:`name`.
Raises
------
tvm.TVMError if we cannot find corresponding global var.
"""
return _module.Module_GetGlobalVar(self, name)
def get_global_type_var(self, name):
"""Get a global type variable in the function by name.
Parameters
----------
name: str
The name of the global type variable.
Returns
-------
global_type_var: GlobalTypeVar
The global variable mapped to :code:`name`.
Raises
------
tvm.TVMError if we cannot find corresponding global type var.
"""
return _module.Module_GetGlobalTypeVar(self, name)
@staticmethod
def from_expr(expr):
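        """Construct a module from a standalone expression."""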
return _module.Module_FromExpr(expr)
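# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the API; kept as an uncalled helper so that
# importing this module has no side effects). It shows how string keys passed
# to the Module constructor above are promoted to GlobalVar, assuming a TVM
# build in which the expr helpers imported above behave as used here.
def _example_module_usage():
    """Build a Module from a dict keyed by plain strings and look it up."""
    x = _expr.var("x")                # a free Relay variable
    ident = _expr.Function([x], x)    # the identity function
    mod = Module({"main": ident})     # "main" is promoted to a GlobalVar
    return mod["main"]                # lookup by name also works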
|
py
|
1a5e822b484ab7498253a8c2bc228870cd2bd0bb
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import densepose_ops
from object_detection.core import keypoint_ops
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import hungarian_matcher
from object_detection.utils import shape_utils
from object_detection.utils import target_assigner_utils as ta_utils
from object_detection.utils import tf_version
if tf_version.is_tf1():
from object_detection.matchers import bipartite_matcher # pylint: disable=g-import-not-at-top
ResizeMethod = tf2.image.ResizeMethod
_DEFAULT_KEYPOINT_OFFSET_STD_DEV = 1.0
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self,
similarity_calc,
matcher,
box_coder_instance,
negative_class_weight=1.0):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: an object_detection.core.Matcher used to match groundtruth to
anchors.
box_coder_instance: an object_detection.core.BoxCoder used to encode
matching groundtruth boxes with respect to anchors.
      negative_class_weight: classification weight to be associated with
        negative anchors (default: 1.0). The weight must be in [0., 1.].
Raises:
      ValueError: if similarity_calc is not a RegionSimilarityCalculator or
        if matcher is not a Matcher or if box_coder_instance is not a BoxCoder
"""
if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
if not isinstance(matcher, mat.Matcher):
raise ValueError('matcher must be a Matcher')
if not isinstance(box_coder_instance, box_coder.BoxCoder):
raise ValueError('box_coder must be a BoxCoder')
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder_instance
self._negative_class_weight = negative_class_weight
@property
def box_coder(self):
return self._box_coder
# TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
def assign(self,
anchors,
groundtruth_boxes,
groundtruth_labels=None,
unmatched_class_label=None,
groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
    Anchors that are not matched to anything are given a classification target
    determined by the unmatched_class_label argument (a zero vector by default).
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
        If set to None, unmatched_class_label defaults to [0] for each anchor.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1. Generally no
groundtruth boxes with zero weight match to any anchors as matchers are
aware of groundtruth weights. Additionally, `cls_weights` and
`reg_weights` are calculated using groundtruth weights as an added
safety.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: an int32 tensor of shape [num_anchors] containing result of anchor
groundtruth matching. Each position in the tensor indicates an anchor
and holds the following meaning:
(1) if match[i] >= 0, anchor i is matched with groundtruth match[i].
        (2) if match[i]=-1, anchor i is marked to be background.
(3) if match[i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be a BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be a BoxList')
if unmatched_class_label is None:
unmatched_class_label = tf.constant([0], tf.float32)
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
# set scores on the gt boxes
scores = 1 - groundtruth_labels[:, 0]
groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(groundtruth_weights, 0))
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
unmatched_class_label,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
# convert cls_weights from per-anchor to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_shape = tf.shape(cls_weights)
weights_multiple = tf.concat(
[tf.ones_like(weights_shape), class_label_shape],
axis=0)
for _ in range(len(cls_targets.get_shape()[1:])):
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return (cls_targets, cls_weights, reg_targets, reg_weights,
match.match_results)
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
groundtruth_keypoints = groundtruth_boxes.get_field(
fields.BoxListFields.keypoints)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels,
unmatched_class_label, match):
"""Create classification targets for each anchor.
    Assign a classification target to each anchor based on the matching
    groundtruth label provided by match. Anchors that are not matched
    to anything are given the target unmatched_class_label.
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
    Positive (matched) anchors are associated with the weight of their matched
    groundtruth box (1.0 by default) and negative (unmatched) anchors are
    associated with a weight of negative_class_weight. When anchors are
    ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
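# -----------------------------------------------------------------------------
# Illustrative sketch (an uncalled helper, not part of the API): running
# TargetAssigner.assign on a toy anchor/groundtruth pair. The matcher threshold
# and the choice of box coder below are arbitrary values for the example.
def _example_target_assigner_assign():
  """Assign targets for two toy anchors and one groundtruth box."""
  assigner = TargetAssigner(
      sim_calc.IouSimilarity(),
      argmax_matcher.ArgMaxMatcher(matched_threshold=0.5),
      mean_stddev_box_coder.MeanStddevBoxCoder())
  anchors = box_list.BoxList(
      tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]]))
  gt_boxes = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5]]))
  # With groundtruth_labels=None every groundtruth box receives a binary
  # positive label, so cls_targets has shape [num_anchors, 1].
  return assigner.assign(anchors, gt_boxes)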
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore its best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
negative_class_weight=1.0, use_matmul_gather=False):
"""Factory function for creating standard target assigners.
Args:
reference: string referencing the type of TargetAssigner.
stage: string denoting stage: {proposal, detection}.
    negative_class_weight: classification weight to be associated with negative
      anchors (default: 1.0)
    use_matmul_gather: whether to use a matrix-multiplication-based gather,
      which is better suited for TPUs.
Returns:
TargetAssigner: desired target assigner.
Raises:
ValueError: if combination reference+stage is invalid.
"""
if reference == 'Multibox' and stage == 'proposal':
if tf_version.is_tf2():
raise ValueError('GreedyBipartiteMatcher is not supported in TF 2.X.')
similarity_calc = sim_calc.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder_instance = mean_stddev_box_coder.MeanStddevBoxCoder()
elif reference == 'FasterRCNN' and stage == 'proposal':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.7,
unmatched_threshold=0.3,
force_match_for_each_row=True,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FasterRCNN' and stage == 'detection':
similarity_calc = sim_calc.IouSimilarity()
# Uses all proposals with IOU < 0.5 as candidate negatives.
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
negatives_lower_than_unmatched=True,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder(
scale_factors=[10.0, 10.0, 5.0, 5.0])
elif reference == 'FastRCNN':
similarity_calc = sim_calc.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.1,
force_match_for_each_row=False,
negatives_lower_than_unmatched=False,
use_matmul_gather=use_matmul_gather)
box_coder_instance = faster_rcnn_box_coder.FasterRcnnBoxCoder()
else:
raise ValueError('No valid combination of reference and stage.')
return TargetAssigner(similarity_calc, matcher, box_coder_instance,
negative_class_weight=negative_class_weight)
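# Illustrative sketch (an uncalled helper, not part of the API): the factory
# above wires together a similarity calculator, matcher and box coder for the
# standard Faster R-CNN stages.
def _example_create_target_assigner():
  """Build the proposal- and detection-stage assigners used by Faster R-CNN."""
  proposal_assigner = create_target_assigner('FasterRCNN', stage='proposal')
  detection_assigner = create_target_assigner('FasterRCNN', stage='detection')
  # Both expose the box coder used to encode regression targets.
  return proposal_assigner.box_coder, detection_assigner.box_coder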
def batch_assign(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_targets_batch,
unmatched_class_label=None,
gt_weights_batch=None):
"""Batched assignment of classification and regression targets.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
      (2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_targets_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_targets_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
(cls_targets, cls_weights,
reg_targets, reg_weights, match) = target_assigner.assign(
anchors, gt_boxes, gt_class_targets, unmatched_class_label,
gt_weights)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
# Assign an alias to avoid large refactor of existing users.
batch_assign_targets = batch_assign
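# Illustrative sketch (an uncalled helper, not part of the API): batched
# assignment for a single-image "batch" with one groundtruth box. The
# [background, class_1] label layout is an assumption made for the example.
def _example_batch_assign():
  """Assign targets for a toy batch of one image."""
  assigner = create_target_assigner('FasterRCNN', stage='detection')
  anchors = box_list.BoxList(
      tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]]))
  gt_box_batch = [box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 0.5]]))]
  gt_class_targets_batch = [tf.constant([[0.0, 1.0]])]
  return batch_assign(assigner, anchors, gt_box_batch, gt_class_targets_batch,
                      unmatched_class_label=tf.constant([1.0, 0.0]))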
def batch_get_targets(batch_match, groundtruth_tensor_list,
groundtruth_weights_list, unmatched_value,
unmatched_weight):
"""Returns targets based on anchor-groundtruth box matching results.
Args:
batch_match: An int32 tensor of shape [batch, num_anchors] containing the
result of target assignment returned by TargetAssigner.assign(..).
groundtruth_tensor_list: A list of groundtruth tensors of shape
[num_groundtruth, d_1, d_2, ..., d_k]. The tensors can be of any type.
groundtruth_weights_list: A list of weights, one per groundtruth tensor, of
shape [num_groundtruth].
unmatched_value: A tensor of shape [d_1, d_2, ..., d_k] of the same type as
groundtruth tensor containing target value for anchors that remain
unmatched.
unmatched_weight: Scalar weight to assign to anchors that remain unmatched.
Returns:
targets: A tensor of shape [batch, num_anchors, d_1, d_2, ..., d_k]
containing targets for anchors.
weights: A float tensor of shape [batch, num_anchors] containing the weights
to assign to each target.
"""
match_list = tf.unstack(batch_match)
targets_list = []
weights_list = []
for match_tensor, groundtruth_tensor, groundtruth_weight in zip(
match_list, groundtruth_tensor_list, groundtruth_weights_list):
match_object = mat.Match(match_tensor)
targets = match_object.gather_based_on_match(
groundtruth_tensor,
unmatched_value=unmatched_value,
ignored_value=unmatched_value)
targets_list.append(targets)
weights = match_object.gather_based_on_match(
groundtruth_weight,
unmatched_value=unmatched_weight,
ignored_value=tf.zeros_like(unmatched_weight))
weights_list.append(weights)
return tf.stack(targets_list), tf.stack(weights_list)
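# Illustrative sketch (an uncalled helper, not part of the API): gathering
# per-anchor targets from precomputed match results. The toy values encode one
# image with three anchors: anchor 0 matches groundtruth 0, anchor 1 is
# background and anchor 2 is ignored.
def _example_batch_get_targets():
  """Gather targets and weights for precomputed match results."""
  batch_match = tf.constant([[0, -1, -2]], dtype=tf.int32)
  groundtruth_tensor_list = [tf.constant([[5.0]])]
  groundtruth_weights_list = [tf.constant([1.0])]
  return batch_get_targets(batch_match, groundtruth_tensor_list,
                           groundtruth_weights_list,
                           unmatched_value=tf.constant([0.0]),
                           unmatched_weight=tf.constant(0.0))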
def batch_assign_confidences(target_assigner,
anchors_batch,
gt_box_batch,
gt_class_confidences_batch,
gt_weights_batch=None,
unmatched_class_label=None,
include_background_class=True,
implicit_class_weight=1.0):
"""Batched assignment of classification and regression targets.
  The differences between batch_assign_confidences and batch_assign_targets:
   - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
     tensor (high-dimensional) targets. 'batch_assign_confidences' only supports
     scalar (agnostic) and vector (multiclass) targets.
   - 'batch_assign_targets' assumes the input class tensor uses a binary
     one/K-hot encoding. 'batch_assign_confidences' takes class confidence
     scores as input, where 1 means explicit positive classes, 0 means implicit
     negative classes, and -1 means explicit negative classes.
   - 'batch_assign_confidences' assigns targets in a similar way to
     'batch_assign_targets' except that it gives different weights to implicit
     and explicit classes. This allows the user to control how negative
     gradients are pushed differently for implicit and explicit examples
     during training.
Args:
target_assigner: a target assigner.
anchors_batch: BoxList representing N box anchors or list of BoxList objects
with length batch_size representing anchor sets.
gt_box_batch: a list of BoxList objects with length batch_size
representing groundtruth boxes for each image in the batch
gt_class_confidences_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, classification_target_size] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch. Note that in this tensor, 1 means explicit positive class,
-1 means explicit negative class, and 0 means implicit negative class.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_gt_boxes_i] containing weights for groundtruth boxes.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
include_background_class: whether or not gt_class_confidences_batch includes
the background class.
implicit_class_weight: the weight assigned to implicit examples.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_anchors,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_anchors,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_anchors],
match: an int32 tensor of shape [batch_size, num_anchors] containing result
of anchor groundtruth matching. Each position in the tensor indicates an
anchor and holds the following meaning:
(1) if match[x, i] >= 0, anchor i is matched with groundtruth match[x, i].
      (2) if match[x, i]=-1, anchor i is marked to be background.
(3) if match[x, i]=-2, anchor i is ignored since it is not background and
does not have sufficient overlap to call it a foreground.
Raises:
ValueError: if input list lengths are inconsistent, i.e.,
      batch_size == len(gt_box_batch) == len(gt_class_confidences_batch)
and batch_size == len(anchors_batch) unless anchors_batch is a single
BoxList, or if any element in gt_class_confidences_batch has rank > 2.
"""
if not isinstance(anchors_batch, list):
anchors_batch = len(gt_box_batch) * [anchors_batch]
if not all(
isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
if not (len(anchors_batch)
== len(gt_box_batch)
== len(gt_class_confidences_batch)):
raise ValueError('batch size incompatible with lengths of anchors_batch, '
'gt_box_batch and gt_class_confidences_batch.')
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
match_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_confidences_batch)
for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
anchors_batch, gt_box_batch, gt_class_confidences_batch,
gt_weights_batch):
if (gt_class_confidences is not None and
len(gt_class_confidences.get_shape().as_list()) > 2):
raise ValueError('The shape of the class target is not supported. ',
gt_class_confidences.get_shape())
cls_targets, _, reg_targets, _, match = target_assigner.assign(
anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
groundtruth_weights=gt_weights)
if include_background_class:
cls_targets_without_background = tf.slice(
cls_targets, [0, 1], [-1, -1])
else:
cls_targets_without_background = cls_targets
positive_mask = tf.greater(cls_targets_without_background, 0.0)
negative_mask = tf.less(cls_targets_without_background, 0.0)
explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
positive_anchors = tf.reduce_any(positive_mask, axis=-1)
regression_weights = tf.cast(positive_anchors, dtype=tf.float32)
regression_targets = (
reg_targets * tf.expand_dims(regression_weights, axis=-1))
regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
cls_targets_without_background = (
cls_targets_without_background *
(1 - tf.cast(negative_mask, dtype=tf.float32)))
cls_weights_without_background = ((1 - implicit_class_weight) * tf.cast(
explicit_example_mask, dtype=tf.float32) + implicit_class_weight)
if include_background_class:
cls_weights_background = (
(1 - implicit_class_weight) * regression_weights_expanded
+ implicit_class_weight)
classification_weights = tf.concat(
[cls_weights_background, cls_weights_without_background], axis=-1)
cls_targets_background = 1 - regression_weights_expanded
classification_targets = tf.concat(
[cls_targets_background, cls_targets_without_background], axis=-1)
else:
classification_targets = cls_targets_without_background
classification_weights = cls_weights_without_background
cls_targets_list.append(classification_targets)
cls_weights_list.append(classification_weights)
reg_targets_list.append(regression_targets)
reg_weights_list.append(regression_weights)
match_list.append(match)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
batch_match = tf.stack(match_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights, batch_match)
def _smallest_positive_root(a, b, c):
"""Returns the smallest positive root of a quadratic equation."""
discriminant = tf.sqrt(b ** 2 - 4 * a * c)
# TODO(vighneshb) We are currently using the slightly incorrect
# CenterNet implementation. The commented lines implement the fixed version
# in https://github.com/princeton-vl/CornerNet. Change the implementation
# after verifying it has no negative impact.
# root1 = (-b - discriminant) / (2 * a)
# root2 = (-b + discriminant) / (2 * a)
# return tf.where(tf.less(root1, 0), root2, root1)
return (-b + discriminant) / (2.0)
def max_distance_for_overlap(height, width, min_iou):
"""Computes how far apart bbox corners can lie while maintaining the iou.
Given a bounding box size, this function returns a lower bound on how far
apart the corners of another box can lie while still maintaining the given
IoU. The implementation is based on the `gaussian_radius` function in the
Objects as Points github repo: https://github.com/xingyizhou/CenterNet
Args:
height: A 1-D float Tensor representing height of the ground truth boxes.
width: A 1-D float Tensor representing width of the ground truth boxes.
min_iou: A float representing the minimum IoU desired.
Returns:
distance: A 1-D Tensor of distances, of the same length as the input
height and width tensors.
"""
# Given that the detected box is displaced at a distance `d`, the exact
# IoU value will depend on the angle at which each corner is displaced.
# We simplify our computation by assuming that each corner is displaced by
# a distance `d` in both x and y direction. This gives us a lower IoU than
# what is actually realizable and ensures that any box with corners less
# than `d` distance apart will always have an IoU greater than or equal
# to `min_iou`
# The following 3 cases can be worked on geometrically and come down to
# solving a quadratic inequality. In each case, to ensure `min_iou` we use
# the smallest positive root of the equation.
# Case where detected box is offset from ground truth and no box completely
# contains the other.
distance_detection_offset = _smallest_positive_root(
a=1, b=-(height + width),
c=width * height * ((1 - min_iou) / (1 + min_iou))
)
# Case where detection is smaller than ground truth and completely contained
# in it.
distance_detection_in_gt = _smallest_positive_root(
a=4, b=-2 * (height + width),
c=(1 - min_iou) * width * height
)
# Case where ground truth is smaller than detection and completely contained
# in it.
distance_gt_in_detection = _smallest_positive_root(
a=4 * min_iou, b=(2 * min_iou) * (width + height),
c=(min_iou - 1) * width * height
)
return tf.reduce_min([distance_detection_offset,
distance_gt_in_detection,
distance_detection_in_gt], axis=0)
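# Illustrative sketch (an uncalled helper, not part of the API): the radius
# within which the corners of a shifted 10x10 box still keep at least 0.7 IoU
# with the original box.
def _example_max_distance_for_overlap():
  """Compute the allowed corner displacement for a toy box size."""
  return max_distance_for_overlap(
      height=tf.constant([10.0]), width=tf.constant([10.0]), min_iou=0.7)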
def get_batch_predictions_from_indices(batch_predictions, indices):
"""Gets the values of predictions in a batch at the given indices.
The indices are expected to come from the offset targets generation functions
in this library. The returned value is intended to be used inside a loss
function.
Args:
batch_predictions: A tensor of shape [batch_size, height, width, channels]
or [batch_size, height, width, class, channels] for class-specific
features (e.g. keypoint joint offsets).
indices: A tensor of shape [num_instances, 3] for single class features or
[num_instances, 4] for multiple classes features.
Returns:
values: A tensor of shape [num_instances, channels] holding the predicted
values at the given indices.
"""
# Note, gather_nd (and its gradient scatter_nd) runs significantly slower (on
# TPU) than gather with flattened inputs, so reshape the tensor, flatten the
# indices, and run gather.
shape = shape_utils.combined_static_and_dynamic_shape(batch_predictions)
# [B, H, W, C] -> [H*W, W, 1] or [B, H, W, N, C] -> [H*W*N, W*N, N, 1]
rev_cum_interior_indices = tf.reverse(tf.math.cumprod(shape[-2:0:-1]), [0])
rev_cum_interior_indices = tf.concat([rev_cum_interior_indices, [1]], axis=0)
# Compute flattened indices and gather.
flattened_inds = tf.linalg.matmul(
indices, rev_cum_interior_indices[:, tf.newaxis])[:, 0]
batch_predictions_2d = tf.reshape(batch_predictions, [-1, shape[-1]])
return tf.gather(batch_predictions_2d, flattened_inds, axis=0)
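# Illustrative sketch (an uncalled helper, not part of the API): gathering two
# pixel predictions from a [1, 2, 2, 3] feature map using (batch, y, x) rows.
def _example_gather_predictions():
  """Gather the channels at (y=0, x=1) and (y=1, x=0) of a toy feature map."""
  preds = tf.reshape(tf.range(12, dtype=tf.float32), [1, 2, 2, 3])
  indices = tf.constant([[0, 0, 1], [0, 1, 0]], dtype=tf.int32)
  return get_batch_predictions_from_indices(preds, indices)  # shape [2, 3]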
def _compute_std_dev_from_box_size(boxes_height, boxes_width, min_overlap):
"""Computes the standard deviation of the Gaussian kernel from box size.
Args:
boxes_height: A 1D tensor with shape [num_instances] representing the height
of each box.
boxes_width: A 1D tensor with shape [num_instances] representing the width
of each box.
min_overlap: The minimum IOU overlap that boxes need to have to not be
penalized.
Returns:
A 1D tensor with shape [num_instances] representing the computed Gaussian
sigma for each of the box.
"""
  # We divide the radius by 3 (via the diameter divided by 6 below) so that
  # points within the computed distance lie within roughly three standard
  # deviations (>99% of the Gaussian mass).
sigma = max_distance_for_overlap(boxes_height, boxes_width, min_overlap)
sigma = (2 * tf.math.maximum(tf.math.floor(sigma), 0.0) + 1) / 6.0
return sigma
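# Illustrative sketch (an uncalled helper, not part of the API): the Gaussian
# sigma the center-heatmap assigners below derive from a toy box size.
def _example_gaussian_sigma():
  """Sigma for a 10x10 box at a minimum overlap of 0.7."""
  return _compute_std_dev_from_box_size(
      boxes_height=tf.constant([10.0]), boxes_width=tf.constant([10.0]),
      min_overlap=0.7)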
def _preprocess_keypoints_and_weights(out_height, out_width, keypoints,
class_onehot, class_weights,
keypoint_weights, class_id,
keypoint_indices):
"""Preprocesses the keypoints and the corresponding keypoint weights.
This function performs several common steps to preprocess the keypoints and
keypoint weights features, including:
1) Select the subset of keypoints based on the keypoint indices, fill the
keypoint NaN values with zeros and convert to absolute coordinates.
2) Generate the weights of the keypoint using the following information:
a. The class of the instance.
b. The NaN value of the keypoint coordinates.
c. The provided keypoint weights.
Args:
out_height: An integer or an integer tensor indicating the output height
of the model.
out_width: An integer or an integer tensor indicating the output width of
the model.
keypoints: A float tensor of shape [num_instances, num_total_keypoints, 2]
      representing the original keypoint groundtruth coordinates.
class_onehot: A float tensor of shape [num_instances, num_classes]
containing the class targets with the 0th index assumed to map to the
first non-background class.
class_weights: A float tensor of shape [num_instances] containing weights
for groundtruth instances.
keypoint_weights: A float tensor of shape
[num_instances, num_total_keypoints] representing the weights of each
keypoints.
class_id: int, the ID of the class (0-indexed) that contains the target
keypoints to consider in this task.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints that should be considered in this task.
Returns:
A tuple of two tensors:
keypoint_absolute: A float tensor of shape
[num_instances, num_keypoints, 2] which is the selected and updated
keypoint coordinates.
keypoint_weights: A float tensor of shape [num_instances, num_keypoints]
representing the updated weight of each keypoint.
"""
  # Select the target keypoints by their type ids and generate the mask
# of valid elements.
valid_mask, keypoints = ta_utils.get_valid_keypoint_mask_for_class(
keypoint_coordinates=keypoints,
class_id=class_id,
class_onehot=class_onehot,
class_weights=class_weights,
keypoint_indices=keypoint_indices)
# Keypoint coordinates in absolute coordinate system.
# The shape of the tensors: [num_instances, num_keypoints, 2].
keypoints_absolute = keypoint_ops.to_absolute_coordinates(
keypoints, out_height, out_width)
# Assign default weights for the keypoints.
if keypoint_weights is None:
keypoint_weights = tf.ones_like(keypoints[:, :, 0])
else:
keypoint_weights = tf.gather(
keypoint_weights, indices=keypoint_indices, axis=1)
keypoint_weights = keypoint_weights * valid_mask
return keypoints_absolute, keypoint_weights
class CenterNetCenterHeatmapTargetAssigner(object):
"""Wrapper to compute the object center heatmap."""
def __init__(self,
stride,
min_overlap=0.7,
compute_heatmap_sparse=False,
keypoint_class_id=None,
keypoint_indices=None,
keypoint_weights_for_center=None,
box_heatmap_type='adaptive_gaussian',
heatmap_exponent=1.0):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
min_overlap: The minimum IOU overlap that boxes need to have to not be
penalized.
compute_heatmap_sparse: bool, indicating whether or not to use the sparse
version of the Op that computes the heatmap. The sparse version scales
better with number of classes, but in some cases is known to cause
OOM error. See (b/170989061).
keypoint_class_id: int, the ID of the class (0-indexed) that contains the
target keypoints to consider in this task.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
      keypoint_weights_for_center: The keypoint weights used for calculating
        the location of the object center. The number of weights needs to be
        the same as the number of keypoints. The object center is calculated
        by the weighted mean of the keypoint locations. If not provided, the
        object center is determined by the center of the bounding box (default
        behavior).
box_heatmap_type: str, the algorithm used to compute the box heatmap,
used when calling the assign_center_targets_from_boxes method.
Options are:
          'adaptive_gaussian': A box-size adaptive Gaussian from the original
            paper[1].
'iou': IOU based heatmap target where each point is assigned an IOU
based on its location, assuming that it produced a box centered at
that point with the correct size.
      heatmap_exponent: float, the generated heatmap is exponentiated with
        this number. A number > 1 will result in the heatmap being more peaky
        and a number < 1 will cause the heatmap to be more spread out.
"""
self._stride = stride
self._min_overlap = min_overlap
self._compute_heatmap_sparse = compute_heatmap_sparse
self._keypoint_class_id = keypoint_class_id
self._keypoint_indices = keypoint_indices
self._keypoint_weights_for_center = keypoint_weights_for_center
self._box_heatmap_type = box_heatmap_type
self._heatmap_exponent = heatmap_exponent
def assign_center_targets_from_boxes(self,
height,
width,
gt_boxes_list,
gt_classes_list,
gt_weights_list=None,
maximum_normalized_coordinate=1.1):
"""Computes the object center heatmap target.
Args:
height: int, height of input to the model. This is used to
determine the height of the output.
width: int, width of the input to the model. This is used to
determine the width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The box coordinates are expected in normalized coordinates.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_weights_list: A list of float tensors with shape [num_boxes]
representing the weight of each groundtruth detection box.
maximum_normalized_coordinate: Maximum coordinate value to be considered
        as normalized; defaults to 1.1. This is used to check bounds during
converting normalized coordinates to absolute coordinates.
Returns:
heatmap: A Tensor of size [batch_size, output_height, output_width,
num_classes] representing the per class center heatmap. output_height
and output_width are computed by dividing the input height and width by
the stride specified during initialization.
"""
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)
heatmaps = []
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
# TODO(vighneshb) Replace the for loop with a batch version.
for boxes, class_targets, weights in zip(gt_boxes_list, gt_classes_list,
gt_weights_list):
boxes = box_list.BoxList(boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1),
maximum_normalized_coordinate=maximum_normalized_coordinate)
      # Get the box center coordinates. Each returned tensor has the shape of
# [num_instances]
(y_center, x_center, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
# Compute the sigma from box size. The tensor shape: [num_instances].
sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width,
self._min_overlap)
# Apply the Gaussian kernel to the center coordinates. Returned heatmap
# has shape of [out_height, out_width, num_classes]
if self._box_heatmap_type == 'adaptive_gaussian':
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=y_center,
x_coordinates=x_center,
sigma=sigma,
channel_onehot=class_targets,
channel_weights=weights,
sparse=self._compute_heatmap_sparse)
elif self._box_heatmap_type == 'iou':
heatmap = ta_utils.coordinates_to_iou(y_grid, x_grid, boxes,
class_targets, weights)
else:
raise ValueError(f'Unknown heatmap type - {self._box_heatmap_type}')
heatmaps.append(heatmap)
# Return the stacked heatmaps over the batch.
stacked_heatmaps = tf.stack(heatmaps, axis=0)
return (tf.pow(stacked_heatmaps, self._heatmap_exponent) if
self._heatmap_exponent != 1.0 else stacked_heatmaps)
def assign_center_targets_from_keypoints(self,
height,
width,
gt_classes_list,
gt_keypoints_list,
gt_weights_list=None,
gt_keypoints_weights_list=None):
"""Computes the object center heatmap target using keypoint locations.
Args:
height: int, height of input to the model. This is used to
determine the height of the output.
width: int, width of the input to the model. This is used to
determine the width of the output.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
      gt_keypoints_list: A list of float tensors with shape [num_instances,
        num_total_keypoints, 2] representing the groundtruth keypoint
        coordinates (in normalized coordinates) for each sample in the batch.
gt_weights_list: A list of float tensors with shape [num_boxes]
representing the weight of each groundtruth detection box.
      gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
        shape [num_instances, num_total_keypoints] representing the weights of
        each keypoint. If not provided, then all non-NaN keypoints will be
        equally weighted.
Returns:
heatmap: A Tensor of size [batch_size, output_height, output_width,
num_classes] representing the per class center heatmap. output_height
and output_width are computed by dividing the input height and width by
the stride specified during initialization.
"""
assert (self._keypoint_weights_for_center is not None and
self._keypoint_class_id is not None and
self._keypoint_indices is not None)
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
(y_grid, x_grid) = ta_utils.image_shape_to_grids(out_height, out_width)
heatmaps = []
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
for keypoints, classes, kp_weights, weights in zip(
gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=out_height,
out_width=out_width,
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._keypoint_class_id,
keypoint_indices=self._keypoint_indices)
# _, num_keypoints, _ = (
# shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# Update the keypoint weights by the specified keypoints weights.
kp_loc_weights = tf.constant(
self._keypoint_weights_for_center, dtype=tf.float32)
updated_kp_weights = kp_weights * kp_loc_weights[tf.newaxis, :]
# Obtain the sum of the weights for each instance.
# instance_weight_sum has shape: [num_instance].
instance_weight_sum = tf.reduce_sum(updated_kp_weights, axis=1)
# Weight the keypoint coordinates by updated_kp_weights.
# weighted_keypoints has shape: [num_instance, num_keypoints, 2]
weighted_keypoints = keypoints_absolute * tf.expand_dims(
updated_kp_weights, axis=2)
# Compute the mean of the keypoint coordinates over the weighted
# keypoints.
# keypoint_mean has shape: [num_instance, 2]
keypoint_mean = tf.math.divide(
tf.reduce_sum(weighted_keypoints, axis=1),
tf.expand_dims(instance_weight_sum, axis=-1))
      # Replace the NaN values (due to division by zero in the above operation)
# by 0.0 where the sum of instance weight is zero.
# keypoint_mean has shape: [num_instance, 2]
keypoint_mean = tf.where(
tf.stack([instance_weight_sum, instance_weight_sum], axis=1) > 0.0,
keypoint_mean, tf.zeros_like(keypoint_mean))
# Compute the distance from each keypoint to the mean location using
# broadcasting and weighted by updated_kp_weights.
# keypoint_dist has shape: [num_instance, num_keypoints]
keypoint_mean = tf.expand_dims(keypoint_mean, axis=1)
keypoint_dist = tf.math.sqrt(
tf.reduce_sum(
tf.math.square(keypoints_absolute - keypoint_mean), axis=2))
keypoint_dist = keypoint_dist * updated_kp_weights
# Compute the average of the distances from each keypoint to the mean
# location and update the average value by zero when the instance weight
# is zero.
# avg_radius has shape: [num_instance]
avg_radius = tf.math.divide(
tf.reduce_sum(keypoint_dist, axis=1), instance_weight_sum)
avg_radius = tf.where(
instance_weight_sum > 0.0, avg_radius, tf.zeros_like(avg_radius))
# Update the class instance weight. If the instance doesn't contain enough
# valid keypoint values (i.e. instance_weight_sum == 0.0), then set the
# instance weight to zero.
# updated_class_weights has shape: [num_instance]
updated_class_weights = tf.where(
instance_weight_sum > 0.0, weights, tf.zeros_like(weights))
      # Compute the sigma from the average distance. We use 2 * average
      # distance to approximate the width/height of the bounding box.
# sigma has shape: [num_instances].
sigma = _compute_std_dev_from_box_size(2 * avg_radius, 2 * avg_radius,
self._min_overlap)
# Apply the Gaussian kernel to the center coordinates. Returned heatmap
# has shape of [out_height, out_width, num_classes]
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=keypoint_mean[:, 0, 0],
x_coordinates=keypoint_mean[:, 0, 1],
sigma=sigma,
channel_onehot=classes,
channel_weights=updated_class_weights,
sparse=self._compute_heatmap_sparse)
heatmaps.append(heatmap)
# Return the stacked heatmaps over the batch.
return tf.stack(heatmaps, axis=0)
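# -----------------------------------------------------------------------------
# Illustrative sketch (an uncalled helper, not part of the API): per-class
# center heatmap targets for a single 128x128 image with one box of the first
# class. The image size, stride, box and labels are arbitrary example values.
def _example_center_heatmap_targets():
  """Center heatmap for one image, two classes, one groundtruth box."""
  assigner = CenterNetCenterHeatmapTargetAssigner(stride=4)
  gt_boxes_list = [tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)]
  gt_classes_list = [tf.constant([[1.0, 0.0]], dtype=tf.float32)]
  gt_weights_list = [tf.constant([1.0], dtype=tf.float32)]
  # Per the docstring above, the result has shape [1, 32, 32, 2] since the
  # output size is the input size divided by the stride.
  return assigner.assign_center_targets_from_boxes(
      height=128, width=128, gt_boxes_list=gt_boxes_list,
      gt_classes_list=gt_classes_list, gt_weights_list=gt_weights_list)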
class CenterNetBoxTargetAssigner(object):
"""Wrapper to compute target tensors for the object detection task.
This class has methods that take as input a batch of ground truth tensors
(in the form of a list) and return the targets required to train the object
detection task.
"""
def __init__(self, stride):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
"""
self._stride = stride
def assign_size_and_offset_targets(self,
height,
width,
gt_boxes_list,
gt_weights_list=None,
maximum_normalized_coordinate=1.1):
"""Returns the box height/width and center offset targets and their indices.
The returned values are expected to be used with predicted tensors
of size (batch_size, height//self._stride, width//self._stride, 2). The
predicted values at the relevant indices can be retrieved with the
get_batch_predictions_from_indices function.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_weights_list: A list of tensors with shape [num_boxes] corresponding to
the weight of each groundtruth detection box.
maximum_normalized_coordinate: Maximum coordinate value to be considered
        as normalized; defaults to 1.1. This is used to check bounds during
converting normalized coordinates to absolute coordinates.
Returns:
batch_indices: an integer tensor of shape [num_boxes, 3] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively.
batch_box_height_width: a float tensor of shape [num_boxes, 2] holding
expected height and width of each box in the output space.
batch_offsets: a float tensor of shape [num_boxes, 2] holding the
expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_boxes] indicating the
weight of each prediction.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_box_height_width = []
batch_weights = []
batch_offsets = []
for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1),
maximum_normalized_coordinate=maximum_normalized_coordinate)
      # Get the box center coordinates. Each returned tensor has the shape of
# [num_boxes]
(y_center, x_center, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_boxes, 2]
# indices: [num_boxes, 2]
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_box_height_width.append(
tf.stack([boxes_height, boxes_width], axis=1))
batch_weights.append(weights)
batch_offsets.append(offsets)
batch_indices = tf.concat(batch_indices, axis=0)
batch_box_height_width = tf.concat(batch_box_height_width, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_box_height_width, batch_offsets, batch_weights)
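# -----------------------------------------------------------------------------
# Illustrative sketch (an uncalled helper, not part of the API): box
# size/offset targets for a single 128x128 image with one groundtruth box. The
# image size, stride and box below are arbitrary example values.
def _example_box_size_offset_targets():
  """Size/offset targets and indices for one toy groundtruth box."""
  assigner = CenterNetBoxTargetAssigner(stride=4)
  gt_boxes_list = [tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)]
  (batch_indices, batch_box_height_width, batch_offsets,
   batch_weights) = assigner.assign_size_and_offset_targets(
       height=128, width=128, gt_boxes_list=gt_boxes_list)
  return batch_indices, batch_box_height_width, batch_offsets, batch_weights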
# TODO(yuhuic): Update this class to handle the instance/keypoint weights.
# Currently those weights are used as "mask" to indicate whether an
# instance/keypoint should be considered or not (expecting only either 0 or 1
# value). In reality, the weights can be any value and this class should handle
# those values properly.
class CenterNetKeypointTargetAssigner(object):
"""Wrapper to compute target tensors for the CenterNet keypoint estimation.
This class has methods that take as input a batch of groundtruth tensors
(in the form of a list) and returns the targets required to train the
CenterNet model for keypoint estimation. Specifically, the class methods
expect the groundtruth in the following formats (consistent with the
standard Object Detection API). Note that usually the groundtruth tensors are
packed with a list which represents the batch dimension:
gt_classes_list: [Required] a list of 2D tf.float32 one-hot
(or k-hot) tensors of shape [num_instances, num_classes] containing the
class targets with the 0th index assumed to map to the first non-background
class.
gt_keypoints_list: [Required] a list of 3D tf.float32 tensors of
shape [num_instances, num_total_keypoints, 2] containing keypoint
coordinates. Note that the "num_total_keypoints" should be the sum of the
num_keypoints over all possible keypoint types, e.g. human pose, face.
For example, if a dataset contains both 17 human pose keypoints and 5 face
keypoints, then num_total_keypoints = 17 + 5 = 22.
    If an instance contains only a subset of keypoints (e.g. human pose
    keypoints but not face keypoints), the face keypoints will be filled with
    zeros.
Also note that keypoints are assumed to be provided in normalized
coordinates and missing keypoints should be encoded as NaN.
  gt_keypoints_weights_list: [Optional] a list of 2D tf.float32 tensors of
    shape [num_instances, num_total_keypoints] representing the weights of each
    keypoint. If not provided, then all non-NaN keypoints will be equally
    weighted.
gt_boxes_list: [Optional] a list of 2D tf.float32 tensors of shape
[num_instances, 4] containing coordinates of the groundtruth boxes.
Groundtruth boxes are provided in [y_min, x_min, y_max, x_max] format and
assumed to be normalized and clipped relative to the image window with
y_min <= y_max and x_min <= x_max.
Note that the boxes are only used to compute the center targets but are not
considered as required output of the keypoint task. If the boxes were not
provided, the center targets will be inferred from the keypoints
[not implemented yet].
gt_weights_list: [Optional] A list of 1D tf.float32 tensors of shape
[num_instances] containing weights for groundtruth boxes. Only useful when
gt_boxes_list is also provided.
"""
def __init__(self,
stride,
class_id,
keypoint_indices,
keypoint_std_dev=None,
per_keypoint_offset=False,
peak_radius=0,
compute_heatmap_sparse=False,
per_keypoint_depth=False):
"""Initializes a CenterNet keypoints target assigner.
Args:
stride: int, the stride of the network in output pixels.
class_id: int, the ID of the class (0-indexed) that contains the target
keypoints to consider in this task. For example, if the task is human
pose estimation, the class id should correspond to the "human" class.
keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
      keypoint_std_dev: A list of floats representing the standard deviation of
        the Gaussian kernel used to generate the keypoint heatmap (in units of
        output pixels). It provides the flexibility of using a different
        Gaussian kernel size for each keypoint type. If not provided, then all
        standard deviations default to _DEFAULT_KEYPOINT_OFFSET_STD_DEV (in the
        output pixel space). If provided, the length of keypoint_std_dev needs
        to be the same as the length of keypoint_indices, indicating the
        standard deviation of each keypoint type.
per_keypoint_offset: boolean, indicating whether to assign offset for
each keypoint channel. If set False, the output offset target will have
the shape [batch_size, out_height, out_width, 2]. If set True, the
output offset target will have the shape [batch_size, out_height,
out_width, 2 * num_keypoints].
peak_radius: int, the radius (in the unit of output pixel) around heatmap
peak to assign the offset targets.
compute_heatmap_sparse: bool, indicating whether or not to use the sparse
version of the Op that computes the heatmap. The sparse version scales
better with number of keypoint types, but in some cases is known to
cause an OOM error. See (b/170989061).
      per_keypoint_depth: A bool indicating whether the model predicts the
        depth of each keypoint in independent channels. Similar to
        per_keypoint_offset but for the keypoint depth.
"""
self._stride = stride
self._class_id = class_id
self._keypoint_indices = keypoint_indices
self._per_keypoint_offset = per_keypoint_offset
self._per_keypoint_depth = per_keypoint_depth
self._peak_radius = peak_radius
self._compute_heatmap_sparse = compute_heatmap_sparse
if keypoint_std_dev is None:
self._keypoint_std_dev = ([_DEFAULT_KEYPOINT_OFFSET_STD_DEV] *
len(keypoint_indices))
else:
assert len(keypoint_indices) == len(keypoint_std_dev)
self._keypoint_std_dev = keypoint_std_dev
def assign_keypoint_heatmap_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoints_weights_list=None,
gt_weights_list=None,
gt_boxes_list=None):
"""Returns the keypoint heatmap targets for the CenterNet model.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of float tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of float tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
class-level description for more detail. If provided, the keypoint
standard deviations will be scaled based on the box sizes.
Returns:
heatmap: A float tensor of shape [batch_size, output_height, output_width,
num_keypoints] representing the per keypoint type center heatmap.
output_height and output_width are computed by dividing the input height
and width by the stride specified during initialization. Note that the
"num_keypoints" is defined by the length of keypoint_indices, which is
not necessarily equal to "num_total_keypoints".
num_instances_batch: A 2D int tensor of shape
[batch_size, num_keypoints] representing number of instances for each
keypoint type.
valid_mask: A float tensor with shape [batch_size, output_height,
output_width, num_keypoints] where all values within the regions of the
        blackout boxes are 0.0 and 1.0 elsewhere. Note that the blackout boxes
are per keypoint type and are blacked out if the keypoint
visibility/weight (of the corresponding keypoint type) is zero.
"""
out_width = tf.cast(tf.maximum(width // self._stride, 1), tf.float32)
out_height = tf.cast(tf.maximum(height // self._stride, 1), tf.float32)
# Compute the yx-grid to be used to generate the heatmap. Each returned
# tensor has shape of [out_height, out_width]
y_grid, x_grid = ta_utils.image_shape_to_grids(out_height, out_width)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_boxes_list is None:
gt_boxes_list = [None] * len(gt_keypoints_list)
heatmaps = []
num_instances_list = []
valid_mask_list = []
for keypoints, classes, kp_weights, weights, boxes in zip(
gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list, gt_boxes_list):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=out_height,
out_width=out_width,
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# A tensor of shape [num_instances, num_keypoints] with
# each element representing the type dimension for each corresponding
# keypoint:
# [[0, 1, ..., k-1],
# [0, 1, ..., k-1],
# :
# [0, 1, ..., k-1]]
keypoint_types = tf.tile(
input=tf.expand_dims(tf.range(num_keypoints), axis=0),
multiples=[num_instances, 1])
# A tensor of shape [num_instances, num_keypoints] with
# each element representing the sigma of the Gaussian kernel for each
# keypoint.
keypoint_std_dev = tf.tile(
input=tf.expand_dims(tf.constant(self._keypoint_std_dev), axis=0),
multiples=[num_instances, 1])
# If boxes is not None, then scale the standard deviation based on the
# size of the object bounding boxes similar to object center heatmap.
if boxes is not None:
boxes = box_list.BoxList(boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
        # Get the box height and width. Each returned tensor has the shape
        # of [num_instances]
(_, _, boxes_height,
boxes_width) = boxes.get_center_coordinates_and_sizes()
# Compute the sigma from box size. The tensor shape: [num_instances].
sigma = _compute_std_dev_from_box_size(boxes_height, boxes_width, 0.7)
keypoint_std_dev = keypoint_std_dev * tf.stack(
[sigma] * num_keypoints, axis=1)
# Generate the per-keypoint type valid region mask to ignore regions
# with keypoint weights equal to zeros (e.g. visibility is 0).
# shape of valid_mask: [out_height, out_width, num_keypoints]
kp_weight_list = tf.unstack(kp_weights, axis=1)
valid_mask_channel_list = []
for kp_weight in kp_weight_list:
blackout = kp_weight < 1e-3
valid_mask_channel_list.append(
ta_utils.blackout_pixel_weights_by_box_regions(
out_height, out_width, boxes.get(), blackout))
valid_mask = tf.stack(valid_mask_channel_list, axis=2)
valid_mask_list.append(valid_mask)
# Apply the Gaussian kernel to the keypoint coordinates. Returned heatmap
# has shape of [out_height, out_width, num_keypoints].
heatmap = ta_utils.coordinates_to_heatmap(
y_grid=y_grid,
x_grid=x_grid,
y_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),
x_coordinates=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]),
sigma=tf.keras.backend.flatten(keypoint_std_dev),
channel_onehot=tf.one_hot(
tf.keras.backend.flatten(keypoint_types), depth=num_keypoints),
channel_weights=tf.keras.backend.flatten(kp_weights))
num_instances_list.append(
tf.cast(tf.reduce_sum(kp_weights, axis=0), dtype=tf.int32))
heatmaps.append(heatmap)
return (tf.stack(heatmaps, axis=0), tf.stack(num_instances_list, axis=0),
tf.stack(valid_mask_list, axis=0))
def _get_keypoint_types(self, num_instances, num_keypoints, num_neighbors):
"""Gets keypoint type index tensor.
The function prepares the tensor of keypoint indices with shape
[num_instances, num_keypoints, num_neighbors]. Each element represents the
keypoint type index for each corresponding keypoint and tiled along the 3rd
axis:
[[0, 1, ..., num_keypoints - 1],
[0, 1, ..., num_keypoints - 1],
:
[0, 1, ..., num_keypoints - 1]]
Args:
num_instances: int, the number of instances, used to define the 1st
dimension.
num_keypoints: int, the number of keypoint types, used to define the 2nd
dimension.
num_neighbors: int, the number of neighborhood pixels to consider for each
keypoint, used to define the 3rd dimension.
Returns:
      An integer tensor of shape [num_instances, num_keypoints, num_neighbors].
"""
keypoint_types = tf.range(num_keypoints)[tf.newaxis, :, tf.newaxis]
tiled_keypoint_types = tf.tile(keypoint_types,
multiples=[num_instances, 1, num_neighbors])
return tiled_keypoint_types
def assign_keypoints_offset_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the offsets and indices of the keypoints for location refinement.
    The returned values are used to refine the location of each keypoint in the
heatmap. The predicted values at the relevant indices can be retrieved with
the get_batch_predictions_from_indices function.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
      gt_keypoints_list: A list of tensors with shape [num_instances,
        num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 3] (or
[num_total_instances, 4] if 'per_keypoint_offset' is set True) holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column corresponds to the channel
dimension (if 'per_keypoint_offset' is set True).
batch_offsets: a float tensor of shape [num_total_instances, 2] holding
the expected y and x offset of each box in the output space.
batch_weights: a float tensor of shape [num_total_instances] indicating
the weight of each prediction.
Note that num_total_instances = batch_size * num_instances *
num_keypoints * num_neighbors
"""
batch_indices = []
batch_offsets = []
batch_weights = []
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, kp_weights, weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list, gt_keypoints_weights_list,
gt_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# [num_instances * num_keypoints]
y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])
x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])
# All keypoint coordinates and their neighbors:
# [num_instance * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
y_source, x_source,
self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
# Update the valid keypoint weights.
# [num_instance * num_keypoints, num_neighbors]
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
      # Compute the offsets and indices of the keypoints. Shape:
# offsets: [num_instances * num_keypoints, num_neighbors, 2]
# indices: [num_instances * num_keypoints, num_neighbors, 2]
offsets, indices = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=y_source,
x_target=x_source)
# Reshape to:
# offsets: [num_instances * num_keypoints * num_neighbors, 2]
# indices: [num_instances * num_keypoints * num_neighbors, 2]
offsets = tf.reshape(offsets, [-1, 2])
indices = tf.reshape(indices, [-1, 2])
# Prepare the batch indices to be prepended.
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
if self._per_keypoint_offset:
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
else:
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_offsets.append(offsets)
batch_weights.append(tf.keras.backend.flatten(valid_keypoints))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or
# [batch_size * num_instances * num_keypoints * num_neighbors, 4] if
# 'per_keypoint_offset' is set to True.
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 2]
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_offsets, batch_weights)
def assign_keypoints_depth_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_keypoint_depths_list,
gt_keypoint_depth_weights_list,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the target depths of the keypoints.
    The returned values are the relative depth information of each keypoint.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
gt_keypoints_list: A list of tensors with shape [num_instances,
num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_keypoint_depths_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the relative depth of the
keypoints.
gt_keypoint_depth_weights_list: A list of tensors with shape
[num_instances, num_total_keypoints] corresponding to the weights of
the relative depth.
gt_keypoints_weights_list: A list of tensors with shape [num_instances,
num_total_keypoints] corresponding to the weight of each keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
batch_indices: an integer tensor of shape [num_total_instances, 3] (or
[num_total_instances, 4] if 'per_keypoint_depth' is set True) holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column corresponds to the channel
        dimension (if 'per_keypoint_depth' is set True).
batch_depths: a float tensor of shape [num_total_instances, 1] (or
[num_total_instances, num_keypoints] if per_keypoint_depth is set True)
indicating the target depth of each keypoint.
batch_weights: a float tensor of shape [num_total_instances] indicating
the weight of each prediction.
Note that num_total_instances = batch_size * num_instances *
num_keypoints * num_neighbors
"""
batch_indices = []
batch_weights = []
batch_depths = []
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * len(gt_keypoints_list)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
if gt_keypoint_depths_list is None:
gt_keypoint_depths_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, kp_weights, weights,
keypoint_depths, keypoint_depth_weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list,
gt_keypoints_weights_list, gt_weights_list,
gt_keypoint_depths_list, gt_keypoint_depth_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# [num_instances * num_keypoints]
y_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 0])
x_source = tf.keras.backend.flatten(keypoints_absolute[:, :, 1])
# All keypoint coordinates and their neighbors:
# [num_instance * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
y_source, x_source,
self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
# Update the valid keypoint weights.
# [num_instance * num_keypoints, num_neighbors]
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
      # Compute the indices of the keypoints. Shape:
# indices: [num_instances * num_keypoints, num_neighbors, 2]
_, indices = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=y_source,
x_target=x_source)
# Reshape to:
# indices: [num_instances * num_keypoints * num_neighbors, 2]
indices = tf.reshape(indices, [-1, 2])
# Gather the keypoint depth from corresponding keypoint indices:
# [num_instances, num_keypoints]
keypoint_depths = tf.gather(
keypoint_depths, self._keypoint_indices, axis=1)
# Tile the depth target to surrounding pixels.
# [num_instances, num_keypoints, num_neighbors]
tiled_keypoint_depths = tf.tile(
tf.expand_dims(keypoint_depths, axis=-1),
multiples=[1, 1, num_neighbors])
# [num_instances, num_keypoints]
keypoint_depth_weights = tf.gather(
keypoint_depth_weights, self._keypoint_indices, axis=1)
# [num_instances, num_keypoints, num_neighbors]
keypoint_depth_weights = tf.tile(
tf.expand_dims(keypoint_depth_weights, axis=-1),
multiples=[1, 1, num_neighbors])
# Update the weights of keypoint depth by the weights of the keypoints.
# A keypoint depth target is valid only if its corresponding keypoint
# target is also valid.
# [num_instances, num_keypoints, num_neighbors]
tiled_depth_weights = (
tf.reshape(valid_keypoints,
[num_instances, num_keypoints, num_neighbors]) *
keypoint_depth_weights)
invalid_depths = tf.logical_or(
tf.math.is_nan(tiled_depth_weights),
tf.math.is_nan(tiled_keypoint_depths))
# Assign zero values and weights to NaN values.
final_keypoint_depths = tf.where(invalid_depths,
tf.zeros_like(tiled_keypoint_depths),
tiled_keypoint_depths)
final_keypoint_depth_weights = tf.where(
invalid_depths,
tf.zeros_like(tiled_depth_weights),
tiled_depth_weights)
# [num_instances * num_keypoints * num_neighbors, 1]
batch_depths.append(tf.reshape(final_keypoint_depths, [-1, 1]))
# Prepare the batch indices to be prepended.
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
if self._per_keypoint_depth:
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
else:
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(
tf.keras.backend.flatten(final_keypoint_depth_weights))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 3] or
# [batch_size * num_instances * num_keypoints * num_neighbors, 4] if
    # 'per_keypoint_depth' is set to True.
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints * num_neighbors, 1]
batch_depths = tf.concat(batch_depths, axis=0)
return (batch_indices, batch_depths, batch_weights)
def assign_joint_regression_targets(self,
height,
width,
gt_keypoints_list,
gt_classes_list,
gt_boxes_list=None,
gt_keypoints_weights_list=None,
gt_weights_list=None):
"""Returns the joint regression from center grid to keypoints.
The joint regression is used as the grouping cue from the estimated
keypoints to instance center. The offsets are the vectors from the floored
object center coordinates to the keypoint coordinates.
Args:
height: int, height of input to the CenterNet model. This is used to
determine the height of the output.
width: int, width of the input to the CenterNet model. This is used to
determine the width of the output.
      gt_keypoints_list: A list of float tensors with shape [num_instances,
        num_total_keypoints, 2]. See class-level description for more detail.
gt_classes_list: A list of float tensors with shape [num_instances,
num_classes]. See class-level description for more detail.
gt_boxes_list: A list of float tensors with shape [num_instances, 4]. See
class-level description for more detail. If provided, then the center
targets will be computed based on the center of the boxes.
gt_keypoints_weights_list: A list of float tensors with shape
        [num_instances, num_total_keypoints] representing the weight of each
keypoint.
gt_weights_list: A list of float tensors with shape [num_instances]. See
class-level description for more detail.
Returns:
      batch_indices: an integer tensor of shape [num_total_instances, 4]
        holding the indices inside the predicted tensor which should be
        penalized. The first column indicates the index along the batch
        dimension and the second and third columns indicate the index along
        the y and x dimensions respectively; the last column refers to the
        keypoint type dimension.
      batch_offsets: a float tensor of shape [num_total_instances, 2] holding
        the expected y and x offsets from the object centers to the keypoints
        in the output space.
      batch_weights: a float tensor of shape [num_total_instances] indicating
        the weight of each prediction.
      Note that num_total_instances = batch_size * num_instances *
        num_keypoints * num_neighbors
Raises:
NotImplementedError: currently the object center coordinates need to be
computed from groundtruth bounding boxes. The functionality of
generating the object center coordinates from keypoints is not
implemented yet.
"""
batch_indices = []
batch_offsets = []
batch_weights = []
batch_size = len(gt_keypoints_list)
if gt_keypoints_weights_list is None:
gt_keypoints_weights_list = [None] * batch_size
if gt_boxes_list is None:
gt_boxes_list = [None] * batch_size
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_classes_list)
for i, (keypoints, classes, boxes, kp_weights, weights) in enumerate(
zip(gt_keypoints_list, gt_classes_list,
gt_boxes_list, gt_keypoints_weights_list, gt_weights_list)):
keypoints_absolute, kp_weights = _preprocess_keypoints_and_weights(
out_height=tf.maximum(height // self._stride, 1),
out_width=tf.maximum(width // self._stride, 1),
keypoints=keypoints,
class_onehot=classes,
class_weights=weights,
keypoint_weights=kp_weights,
class_id=self._class_id,
keypoint_indices=self._keypoint_indices)
num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoints_absolute))
# If boxes are provided, compute the joint center from it.
if boxes is not None:
# Compute joint center from boxes.
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
y_center, x_center, _, _ = boxes.get_center_coordinates_and_sizes()
else:
# TODO(yuhuic): Add the logic to generate object centers from keypoints.
raise NotImplementedError((
'The functionality of generating object centers from keypoints is'
' not implemented yet. Please provide groundtruth bounding boxes.'
))
# Tile the yx center coordinates to be the same shape as keypoints.
y_center_tiled = tf.tile(
tf.reshape(y_center, shape=[num_instances, 1]),
multiples=[1, num_keypoints])
x_center_tiled = tf.tile(
tf.reshape(x_center, shape=[num_instances, 1]),
multiples=[1, num_keypoints])
# [num_instance * num_keypoints, num_neighbors]
(y_source_neighbors, x_source_neighbors,
valid_sources) = ta_utils.get_surrounding_grids(
tf.cast(tf.maximum(height // self._stride, 1), tf.float32),
tf.cast(tf.maximum(width // self._stride, 1), tf.float32),
tf.keras.backend.flatten(y_center_tiled),
tf.keras.backend.flatten(x_center_tiled), self._peak_radius)
_, num_neighbors = shape_utils.combined_static_and_dynamic_shape(
y_source_neighbors)
valid_keypoints = tf.cast(
valid_sources, dtype=tf.float32) * tf.stack(
[tf.keras.backend.flatten(kp_weights)] * num_neighbors, axis=-1)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_instances * num_keypoints, 2]
# indices: [num_instances * num_keypoints, 2]
(offsets, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_source_neighbors,
x_source=x_source_neighbors,
y_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 0]),
x_target=tf.keras.backend.flatten(keypoints_absolute[:, :, 1]))
# Reshape to:
# offsets: [num_instances * num_keypoints * num_neighbors, 2]
# indices: [num_instances * num_keypoints * num_neighbors, 2]
offsets = tf.reshape(offsets, [-1, 2])
indices = tf.reshape(indices, [-1, 2])
# keypoint type tensor: [num_instances, num_keypoints, num_neighbors].
tiled_keypoint_types = self._get_keypoint_types(
num_instances, num_keypoints, num_neighbors)
batch_index = tf.fill(
[num_instances * num_keypoints * num_neighbors, 1], i)
batch_indices.append(
tf.concat([batch_index, indices,
tf.reshape(tiled_keypoint_types, [-1, 1])], axis=1))
batch_offsets.append(offsets)
batch_weights.append(tf.keras.backend.flatten(valid_keypoints))
# Concatenate the tensors in the batch in the first dimension:
# shape: [batch_size * num_instances * num_keypoints, 4]
batch_indices = tf.concat(batch_indices, axis=0)
# shape: [batch_size * num_instances * num_keypoints]
batch_weights = tf.concat(batch_weights, axis=0)
# shape: [batch_size * num_instances * num_keypoints, 2]
batch_offsets = tf.concat(batch_offsets, axis=0)
return (batch_indices, batch_offsets, batch_weights)
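# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of driving the keypoint target assigner
# defined above. The stride, class id, keypoint indices, coordinates and box
# values below are invented for demonstration; the function only relies on
# `tf` and `CenterNetKeypointTargetAssigner` from this file.
def _example_keypoint_heatmap_targets():
  """Builds keypoint heatmap targets for one image with two instances."""
  assigner = CenterNetKeypointTargetAssigner(
      stride=4, class_id=0, keypoint_indices=[0, 1, 2])
  # Two instances of class 0 with 3 keypoint types, in normalized coordinates.
  # NaN marks a missing keypoint, as described in the class docstring.
  gt_keypoints = tf.constant(
      [[[0.1, 0.2], [0.3, 0.4], [float('nan'), float('nan')]],
       [[0.5, 0.5], [0.6, 0.7], [0.8, 0.9]]], dtype=tf.float32)
  gt_classes = tf.one_hot([0, 0], depth=2)
  gt_boxes = tf.constant(
      [[0.0, 0.1, 0.5, 0.6],
       [0.4, 0.4, 1.0, 1.0]], dtype=tf.float32)
  return assigner.assign_keypoint_heatmap_targets(
      height=128, width=128,
      gt_keypoints_list=[gt_keypoints],
      gt_classes_list=[gt_classes],
      gt_boxes_list=[gt_boxes])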
def _resize_masks(masks, height, width, method):
# Resize segmentation masks to conform to output dimensions. Use TF2
# image resize because TF1's version is buggy:
# https://yaqs.corp.google.com/eng/q/4970450458378240
masks = tf2.image.resize(
masks[:, :, :, tf.newaxis],
size=(height, width),
method=method)
return masks[:, :, :, 0]
class CenterNetMaskTargetAssigner(object):
"""Wrapper to compute targets for segmentation masks."""
def __init__(self, stride, boxes_scale=1.0):
"""Constructor.
Args:
stride: The stride of the network. Targets are assigned at the output
stride.
boxes_scale: Scale to apply to boxes before producing mask weights. This
is meant to ensure the full object region is properly weighted prior to
applying loss. A value of ~1.05 is typically applied when object regions
should be blacked out (perhaps because valid groundtruth masks are not
present).
"""
self._stride = stride
self._boxes_scale = boxes_scale
def assign_segmentation_targets(
self, gt_masks_list, gt_classes_list, gt_boxes_list=None,
gt_mask_weights_list=None, mask_resize_method=ResizeMethod.BILINEAR):
"""Computes the segmentation targets.
This utility produces a semantic segmentation mask for each class, starting
with whole image instance segmentation masks. Effectively, each per-class
segmentation target is the union of all masks from that class.
Args:
gt_masks_list: A list of float tensors with shape [num_boxes,
input_height, input_width] with values in {0, 1} representing instance
masks for each object.
gt_classes_list: A list of float tensors with shape [num_boxes,
num_classes] representing the one-hot encoded class labels for each box
in the gt_boxes_list.
gt_boxes_list: An optional list of float tensors with shape [num_boxes, 4]
with normalized boxes corresponding to each mask. The boxes are used to
spatially allocate mask weights.
gt_mask_weights_list: An optional list of float tensors with shape
[num_boxes] with weights for each mask. If a mask has a zero weight, it
indicates that the box region associated with the mask should not
contribute to the loss. If not provided, will use a per-pixel weight of
1.
mask_resize_method: A `tf.compat.v2.image.ResizeMethod`. The method to use
when resizing masks from input resolution to output resolution.
Returns:
segmentation_targets: An int32 tensor of size [batch_size, output_height,
output_width, num_classes] representing the class of each location in
the output space.
segmentation_weight: A float32 tensor of size [batch_size, output_height,
output_width] indicating the loss weight to apply at each location.
"""
_, num_classes = shape_utils.combined_static_and_dynamic_shape(
gt_classes_list[0])
_, input_height, input_width = (
shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))
output_height = tf.maximum(input_height // self._stride, 1)
output_width = tf.maximum(input_width // self._stride, 1)
if gt_boxes_list is None:
gt_boxes_list = [None] * len(gt_masks_list)
if gt_mask_weights_list is None:
gt_mask_weights_list = [None] * len(gt_masks_list)
segmentation_targets_list = []
segmentation_weights_list = []
for gt_boxes, gt_masks, gt_mask_weights, gt_classes in zip(
gt_boxes_list, gt_masks_list, gt_mask_weights_list, gt_classes_list):
if gt_boxes is not None and gt_mask_weights is not None:
boxes = box_list.BoxList(gt_boxes)
# Convert the box coordinates to absolute output image dimension space.
boxes_absolute = box_list_ops.to_absolute_coordinates(
boxes, output_height, output_width)
# Generate a segmentation weight that applies mask weights in object
# regions.
blackout = gt_mask_weights <= 0
segmentation_weight_for_image = (
ta_utils.blackout_pixel_weights_by_box_regions(
output_height, output_width, boxes_absolute.get(), blackout,
weights=gt_mask_weights, boxes_scale=self._boxes_scale))
segmentation_weights_list.append(segmentation_weight_for_image)
else:
segmentation_weights_list.append(tf.ones((output_height, output_width),
dtype=tf.float32))
gt_masks = _resize_masks(gt_masks, output_height, output_width,
mask_resize_method)
gt_masks = gt_masks[:, :, :, tf.newaxis]
gt_classes_reshaped = tf.reshape(gt_classes, [-1, 1, 1, num_classes])
# Shape: [h, w, num_classes].
segmentations_for_image = tf.reduce_max(
gt_masks * gt_classes_reshaped, axis=0)
# Avoid the case where max of an empty array is -inf.
segmentations_for_image = tf.maximum(segmentations_for_image, 0.0)
segmentation_targets_list.append(segmentations_for_image)
segmentation_target = tf.stack(segmentation_targets_list, axis=0)
segmentation_weight = tf.stack(segmentation_weights_list, axis=0)
return segmentation_target, segmentation_weight
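# Illustrative usage sketch (not part of the original module): a minimal,
# hypothetical call to the mask target assigner above. The mask size, class
# depth and stride are made up; only `tf` and `CenterNetMaskTargetAssigner`
# from this file are assumed.
def _example_segmentation_targets():
  """Computes per-class segmentation targets for a single toy image."""
  assigner = CenterNetMaskTargetAssigner(stride=4)
  # One 8x8 instance mask belonging to class 1 (out of 3 classes).
  gt_masks = tf.ones([1, 8, 8], dtype=tf.float32)
  gt_classes = tf.one_hot([1], depth=3)
  return assigner.assign_segmentation_targets(
      gt_masks_list=[gt_masks], gt_classes_list=[gt_classes])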
class CenterNetDensePoseTargetAssigner(object):
"""Wrapper to compute targets for DensePose task."""
def __init__(self, stride, num_parts=24):
self._stride = stride
self._num_parts = num_parts
def assign_part_and_coordinate_targets(self,
height,
width,
gt_dp_num_points_list,
gt_dp_part_ids_list,
gt_dp_surface_coords_list,
gt_weights_list=None):
"""Returns the DensePose part_id and coordinate targets and their indices.
The returned values are expected to be used with predicted tensors
of size (batch_size, height//self._stride, width//self._stride, 2). The
predicted values at the relevant indices can be retrieved with the
get_batch_predictions_from_indices function.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_dp_num_points_list: a list of 1-D tf.int32 tensors of shape [num_boxes]
containing the number of DensePose sampled points per box.
gt_dp_part_ids_list: a list of 2-D tf.int32 tensors of shape
[num_boxes, max_sampled_points] containing the DensePose part ids
(0-indexed) for each sampled point. Note that there may be padding, as
boxes may contain a different number of sampled points.
gt_dp_surface_coords_list: a list of 3-D tf.float32 tensors of shape
[num_boxes, max_sampled_points, 4] containing the DensePose surface
coordinates (normalized) for each sampled point. Note that there may be
padding.
gt_weights_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_total_points, 4] holding
the indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively. The fourth column is the part index.
batch_part_ids: an int tensor of shape [num_total_points, num_parts]
holding 1-hot encodings of parts for each sampled point.
batch_surface_coords: a float tensor of shape [num_total_points, 2]
holding the expected (v, u) coordinates for each sampled point.
batch_weights: a float tensor of shape [num_total_points] indicating the
weight of each prediction.
Note that num_total_points = batch_size * num_boxes * max_sampled_points.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_dp_num_points_list)
batch_indices = []
batch_part_ids = []
batch_surface_coords = []
batch_weights = []
for i, (num_points, part_ids, surface_coords, weights) in enumerate(
zip(gt_dp_num_points_list, gt_dp_part_ids_list,
gt_dp_surface_coords_list, gt_weights_list)):
num_boxes, max_sampled_points = (
shape_utils.combined_static_and_dynamic_shape(part_ids))
part_ids_flattened = tf.reshape(part_ids, [-1])
part_ids_one_hot = tf.one_hot(part_ids_flattened, depth=self._num_parts)
# Get DensePose coordinates in the output space.
surface_coords_abs = densepose_ops.to_absolute_coordinates(
surface_coords,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
surface_coords_abs = tf.reshape(surface_coords_abs, [-1, 4])
# Each tensor has shape [num_boxes * max_sampled_points].
yabs, xabs, v, u = tf.unstack(surface_coords_abs, axis=-1)
# Get the indices (in output space) for the DensePose coordinates. Note
# that if self._stride is larger than 1, this will have the effect of
# reducing spatial resolution of the groundtruth points.
indices_y = tf.cast(yabs, tf.int32)
indices_x = tf.cast(xabs, tf.int32)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Create per-point weights.
weights_per_point = tf.reshape(
tf.tile(weights[:, tf.newaxis], multiples=[1, max_sampled_points]),
shape=[-1])
# Mask out invalid (i.e. padded) DensePose points.
num_points_tiled = tf.tile(num_points[:, tf.newaxis],
multiples=[1, max_sampled_points])
range_tiled = tf.tile(tf.range(max_sampled_points)[tf.newaxis, :],
multiples=[num_boxes, 1])
valid_points = tf.math.less(range_tiled, num_points_tiled)
valid_points = tf.cast(tf.reshape(valid_points, [-1]), dtype=tf.float32)
weights_per_point = weights_per_point * valid_points
# Shape of [num_boxes * max_sampled_points] integer tensor filled with
# current batch index.
batch_index = i * tf.ones_like(indices_y, dtype=tf.int32)
batch_indices.append(
tf.stack([batch_index, indices_y, indices_x, part_ids_flattened],
axis=1))
batch_part_ids.append(part_ids_one_hot)
batch_surface_coords.append(tf.stack([v, u], axis=1))
batch_weights.append(weights_per_point)
batch_indices = tf.concat(batch_indices, axis=0)
batch_part_ids = tf.concat(batch_part_ids, axis=0)
batch_surface_coords = tf.concat(batch_surface_coords, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
return batch_indices, batch_part_ids, batch_surface_coords, batch_weights
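# Illustrative usage sketch (not part of the original module): a hypothetical
# example of the DensePose target assigner above, with made-up part ids and
# surface coordinates. One box with two valid sampled points and one padded
# point is assumed.
def _example_densepose_targets():
  """Assigns part ids and (v, u) targets for one box with two valid points."""
  assigner = CenterNetDensePoseTargetAssigner(stride=4, num_parts=24)
  num_points = tf.constant([2], dtype=tf.int32)        # 2 valid sampled points
  part_ids = tf.constant([[3, 5, 0]], dtype=tf.int32)  # last entry is padding
  # Normalized (y, x, v, u) surface coordinates; the last row is padding.
  surface_coords = tf.constant(
      [[[0.2, 0.3, 0.4, 0.5],
        [0.6, 0.7, 0.1, 0.2],
        [0.0, 0.0, 0.0, 0.0]]], dtype=tf.float32)
  return assigner.assign_part_and_coordinate_targets(
      height=128, width=128,
      gt_dp_num_points_list=[num_points],
      gt_dp_part_ids_list=[part_ids],
      gt_dp_surface_coords_list=[surface_coords])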
class CenterNetTrackTargetAssigner(object):
"""Wrapper to compute targets for tracking task.
Reference paper: A Simple Baseline for Multi-Object Tracking [1]
[1]: https://arxiv.org/abs/2004.01888
"""
def __init__(self, stride, num_track_ids):
self._stride = stride
self._num_track_ids = num_track_ids
def assign_track_targets(self,
height,
width,
gt_track_ids_list,
gt_boxes_list,
gt_weights_list=None):
"""Computes the track ID targets.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_track_ids_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the track ID of each groundtruth detection box.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_weights_list: A list of 1-D tensors with shape [num_boxes]
corresponding to the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [batch_size, num_boxes, 3]
holding the indices inside the predicted tensor which should be
penalized. The first column indicates the index along the batch
dimension and the second and third columns indicate the index
along the y and x dimensions respectively.
batch_weights: a float tensor of shape [batch_size, num_boxes] indicating
the weight of each prediction.
      track_id_targets: A float32 tensor of size [batch_size, num_boxes,
        num_track_ids] containing the one-hot track ID vector of each
        groundtruth detection box.
"""
track_id_targets = tf.one_hot(
gt_track_ids_list, depth=self._num_track_ids, axis=-1)
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_weights = []
for i, (boxes, weights) in enumerate(zip(gt_boxes_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has the shape of
      # [num_boxes]
(y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the indices of the box centers. Shape:
# indices: [num_boxes, 2]
(_, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(weights)
batch_indices = tf.stack(batch_indices, axis=0)
batch_weights = tf.stack(batch_weights, axis=0)
return batch_indices, batch_weights, track_id_targets
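# Illustrative usage sketch (not part of the original module): a hypothetical
# call to the tracking target assigner above for a single box with track ID 7.
# The input size, stride and number of track IDs are invented values.
def _example_track_targets():
  """Builds track ID targets for one groundtruth box."""
  assigner = CenterNetTrackTargetAssigner(stride=4, num_track_ids=100)
  gt_boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)
  gt_track_ids = tf.constant([7], dtype=tf.int32)
  return assigner.assign_track_targets(
      height=128, width=128,
      gt_track_ids_list=[gt_track_ids],
      gt_boxes_list=[gt_boxes])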
def filter_mask_overlap_min_area(masks):
"""If a pixel belongs to 2 instances, remove it from the larger instance."""
num_instances = tf.shape(masks)[0]
def _filter_min_area():
"""Helper function to filter non empty masks."""
areas = tf.reduce_sum(masks, axis=[1, 2], keepdims=True)
per_pixel_area = masks * areas
# Make sure background is ignored in argmin.
per_pixel_area = (masks * per_pixel_area +
(1 - masks) * per_pixel_area.dtype.max)
min_index = tf.cast(tf.argmin(per_pixel_area, axis=0), tf.int32)
filtered_masks = (
tf.range(num_instances)[:, tf.newaxis, tf.newaxis]
==
min_index[tf.newaxis, :, :]
)
return tf.cast(filtered_masks, tf.float32) * masks
return tf.cond(num_instances > 0, _filter_min_area,
lambda: masks)
def filter_mask_overlap(masks, method='min_area'):
if method == 'min_area':
return filter_mask_overlap_min_area(masks)
else:
raise ValueError('Unknown mask overlap filter type - {}'.format(method))
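# Illustrative sketch (not part of the original module) of the overlap filter
# above on a tiny, made-up pair of masks: the pixel covered by both instances
# is kept only by the smaller (by area) instance.
def _example_filter_mask_overlap():
  """Resolves an overlapping pixel in favor of the smaller instance."""
  masks = tf.constant(
      [[[1., 1., 0.],
        [0., 0., 0.]],
       [[0., 1., 1.],
        [0., 1., 1.]]], dtype=tf.float32)
  return filter_mask_overlap(masks, method='min_area')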
class CenterNetCornerOffsetTargetAssigner(object):
"""Wrapper to compute corner offsets for boxes using masks."""
def __init__(self, stride, overlap_resolution='min_area'):
"""Initializes the corner offset target assigner.
Args:
stride: int, the stride of the network in output pixels.
overlap_resolution: string, specifies how we handle overlapping
instance masks. Currently only 'min_area' is supported which assigns
overlapping pixels to the instance with the minimum area.
"""
self._stride = stride
self._overlap_resolution = overlap_resolution
def assign_corner_offset_targets(
self, gt_boxes_list, gt_masks_list):
"""Computes the corner offset targets and foreground map.
For each pixel that is part of any object's foreground, this function
computes the relative offsets to the top-left and bottom-right corners of
that instance's bounding box. It also returns a foreground map to indicate
which pixels contain valid corner offsets.
Args:
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_masks_list: A list of float tensors with shape [num_boxes,
input_height, input_width] with values in {0, 1} representing instance
masks for each object.
Returns:
corner_offsets: A float tensor of shape [batch_size, height, width, 4]
containing, in order, the (y, x) offsets to the top left corner and
        the (y, x) offsets to the bottom right corner for each foreground pixel.
foreground: A float tensor of shape [batch_size, height, width] in which
each pixel is set to 1 if it is a part of any instance's foreground
(and thus contains valid corner offsets) and 0 otherwise.
"""
_, input_height, input_width = (
shape_utils.combined_static_and_dynamic_shape(gt_masks_list[0]))
output_height = tf.maximum(input_height // self._stride, 1)
output_width = tf.maximum(input_width // self._stride, 1)
y_grid, x_grid = tf.meshgrid(
tf.range(output_height), tf.range(output_width),
indexing='ij')
y_grid, x_grid = tf.cast(y_grid, tf.float32), tf.cast(x_grid, tf.float32)
corner_targets = []
foreground_targets = []
for gt_masks, gt_boxes in zip(gt_masks_list, gt_boxes_list):
gt_masks = _resize_masks(gt_masks, output_height, output_width,
method=ResizeMethod.NEAREST_NEIGHBOR)
gt_masks = filter_mask_overlap(gt_masks, self._overlap_resolution)
output_height = tf.cast(output_height, tf.float32)
output_width = tf.cast(output_width, tf.float32)
ymin, xmin, ymax, xmax = tf.unstack(gt_boxes, axis=1)
ymin, ymax = ymin * output_height, ymax * output_height
xmin, xmax = xmin * output_width, xmax * output_width
top_y = ymin[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]
left_x = xmin[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]
bottom_y = ymax[:, tf.newaxis, tf.newaxis] - y_grid[tf.newaxis]
right_x = xmax[:, tf.newaxis, tf.newaxis] - x_grid[tf.newaxis]
foreground_target = tf.cast(tf.reduce_sum(gt_masks, axis=0) > 0.5,
tf.float32)
foreground_targets.append(foreground_target)
corner_target = tf.stack([
tf.reduce_sum(top_y * gt_masks, axis=0),
tf.reduce_sum(left_x * gt_masks, axis=0),
tf.reduce_sum(bottom_y * gt_masks, axis=0),
tf.reduce_sum(right_x * gt_masks, axis=0),
], axis=2)
corner_targets.append(corner_target)
return (tf.stack(corner_targets, axis=0),
tf.stack(foreground_targets, axis=0))
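# Illustrative usage sketch (not part of the original module): a hypothetical
# call to the corner offset target assigner above with one full-image mask and
# one box; the stride and sizes are invented.
def _example_corner_offset_targets():
  """Computes corner offsets and a foreground map for one toy instance."""
  assigner = CenterNetCornerOffsetTargetAssigner(stride=4)
  gt_boxes = tf.constant([[0.0, 0.0, 0.5, 0.5]], dtype=tf.float32)
  gt_masks = tf.ones([1, 8, 8], dtype=tf.float32)
  return assigner.assign_corner_offset_targets(
      gt_boxes_list=[gt_boxes], gt_masks_list=[gt_masks])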
class CenterNetTemporalOffsetTargetAssigner(object):
"""Wrapper to compute target tensors for the temporal offset task.
This class has methods that take as input a batch of ground truth tensors
  (in the form of a list) and return the targets required to train the
temporal offset task.
"""
def __init__(self, stride):
"""Initializes the target assigner.
Args:
stride: int, the stride of the network in output pixels.
"""
self._stride = stride
def assign_temporal_offset_targets(self,
height,
width,
gt_boxes_list,
gt_offsets_list,
gt_match_list,
gt_weights_list=None):
"""Returns the temporal offset targets and their indices.
For each ground truth box, this function assigns it the corresponding
temporal offset to train the model.
Args:
height: int, height of input to the model. This is used to determine the
height of the output.
width: int, width of the input to the model. This is used to determine the
width of the output.
gt_boxes_list: A list of float tensors with shape [num_boxes, 4]
representing the groundtruth detection bounding boxes for each sample in
the batch. The coordinates are expected in normalized coordinates.
gt_offsets_list: A list of 2-D tf.float32 tensors of shape [num_boxes, 2]
containing the spatial offsets of objects' centers compared with the
previous frame.
gt_match_list: A list of 1-D tf.float32 tensors of shape [num_boxes]
containing flags that indicate if an object has existed in the
previous frame.
gt_weights_list: A list of tensors with shape [num_boxes] corresponding to
the weight of each groundtruth detection box.
Returns:
batch_indices: an integer tensor of shape [num_boxes, 3] holding the
indices inside the predicted tensor which should be penalized. The
first column indicates the index along the batch dimension and the
second and third columns indicate the index along the y and x
dimensions respectively.
batch_temporal_offsets: a float tensor of shape [num_boxes, 2] of the
expected y and x temporal offset of each object center in the
output space.
batch_weights: a float tensor of shape [num_boxes] indicating the
weight of each prediction.
"""
if gt_weights_list is None:
gt_weights_list = [None] * len(gt_boxes_list)
batch_indices = []
batch_weights = []
batch_temporal_offsets = []
for i, (boxes, offsets, match_flags, weights) in enumerate(zip(
gt_boxes_list, gt_offsets_list, gt_match_list, gt_weights_list)):
boxes = box_list.BoxList(boxes)
boxes = box_list_ops.to_absolute_coordinates(
boxes,
tf.maximum(height // self._stride, 1),
tf.maximum(width // self._stride, 1))
      # Get the box center coordinates. Each returned tensor has the shape of
      # [num_boxes]
(y_center, x_center, _, _) = boxes.get_center_coordinates_and_sizes()
num_boxes = tf.shape(x_center)
# Compute the offsets and indices of the box centers. Shape:
# offsets: [num_boxes, 2]
# indices: [num_boxes, 2]
(_, indices) = ta_utils.compute_floor_offsets_with_indices(
y_source=y_center, x_source=x_center)
# Assign ones if weights are not provided.
# if an object is not matched, its weight becomes zero.
if weights is None:
weights = tf.ones(num_boxes, dtype=tf.float32)
weights *= match_flags
# Shape of [num_boxes, 1] integer tensor filled with current batch index.
batch_index = i * tf.ones_like(indices[:, 0:1], dtype=tf.int32)
batch_indices.append(tf.concat([batch_index, indices], axis=1))
batch_weights.append(weights)
batch_temporal_offsets.append(offsets)
batch_indices = tf.concat(batch_indices, axis=0)
batch_weights = tf.concat(batch_weights, axis=0)
batch_temporal_offsets = tf.concat(batch_temporal_offsets, axis=0)
return (batch_indices, batch_temporal_offsets, batch_weights)
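# Illustrative usage sketch (not part of the original module): a hypothetical
# call to the temporal offset target assigner above for one box that was
# matched in the previous frame; the offset values are made up.
def _example_temporal_offset_targets():
  """Builds temporal offset targets for one matched box."""
  assigner = CenterNetTemporalOffsetTargetAssigner(stride=4)
  gt_boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)
  gt_offsets = tf.constant([[1.0, -2.0]], dtype=tf.float32)
  gt_match = tf.constant([1.0], dtype=tf.float32)
  return assigner.assign_temporal_offset_targets(
      height=128, width=128,
      gt_boxes_list=[gt_boxes],
      gt_offsets_list=[gt_offsets],
      gt_match_list=[gt_match])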
class DETRTargetAssigner(object):
"""Target assigner for DETR (https://arxiv.org/abs/2005.12872).
Detection Transformer (DETR) matches predicted boxes to groundtruth directly
to determine targets instead of matching anchors to groundtruth. Hence, the
new target assigner.
"""
def __init__(self):
"""Construct Object Detection Target Assigner."""
self._similarity_calc = sim_calc.DETRSimilarity()
self._matcher = hungarian_matcher.HungarianBipartiteMatcher()
def batch_assign(self,
pred_box_batch,
gt_box_batch,
pred_class_batch,
gt_class_targets_batch,
gt_weights_batch=None,
unmatched_class_label_batch=None):
"""Batched assignment of classification and regression targets.
Args:
pred_box_batch: a tensor of shape [batch_size, num_queries, 4]
representing predicted bounding boxes.
gt_box_batch: a tensor of shape [batch_size, num_queries, 4]
representing groundtruth bounding boxes.
      pred_class_batch: A list of tensors with length batch_size, where each
        tensor has shape [num_queries, num_classes] to be used by certain
        similarity calculators.
gt_class_targets_batch: a list of tensors with length batch_size, where
each tensor has shape [num_gt_boxes_i, num_classes] and
num_gt_boxes_i is the number of boxes in the ith boxlist of
gt_box_batch.
gt_weights_batch: A list of 1-D tf.float32 tensors of shape
[num_boxes] containing weights for groundtruth boxes.
      unmatched_class_label_batch: a list of float32 tensors with shape
        [d_1, d_2, ..., d_k] which is consistent with the classification target
        for each prediction (and can be empty for scalar targets). This shape
        must thus be compatible with the tensors in `gt_class_targets_batch`.
Returns:
batch_cls_targets: a tensor with shape [batch_size, num_pred_boxes,
num_classes],
batch_cls_weights: a tensor with shape [batch_size, num_pred_boxes,
num_classes],
batch_reg_targets: a tensor with shape [batch_size, num_pred_boxes,
box_code_dimension]
batch_reg_weights: a tensor with shape [batch_size, num_pred_boxes].
"""
pred_box_batch = [
box_list.BoxList(pred_box)
for pred_box in tf.unstack(pred_box_batch)]
gt_box_batch = [
box_list.BoxList(gt_box)
for gt_box in tf.unstack(gt_box_batch)]
cls_targets_list = []
cls_weights_list = []
reg_targets_list = []
reg_weights_list = []
if gt_weights_batch is None:
gt_weights_batch = [None] * len(gt_class_targets_batch)
if unmatched_class_label_batch is None:
unmatched_class_label_batch = [None] * len(gt_class_targets_batch)
pred_class_batch = tf.unstack(pred_class_batch)
    for (pred_boxes, gt_boxes, pred_classes, gt_class_targets, gt_weights,
         unmatched_class_label) in zip(pred_box_batch, gt_box_batch,
                                       pred_class_batch, gt_class_targets_batch,
                                       gt_weights_batch,
                                       unmatched_class_label_batch):
      (cls_targets, cls_weights, reg_targets,
       reg_weights) = self.assign(pred_boxes, gt_boxes, pred_classes,
                                  gt_class_targets, gt_weights,
                                  unmatched_class_label)
cls_targets_list.append(cls_targets)
cls_weights_list.append(cls_weights)
reg_targets_list.append(reg_targets)
reg_weights_list.append(reg_weights)
batch_cls_targets = tf.stack(cls_targets_list)
batch_cls_weights = tf.stack(cls_weights_list)
batch_reg_targets = tf.stack(reg_targets_list)
batch_reg_weights = tf.stack(reg_weights_list)
return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
batch_reg_weights)
def assign(self,
pred_boxes,
gt_boxes,
pred_classes,
gt_labels,
gt_weights=None,
unmatched_class_label=None):
"""Assign classification and regression targets to each box_pred.
For a given set of pred_boxes and groundtruth detections, match pred_boxes
to gt_boxes and assign classification and regression targets to
each box_pred as well as weights based on the resulting match (specifying,
e.g., which pred_boxes should not contribute to training loss).
    pred_boxes that are not matched to anything are given a classification
    target of `unmatched_class_label`.
Args:
pred_boxes: a BoxList representing N pred_boxes
gt_boxes: a BoxList representing M groundtruth boxes
pred_classes: A tensor with shape [max_num_boxes, num_classes]
to be used by certain similarity calculators.
gt_labels: a tensor of shape [M, num_classes]
with labels for each of the ground_truth boxes. The subshape
[num_classes] can be empty (corresponding to scalar inputs). When set
to None, gt_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
gt_weights: a float tensor of shape [M] indicating the weight to
        assign to all pred_boxes that match a particular groundtruth box. The
weights must be in [0., 1.]. If None, all weights are set to 1.
Generally no groundtruth boxes with zero weight match to any pred_boxes
as matchers are aware of groundtruth weights. Additionally,
`cls_weights` and `reg_weights` are calculated using groundtruth
weights as an added safety.
unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
Returns:
cls_targets: a float32 tensor with shape [num_pred_boxes, num_classes],
where the subshape [num_classes] is compatible with gt_labels
which has shape [num_gt_boxes, num_classes].
cls_weights: a float32 tensor with shape [num_pred_boxes, num_classes],
representing weights for each element in cls_targets.
reg_targets: a float32 tensor with shape [num_pred_boxes,
box_code_dimension]
reg_weights: a float32 tensor with shape [num_pred_boxes]
"""
    if unmatched_class_label is None:
unmatched_class_label = tf.constant(
[1] + [0] * (gt_labels.shape[1] - 1), tf.float32)
if gt_weights is None:
num_gt_boxes = gt_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = gt_boxes.num_boxes()
gt_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
gt_boxes.add_field(fields.BoxListFields.classes, gt_labels)
pred_boxes.add_field(fields.BoxListFields.classes, pred_classes)
match_quality_matrix = self._similarity_calc.compare(
gt_boxes,
pred_boxes)
match = self._matcher.match(match_quality_matrix,
valid_rows=tf.greater(gt_weights, 0))
matched_gt_boxes = match.gather_based_on_match(
gt_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
ty, tx, th, tw = matched_gt_boxlist.get_center_coordinates_and_sizes()
reg_targets = tf.transpose(tf.stack([ty, tx, th, tw]))
cls_targets = match.gather_based_on_match(
gt_labels,
unmatched_value=unmatched_class_label,
ignored_value=unmatched_class_label)
reg_weights = match.gather_based_on_match(
gt_weights,
ignored_value=0.,
unmatched_value=0.)
cls_weights = match.gather_based_on_match(
gt_weights,
ignored_value=0.,
unmatched_value=1)
# convert cls_weights from per-box_pred to per-class.
class_label_shape = tf.shape(cls_targets)[1:]
weights_multiple = tf.concat(
[tf.constant([1]), class_label_shape],
axis=0)
cls_weights = tf.expand_dims(cls_weights, -1)
cls_weights = tf.tile(cls_weights, weights_multiple)
return (cls_targets, cls_weights, reg_targets, reg_weights)
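# Illustrative usage sketch (not part of the original module): a hypothetical
# single-image call to the DETR target assigner above. It assumes the
# DETRSimilarity and HungarianBipartiteMatcher dependencies imported at the
# top of this file are available; box and class values are made up.
def _example_detr_target_assignment():
  """Matches two predicted boxes against one groundtruth box, DETR-style."""
  assigner = DETRTargetAssigner()
  pred_boxes = box_list.BoxList(
      tf.constant([[0.1, 0.1, 0.5, 0.5],
                   [0.4, 0.4, 0.9, 0.9]], dtype=tf.float32))
  gt_boxes = box_list.BoxList(
      tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32))
  pred_classes = tf.constant([[0.1, 0.9],
                              [0.8, 0.2]], dtype=tf.float32)
  gt_labels = tf.constant([[0., 1.]], dtype=tf.float32)
  return assigner.assign(pred_boxes, gt_boxes, pred_classes, gt_labels)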
|
py
|
1a5e82a90c3a6230466b00320cd9d72c611ebc06
|
""" Tools to get keyword tags (e.g., for XMP metadata) from iNaturalist observations """
from datetime import timedelta
from logging import getLogger
from os import makedirs
from os.path import dirname, getsize
from typing import Dict, List, Optional, Tuple
import requests_cache
import xmltodict
from pyinaturalist.constants import RANKS
from pyinaturalist.v0 import get_observations
from pyinaturalist.v1 import get_observation, get_observation_species_counts, get_taxa, get_taxa_by_id
from naturtag.constants import (
API_CACHE_EXPIRY_HOURS,
CACHE_BACKEND,
CACHE_PATH,
COMMON_NAME_IGNORE_TERMS,
DWC_NAMESPACES,
DWC_TAXON_TERMS,
OBSERVATION_KEYS,
TAXON_KEYS,
IntTuple,
StrTuple,
)
from naturtag.validation import format_file_size
# Patch requests to use CachedSession for pyinaturalist API calls
makedirs(dirname(CACHE_PATH), exist_ok=True)
requests_cache.install_cache(
CACHE_PATH,
backend=CACHE_BACKEND,
expire_after=timedelta(hours=API_CACHE_EXPIRY_HOURS),
)
logger = getLogger().getChild(__name__)
def get_http_cache_size() -> str:
"""Get the current size of the HTTP request cache, in human-readable format"""
return format_file_size(getsize(f'{CACHE_PATH}.{CACHE_BACKEND}'))
def get_observation_taxon(observation_id: int) -> int:
"""Get the current taxon ID for the given observation ID"""
logger.info(f'API: Fetching observation {observation_id}')
obs = get_observation(observation_id)
if obs.get('community_tax_id') and obs['community_tax_id'] != obs['taxon']['id']:
logger.warning('API: Community ID does not match selected taxon')
return obs['taxon']['id']
def get_observation_dwc_terms(observation_id: int) -> Dict[str, str]:
"""Get all DWC terms for an iNaturalist observation"""
logger.info(f'API: Getting Darwin Core terms for observation {observation_id}')
obs_dwc = get_observations(id=observation_id, response_format='dwc')
return convert_dwc_to_xmp(obs_dwc)
def get_taxon_dwc_terms(taxon_id: int) -> Dict[str, str]:
"""Get all DWC terms for an iNaturalist taxon.
Since there is no DWC format for ``GET /taxa``, we'll just search for a random observation
with this taxon ID, strip off the observation metadata, and keep only the taxon metadata.
"""
logger.info(f'API: Getting Darwin Core terms for taxon {taxon_id}')
obs_dwc = get_observations(taxon_id=taxon_id, per_page=1, response_format='dwc')
dwc_xmp = convert_dwc_to_xmp(obs_dwc)
return {k: v for k, v in dwc_xmp.items() if k in DWC_TAXON_TERMS}
# TODO: separate species, binomial, trinomial
def get_keywords(
observation_id: int = None,
taxon_id: int = None,
common: bool = False,
hierarchical: bool = False,
) -> List[str]:
"""Get all taxonomic keywords for a given observation or taxon"""
min_tax_id = taxon_id or get_observation_taxon(observation_id)
taxa = get_taxon_with_ancestors(min_tax_id)
keywords = get_taxonomy_keywords(taxa)
if hierarchical:
keywords.extend(get_hierarchical_keywords(keywords))
if common:
keywords.extend(get_common_keywords(taxa))
keywords.append(f'inaturalist:taxon_id={min_tax_id}')
keywords.append(f'dwc:taxonID={min_tax_id}')
if observation_id:
keywords.append(f'inaturalist:observation_id={observation_id}')
keywords.append(f'dwc:catalogNumber={observation_id}')
logger.info(f'API: {len(keywords)} total keywords generated')
return keywords
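# Illustrative usage (hypothetical taxon ID; real calls query the iNaturalist API):
#   get_keywords(taxon_id=12345, common=True, hierarchical=True)
# yields rank keywords such as 'taxonomy:genus=Somegenus', pipe-joined hierarchical
# variants, common-name keywords, plus the ID keywords
# 'inaturalist:taxon_id=12345' and 'dwc:taxonID=12345'.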
def get_taxon_children(taxon_id: int) -> List[Dict]:
"""Get a taxon's children"""
logger.info(f'API: Fetching children of taxon {taxon_id}')
r = get_taxa(parent_id=taxon_id)
logger.info(f'API: {len(r["results"])} child taxa found')
return r['results']
def get_taxon_ancestors(taxon_id: int) -> List[Dict]:
"""Get a taxon's parents"""
return get_taxon_with_ancestors(taxon_id)[:-1]
def get_taxon_with_ancestors(taxon_id: int) -> List[Dict]:
"""Get a taxon with all its parents"""
logger.info(f'API: Fetching parents of taxon {taxon_id}')
results = get_taxa_by_id(taxon_id).get('results', [])
if not results:
logger.info(f'API: taxon {taxon_id} not found')
return []
taxon = results[0]
logger.info(f'API: {len(taxon["ancestors"])} parent taxa found')
return taxon['ancestors'] + [taxon]
# TODO: This should be reorganized somehow, I don't quite like the look of it;
# image_metadata module depends on this module and vice versa (kinda)
def get_taxon_and_obs_from_metadata(metadata) -> Tuple[Dict, Dict]:
logger.info(f'API: Searching for matching taxon and/or observation for {metadata.image_path}')
taxon, observation = get_observation_from_metadata(metadata)
if not taxon and metadata.has_taxon:
taxon = get_taxon_from_metadata(metadata)
if not taxon:
logger.info('API: No taxon found')
return taxon, observation
def get_observation_from_metadata(metadata) -> Tuple[Dict, Dict]:
if not metadata.observation_id:
logger.info('API: No observation ID specified')
return None, None
observation = get_observation(metadata.observation_id)
taxon = None
taxon_id = observation.get('taxon', {}).get('id')
# Handle observation with no taxon ID (e.g., not yet identified)
if taxon_id:
taxon = get_taxa_by_id(taxon_id).get('results', [None])[0]
logger.info(f'API: Found observation {metadata.observation_id} and taxon {taxon_id}')
else:
logger.warning(f'API: Observation {metadata.observation_id} is unidentified')
return taxon, observation
def get_taxon_from_metadata(metadata) -> Optional[Dict]:
"""Fetch taxon record from MetaMetadata object: either by ID or rank + name"""
rank, name = metadata.min_rank
params = {'id': metadata.taxon_id} if metadata.taxon_id else {'rank': rank, 'q': name}
logger.info(f'API: Querying taxon by: {params}')
results = get_taxa(**params)['results']
if results:
logger.info('API: Taxon found')
return results[0]
else:
return None
def get_taxonomy_keywords(taxa: List[Dict]) -> List[str]:
"""Format a list of taxa into rank keywords"""
return [quote(f'taxonomy:{t["rank"]}={t["name"]}') for t in taxa]
def get_common_keywords(taxa: List[Dict]) -> List[str]:
"""Format a list of taxa into common name keywords.
Filters out terms that aren't useful to keep as tags
"""
keywords = [t.get('preferred_common_name', '') for t in taxa]
def is_ignored(kw):
return any([ignore_term in kw.lower() for ignore_term in COMMON_NAME_IGNORE_TERMS])
common_keywords = [quote(kw) for kw in keywords if kw and not is_ignored(kw)]
logger.info(
f'API: {len(keywords) - len(common_keywords)} out of {len(keywords)} common names ignored'
)
return common_keywords
def get_observed_taxa(username: str, include_casual: bool = False) -> Dict[int, int]:
"""Get counts of taxa observed by the user, ordered by number of observations descending"""
if not username:
return {}
logger.info(f'API: Searching for user-observed taxa (casual: {include_casual})')
response = get_observation_species_counts(
user_login=username,
verifiable=None if include_casual else True, # False will return *only* casual observations
)
logger.info(f'API: {len(response["results"])} user-observed taxa found')
observed_taxa = {r['taxon']['id']: r['count'] for r in response['results']}
return dict(sorted(observed_taxa.items(), key=lambda x: x[1], reverse=True))
# TODO: Also include common names in hierarchy?
def get_hierarchical_keywords(keywords: List) -> List[str]:
hier_keywords = [keywords[0]]
for rank_name in keywords[1:]:
hier_keywords.append(f'{hier_keywords[-1]}|{rank_name}')
return hier_keywords
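# For example, ['taxonomy:kingdom=Animalia', 'taxonomy:phylum=Arthropoda'] becomes
# ['taxonomy:kingdom=Animalia', 'taxonomy:kingdom=Animalia|taxonomy:phylum=Arthropoda'].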
def sort_taxonomy_keywords(keywords: List[str]) -> List[str]:
"""Sort keywords by taxonomic rank, where applicable"""
def _get_rank_idx(tag):
return get_rank_idx(tag.split(':')[-1].split('=')[0])
return sorted(keywords, key=_get_rank_idx, reverse=True)
def get_rank_idx(rank: str) -> int:
return RANKS.index(rank) if rank in RANKS else 0
def get_inaturalist_ids(metadata):
"""Look for taxon and/or observation IDs from metadata if available"""
# Get first non-None value from specified keys, if any; otherwise return None
def _first_match(d, keys):
id = next(filter(None, map(d.get, keys)), None)
return int(id) if id else None
# Check all possible keys for valid taxon and observation IDs
taxon_id = _first_match(metadata, TAXON_KEYS)
observation_id = _first_match(metadata, OBSERVATION_KEYS)
logger.info(f'API: Taxon ID: {taxon_id} | Observation ID: {observation_id}')
return taxon_id, observation_id
def get_min_rank(metadata: Dict[str, str]) -> StrTuple:
"""Get the lowest (most specific) taxonomic rank from tags, if any"""
for rank in RANKS:
if rank in metadata:
logger.info(f'API: Found minimum rank: {rank} = {metadata[rank]}')
return rank, metadata[rank]
return None, None
def quote(s: str) -> str:
"""Surround keyword in quotes if it contains whitespace"""
return f'"{s}"' if ' ' in s else s
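# For example, quote('great blue heron') returns '"great blue heron"',
# while quote('Ardea') is returned unchanged.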
def convert_dwc_to_xmp(dwc: str) -> Dict[str, str]:
"""
Get all DWC terms from XML content containing a SimpleDarwinRecordSet, and format them as
XMP tags. For example: ``'dwc:species' -> 'Xmp.dwc.species'``
"""
# Get inner record as a dict, if it exists
xml_dict = xmltodict.parse(dwc)
dwr = xml_dict.get('dwr:SimpleDarwinRecordSet', {}).get('dwr:SimpleDarwinRecord')
if not dwr:
logger.warning('API: No SimpleDarwinRecord found')
return {}
# iNat sometimes includes duplicate occurrence IDs
if isinstance(dwr['dwc:occurrenceID'], list):
dwr['dwc:occurrenceID'] = dwr['dwc:occurrenceID'][0]
def _format_term(k):
ns, term = k.split(':')
return f'Xmp.{ns}.{term}'
def _include_term(k):
ns = k.split(':')[0]
return ns in DWC_NAMESPACES
# Format as XMP tags
return {_format_term(k): v for k, v in dwr.items() if _include_term(k)}
def get_ids_from_url(value: str) -> IntTuple:
"""If a URL is provided containing an ID, return the taxon and/or observation ID.
If it's an observation, fetch its taxon ID as well.
Returns:
taxon_id, observation_id
"""
taxon_id, observation_id = None, None
id = strip_url(value)
# TODO: Update after finishing Observation model
if 'observation' in value:
observation_id = id
json = get_observation(id)
taxon_id = json.get('taxon', {}).get('id')
elif 'taxa' in value:
taxon_id = id
return taxon_id, observation_id
def strip_url(value: str) -> Optional[int]:
"""If a URL is provided containing an ID, return just the ID"""
try:
return int(value.split('/')[-1].split('-')[0]) if value else None
except (TypeError, ValueError):
return None
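# For example, strip_url('https://www.inaturalist.org/observations/12345') and
# strip_url('https://www.inaturalist.org/taxa/12345-Some-taxon') both return
# 12345; empty or malformed input returns None.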
|
py
|
1a5e837534a9bbd97a8352f47913b1795bf086be
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import mock
import unittest
import difflib
from six import StringIO
from collections import namedtuple
from azure.cli.core import AzCommandsLoader, MainCommandsLoader
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.parser import AzCliCommandParser
from azure.cli.core.mock import DummyCli
from knack.arguments import enum_choice_list
class TestParser(unittest.TestCase):
def setUp(self):
self.io = StringIO()
def tearDown(self):
self.io.close()
def test_register_simple_commands(self):
def test_handler1():
pass
def test_handler2():
pass
cli = DummyCli()
cli.loader = mock.MagicMock()
cli.loader.cli_ctx = cli
command = AzCliCommand(cli.loader, 'command the-name', test_handler1)
command2 = AzCliCommand(cli.loader, 'sub-command the-second-name', test_handler2)
cmd_table = {'command the-name': command, 'sub-command the-second-name': command2}
cli.commands_loader.command_table = cmd_table
parser = AzCliCommandParser(cli)
parser.load_command_table(cli.commands_loader)
args = parser.parse_args('command the-name'.split())
self.assertIs(args.func, command)
args = parser.parse_args('sub-command the-second-name'.split())
self.assertIs(args.func, command2)
with mock.patch('azure.cli.core.parser.AzCliCommandParser.error', new=VerifyError(self)):
parser.parse_args('sub-command'.split())
self.assertTrue(AzCliCommandParser.error.called)
def test_required_parameter(self):
def test_handler(args): # pylint: disable=unused-argument
pass
cli = DummyCli()
cli.loader = mock.MagicMock()
cli.loader.cli_ctx = cli
command = AzCliCommand(cli.loader, 'test command', test_handler)
command.add_argument('req', '--req', required=True)
cmd_table = {'test command': command}
cli.commands_loader.command_table = cmd_table
parser = AzCliCommandParser(cli)
parser.load_command_table(cli.commands_loader)
args = parser.parse_args('test command --req yep'.split())
self.assertIs(args.func, command)
with mock.patch('azure.cli.core.parser.AzCliCommandParser.error', new=VerifyError(self)):
parser.parse_args('test command'.split())
self.assertTrue(AzCliCommandParser.error.called)
def test_nargs_parameter(self):
def test_handler():
pass
cli = DummyCli()
cli.loader = mock.MagicMock()
cli.loader.cli_ctx = cli
command = AzCliCommand(cli.loader, 'test command', test_handler)
command.add_argument('req', '--req', required=True, nargs=2)
cmd_table = {'test command': command}
cli.commands_loader.command_table = cmd_table
parser = AzCliCommandParser(cli)
parser.load_command_table(cli.commands_loader)
args = parser.parse_args('test command --req yep nope'.split())
self.assertIs(args.func, command)
with mock.patch('azure.cli.core.parser.AzCliCommandParser.error', new=VerifyError(self)):
parser.parse_args('test command -req yep'.split())
self.assertTrue(AzCliCommandParser.error.called)
def test_case_insensitive_enum_choices(self):
from enum import Enum
class TestEnum(Enum): # pylint: disable=too-few-public-methods
opt1 = "ALL_CAPS"
opt2 = "camelCase"
opt3 = "snake_case"
def test_handler():
pass
cli = DummyCli()
cli.loader = mock.MagicMock()
cli.loader.cli_ctx = cli
command = AzCliCommand(cli.loader, 'test command', test_handler)
command.add_argument('opt', '--opt', required=True, **enum_choice_list(TestEnum))
cmd_table = {'test command': command}
cli.commands_loader.command_table = cmd_table
parser = AzCliCommandParser(cli)
parser.load_command_table(cli.commands_loader)
args = parser.parse_args('test command --opt alL_cAps'.split())
self.assertEqual(args.opt, 'ALL_CAPS')
args = parser.parse_args('test command --opt CAMELCASE'.split())
self.assertEqual(args.opt, 'camelCase')
args = parser.parse_args('test command --opt sNake_CASE'.split())
self.assertEqual(args.opt, 'snake_case')
def _mock_import_lib(_):
mock_obj = mock.MagicMock()
mock_obj.__path__ = __name__
return mock_obj
def _mock_iter_modules(_):
return [(None, __name__, None)]
def _mock_extension_modname(ext_name, ext_dir):
return ext_name
def _mock_get_extensions():
MockExtension = namedtuple('Extension', ['name', 'preview', 'experimental', 'path', 'get_metadata'])
return [MockExtension(name=__name__ + '.ExtCommandsLoader', preview=False, experimental=False, path=None, get_metadata=lambda: {}),
MockExtension(name=__name__ + '.Ext2CommandsLoader', preview=False, experimental=False, path=None, get_metadata=lambda: {})]
def _mock_load_command_loader(loader, args, name, prefix):
from enum import Enum
class TestEnum(Enum): # pylint: disable=too-few-public-methods
enum_1 = 'enum_1'
enum_2 = 'enum_2'
def test_handler():
pass
class TestCommandsLoader(AzCommandsLoader):
def load_command_table(self, args):
super(TestCommandsLoader, self).load_command_table(args)
command = AzCliCommand(loader, 'test module', test_handler)
command.add_argument('opt', '--opt', required=True, **enum_choice_list(TestEnum))
self.command_table['test module'] = command
return self.command_table
# A command from an extension
class ExtCommandsLoader(AzCommandsLoader):
def load_command_table(self, args):
super(ExtCommandsLoader, self).load_command_table(args)
command = AzCliCommand(loader, 'test extension', test_handler)
command.add_argument('opt', '--opt', required=True, **enum_choice_list(TestEnum))
self.command_table['test extension'] = command
return self.command_table
if prefix == 'azure.cli.command_modules.':
command_loaders = {'TestCommandsLoader': TestCommandsLoader}
else:
command_loaders = {'ExtCommandsLoader': ExtCommandsLoader}
module_command_table = {}
for _, loader_cls in command_loaders.items():
command_loader = loader_cls(cli_ctx=loader.cli_ctx)
command_table = command_loader.load_command_table(args)
if command_table:
module_command_table.update(command_table)
loader.loaders.append(command_loader) # this will be used later by the load_arguments method
return module_command_table, command_loader.command_group_table
@mock.patch('importlib.import_module', _mock_import_lib)
@mock.patch('pkgutil.iter_modules', _mock_iter_modules)
@mock.patch('azure.cli.core.commands._load_command_loader', _mock_load_command_loader)
@mock.patch('azure.cli.core.extension.get_extension_modname', _mock_extension_modname)
@mock.patch('azure.cli.core.extension.get_extensions', _mock_get_extensions)
def test_parser_error_spellchecker(self):
cli = DummyCli()
main_loader = MainCommandsLoader(cli)
cli.loader = main_loader
cli.loader.load_command_table(None)
parser = cli.parser_cls(cli)
parser.load_command_table(cli.loader)
logger_msgs = []
choice_lists = []
original_get_close_matches = difflib.get_close_matches
def mock_log_error(_, msg):
logger_msgs.append(msg)
def mock_get_close_matches(*args, **kwargs):
choice_lists.append(original_get_close_matches(*args, **kwargs))
def mock_ext_cmd_tree_load(*args, **kwargs):
return {"test": {"new-ext": {"create": "new-ext-name", "reset": "another-ext-name"}}}
def mock_add_extension(*args, **kwargs):
pass
# run multiple faulty commands and save error logs, as well as close matches
with mock.patch('logging.Logger.error', mock_log_error), \
mock.patch('difflib.get_close_matches', mock_get_close_matches):
faulty_cmd_args = [
'test module1 --opt enum_1',
'test extension1 --opt enum_1',
'test foo_bar --opt enum_3',
'test module --opt enum_3',
'test extension --opt enum_3'
]
for text in faulty_cmd_args:
with self.assertRaises(SystemExit):
parser.parse_args(text.split())
parser.parse_args('test module --opt enum_1'.split())
# assert the right type of error msg is logged for command vs argument parsing
self.assertEqual(len(logger_msgs), 5)
for msg in logger_msgs[:3]:
self.assertIn("CommandNotFoundError", msg)
for msg in logger_msgs[3:]:
self.assertIn("not a valid value for '--opt'.", msg)
# assert the right choices are matched as "close".
# If these don't hold, matching algorithm should be deemed flawed.
for choices in choice_lists[:2]:
self.assertEqual(len(choices), 1)
self.assertEqual(len(choice_lists[2]), 0)
for choices in choice_lists[3:]:
self.assertEqual(len(choices), 2)
for choice in ['enum_1', 'enum_2']:
self.assertIn(choice, choices)
# test dynamic extension install
with mock.patch('logging.Logger.error', mock_log_error), \
mock.patch('azure.cli.core.extension.operations.add_extension', mock_add_extension), \
mock.patch('azure.cli.core.parser.AzCliCommandParser._get_extension_command_tree', mock_ext_cmd_tree_load), \
mock.patch('azure.cli.core.parser.AzCliCommandParser._get_extension_use_dynamic_install_config', return_value='yes_without_prompt'), \
mock.patch('azure.cli.core.parser.AzCliCommandParser._get_extension_run_after_dynamic_install_config', return_value=False):
with self.assertRaises(SystemExit):
parser.parse_args('test new-ext create --opt enum_2'.split())
self.assertIn("Extension new-ext-name installed. Please rerun your command.", logger_msgs[5])
with self.assertRaises(SystemExit):
parser.parse_args('test new-ext reset pos1 pos2'.split()) # test positional args
self.assertIn("Extension another-ext-name installed. Please rerun your command.", logger_msgs[6])
@mock.patch('importlib.import_module', _mock_import_lib)
@mock.patch('pkgutil.iter_modules', _mock_iter_modules)
@mock.patch('azure.cli.core.commands._load_command_loader', _mock_load_command_loader)
@mock.patch('azure.cli.core.extension.get_extension_modname', _mock_extension_modname)
@mock.patch('azure.cli.core.extension.get_extensions', _mock_get_extensions)
def test_parser_failure_recovery_recommendations(self):
cli = DummyCli()
main_loader = MainCommandsLoader(cli)
cli.loader = main_loader
cli.loader.load_command_table(None)
parser = cli.parser_cls(cli)
parser.load_command_table(cli.loader)
recommendation_provider_parameters = []
version = cli.get_cli_version()
expected_recommendation_provider_parameters = [
# version, command, parameters, extension
ExpectedParameters(version, 'test module1', ['--opt'], False),
ExpectedParameters(version, 'test extension1', ['--opt'], False),
ExpectedParameters(version, 'foo_bar', ['--opt'], False),
ExpectedParameters(version, 'test module', ['--opt'], False),
ExpectedParameters(version, 'test extension', ['--opt'], True)
]
def mock_recommendation_provider(*args):
recommendation_provider_parameters.append(tuple(args))
return []
AzCliCommandParser.recommendation_provider = mock_recommendation_provider
faulty_cmd_args = [
'test module1 --opt enum_1',
'test extension1 --opt enum_1',
'test foo_bar --opt enum_3',
'test module --opt enum_3',
'test extension --opt enum_3'
]
for text in faulty_cmd_args:
with self.assertRaises(SystemExit):
parser.parse_args(text.split())
for i, parameters in enumerate(recommendation_provider_parameters):
version, command, parameters, extension = parameters
expected = expected_recommendation_provider_parameters[i]
self.assertEqual(expected.version, version)
self.assertIn(expected.command, command)
self.assertEqual(expected.parameters, parameters)
if expected.has_extension:
self.assertIsNotNone(extension)
else:
self.assertIsNone(extension)
class VerifyError(object): # pylint: disable=too-few-public-methods
def __init__(self, test, substr=None):
self.test = test
self.substr = substr
self.called = False
def __call__(self, message):
if self.substr:
self.test.assertTrue(message.find(self.substr) >= 0)
self.called = True
ExpectedParameters = namedtuple('ExpectedParameters', ['version', 'command', 'parameters', 'has_extension'])
if __name__ == '__main__':
unittest.main()
|
py
|
1a5e845c5ac907b890df75c0d7229eec0e04a2c2
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration code for a LibAFL-based fuzzer."""
import os
import shutil
import subprocess
from fuzzers import utils
def prepare_fuzz_environment(input_corpus):
"""Prepare to fuzz with a LibAFL-based fuzzer."""
os.environ['ASAN_OPTIONS'] = "abort_on_error=1:detect_leaks=0:"\
"malloc_context_size=0:symbolize=0:"\
"allocator_may_return_null=1:"\
"detect_odr_violation=0:handle_segv=0:"\
"handle_sigbus=0:handle_abort=0:"\
"handle_sigfpe=0:handle_sigill=0"
os.environ['UBSAN_OPTIONS'] = "abort_on_error=1:"\
"allocator_release_to_os_interval_ms=500:"\
"handle_abort=0:handle_segv=0:"\
"handle_sigbus=0:handle_sigfpe=0:"\
"handle_sigill=0:print_stacktrace=0:"\
"symbolize=0:symbolize_inline_frames=0"
# Create at least one non-empty seed to start.
utils.create_seed_file_for_empty_corpus(input_corpus)
def build(): # pylint: disable=too-many-branches,too-many-statements
"""Build benchmark."""
benchmark_name = os.environ['BENCHMARK'].lower()
if 'php' in benchmark_name:
copy_file = '/libafl_fuzzbench/grammars/php_nautilus.json'
elif 'ruby' in benchmark_name:
copy_file = '/libafl_fuzzbench/grammars/ruby_nautilus.json'
elif 'js' in benchmark_name or 'javascript' in benchmark_name:
copy_file = '/libafl_fuzzbench/grammars/js_nautilus.json'
else:
raise RuntimeError('Unsupported benchmark, unavailable grammar')
dest = os.path.join(os.environ['OUT'], 'grammar.json')
shutil.copy(copy_file, dest)
os.environ['CC'] = '/libafl_fuzzbench/target/release/token_level_cc'
os.environ['CXX'] = '/libafl_fuzzbench/target/release/token_level_cxx'
os.environ['ASAN_OPTIONS'] = 'abort_on_error=0:allocator_may_return_null=1'
os.environ['UBSAN_OPTIONS'] = 'abort_on_error=0'
cflags = ['--libafl']
utils.append_flags('CFLAGS', cflags)
utils.append_flags('CXXFLAGS', cflags)
os.environ['FUZZER_LIB'] = '/emptylib.a'
utils.build_benchmark()
def fuzz(input_corpus, output_corpus, target_binary):
"""Run fuzzer."""
prepare_fuzz_environment(input_corpus)
command = [target_binary]
grammar = os.path.join(os.environ['OUT'], 'grammar.json')
command += (['-o', output_corpus, '-g', grammar])
print(command)
subprocess.check_call(command, cwd=os.environ['OUT'])
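# Illustrative resulting invocation (actual paths depend on the benchmark build):
#   ['<target_binary>', '-o', '<output_corpus>', '-g', '$OUT/grammar.json']
# run from the $OUT directory, inheriting the ASAN/UBSAN options set above.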
|
py
|
1a5e84c793fca8c0bbe674b8249b861ec5417a37
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This may become an Astropy affiliated package.
"""
from __future__ import print_function, absolute_import, division, unicode_literals
|
py
|
1a5e860d46e64a9846e29a7b6e2663e974f1e315
|
# example of extracting bounding boxes from an annotation file
from xml.etree import ElementTree
from os import listdir
from os.path import isfile, join
from sys import stdout
mypath = '../annotations/xmls/'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
from os import walk
f = []
for (dirpath, dirnames, filenames) in walk(mypath):
f.extend(filenames)
break
# function to extract bounding boxes from an annotation file
def extract_boxes(filename):
# load and parse the file
tree = ElementTree.parse(filename)
# get the root of the document
root = tree.getroot()
# extract each bounding box
boxes = list()
for box in root.findall('.//bndbox'):
xmin = int(box.find('xmin').text)
ymin = int(box.find('ymin').text)
xmax = int(box.find('xmax').text)
ymax = int(box.find('ymax').text)
coors = [xmin, ymin, xmax, ymax]
boxes.append(coors)
# extract image dimensions
width = int(root.find('.//size/width').text)
height = int(root.find('.//size/height').text)
return boxes, width, height
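# Return shape, with illustrative values: ([[xmin, ymin, xmax, ymax], ...], width, height),
# e.g. ([[48, 240, 195, 371]], 500, 375) for a single box in a 500x375 image.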
# extract details from each annotation file and summarize them
for name in f:
    if 'xml' in name:
        boxes, w, h = extract_boxes(mypath + name)
        stdout.write(name)
        stdout.flush()
        print(boxes, w, h)
|
py
|
1a5e869a2be789ca7a06e603d24fdda3372a3e14
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 00:47:20 2021
@author: roberta
"""
class shortestSeekTimeFirst:
def processData(self, fileName):
moves = 0
fp = open(fileName, 'r')
lines = fp.readlines()
# number_of_cylinders = int(lines[0])
if len(lines) > 2:
init_head = int(lines[1])
req = lines[2:]
req = [int(r) for r in req]
number_of_req = len(req)
            while number_of_req > 0:
                distances = [abs(r - init_head) for r in req]
                min_distance = min(distances)
                i = distances.index(min_distance)
                # i, m = short_distance(req,init_head)
                moves += min_distance
                init_head = req[i]
                # Remove only the serviced request; filtering out every equal value
                # would also drop duplicate requests and desynchronize the counter.
                req.pop(i)
                number_of_req -= 1
fp.close()
print('SSF ', moves)
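# Expected input file layout (inferred from the parsing above):
#   line 1: number of cylinders (currently ignored)
#   line 2: initial head position
#   remaining lines: one cylinder request per line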
|
py
|
1a5e87ab20ea658ab2634a8e3d5be059c91a8de3
|
from django.contrib import admin
from .models import Book
class BookAdmin(admin.ModelAdmin):
pass
admin.site.register(Book, BookAdmin)
|
py
|
1a5e887087dbba552dcbcc997000092b7247cb41
|
# -*- coding:utf-8 -*-
import argparse
import os
import sys
import zipfile
import konlpy
from tqdm import tqdm
sys.path.append("..")
from common.mecab import encode_mecab
def pos_corpus(args, name, tagger):
"""
ํํ์ ๋ถ์๊ธฐ ๋ณ corpus ์์ฑ
:param args: input arguments
:param name: ํํ์ ๋ถ์๊ธฐ ์ด๋ฆ
:param tagger: ํํ์ ๋ถ์๊ธฐ ๊ฐ์ฒด
:return: ๊ฒฐ๊ณผ ํ์ผ
"""
output = os.path.join(args.data_dir, f"kowiki_{name}.txt")
with zipfile.ZipFile(f"{args.data_dir}/{args.zip}") as z:
total = 0
with z.open(args.txt) as i_f:
for _, _ in enumerate(i_f):
total += 1
with z.open(args.txt) as i_f:
with open(output, "w") as o_f:
for i, line in enumerate(tqdm(i_f, total=total, desc=f"{name}")):
line = line.strip().decode("UTF-8", "ignore")
if line:
tokens, _ = encode_mecab(tagger, line)
o_f.write(" ".join(tokens))
o_f.write("\n")
return output
def main(args):
"""
main function
:param args: input arguments
"""
output = pos_corpus(args, "mecab", konlpy.tag.Mecab())
basename = os.path.basename(output)
# zip
with zipfile.ZipFile(os.path.join(args.data_dir, f"{basename}.zip"), "w") as z:
z.write(output, os.path.basename(output))
os.remove(output)
def parse_args():
"""
build arguments
:return args: input arguments
"""
parser = argparse.ArgumentParser(description="Make mecab corpus arguments.")
parser.add_argument("--data_dir", type=str, default="kowiki", required=False, help="kowiki data directory")
parser.add_argument("--zip", type=str, default="kowiki.txt.zip", required=False, help="kowiki source zip file")
parser.add_argument("--txt", type=str, default="kowiki.txt", required=False, help="kowiki source txt file")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
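# Typical invocation with the default arguments (script name illustrative):
#   python pos_corpus.py --data_dir kowiki --zip kowiki.txt.zip --txt kowiki.txt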
|
py
|
1a5e8ad46301553e71ca3c3997b3a2da46e58d61
|
from PIL import Image
import numpy as np
img = Image.open("img2.jpg")
arr = np.array(img)
a = len(arr)
a1 = len(arr[1])
i = 0
while i < a:
    j = 0
    while j < a1:
        # Clamp the 10x10 block to the image bounds so dimensions that are
        # not multiples of 10 do not raise an IndexError at the edges.
        i_end = min(i + 10, a)
        j_end = min(j + 10, a1)
        block_pixels = (i_end - i) * (j_end - j)
        s = 0
        for n in range(i, i_end):
            for n1 in range(j, j_end):
                s += (arr[n][n1][0]/3 + arr[n][n1][1]/3 + arr[n][n1][2]/3)
        # Average brightness of the block.
        s = int(s // block_pixels)
        for n in range(i, i_end):
            for n1 in range(j, j_end):
                # Quantize to a multiple of 50 and write it to every channel.
                arr[n][n1][0] = int(s // 50) * 50
                arr[n][n1][1] = int(s // 50) * 50
                arr[n][n1][2] = int(s // 50) * 50
        j = j + 10
    i = i + 10
res = Image.fromarray(arr)
res.save('res.jpg')
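# The saved res.jpg is a pixelated copy of img2.jpg: each 10x10 block is replaced
# by its average brightness, quantized to a multiple of 50 across all channels.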
|
py
|
1a5e8bc803e3c1a36e5be87f6b144a5d13223f1f
|
# ----------------------------------------------------------------
# Copyright 2017 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""test_restconf_provider.py
RestconfServiceProvider test
"""
from __future__ import absolute_import
import sys
import unittest
from ydk.providers import NetconfServiceProvider
from test_utils import ParametrizedTestCase
from test_utils import get_device_info
class SanityTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ncc = NetconfServiceProvider(
cls.hostname,
cls.username,
cls.password,
cls.port,
cls.protocol,
cls.on_demand,
cls.common_cache,
cls.timeout)
def test_get_session(self):
session = self.ncc.get_session()
self.assertEqual(session is not None, True)
def test_get_encoding(self):
encoding = self.ncc.get_encoding()
self.assertEqual(encoding is not None, True)
def test_get_capabilities(self):
capabilities = self.ncc.get_capabilities()
self.assertEqual(capabilities is not None, True)
if __name__ == '__main__':
device, non_demand, common_cache, timeout = get_device_info()
suite = unittest.TestSuite()
suite.addTest(ParametrizedTestCase.parametrize(
SanityTest,
device=device,
non_demand=non_demand,
common_cache=common_cache,
timeout=timeout))
ret = not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
sys.exit(ret)
|
py
|
1a5e8d471edd17ecee39d9afbc2fea32e3f9de73
|
# Load the custom app config
default_app_config = 'heron_visual.apps.HeronVisualConfig'
|