max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
phy/plot/gloo/shader.py | fjflores/phy | 118 | 12622995 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 <NAME>. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
"""
A Shader is a user-defined program that runs on some stage of the graphics
processor, executing one of the programmable stages of the rendering pipeline.
Read more about shaders on the `OpenGL Wiki <https://www.opengl.org/wiki/Shader>`_.
**Example usage**
.. code:: python
vertex = '''
attribute vec2 position;
void main (void)
{
gl_Position = vec4(0.85*position, 0.0, 1.0);
} '''
fragment = '''
void main(void)
{
gl_FragColor = vec4(1.0,1.0,0.0,1.0);
} '''
quad = gloo.Program(vertex, fragment, count=4)
quad['position'] = [(-1,-1), (-1,+1), (+1,-1), (+1,+1)]
"""
import logging
import os.path
import re
from . import gl
from .snippet import Snippet
from .globject import GLObject
from .parser import (remove_comments, preprocess,
get_uniforms, get_attributes, get_hooks)
log = logging.getLogger(__name__)
# ------------------------------------------------------------ Shader class ---
class Shader(GLObject):
"""
Abstract shader class.
:param gl.GLEnum target:
* gl.GL_VERTEX_SHADER
* gl.GL_FRAGMENT_SHADER
* gl.GL_GEOMETRY_SHADER
    :param str code: Shader code, or the path of a file containing shader code
    .. note::
       If ``code`` is the path of an existing file, the file is read and its
       contents are used as the shader source. You can also get shader code
       from the library module.
"""
_gtypes = {
'float': gl.GL_FLOAT,
'vec2': gl.GL_FLOAT_VEC2,
'vec3': gl.GL_FLOAT_VEC3,
'vec4': gl.GL_FLOAT_VEC4,
'int': gl.GL_INT,
'ivec2': gl.GL_INT_VEC2,
'ivec3': gl.GL_INT_VEC3,
'ivec4': gl.GL_INT_VEC4,
'bool': gl.GL_BOOL,
'bvec2': gl.GL_BOOL_VEC2,
'bvec3': gl.GL_BOOL_VEC3,
'bvec4': gl.GL_BOOL_VEC4,
'mat2': gl.GL_FLOAT_MAT2,
'mat3': gl.GL_FLOAT_MAT3,
'mat4': gl.GL_FLOAT_MAT4,
'sampler1D': gl.GL_SAMPLER_1D,
'sampler2D': gl.GL_SAMPLER_2D,
'samplerCube': gl.GL_SAMPLER_CUBE,
}
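    # Usage sketch (hypothetical, assumes an active OpenGL context and the enums
    # exposed by the local gl module):
    #   vert = Shader(gl.GL_VERTEX_SHADER, "attribute vec2 position; ...")
    #   vert = Shader(gl.GL_VERTEX_SHADER, "/path/to/shader.vert")  # read from a file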
def __init__(self, target, code, version="120"):
"""
Initialize the shader.
"""
GLObject.__init__(self)
self._target = target
self._snippets = {}
self._version = version
if os.path.isfile(code):
with open(str(code), 'rt') as file:
self._code = preprocess(file.read())
self._source = os.path.basename(code)
else:
self._code = preprocess(str(code))
self._source = '<string>'
self._hooked = self._code
self._need_update = True
self._program = None
def __setitem__(self, name, snippet):
"""
Set a snippet on the given hook in the source code.
"""
self._snippets[name] = snippet
def _replace_hooks(self, name, snippet):
#re_hook = r"(?P<hook>%s)(\.(?P<subhook>\w+))?" % name
re_hook = r"(?P<hook>%s)(\.(?P<subhook>[\.\w\!]+))?" % name
re_args = r"(\((?P<args>[^<>]+)\))?"
# re_hooks = re.compile("\<" + re_hook + re_args + "\>", re.VERBOSE)
pattern = r"\<" + re_hook + re_args + r"\>"
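        # The combined pattern matches hook expressions embedded in the GLSL
        # source, of the form <hook>, <hook.subhook> or <hook.subhook(args)>.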
# snippet is not a Snippet (it should be a string)
if not isinstance(snippet, Snippet):
def replace(match):
# hook = match.group('hook')
subhook = match.group('subhook')
if subhook:
return snippet + '.' + subhook
return snippet
self._hooked = re.sub(pattern, replace, self._hooked)
return
# Store snippet code for later inclusion
# self._snippets.append(snippet)
# Replace expression of type <hook.subhook(args)>
def replace_with_args(match):
#hook = match.group('hook')
subhook = match.group('subhook')
#args = match.group('args')
if subhook and '.' in subhook:
s = snippet
for item in subhook.split('.')[:-1]:
if isinstance(s[item], Snippet):
s = s[item]
subhook = subhook.split('.')[-1]
                # If the last snippet name ends with "!", the snippet is called
                # with the given arguments instead of the stored ones.
# If S = A(B(C))("t"):
# <S> -> A(B(C("t")))
# <S!>(t) -> A("t")
override = False
if subhook[-1] == "!":
override = True
subhook = subhook[:-1]
# Do we have a class alias ? We don't return it yet since we
# need its translation from the symbol table
if subhook in s.aliases.keys():
subhook = s.aliases[subhook]
# If subhook is a variable (uniform/attribute/varying)
if subhook in s.globals:
return s.globals[subhook]
return s.mangled_call(subhook, match.group("args"), override=override)
# If subhook is a variable (uniform/attribute/varying)
if subhook in snippet.globals:
return snippet.globals[subhook]
return snippet.mangled_call(subhook, match.group("args"))
self._hooked = re.sub(pattern, replace_with_args, self._hooked)
def reset(self):
""" Reset shader snippets """
self._snippets = {}
@property
def code(self):
""" Shader source code (built from original and snippet codes) """
# Last minute hook settings
self._hooked = self._code
for name, snippet in self._snippets.items():
self._replace_hooks(name, snippet)
snippet_code = "// --- Snippets code : start --- //\n"
deps = []
for snippet in self._snippets.values():
if isinstance(snippet, Snippet):
deps.extend(snippet.dependencies)
for snippet in list(set(deps)):
snippet_code += snippet.mangled_code()
snippet_code += "// --- Snippets code : end --- //\n"
return snippet_code + self._hooked
def _create(self):
""" Create the shader """
log.log(5, "GPU: Creating shader")
# Check if we have something to compile
if not self.code:
raise RuntimeError("No code has been given")
# Check that shader object has been created
if self._handle <= 0:
self._handle = gl.glCreateShader(self._target)
if self._handle <= 0:
raise RuntimeError("Cannot create shader object")
    def _update(self):
        """ Compile the source and check that everything is ok """
log.log(5, "GPU: Compiling shader")
if len(self.hooks):
hooks = [name for name, snippet in self.hooks]
error = "Shader has pending hooks (%s), cannot compile" % hooks
raise RuntimeError(error)
# Set shader version
code = ("#version %s\n" % self._version) + self.code
gl.glShaderSource(self._handle, code)
# Actual compilation
gl.glCompileShader(self._handle)
status = gl.glGetShaderiv(self._handle, gl.GL_COMPILE_STATUS)
if not status:
error = gl.glGetShaderInfoLog(self._handle).decode()
parsed_errors = self._parse_error(error)
for lineno, mesg in parsed_errors:
self._print_error(mesg, lineno - 1)
raise RuntimeError("Shader compilation error")
def _delete(self):
""" Delete shader from GPU memory (if it was present). """
gl.glDeleteShader(self._handle)
_ERROR_RE = [
# Nvidia
# 0(7): error C1008: undefined variable "MV"
# 0(2) : error C0118: macros prefixed with '__' are reserved
re.compile(
r'^\s*(\d+)\((?P<line_no>\d+)\)\s*:\s(?P<error_msg>.*)', re.MULTILINE),
# ATI / Intel
# ERROR: 0:131: '{' : syntax error parse error
re.compile(
r'^\s*ERROR:\s(\d+):(?P<line_no>\d+):\s(?P<error_msg>.*)', re.MULTILINE),
# Nouveau
# 0:28(16): error: syntax error, unexpected ')', expecting '('
re.compile(
r'^\s*(\d+):(?P<line_no>\d+)\((\d+)\):\s(?P<error_msg>.*)', re.MULTILINE)
]
def _parse_error(self, error):
"""
        Parse a GLSL error log and extract the line number and description of
        each reported error.
Parameters
----------
error : str
An error string as returned by the compilation process
"""
for error_re in self._ERROR_RE:
matches = list(error_re.finditer(error))
if matches:
errors = [(int(m.group('line_no')), m.group('error_msg'))
for m in matches]
return sorted(errors, key=lambda elem: elem[0])
else:
raise ValueError('Unknown GLSL error format:\n{}\n'.format(error))
def _print_error(self, error, lineno):
"""
Print error and show the faulty line + some context
Parameters
----------
error : str
            An error string as returned by the compilation process
lineno: int
Line where error occurs
"""
lines = self.code.split('\n')
start = max(0, lineno - 3)
end = min(len(lines), lineno + 3)
print('Error in %s' % (repr(self)))
print(' -> %s' % error)
print()
if start > 0:
print(' ...')
for i, line in enumerate(lines[start:end]):
if (i + start) == lineno:
print(' %03d %s' % (i + start, line))
else:
if len(line):
print(' %03d %s' % (i + start, line))
if end < len(lines):
print(' ...')
print()
@property
def hooks(self):
""" Shader hooks (place where snippets can be inserted) """
        # Get hooks from the hooked code so only pending (unfilled) hooks are reported
code = remove_comments(self._hooked)
return get_hooks(code)
@property
def uniforms(self):
""" Shader uniforms obtained from source code """
code = remove_comments(self.code)
gtypes = Shader._gtypes
return [(n, gtypes[t]) for (n, t) in get_uniforms(code)]
@property
def attributes(self):
""" Shader attributes obtained from source code """
code = remove_comments(self.code)
gtypes = Shader._gtypes
return [(n, gtypes[t]) for (n, t) in get_attributes(code)]
# ------------------------------------------------------ VertexShader class ---
class VertexShader(Shader):
""" Vertex shader class """
def __init__(self, code=None, version="120"):
Shader.__init__(self, gl.GL_VERTEX_SHADER, code, version)
@property
def code(self):
code = super(VertexShader, self).code
code = "#define _GLUMPY__VERTEX_SHADER__\n" + code
return code
def __repr__(self):
return "Vertex shader %d (%s)" % (self._id, self._source)
class FragmentShader(Shader):
""" Fragment shader class """
def __init__(self, code=None, version="120"):
Shader.__init__(self, gl.GL_FRAGMENT_SHADER, code, version)
@property
def code(self):
code = super(FragmentShader, self).code
code = "#define _GLUMPY__FRAGMENT_SHADER__\n" + code
return code
def __repr__(self):
return "Fragment shader %d (%s)" % (self._id, self._source)
class GeometryShader(Shader):
""" Geometry shader class.
:param str code: Shader code or a filename containing shader code
:param int vertices_out: Number of output vertices
:param gl.GLEnum input_type:
* GL_POINTS
* GL_LINES, GL_LINE_STRIP, GL_LINE_LIST
* GL_LINES_ADJACENCY, GL_LINE_STRIP_ADJACENCY
* GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN
* GL_TRIANGLES_ADJACENCY, GL_TRIANGLE_STRIP_ADJACENCY
:param gl.GLEnum output_type:
* GL_POINTS, GL_LINES, GL_LINE_STRIP
* GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN
"""
def __init__(self, code=None,
vertices_out=None, input_type=None, output_type=None, version="120"):
Shader.__init__(self, gl.GL_GEOMETRY_SHADER_EXT, code, version)
self._vertices_out = vertices_out
# GL_POINTS
# GL_LINES, GL_LINE_STRIP, GL_LINE_LIST
# GL_LINES_ADJACENCY, GL_LINE_STRIP_ADJACENCY
# GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN
# GL_TRIANGLES_ADJACENCY, GL_TRIANGLE_STRIP_ADJACENCY
self._input_type = input_type
# GL_POINTS, GL_LINES, GL_LINE_STRIP
# GL_TRIANGLES, GL_TRIANGLE_STRIP, GL_TRIANGLE_FAN
self._output_type = output_type
@property
def vertices_out(self):
return self._vertices_out
@vertices_out.setter
def vertices_out(self, value):
self._vertices_out = value
@property
    def input_type(self):
        """ Input primitive type for the geometry shader """
return self._input_type
@input_type.setter
def input_type(self, value):
self._input_type = value
@property
def output_type(self):
return self._output_type
@output_type.setter
def output_type(self, value):
self._output_type = value
def __repr__(self):
return "Geometry shader %d (%s)" % (self._id, self._source)
|
tensorboard/plugins/graph/keras_util_test.py | Digitaltransform/tensorboard | 6,139 | 12622996 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras Utility."""
import json
import tensorflow as tf
from tensorboard.plugins.graph import keras_util
class KerasUtilTest(tf.test.TestCase):
def assertGraphDefToModel(self, expected_proto, model):
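        # Serialize the Keras model to its JSON config, convert that config into
        # a GraphDef with keras_util, and compare it to the expected proto text.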
model_config = json.loads(model.to_json())
self.assertProtoEquals(
expected_proto, keras_util.keras_model_to_graph_def(model_config)
)
def DISABLED_test_keras_model_to_graph_def_sequential_model(self):
expected_proto = """
node {
name: "sequential/dense_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential/dense"
input: "sequential/dense_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential/my_relu"
input: "sequential/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
node {
name: "sequential/dense_1"
input: "sequential/my_relu"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential/activation"
input: "sequential/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
"""
model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(32, input_shape=(784,)),
tf.keras.layers.Activation("relu", name="my_relu"),
tf.keras.layers.Dense(10),
tf.keras.layers.Activation("softmax"),
]
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_functional_model(self):
expected_proto = """
node {
name: "model/functional_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/dense"
input: "model/functional_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/dense_1"
input: "model/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/dense_2"
input: "model/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(784,), name="functional_input")
d0 = tf.keras.layers.Dense(64, activation="relu")
d1 = tf.keras.layers.Dense(64, activation="relu")
d2 = tf.keras.layers.Dense(64, activation="relu")
model = tf.keras.models.Model(
inputs=inputs, outputs=d2(d1(d0(inputs))), name="model"
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_functional_model_with_cycle(self):
expected_proto = """
node {
name: "model/cycle_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/dense"
input: "model/cycle_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/dense_1"
input: "model/dense"
input: "model/dense_2"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/dense_2"
input: "model/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(784,), name="cycle_input")
d0 = tf.keras.layers.Dense(64, activation="relu")
d1 = tf.keras.layers.Dense(64, activation="relu")
d2 = tf.keras.layers.Dense(64, activation="relu")
model = tf.keras.models.Model(
inputs=inputs, outputs=d1(d2(d1(d0(inputs)))), name="model"
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_lstm_model(self):
expected_proto = """
node {
name: "model/lstm_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/simple_rnn"
input: "model/lstm_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "SimpleRNN"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(None, 5), name="lstm_input")
encoder = tf.keras.layers.SimpleRNN(256)
model = tf.keras.models.Model(
inputs=inputs, outputs=encoder(inputs), name="model"
)
self.assertGraphDefToModel(expected_proto, model)
def DISABLED_test_keras_model_to_graph_def_nested_sequential_model(self):
expected_proto = """
node {
name: "sequential_2/sequential_1_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential_2/sequential_1/sequential_input"
input: "sequential_2/sequential_1_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential_2/sequential_1/sequential/dense_input"
input: "sequential_2/sequential_1/sequential_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential_2/sequential_1/sequential/dense"
input: "sequential_2/sequential_1/sequential/dense_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential_2/sequential_1/sequential/activation"
input: "sequential_2/sequential_1/sequential/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
node {
name: "sequential_2/sequential_1/my_relu"
input: "sequential_2/sequential_1/sequential/activation"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
node {
name: "sequential_2/dense_1"
input: "sequential_2/sequential_1/my_relu"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential_2/activation_1"
input: "sequential_2/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
"""
sub_sub_model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(32, input_shape=(784,)),
tf.keras.layers.Activation("relu"),
]
)
sub_model = tf.keras.models.Sequential(
[
sub_sub_model,
tf.keras.layers.Activation("relu", name="my_relu"),
]
)
model = tf.keras.models.Sequential(
[
sub_model,
tf.keras.layers.Dense(10),
tf.keras.layers.Activation("softmax"),
]
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_functional_multi_inputs(self):
expected_proto = """
node {
name: "model/main_input"
attr {
key: "dtype"
value {
type: DT_INT32
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/embedding"
input: "model/main_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Embedding"
}
}
}
node {
name: "model/simple_rnn"
input: "model/embedding"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "SimpleRNN"
}
}
}
node {
name: "model/aux_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/concatenate"
input: "model/simple_rnn"
input: "model/aux_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Concatenate"
}
}
}
node {
name: "model/dense"
input: "model/concatenate"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/main_output"
input: "model/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/aux_output"
input: "model/simple_rnn"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
main_input = tf.keras.layers.Input(
shape=(100,), dtype="int32", name="main_input"
)
x = tf.keras.layers.Embedding(
output_dim=512, input_dim=10000, input_length=100
)(main_input)
rnn_out = tf.keras.layers.SimpleRNN(32)(x)
auxiliary_output = tf.keras.layers.Dense(
1, activation="sigmoid", name="aux_output"
)(rnn_out)
auxiliary_input = tf.keras.layers.Input(shape=(5,), name="aux_input")
x = tf.keras.layers.concatenate([rnn_out, auxiliary_input])
x = tf.keras.layers.Dense(64, activation="relu")(x)
main_output = tf.keras.layers.Dense(
1, activation="sigmoid", name="main_output"
)(x)
model = tf.keras.models.Model(
inputs=[main_input, auxiliary_input],
outputs=[main_output, auxiliary_output],
name="model",
)
self.assertGraphDefToModel(expected_proto, model)
def test_keras_model_to_graph_def_functional_model_as_layer(self):
expected_proto = """
node {
name: "model_1/sub_func_input_2"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model_1/sub_func_input_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model_1/model/sub_func_input_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model_1/model/sub_func_input_2"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model_1/model/dense"
input: "model_1/model/sub_func_input_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model_1/model/dense_1"
input: "model_1/model/sub_func_input_2"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model_1/concatenate"
input: "model_1/model/dense"
input: "model_1/model/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Concatenate"
}
}
}
node {
name: "model_1/dense_2"
input: "model_1/concatenate"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
inputs1 = tf.keras.layers.Input(shape=(784,), name="sub_func_input_1")
inputs2 = tf.keras.layers.Input(shape=(784,), name="sub_func_input_2")
d0 = tf.keras.layers.Dense(64, activation="relu")
d1 = tf.keras.layers.Dense(64, activation="relu")
d2 = tf.keras.layers.Dense(64, activation="relu")
sub_model = tf.keras.models.Model(
inputs=[inputs2, inputs1],
outputs=[d0(inputs1), d1(inputs2)],
name="model",
)
main_outputs = d2(
tf.keras.layers.concatenate(sub_model([inputs2, inputs1]))
)
model = tf.keras.models.Model(
inputs=[inputs2, inputs1],
outputs=main_outputs,
name="model_1",
)
self.assertGraphDefToModel(expected_proto, model)
def DISABLED_test_keras_model_to_graph_def_functional_sequential_model(
self,
):
expected_proto = """
node {
name: "model/func_seq_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/sequential/dense_input"
input: "model/func_seq_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "model/sequential/dense"
input: "model/sequential/dense_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "model/sequential/my_relu"
input: "model/sequential/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
node {
name: "model/dense_1"
input: "model/sequential/my_relu"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(784,), name="func_seq_input")
sub_model = tf.keras.models.Sequential(
[
tf.keras.layers.Dense(32, input_shape=(784,)),
tf.keras.layers.Activation("relu", name="my_relu"),
]
)
dense = tf.keras.layers.Dense(64, activation="relu")
model = tf.keras.models.Model(
inputs=inputs, outputs=dense(sub_model(inputs))
)
self.assertGraphDefToModel(expected_proto, model)
def DISABLED_test_keras_model_to_graph_def_sequential_functional_model(
self,
):
expected_proto = """
node {
name: "sequential/model_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential/model/func_seq_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "InputLayer"
}
}
}
node {
name: "sequential/model/dense"
input: "sequential/model/func_seq_input"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential/dense_1"
input: "sequential/model/dense"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Dense"
}
}
}
node {
name: "sequential/my_relu"
input: "sequential/dense_1"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "keras_class"
value {
s: "Activation"
}
}
}
"""
inputs = tf.keras.layers.Input(shape=(784,), name="func_seq_input")
dense = tf.keras.layers.Dense(64, activation="relu")
sub_model = tf.keras.models.Model(inputs=inputs, outputs=dense(inputs))
model = tf.keras.models.Sequential(
[
sub_model,
tf.keras.layers.Dense(32, input_shape=(784,)),
tf.keras.layers.Activation("relu", name="my_relu"),
]
)
self.assertGraphDefToModel(expected_proto, model)
if __name__ == "__main__":
tf.test.main()
|
macdivert/macdivert.py | FinalTheory/WirelessNetworkReproduction | 263 | 12623012 |
# encoding: utf8
import os
import Queue
import threading
import libdivert as nids
from copy import deepcopy
from ctypes import cdll
from enum import Defaults, Flags
from ctypes import POINTER, pointer, cast
from ctypes import (c_void_p, c_uint32, c_char_p, c_int, CFUNCTYPE,
create_string_buffer, c_ushort, c_ssize_t, c_char)
from models import ProcInfo, IpHeader, PacketHeader, DivertHandleRaw
__author__ = '<EMAIL>'
class MacDivert:
divert_argtypes = {
# divert functions
"divert_create": [c_int, c_uint32],
"divert_activate": [POINTER(DivertHandleRaw)],
"divert_update_ipfw": [POINTER(DivertHandleRaw), c_char_p],
"divert_loop": [POINTER(DivertHandleRaw), c_int],
"divert_is_looping": [POINTER(DivertHandleRaw)],
"divert_loop_stop": [POINTER(DivertHandleRaw)],
"divert_loop_wait": [POINTER(DivertHandleRaw)],
"divert_reinject": [POINTER(DivertHandleRaw), c_char_p, c_ssize_t, c_char_p],
"divert_close": [POINTER(DivertHandleRaw)],
"divert_is_inbound": [c_char_p, c_void_p],
"divert_is_outbound": [c_char_p],
"divert_set_callback": [c_void_p, c_void_p, c_void_p],
"divert_init_pcap": [c_void_p],
"divert_dump_pcap": [c_void_p, c_void_p],
"divert_find_tcp_stream": [c_char_p],
"divert_set_device": [c_void_p, c_char_p],
# util functions
"divert_load_kext": [c_char_p],
"divert_unload_kext": [],
"divert_dump_packet": [c_char_p, POINTER(PacketHeader), c_uint32, c_char_p],
# note that we use char[] to store the ipfw rule for convenience
# although the type is mismatched, the length of pointer variable is the same
# so this would work
"ipfw_compile_rule": [c_char_p, c_ushort, c_ushort, c_char_p, c_char_p],
"ipfw_print_rule": [c_char_p],
"ipfw_flush": [c_char_p],
}
divert_restypes = {
"divert_create": POINTER(DivertHandleRaw),
"divert_activate": c_int,
"divert_update_ipfw": c_int,
"divert_loop": c_int,
"divert_is_looping": c_int,
"divert_loop_stop": None,
"divert_loop_wait": None,
"divert_reinject": c_ssize_t,
"divert_close": c_int,
"divert_is_inbound": c_int,
"divert_is_outbound": c_int,
"divert_set_callback": c_int,
"divert_init_pcap": c_int,
"divert_dump_pcap": c_int,
"divert_find_tcp_stream": c_void_p,
"divert_set_device": c_int,
"divert_load_kext": c_int,
"divert_unload_kext": c_int,
"divert_dump_packet": c_char_p,
"ipfw_compile_rule": c_int,
"ipfw_print_rule": None,
"ipfw_flush": c_int,
}
def __init__(self, lib_path='', kext_path='', encoding='utf-8'):
"""
Constructs a new driver instance
        :param lib_path: The OS path from which to load libdivert.so
        :param kext_path: The OS path from which to load the kernel extension
:param encoding: The character encoding to use (defaults to UTF-8)
:return:
"""
if not (lib_path and os.path.exists(lib_path) and os.path.isfile(lib_path)):
lib_path = self._find_lib()
if not lib_path:
raise RuntimeError("Unable to find libdivert.so")
if not (kext_path and os.path.exists(kext_path) and os.path.isdir(kext_path)):
kext_path = self._find_kext()
if not kext_path:
raise RuntimeError("Unable to find PacketPID.kext")
self.dll_path = lib_path
self.kext_path = kext_path
self.encoding = encoding
self._load_lib(lib_path)
self._load_kext(kext_path)
@staticmethod
def _find_lib():
module_path = os.sep.join(__file__.split(os.sep)[0:-1])
return os.path.join(module_path, 'libdivert.so')
@staticmethod
def _find_kext():
module_path = os.sep.join(__file__.split(os.sep)[0:-1])
return os.path.join(module_path, 'PacketPID.kext')
def _load_lib(self, lib_path):
"""
        Load the libdivert library and configure its argument types.
        :param lib_path: The OS path from which to load libdivert.so
:return: None
"""
self._lib = cdll.LoadLibrary(lib_path)
# set the types of parameters
for func_name, argtypes in self.divert_argtypes.items():
# first check if function exists
if not hasattr(self._lib, func_name):
raise RuntimeError("Not a valid libdivert library")
setattr(getattr(self._lib, func_name), "argtypes", argtypes)
# set the types of return value
for func_name, restype in self.divert_restypes.items():
setattr(getattr(self._lib, func_name), "restype", restype)
@staticmethod
def chown_recursive(path, uid, gid):
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for item in dirs:
os.chown(os.path.join(root, item), uid, gid)
for item in files:
os.chown(os.path.join(root, item), uid, gid)
def _load_kext(self, kext_path):
uid, gid = os.stat(kext_path).st_uid, os.stat(kext_path).st_gid
self.chown_recursive(kext_path, 0, 0)
ret_val = self._lib.divert_load_kext(kext_path)
self.chown_recursive(kext_path, uid, gid)
if ret_val != 0:
raise OSError("Could not load kernel extension for libdivert")
def get_reference(self):
"""
Return a reference to the internal dylib
:return: The dylib object
"""
return self._lib
def open_handle(self, port=0, filter_str="", flags=0, count=-1):
"""
        Return a new, already opened handle.
        :param port: the divert port number; use 0 to auto-select an unused port
        :param filter_str: the packet filter string (ipfw rule syntax)
        :param flags: flags selecting the divert mode
        :param count: how many packets to divert; a negative number means unlimited
        :return: An opened DivertHandle instance
"""
return DivertHandle(self, port, filter_str, flags, count, self.encoding).open()
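# Usage sketch (hypothetical filter; the syntax follows ipfw rules):
#   handle = MacDivert().open_handle(filter_str="ip from any to any via en0")
#   packet = handle.read()
#   handle.write(packet)
#   handle.close()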
class DivertHandle:
cmp_func_type = CFUNCTYPE(None, c_void_p, POINTER(ProcInfo),
POINTER(c_char), POINTER(c_char))
def __init__(self, libdivert=None, port=0, filter_str="",
flags=0, count=-1, encoding='utf-8'):
if not libdivert:
# Try to construct by loading from the library path
self._libdivert = MacDivert()
else:
self._libdivert = libdivert
self._lib = self._libdivert.get_reference()
self._port = port
self._count = count
self._filter = filter_str.encode(encoding)
self._flags = flags
self.encoding = encoding
self.packet_queue = Queue.Queue()
self.num_queued = 0
# create divert handle
self._handle = self._lib.divert_create(self._port, self._flags)
def ip_callback(args, proc_info, ip_data, sockaddr):
packet = Packet()
# check if IP packet is legal
ptr_packet = cast(ip_data, POINTER(IpHeader))
header_len = ptr_packet[0].get_header_length()
packet_length = ptr_packet[0].get_total_length()
if packet_length > 0 and header_len > 0:
packet.valid = True
# try to extract the process information
if proc_info[0].pid != -1 or proc_info[0].epid != -1:
packet.proc = deepcopy(proc_info[0])
# save the IP packet data
packet.ip_data = ip_data[0:packet_length]
# save the sockaddr info for re-inject
packet.sockaddr = sockaddr[0:Defaults.SOCKET_ADDR_SIZE]
self.packet_queue.put(packet)
# convert callback function type into C type
self.ip_callback = self.cmp_func_type(ip_callback)
# and register it into divert handle
self._lib.divert_set_callback(self._handle, self.ip_callback, self._handle)
# finally activate the divert handle
if self._lib.divert_activate(self._handle) != 0:
raise RuntimeError(self._handle[0].errmsg)
self._cleaned = False
self.thread = None
def __del__(self):
self.close()
if not self._cleaned:
self._cleaned = True
            # close the divert handle and free its resources
if self._lib.divert_close(self._handle) != 0:
raise RuntimeError(self._handle[0].errmsg)
def ipfw_compile_rule(self, rule_str, port):
errmsg = create_string_buffer(Defaults.DIVERT_ERRBUF_SIZE)
rule_data = create_string_buffer(Defaults.IPFW_RULE_SIZE)
if self._lib.ipfw_compile_rule(rule_data, port, rule_str, errmsg) != 0:
raise RuntimeError("Error rule: %s" % errmsg.value)
return rule_data[0:Defaults.IPFW_RULE_SIZE]
def ipfw_print_rule(self, rule_data):
self._lib.ipfw_print_rule(rule_data)
@property
def eof(self):
"""
:return: True if there is no data to read at this time
"""
return self.packet_queue.qsize() == 0
@property
def closed(self):
"""
:return: True if there is no data to read any more
"""
if self.thread is not None:
if not self.thread.isAlive():
self.thread = None
return self.thread is None and self.eof
def close(self):
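        # Unblock any read() calls waiting on the queue by feeding them
        # placeholder packets marked invalid, then stop the divert loop.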
for i in range(self.num_queued):
packet = Packet()
packet.valid = False
self.packet_queue.put(packet)
if self.thread is not None:
# stop the event loop only when thread is alive
if self.thread.isAlive():
self._lib.divert_loop_stop(self._handle)
self.thread.join()
self.thread = None
def open(self):
def _loop():
self._lib.divert_loop(self._handle, self._count)
# set the ipfw filter
if self._filter:
self.set_filter(self._filter)
# and start background thread
self.thread = threading.Thread(target=_loop)
self.thread.start()
return self
def open_pcap(self, filename):
return PcapHandle(filename, self._libdivert)
def set_filter(self, filter_str):
if filter_str:
if self._lib.divert_update_ipfw(self._handle,
filter_str) != 0:
raise RuntimeError("Error rule: %s" %
self._handle[0].errmsg)
else:
return True
else:
return False
def read(self, *args, **kwargs):
self.num_queued += 1
res = self.packet_queue.get(*args, **kwargs)
self.num_queued -= 1
return res
def write(self, packet_obj):
if self.closed:
raise RuntimeError("Divert handle closed.")
if not packet_obj or not packet_obj.sockaddr or not packet_obj.ip_data:
raise RuntimeError("Invalid packet data.")
return self._lib.divert_reinject(self._handle, packet_obj.ip_data,
-1, packet_obj.sockaddr)
def is_inbound(self, sockaddr):
return self._lib.divert_is_inbound(sockaddr, None) != 0
def is_outbound(self, sockaddr):
return self._lib.divert_is_outbound(sockaddr) != 0
def find_tcp_stream(self, packet):
if self.closed:
raise RuntimeError("Divert socket is closed.")
stream_p = self._lib.divert_find_tcp_stream(packet.ip_data)
if stream_p:
return nids.convert(c_void_p(stream_p))
# Context Manager protocol
def __enter__(self):
return self.open()
def __exit__(self, *args):
self.close()
class PcapHandle:
libc_argtypes = {
"fopen": [c_char_p, c_char_p],
"fclose": [c_void_p],
}
libc_restypes = {
'fopen': c_void_p,
'fclose': c_int,
}
def __init__(self, filename=None, libdivert=None):
self.filename = filename
self._load_libc()
self._lib = libdivert.get_reference()
self._errmsg = create_string_buffer(Defaults.DIVERT_ERRBUF_SIZE)
self._fp = self._libc.fopen(filename, 'wb')
if not self._fp:
raise RuntimeError("Couldn't create file %s" % self.filename)
if self._lib.divert_init_pcap(self._fp, self._errmsg) != 0:
raise RuntimeError("Couldn't init file %s: %s" %
(self.filename, self._errmsg.value))
def __del__(self):
if self._fp:
self.close()
def _load_libc(self):
self._libc = cdll.LoadLibrary('libc.dylib')
# set the types of parameters
for func_name, argtypes in self.libc_argtypes.items():
if not hasattr(self._libc, func_name):
raise RuntimeError("Not a valid libC library")
setattr(getattr(self._libc, func_name), "argtypes", argtypes)
# set the types of return value
for func_name, restype in self.libc_restypes.items():
setattr(getattr(self._libc, func_name), "restype", restype)
def write(self, packet):
if self._lib.divert_dump_pcap(packet.ip_data,
self._fp, self._errmsg) != 0:
raise RuntimeError("Couldn't write into %s: %s" %
(self.filename, self._errmsg.value))
def close(self):
if self._fp:
if self._libc.fclose(self._fp) == 0:
self._fp = None
else:
raise RuntimeError("File %s could not be closed!" % self.filename)
else:
raise RuntimeError("File %s is not opened!" % self.filename)
class Packet:
def __init__(self):
self.proc = None
self.ip_data = None
self.sockaddr = None
self.valid = False
self.flag = 0
def __setitem__(self, key, value):
if key == 'proc':
self.proc = value
elif key == 'ip_data':
self.ip_data = value
elif key == 'sockaddr':
self.sockaddr = value
elif key == 'flag':
self.flag = value
else:
            raise KeyError("No such key: %s" % key)
def __getitem__(self, item):
if item == 'proc':
return self.proc
elif item == 'ip_data':
return self.ip_data
elif item == 'sockaddr':
return self.sockaddr
elif item == 'flag':
return self.flag
else:
return None
|
scripts/python/common_setup.py | wyzero/BladeDISC | 328 | 12623028 |
# Copyright 2022 The BladeDISC Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import time
import subprocess
import shutil
import logging
import sys
import re
import json  # used by deduce_cuda_info() below
import shlex  # used by safe_run() below
from subprocess import Popen, PIPE, STDOUT
from contextlib import contextmanager
from datetime import datetime
class StageTiming:
def __init__(self):
self.durs = []
self.log_file = "/tmp/tao_build.log"
def report(self):
if len(self.durs) == 0:
return
lines = []
# load previous info from file
if os.path.exists(self.log_file):
lines += open(self.log_file, "r").read().splitlines()
for name, sec, ts in self.durs:
lines.append("{}: {} - {:.2f} minutes".format(ts, name, sec * 1.0 / 60))
# logger.info("****** Stage timing report: ******\n{}".format("\n".join(lines)))
# logger.info("**********************************")
# save to file
with open(self.log_file, "w") as of:
of.write("\n".join(lines))
def append(self, name, secs):
self.durs.append((name, secs, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
stage_time = StageTiming()
def time_stage(incl_args=[], incl_kwargs=[]):
def time_stage_impl(entry):
def wrapper(*args, **kwargs):
start = time.time()
try:
ret = entry(*args, **kwargs)
except Exception:
logger.exception("{} failed on exception".format(entry.__name__))
raise Exception("run error")
finally:
end = time.time()
name = entry.__name__
if len(incl_args) > 0 or len(incl_kwargs) > 0:
name += "("
for idx in incl_args:
name += args[idx] + ","
for k in incl_kwargs:
name += kwargs[k] + ","
name = name[:-1] + ")"
stage_time.append(name, end - start)
return ret
return wrapper
return time_stage_impl
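# Usage: applied as ``@time_stage()`` to build steps (e.g. build_mkldnn below) so
# that each step's duration is appended to the global ``stage_time`` tracker.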
def script_dir():
return os.path.dirname(os.path.abspath(__file__))
def get_source_root_dir():
root = os.path.join(script_dir(), os.pardir, os.pardir)
return os.path.abspath(root)
def __create_logger():
"""Create a logger with color."""
# The background is set with 40 plus the number of the color, and the foreground with 30
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
    # These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
"WARNING": YELLOW,
"INFO": GREEN,
"DEBUG": BLUE,
"CRITICAL": YELLOW,
"ERROR": RED,
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg, use_color=False):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = (
COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
)
record.levelname = levelname_color
return logging.Formatter.format(self, record)
class ColoredLogger(logging.Logger):
FORMAT = "{}%(asctime)s{} %(levelname)19s %(message)s".format(
BOLD_SEQ, RESET_SEQ
)
def __init__(self, name):
logging.Logger.__init__(self, name, logging.DEBUG)
color_formatter = ColoredFormatter(
self.FORMAT, use_color=sys.stdout.isatty() and sys.stderr.isatty()
)
console = logging.StreamHandler()
console.setFormatter(color_formatter)
self.addHandler(console)
return
logging.setLoggerClass(ColoredLogger)
logger = logging.getLogger("tao_ci")
logger.setLevel(logging.INFO)
return logger
logger = __create_logger()
def execute(cmd):
"""Execute a shell command, exception raised on failure."""
shell_setting = "set -e; set -o pipefail; "
logger.info("Execute shell command: `" + cmd + "`, cwd: " + os.getcwd())
subprocess.check_call(shell_setting + cmd, shell=True, executable="/bin/bash")
def ensure_empty_dir(dir, clear_hidden=True):
"""
Make sure the given directory exists and is empty.
    This function will create an empty directory if the directory doesn't exist,
    or it will clean all content under the directory. Hidden files and
    subdirectories will be deleted if clear_hidden is True.
"""
if not os.path.exists(dir):
logger.info("make dir: " + dir)
os.makedirs(dir)
return
logger.info("clear dir: {}, clear hidden files: {}".format(dir, clear_hidden))
for filename in os.listdir(dir):
if clear_hidden or not filename.startswith("."):
file_path = os.path.join(dir, filename)
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
else:
shutil.rmtree(file_path, ignore_errors=True)
def which(cmd):
"""Same as `which` command of bash."""
from distutils.spawn import find_executable
found = find_executable(cmd)
if not found:
raise Exception("failed to find command: " + cmd)
return found
@contextmanager
def cwd(path):
"""
    Change the current working directory to `path` to do something and then
    restore the previous working directory when it's done.
"""
old_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_dir)
def running_on_ci():
"""
Return true if the building job is running on CI host.
"""
if os.getenv("GITHUB_WORKFLOW"):
return True
return False
def ci_build_flag():
if running_on_ci():
return " --noshow_loading_progress --show_progress_rate_limit=600"
return ""
def remote_cache_token():
"""
Return a remote cache token if exists
"""
fn = os.path.expanduser("~/.cache/remote_cache_token")
if os.path.exists(fn):
with open(fn) as f:
return str(f.read()).strip()
else:
if "TF_REMOTE_CACHE" in os.environ:
token = os.getenv("TF_REMOTE_CACHE")
return token
return None
def symlink_files(root):
with cwd(root):
logger.info("configuring tao_compiler ......")
# map compiler codes into tf tree for build
with open("tao_compiler/file_map") as fh:
for line in fh:
if line.startswith("#") or line.strip() == "":
continue
info = line.strip().split(",")
if len(info) != 2:
continue
src_file = os.path.join(root, "tao_compiler", info[0])
link_in_tf = os.path.join("tf_community", info[1])
dst_folder = os.path.dirname(link_in_tf)
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
execute("rm -rf {0} && ln -s {1} {0}".format(link_in_tf, src_file))
logger.info("linking ./tao to tf_community/tao")
execute(
"rm -rf {0} && ln -s {1} {0}".format(
os.path.join("tf_community", "tao"), os.path.join(root, "tao")
)
)
def mkldnn_build_dir(root=None):
if root is None:
root = get_source_root_dir()
return os.path.join(root, "tao", "third_party", "mkldnn", "build")
def mkl_install_dir(root):
return os.path.join(mkldnn_build_dir(root), "intel")
def acl_root_dir(root):
return os.path.join(mkldnn_build_dir(root), 'acl', 'ComputeLibrary')
def config_mkldnn(root, args):
build_dir = mkldnn_build_dir(root)
ensure_empty_dir(build_dir, clear_hidden=False)
mkl_dir = mkl_install_dir(root)
acl_dir = acl_root_dir(root)
ensure_empty_dir(mkl_dir, clear_hidden=False)
ensure_empty_dir(acl_dir, clear_hidden=False)
if args.x86:
with cwd(mkl_dir):
# download mkl-lib/include
download_cmd = """
unset HTTPS_PROXY
curl -fsSL https://hlomodule.oss-cn-zhangjiakou.aliyuncs.com/mkl_package/mkl-static-2022.0.1-intel_117.tar.bz2 | tar xjv
curl -fsSL https://hlomodule.oss-cn-zhangjiakou.aliyuncs.com/mkl_package/mkl-include-2022.0.1-h8d4b97c_803.tar.bz2 | tar xjv
"""
execute(download_cmd)
if args.aarch64:
with cwd(acl_dir):
            # download and build ACL (Arm Compute Library) for oneDNN
cmd = '''
readonly ACL_REPO="https://github.com/ARM-software/ComputeLibrary.git"
MAKE_NP="-j$(grep -c processor /proc/cpuinfo)"
ACL_DIR={}
git clone --branch v22.02 --depth 1 $ACL_REPO $ACL_DIR
cd $ACL_DIR
scons --silent $MAKE_NP Werror=0 debug=0 neon=1 opencl=0 embed_kernels=0 os=linux arch=arm64-v8a build=native extra_cxx_flags="-fPIC"
exit $?
'''.format(acl_dir)
execute(cmd)
# a workaround for static linking
execute('rm -f build/*.so')
execute('mv build/libarm_compute-static.a build/libarm_compute.a')
execute('mv build/libarm_compute_core-static.a build/libarm_compute_core.a')
execute('mv build/libarm_compute_graph-static.a build/libarm_compute_graph.a')
with cwd(build_dir):
cc = which("gcc")
cxx = which("g++")
# always link patine statically
flags = " -DMKL_ROOT={} ".format(mkl_dir)
envs = " CC={} CXX={} ".format(cc, cxx)
if args.aarch64:
envs += " ACL_ROOT_DIR={} ".format(acl_dir)
flags += " -DDNNL_AARCH64_USE_ACL=ON "
if args.ral_cxx11_abi:
flags += " -DUSE_CXX11_ABI=ON"
cmake_cmd = "{} cmake .. {}".format(envs, flags)
logger.info("configuring mkldnn ......")
execute(cmake_cmd)
logger.info("mkldnn configure success.")
@time_stage()
def build_mkldnn(root):
build_dir = mkldnn_build_dir(root)
with cwd(build_dir):
execute("make -j")
execute("make install")
logger.info("Stage [build_mkldnn] success.")
def is_x86():
import platform
# TODO(disc): fine-grained check for intel/amd64
return platform.processor() == 'x86_64'
def is_aarch64():
import platform
return platform.processor() == 'aarch64'
def auto_detect_host_cpu(args):
if not hasattr(args, 'x86') or not args.x86:
args.x86 = is_x86()
if not hasattr(args, 'aarch64') or not args.aarch64:
args.aarch64 = is_aarch64()
enabled_cpus = int(args.x86) + int(args.aarch64)
if enabled_cpus > 1:
        raise RuntimeError("invalid config: more than one cpu type is specified")
if enabled_cpus < 1:
raise RuntimeError("auto_detect_host_cpu failed")
def update_cpu_specific_setting(args):
if not hasattr(args, 'x86'):
args.x86 = False
if not hasattr(args, 'aarch64'):
args.aarch64 = False
if not hasattr(args, 'enable_mkldnn'):
args.enable_mkldnn = False
if not hasattr(args, 'cpu_only'):
args.cpu_only = False
if args.cpu_only:
auto_detect_host_cpu(args)
args.enable_mkldnn = (args.x86 or args.aarch64)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cxx11_abi",
required=False,
action="store_true",
help="Build with cxx11 abi or not",
)
parser.add_argument(
"--cpu_only",
required=False,
action="store_true",
help="Build tao with cpu support only",
)
args = parser.parse_args()
# backward compatibility
args.ral_cxx11_abi = args.cxx11_abi
update_cpu_specific_setting(args)
root = get_source_root_dir()
symlink_files(root)
if args.enable_mkldnn:
config_mkldnn(root, args)
build_mkldnn(root)
def get_tf_info(python_executable):
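    # Query the given Python interpreter for the installed TensorFlow: its version,
    # compile/link flags (include dir, lib dir, lib name, _GLIBCXX_USE_CXX11_ABI)
    # and the protobuf version baked into its bundled headers.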
output = subprocess.check_output(
'{} -c "import tensorflow as tf; print(tf.__version__); print(\'\\n\'.join(tf.sysconfig.get_compile_flags())); print(\'\\n\'.join(tf.sysconfig.get_link_flags()))"'.format(
python_executable
),
shell=True,
).decode()
lines = output.split("\n")
major, minor, _ = lines[0].split(".") # lines[0] is version like 1.15.0
is_pai = "PAI" in lines[0]
header_dir, lib_dir, lib_name, cxx11_abi = '', '', '', ''
for line in lines[1:]:
if line.startswith("-I"):
header_dir = line[2:]
elif line.startswith("-L"):
lib_dir = line[2:]
elif line.startswith("-l:"): # in case of -l:libtensorflow_framework.so.1
lib_name = line[3:]
elif line.startswith("-l"): # in case of -ltensorflow_framework
lib_name = 'lib' + line[2:] + '.so'
elif '_GLIBCXX_USE_CXX11_ABI' in line:
cxx11_abi = line.split('=')[-1]
PB_HEADER_FILE = "google/protobuf/stubs/common.h"
proto_file_path = os.path.join(header_dir, PB_HEADER_FILE)
if os.path.exists(proto_file_path):
with open(proto_file_path, 'r') as f:
content = f.read()
try:
match = re.findall("#define GOOGLE_PROTOBUF_VERSION [0-9]+", content)[0]
raw_version = int(re.findall("[^0-9]+([0-9]+)$", match)[0])
major_version = int(raw_version / 1000000)
minor_version = int(raw_version / 1000) - major_version * 1000
micro_version = raw_version - major_version * 1000000 - minor_version * 1000
tf_pb_version = f"{major_version}.{minor_version}.{micro_version}"
except IndexError as err:
raise Exception("Can not find tensorflow's built-in pb version!")
else:
        raise Exception(f"Cannot find {PB_HEADER_FILE} in tf's include dir!")
return major, minor, is_pai, header_dir, lib_dir, lib_name, cxx11_abi, tf_pb_version
def deduce_cuda_info():
"""Deduce cuda major and minor version and cuda directory."""
def _deduce_from_version_file(cuda_home):
version_file = os.path.join(cuda_home, "version.txt")
if os.path.exists(version_file):
with open(version_file) as f:
matched = re.findall(r"[0-9]+\.[0-9]+\.[0-9]+", f.read())
if len(matched) == 1:
# return major and minor only.
return ".".join(matched[0].split(".")[0:2])
version_file = os.path.join(cuda_home, "version.json")
if os.path.exists(version_file):
with open(version_file) as f:
data = json.loads(f.read())
parts = data['cuda']['version'].split(".")
return parts[0] + "." + parts[1]
return None
def _deduce_from_nvcc():
out = safe_run("nvcc --version", shell=True, verbose=False)
patt = re.compile(r"release ([0-9]+\.[0-9]+)", re.M)
found = patt.findall(out)
if len(found) == 1:
nvcc = which("nvcc")
cuda_home = os.path.join(os.path.dirname(nvcc), os.path.pardir)
return found[0], os.path.abspath(cuda_home)
else:
return None, None
cuda_home = os.environ.get("TF_CUDA_HOME", None)
if cuda_home:
ver = _deduce_from_version_file(cuda_home)
if ver is not None:
return ver, cuda_home
else:
            raise Exception(
                f"Failed to deduce cuda version from TF_CUDA_HOME: {cuda_home}"
            )
ver = _deduce_from_version_file("/usr/local/cuda")
if ver is not None:
return ver, "/usr/local/cuda"
all_cuda = [
os.path.join("/usr/local", d)
for d in os.listdir("/usr/local")
if d.startswith("cuda-")
]
if (len(all_cuda) != 1):
        logger.info("Multiple CUDA installations found.")
else:
ver = _deduce_from_version_file(all_cuda[0])
if ver is not None:
return ver, all_cuda[0]
ver, cuda_home = _deduce_from_nvcc()
if ver is not None:
return ver, cuda_home
raise Exception("Failed to deduce cuda version from local installation.")
def get_cudnn_version(cuda_home):
    searched = []
    for hdr in ["cudnn.h", "cudnn_version.h"]:
        fname = os.path.join(cuda_home, "include", hdr)
        searched.append(fname)
if not os.path.exists(fname):
fname = os.path.join("/usr/include", hdr)
with open(fname, "r") as f:
major, minor, patch = None, None, None
for line in f.readlines():
line = line.strip()
if "#define CUDNN_MAJOR" in line:
major = line.split(" ")[2]
elif "#define CUDNN_MINOR" in line:
minor = line.split(" ")[2]
elif "#define CUDNN_PATCHLEVEL" in line:
patch = line.split(" ")[2]
if None not in [major, minor, patch]:
return ".".join([major, minor, patch])
    raise Exception(f"Failed to deduce cuDNN version after searching: {searched}")
def safe_run(cmd, shell=False, verbose=True):
    assert isinstance(cmd, str)
if shell:
popen = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=shell)
else:
args = shlex.split(cmd)
popen = Popen(args, stdout=PIPE, stderr=STDOUT, shell=shell)
# wait until subprocess terminated
# stdout, stderr = popen.communicate()
stdout = ""
for line in iter(popen.stdout.readline, b""):
clean_line = line.strip().decode("utf-8")
if verbose:
logger.info(clean_line)
stdout += "{}\n".format(clean_line)
if stdout and "error" in stdout.lower():
logger.info(
'Running "{}" with shell mode {}'.format(cmd, "ON" if shell else "OFF")
)
raise AssertionError("{} failed!".format(cmd))
return stdout
|
dro_sfm/loggers/__init__.py | aliyun/dro-sfm | 147 | 12623039 |
from dro_sfm.loggers.wandb_logger import WandbLogger
__all__ = ["WandbLogger"]
|
examples/multisource_adapt/config.py | 19valentin99/pykale | 324 | 12623041 |
"""
Default configurations for multi-source domain adaptation
"""
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.ROOT = "../data"
_C.DATASET.NAME = "digits" # choices=['office', 'digits', 'office_caltech', 'office31']
_C.DATASET.TARGET = "MNIST"
# -----------------------------------------------------------------------------
_C.DATASET.SOURCE = None
# a list of source domain names (e.g. ["SVHN", "USPS_RGB"]) or None. If None, all domains (excluding the target)
# will be used as sources
# -----------------------------------------------------------------------------
_C.DATASET.NUM_CLASSES = 10
_C.DATASET.NUM_REPEAT = 10 # 10
_C.DATASET.NUM_CHANNELS = 3
_C.DATASET.DIMENSION = 784
_C.DATASET.WEIGHT_TYPE = "natural"
_C.DATASET.SIZE_TYPE = "source"
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
_C.SOLVER = CN()
_C.SOLVER.SEED = 2021
_C.SOLVER.BASE_LR = 0.001 # Initial learning rate
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005 # 1e-4
_C.SOLVER.NESTEROV = True
_C.SOLVER.TYPE = "SGD"
_C.SOLVER.MAX_EPOCHS = 120 # "nb_adapt_epochs": 100,
# _C.SOLVER.WARMUP = True
_C.SOLVER.MIN_EPOCHS = 20 # "nb_init_epochs": 20,
_C.SOLVER.TRAIN_BATCH_SIZE = 100
_C.SOLVER.TEST_BATCH_SIZE = 100
# Adaptation-specific solver config
_C.SOLVER.AD_LAMBDA = True
_C.SOLVER.AD_LR = True
_C.SOLVER.INIT_LAMBDA = 1
# ---------------------------------------------------------------------------- #
# Domain Adaptation Net (DAN) configs
# ---------------------------------------------------------------------------- #
_C.DAN = CN()
_C.DAN.METHOD = "M3SDA" # choices=['M3SDA', 'MFSAN']
_C.DAN.USERANDOM = False
_C.DAN.RANDOM_DIM = 1024
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
_C.OUTPUT = CN()
_C.OUTPUT.ROOT = "./outputs" # output_dir
_C.OUTPUT.VERBOSE = False # To discuss, for HPC jobs
_C.OUTPUT.PB_FRESH = 0 # 0 # 50 # 0 to disable ; MAYBE make it a command line option
_C.OUTPUT.TB_DIR = os.path.join("lightning_logs", "Tgt" + _C.DATASET.TARGET)
def get_cfg_defaults():
return _C.clone()
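# Usage sketch (hypothetical; the YAML path is illustrative):
#   cfg = get_cfg_defaults()
#   cfg.merge_from_file("configs/office31.yaml")
#   cfg.freeze()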
|
src/gamemodes/mudkip.py | iamgreaser/lykos | 122 | 12623049 |
from src.gamemodes import game_mode, GameMode, InvalidModeException
from src.messages import messages
from src.events import EventListener
from src import channels, users
# someone let woffle commit while drunk again... tsk tsk
@game_mode("mudkip", minp=5, maxp=17, likelihood=5)
class MudkipMode(GameMode):
"""Why are all the professors named after trees?"""
def __init__(self, arg=""):
super().__init__(arg)
self.ABSTAIN_ENABLED = False
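        # Per-totem weights for each shaman role below; presumably a totem can only
        # be handed out by a role with a nonzero weight (an assumption about the
        # lykos totem machinery, not stated in this file).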
self.TOTEM_CHANCES = {
"death" : {"shaman": 1, "wolf shaman": 0, "crazed shaman": 0},
"protection" : {"shaman": 0, "wolf shaman": 1, "crazed shaman": 1},
"silence" : {"shaman": 0, "wolf shaman": 1, "crazed shaman": 0},
"revealing" : {"shaman": 0, "wolf shaman": 0, "crazed shaman": 0},
"desperation" : {"shaman": 0, "wolf shaman": 0, "crazed shaman": 0},
"impatience" : {"shaman": 0, "wolf shaman": 1, "crazed shaman": 0},
"pacifism" : {"shaman": 1, "wolf shaman": 0, "crazed shaman": 0},
"influence" : {"shaman": 1, "wolf shaman": 0, "crazed shaman": 1},
"narcolepsy" : {"shaman": 0, "wolf shaman": 0, "crazed shaman": 0},
"exchange" : {"shaman": 0, "wolf shaman": 0, "crazed shaman": 0},
"lycanthropy" : {"shaman": 0, "wolf shaman": 0, "crazed shaman": 1},
"luck" : {"shaman": 0, "wolf shaman": 0, "crazed shaman": 1},
"pestilence" : {"shaman": 1, "wolf shaman": 0, "crazed shaman": 1},
"retribution" : {"shaman": 0, "wolf shaman": 0, "crazed shaman": 1},
"misdirection" : {"shaman": 0, "wolf shaman": 1, "crazed shaman": 0},
"deceit" : {"shaman": 0, "wolf shaman": 0, "crazed shaman": 0},
}
self.set_default_totem_chances()
# make assassin a primary role
self.SECONDARY_ROLES.pop("assassin", None)
self.START_WITH_DAY = True
self.ROLE_GUIDE = {
6: ["wolf", "cult leader", "investigator", "insomniac"],
7: ["jester"],
8: ["assassin"],
9: ["-jester", "doomsayer"],
10: ["priest"],
11: ["crazed shaman"],
12: ["vengeful ghost"],
13: ["wolf shaman"],
14: ["amnesiac"],
15: ["succubus"],
16: ["shaman"],
17: ["dullahan"]
}
self.EVENTS = {
"lynch_behaviour": EventListener(self.lynch_behaviour),
"daylight_warning": EventListener(self.daylight_warning)
}
def lynch_behaviour(self, evt, var):
evt.data["kill_ties"] = True
voters = sum(map(len, evt.params.votes.values()))
if voters == evt.params.players:
evt.data["force"] = True
def daylight_warning(self, evt, var):
evt.data["message"] = "daylight_warning_killtie"
|
veros/runtime.py | AkasDutta/veros | 115 | 12623058 | <reponame>AkasDutta/veros<filename>veros/runtime.py
import os
from threading import local
from collections import namedtuple
from veros.backend import BACKENDS
from veros.logs import LOGLEVELS
# globals
log_args = local()
log_args.log_all_processes = False
log_args.loglevel = "info"
# MPI helpers
def _default_mpi_comm():
try:
from mpi4py import MPI
except ImportError:
return None
else:
return MPI.COMM_WORLD
# validators
def parse_two_ints(v):
return (int(v[0]), int(v[1]))
def parse_choice(choices, preserve_case=False):
def validate(choice):
if isinstance(choice, str) and not preserve_case:
choice = choice.lower()
if choice not in choices:
raise ValueError(f"must be one of {choices}")
return choice
return validate
def parse_bool(obj):
if not isinstance(obj, str):
return bool(obj)
return obj.lower() in {"1", "true", "on"}
def check_mpi_comm(comm):
if comm is not None:
from mpi4py import MPI
if not isinstance(comm, MPI.Comm):
raise TypeError("mpi_comm must be Comm instance or None")
return comm
def set_loglevel(val):
from veros import logs
log_args.loglevel = parse_choice(LOGLEVELS)(val)
logs.setup_logging(loglevel=log_args.loglevel, log_all_processes=log_args.log_all_processes)
return log_args.loglevel
def set_log_all_processes(val):
from veros import logs
log_args.log_all_processes = parse_bool(val)
logs.setup_logging(loglevel=log_args.loglevel, log_all_processes=log_args.log_all_processes)
return log_args.log_all_processes
DEVICES = ("cpu", "gpu", "tpu")
FLOAT_TYPES = ("float64", "float32")
LINEAR_SOLVERS = ("scipy", "scipy_jax", "petsc", "best")
# settings
RuntimeSetting = namedtuple("RuntimeSetting", ("type", "default", "read_from_env"))
RuntimeSetting.__new__.__defaults__ = (None, None, True)
AVAILABLE_SETTINGS = {
"backend": RuntimeSetting(parse_choice(BACKENDS), "numpy"),
"device": RuntimeSetting(parse_choice(DEVICES), "cpu"),
"float_type": RuntimeSetting(parse_choice(FLOAT_TYPES), "float64"),
"linear_solver": RuntimeSetting(parse_choice(LINEAR_SOLVERS), "best"),
"petsc_options": RuntimeSetting(str, ""),
"monitor_streamfunction_residual": RuntimeSetting(parse_bool, True),
"num_proc": RuntimeSetting(parse_two_ints, (1, 1), read_from_env=False),
"profile_mode": RuntimeSetting(parse_bool, False),
"loglevel": RuntimeSetting(set_loglevel, "info"),
"mpi_comm": RuntimeSetting(check_mpi_comm, _default_mpi_comm(), read_from_env=False),
"log_all_processes": RuntimeSetting(set_log_all_processes, False),
"use_io_threads": RuntimeSetting(parse_bool, False),
"io_timeout": RuntimeSetting(float, 20),
"hdf5_gzip_compression": RuntimeSetting(bool, True),
"force_overwrite": RuntimeSetting(bool, False),
"diskless_mode": RuntimeSetting(bool, False),
"pyom_compatibility_mode": RuntimeSetting(bool, False),
}
class RuntimeSettings:
__slots__ = ["__locked__", "__setting_types__", "__settings__", *AVAILABLE_SETTINGS.keys()]
def __init__(self, **kwargs):
self.__locked__ = False
self.__setting_types__ = {}
for name, setting in AVAILABLE_SETTINGS.items():
setting_envvar = f"VEROS_{name.upper()}"
if name in kwargs:
val = kwargs[name]
elif setting.read_from_env:
val = os.environ.get(setting_envvar, setting.default)
else:
val = setting.default
self.__setting_types__[name] = setting.type
self.__setattr__(name, val)
self.__settings__ = set(self.__setting_types__.keys())
def update(self, **kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
return self
def __setattr__(self, attr, val):
if getattr(self, "__locked__", False):
raise RuntimeError("Runtime settings cannot be modified after import of core modules")
if attr.startswith("_"):
return super().__setattr__(attr, val)
# coerce type
stype = self.__setting_types__.get(attr)
if stype is not None:
try:
val = stype(val)
except (TypeError, ValueError) as e:
raise ValueError(f'Got invalid value for runtime setting "{attr}": {e!s}') from None
return super().__setattr__(attr, val)
def __repr__(self):
setval = ", ".join(f"{key}={repr(getattr(self, key))}" for key in self.__settings__)
return f"{self.__class__.__name__}({setval})"
# state
class RuntimeState:
"""Unifies attributes from various modules in a simple read-only object"""
__slots__ = ()
@property
def proc_rank(self):
from veros import runtime_settings
comm = runtime_settings.mpi_comm
if comm is None:
return 0
return comm.Get_rank()
@property
def proc_num(self):
from veros import runtime_settings
comm = runtime_settings.mpi_comm
if comm is None:
return 1
return comm.Get_size()
@property
def proc_idx(self):
from veros import distributed
return distributed.proc_rank_to_index(self.proc_rank)
@property
def backend_module(self):
from veros import backend, runtime_settings
return backend.get_backend_module(runtime_settings.backend)
def __setattr__(self, attr, val):
raise TypeError(f"Cannot modify {self.__class__.__name__} objects")
|
src/ralph/signals.py | DoNnMyTh/ralph | 1,668 | 12623059 | from django.db import connection
from django.db.models.signals import post_save
from django.dispatch import receiver
# TODO(mkurek): make this work as a decorator, example:
# @post_commit(MyModel)
# def my_handler(instance):
# ...
def post_commit(func, model, signal=post_save, single_call=True):
"""
Post commit signal for specific model.
It's better than Django's post_save, because:
* it handles transaction rollback (transaction could be rolled back
after calling post_save)
* it handles M2M relations (post_save is (usually) called when main model
is saved, before related M2M instances are saved)
    Writing tests:
    Make sure your TestCase inherits from one of:
    - TransactionTestCase (Django)
    - APITransactionTestCase (Django Rest Framework)
    Otherwise the `on_commit` signal won't be called.
Requirements:
* you have to use database supporting transactions (ex. MySQL)
* you have to use django-transaction-hooks
(https://github.com/carljm/django-transaction-hooks) for Django<=1.8
(it was merged into Django 1.9)
Notice that this feature will work whether or not you're using transactions
in your code. Possible scenarios are as follows:
* `ATOMIC_REQUESTS` is set to True in settings - then every request is
wrapped in transaction - at the end of processing each (saving) request,
this hook will be processed (for models which were saved)
* view is decorated using `transaction.atomic` - at the end of processing
the view, this hook will be called (if any of registered models was saved)
* if transaction is not started for current request, then this hook will
behave as post_save (will be called immediately)
"""
@receiver(signal, sender=model, weak=False)
def wrap(sender, instance, **kwargs):
def wrapper():
            # prevent calling the same func multiple times for a single
            # instance
called_already_attr = '_' + func.__name__ + '_called'
if not (
getattr(instance, called_already_attr, False) and
single_call
):
func(instance)
setattr(instance, called_already_attr, True)
# TODO(mkurek): replace connection by transaction after upgrading to
# Django 1.9
connection.on_commit(wrapper)
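# A minimal usage sketch (assumption, not part of ralph): register a handler for a
# hypothetical Article model so it only runs once the saving transaction commits.
# The model, handler, and update_search_index helper are illustrative.
#
# def notify_search_index(instance):
#     update_search_index(instance.pk) # runs after the transaction has committed
#
# post_commit(notify_search_index, Article)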
|
word2gauss/words.py | seomoz/word2gauss | 203 | 12623093 | <reponame>seomoz/word2gauss
from itertools import islice
import numpy as np
from .embeddings import text_to_pairs
LARGEST_UINT32 = 4294967295
def tokenizer(s):
'''
Whitespace tokenizer
'''
return s.strip().split()
class Vocabulary(object):
'''
    Implementation of the Vocabulary interface
.word2id: given a token, return the id or raise KeyError if not in the vocab
.id2word: given a token id, return the token or raise IndexError if invalid
.tokenize: given a string, tokenize it using the tokenizer and then
remove all OOV tokens
.tokenize_ids: given a string, tokenize and return the token ids
.random_ids: given an integer, return a numpy array of random token ids
'''
def __init__(self, tokens, tokenizer=tokenizer):
'''
tokens: a {'token1': 0, 'token2': 1, ...} map of token -> id
the ids must run from 0 to n_tokens - 1 inclusive
tokenizer: accepts a string and returns a list of strings
'''
self._tokens = tokens
self._ids = {i: token for token, i in tokens.items()}
self._ntokens = len(tokens)
self._tokenizer = tokenizer
def word2id(self, word):
return self._tokens[word]
def id2word(self, i):
try:
return self._ids[i]
except KeyError:
raise IndexError
def tokenize(self, s):
'''
        Tokenize the string and remove OOV tokens using the built vocabulary.
'''
tokens = self._tokenizer(s)
return [token for token in tokens if token in self._tokens]
def tokenize_ids(self, s, remove_oov=True):
tokens = self._tokenizer(s)
if remove_oov:
return np.array([self.word2id(token)
for token in tokens if token in self._tokens],
dtype=np.uint32)
else:
ret = np.zeros(len(tokens), dtype=np.uint32)
for k, token in enumerate(tokens):
try:
ret[k] = self.word2id(token)
except KeyError:
ret[k] = LARGEST_UINT32
return ret
def random_ids(self, num):
return np.random.randint(0, self._ntokens, size=num).astype(np.uint32)
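# A small usage sketch (not part of the original module): build a vocabulary from
# an explicit token -> id map, then tokenize a sentence; tokens missing from the
# map are dropped as OOV. The example tokens are illustrative.
#
# vocab = Vocabulary({'the': 0, 'cat': 1, 'sat': 2})
# vocab.tokenize('the cat sat on the mat') # -> ['the', 'cat', 'sat', 'the']
# vocab.tokenize_ids('the cat sat') # -> array([0, 1, 2], dtype=uint32)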
def iter_pairs(fin, vocab, batch_size=10, nsamples=2, window=5):
'''
Convert a document stream to batches of pairs used for training embeddings.
iter_pairs is a generator that yields batches of pairs that can
be passed to GaussianEmbedding.train
fin = an iterator of documents / sentences (e.g. a file like object)
Each element is a string of raw text
vocab = something implementing the Vocabulary interface
batch_size = size of batches
window = Number of words to the left and right of center word to include
as positive pairs
    nsamples = number of negative samples to draw for each center word
'''
documents = iter(fin)
batch = list(islice(documents, batch_size))
while len(batch) > 0:
text = [
vocab.tokenize_ids(doc, remove_oov=False)
for doc in batch
]
pairs = text_to_pairs(text, vocab.random_ids,
nsamples_per_word=nsamples,
half_window_size=window)
yield pairs
batch = list(islice(documents, batch_size))
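# A hedged end-to-end sketch (not from the original source): stream a corpus file
# through iter_pairs and feed each batch of pairs to an embedding model. The corpus
# path, token_to_id map, and the `embed.train(pairs)` call are assumptions about
# the surrounding API.
#
# vocab = Vocabulary(token_to_id)
# with open('corpus.txt') as fin:
#     for pairs in iter_pairs(fin, vocab, batch_size=32, nsamples=2, window=5):
#         embed.train(pairs)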
|
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/image/tga.py | SabheeR/hobbits | 304 | 12623094 | <filename>src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/image/tga.py
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import collections
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class Tga(KaitaiStruct):
"""TGA (AKA Truevision TGA, AKA TARGA), is a raster image file format created by Truevision. It supports up to 32 bits per pixel (three 8-bit RGB channels + 8-bit alpha channel), color mapping and optional lossless RLE compression.
.. seealso::
Source - http://www.dca.fee.unicamp.br/~martino/disciplinas/ea978/tgaffs.pdf
"""
class ColorMapEnum(Enum):
no_color_map = 0
has_color_map = 1
class ImageTypeEnum(Enum):
no_image_data = 0
uncomp_color_mapped = 1
uncomp_true_color = 2
uncomp_bw = 3
rle_color_mapped = 9
rle_true_color = 10
rle_bw = 11
SEQ_FIELDS = ["image_id_len", "color_map_type", "image_type", "color_map_ofs", "num_color_map", "color_map_depth", "x_offset", "y_offset", "width", "height", "image_depth", "img_descriptor", "image_id", "color_map"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['image_id_len']['start'] = self._io.pos()
self.image_id_len = self._io.read_u1()
self._debug['image_id_len']['end'] = self._io.pos()
self._debug['color_map_type']['start'] = self._io.pos()
self.color_map_type = KaitaiStream.resolve_enum(Tga.ColorMapEnum, self._io.read_u1())
self._debug['color_map_type']['end'] = self._io.pos()
self._debug['image_type']['start'] = self._io.pos()
self.image_type = KaitaiStream.resolve_enum(Tga.ImageTypeEnum, self._io.read_u1())
self._debug['image_type']['end'] = self._io.pos()
self._debug['color_map_ofs']['start'] = self._io.pos()
self.color_map_ofs = self._io.read_u2le()
self._debug['color_map_ofs']['end'] = self._io.pos()
self._debug['num_color_map']['start'] = self._io.pos()
self.num_color_map = self._io.read_u2le()
self._debug['num_color_map']['end'] = self._io.pos()
self._debug['color_map_depth']['start'] = self._io.pos()
self.color_map_depth = self._io.read_u1()
self._debug['color_map_depth']['end'] = self._io.pos()
self._debug['x_offset']['start'] = self._io.pos()
self.x_offset = self._io.read_u2le()
self._debug['x_offset']['end'] = self._io.pos()
self._debug['y_offset']['start'] = self._io.pos()
self.y_offset = self._io.read_u2le()
self._debug['y_offset']['end'] = self._io.pos()
self._debug['width']['start'] = self._io.pos()
self.width = self._io.read_u2le()
self._debug['width']['end'] = self._io.pos()
self._debug['height']['start'] = self._io.pos()
self.height = self._io.read_u2le()
self._debug['height']['end'] = self._io.pos()
self._debug['image_depth']['start'] = self._io.pos()
self.image_depth = self._io.read_u1()
self._debug['image_depth']['end'] = self._io.pos()
self._debug['img_descriptor']['start'] = self._io.pos()
self.img_descriptor = self._io.read_u1()
self._debug['img_descriptor']['end'] = self._io.pos()
self._debug['image_id']['start'] = self._io.pos()
self.image_id = self._io.read_bytes(self.image_id_len)
self._debug['image_id']['end'] = self._io.pos()
if self.color_map_type == Tga.ColorMapEnum.has_color_map:
self._debug['color_map']['start'] = self._io.pos()
self.color_map = [None] * (self.num_color_map)
for i in range(self.num_color_map):
if not 'arr' in self._debug['color_map']:
self._debug['color_map']['arr'] = []
self._debug['color_map']['arr'].append({'start': self._io.pos()})
self.color_map[i] = self._io.read_bytes((self.color_map_depth + 7) // 8)
self._debug['color_map']['arr'][i]['end'] = self._io.pos()
self._debug['color_map']['end'] = self._io.pos()
class TgaFooter(KaitaiStruct):
SEQ_FIELDS = ["ext_area_ofs", "dev_dir_ofs", "version_magic"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['ext_area_ofs']['start'] = self._io.pos()
self.ext_area_ofs = self._io.read_u4le()
self._debug['ext_area_ofs']['end'] = self._io.pos()
self._debug['dev_dir_ofs']['start'] = self._io.pos()
self.dev_dir_ofs = self._io.read_u4le()
self._debug['dev_dir_ofs']['end'] = self._io.pos()
self._debug['version_magic']['start'] = self._io.pos()
self.version_magic = self._io.read_bytes(18)
self._debug['version_magic']['end'] = self._io.pos()
@property
def is_valid(self):
if hasattr(self, '_m_is_valid'):
return self._m_is_valid if hasattr(self, '_m_is_valid') else None
self._m_is_valid = self.version_magic == b"\x54\x52\x55\x45\x56\x49\x53\x49\x4F\x4E\x2D\x58\x46\x49\x4C\x45\x2E\x00"
return self._m_is_valid if hasattr(self, '_m_is_valid') else None
@property
def ext_area(self):
if hasattr(self, '_m_ext_area'):
return self._m_ext_area if hasattr(self, '_m_ext_area') else None
if self.is_valid:
_pos = self._io.pos()
self._io.seek(self.ext_area_ofs)
self._debug['_m_ext_area']['start'] = self._io.pos()
self._m_ext_area = Tga.TgaExtArea(self._io, self, self._root)
self._m_ext_area._read()
self._debug['_m_ext_area']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_ext_area if hasattr(self, '_m_ext_area') else None
class TgaExtArea(KaitaiStruct):
SEQ_FIELDS = ["ext_area_size", "author_name", "comments", "timestamp", "job_id", "job_time", "software_id", "software_version", "key_color", "pixel_aspect_ratio", "gamma_value", "color_corr_ofs", "postage_stamp_ofs", "scan_line_ofs", "attributes"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['ext_area_size']['start'] = self._io.pos()
self.ext_area_size = self._io.read_u2le()
self._debug['ext_area_size']['end'] = self._io.pos()
self._debug['author_name']['start'] = self._io.pos()
self.author_name = (self._io.read_bytes(41)).decode(u"ASCII")
self._debug['author_name']['end'] = self._io.pos()
self._debug['comments']['start'] = self._io.pos()
self.comments = [None] * (4)
for i in range(4):
if not 'arr' in self._debug['comments']:
self._debug['comments']['arr'] = []
self._debug['comments']['arr'].append({'start': self._io.pos()})
self.comments[i] = (self._io.read_bytes(81)).decode(u"ASCII")
self._debug['comments']['arr'][i]['end'] = self._io.pos()
self._debug['comments']['end'] = self._io.pos()
self._debug['timestamp']['start'] = self._io.pos()
self.timestamp = self._io.read_bytes(12)
self._debug['timestamp']['end'] = self._io.pos()
self._debug['job_id']['start'] = self._io.pos()
self.job_id = (self._io.read_bytes(41)).decode(u"ASCII")
self._debug['job_id']['end'] = self._io.pos()
self._debug['job_time']['start'] = self._io.pos()
self.job_time = (self._io.read_bytes(6)).decode(u"ASCII")
self._debug['job_time']['end'] = self._io.pos()
self._debug['software_id']['start'] = self._io.pos()
self.software_id = (self._io.read_bytes(41)).decode(u"ASCII")
self._debug['software_id']['end'] = self._io.pos()
self._debug['software_version']['start'] = self._io.pos()
self.software_version = self._io.read_bytes(3)
self._debug['software_version']['end'] = self._io.pos()
self._debug['key_color']['start'] = self._io.pos()
self.key_color = self._io.read_u4le()
self._debug['key_color']['end'] = self._io.pos()
self._debug['pixel_aspect_ratio']['start'] = self._io.pos()
self.pixel_aspect_ratio = self._io.read_u4le()
self._debug['pixel_aspect_ratio']['end'] = self._io.pos()
self._debug['gamma_value']['start'] = self._io.pos()
self.gamma_value = self._io.read_u4le()
self._debug['gamma_value']['end'] = self._io.pos()
self._debug['color_corr_ofs']['start'] = self._io.pos()
self.color_corr_ofs = self._io.read_u4le()
self._debug['color_corr_ofs']['end'] = self._io.pos()
self._debug['postage_stamp_ofs']['start'] = self._io.pos()
self.postage_stamp_ofs = self._io.read_u4le()
self._debug['postage_stamp_ofs']['end'] = self._io.pos()
self._debug['scan_line_ofs']['start'] = self._io.pos()
self.scan_line_ofs = self._io.read_u4le()
self._debug['scan_line_ofs']['end'] = self._io.pos()
self._debug['attributes']['start'] = self._io.pos()
self.attributes = self._io.read_u1()
self._debug['attributes']['end'] = self._io.pos()
@property
def footer(self):
if hasattr(self, '_m_footer'):
return self._m_footer if hasattr(self, '_m_footer') else None
_pos = self._io.pos()
self._io.seek((self._io.size() - 26))
self._debug['_m_footer']['start'] = self._io.pos()
self._m_footer = Tga.TgaFooter(self._io, self, self._root)
self._m_footer._read()
self._debug['_m_footer']['end'] = self._io.pos()
self._io.seek(_pos)
return self._m_footer if hasattr(self, '_m_footer') else None
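# A brief usage sketch (not part of the generated file): parse a TGA image with the
# generated class. Because this module was compiled with Kaitai's --debug option,
# _read() must be called explicitly after construction. The file name is
# hypothetical.
#
# tga = Tga.from_file('example.tga')
# tga._read()
# print(tga.width, tga.height, tga.image_type)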
|
test/distributed/pipeline/sync/skip/test_verify_skippables.py | Hacky-DH/pytorch | 60,067 | 12623100 | <reponame>Hacky-DH/pytorch
# Copyright 2019 <NAME>
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from torch import nn
from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables
def test_matching():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
verify_skippables(nn.Sequential(Layer1(), Layer2()))
def test_stash_not_pop():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "no module declared 'foo' as poppable but stashed" in str(e.value)
def test_pop_unknown():
@skippable(pop=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "'0' declared 'foo' as poppable but it was not stashed" in str(e.value)
def test_stash_again():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(stash=["foo"])
class Layer2(nn.Module):
pass
@skippable(pop=["foo"])
class Layer3(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
assert "'1' redeclared 'foo' as stashable" in str(e.value)
def test_pop_again():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(pop=["foo"])
class Layer3(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
assert "'2' redeclared 'foo' as poppable" in str(e.value)
def test_stash_pop_together_different_names():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"], stash=["bar"])
class Layer2(nn.Module):
pass
@skippable(pop=["bar"])
class Layer3(nn.Module):
pass
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
def test_stash_pop_together_same_name():
@skippable(stash=["foo"], pop=["foo"])
class Layer1(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1()))
assert "'0' declared 'foo' both as stashable and as poppable" in str(e.value)
def test_double_stash_pop():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(stash=["foo"])
class Layer3(nn.Module):
pass
@skippable(pop=["foo"])
class Layer4(nn.Module):
pass
with pytest.raises(TypeError) as e:
verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3(), Layer4()))
assert "'2' redeclared 'foo' as stashable" in str(e.value)
assert "'3' redeclared 'foo' as poppable" in str(e.value)
def test_double_stash_pop_but_isolated():
@skippable(stash=["foo"])
class Layer1(nn.Module):
pass
@skippable(pop=["foo"])
class Layer2(nn.Module):
pass
@skippable(stash=["foo"])
class Layer3(nn.Module):
pass
@skippable(pop=["foo"])
class Layer4(nn.Module):
pass
ns1 = Namespace()
ns2 = Namespace()
verify_skippables(
nn.Sequential(Layer1().isolate(ns1), Layer2().isolate(ns1), Layer3().isolate(ns2), Layer4().isolate(ns2),)
)
|
tests/test_console.py | ericchiang/kpm | 121 | 12623117 | from kpm.console import KubernetesExec
def test_console_default():
k = KubernetesExec("myrc", "echo titi")
assert k is not None
|
roleutils/utils.py | Onii-Chan-Discord/phen-cogs | 105 | 12623131 | <filename>roleutils/utils.py
"""
MIT License
Copyright (c) 2020-2021 phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
from typing import List, Optional, Tuple
import discord
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import humanize_list
async def is_allowed_by_hierarchy(bot: Red, mod: discord.Member, member: discord.Member) -> bool:
return (
mod.guild.owner_id == mod.id or mod.top_role >= member.top_role or await bot.is_owner(mod)
)
async def is_allowed_by_role_hierarchy(
bot: Red,
bot_me: discord.Member,
mod: discord.Member,
role: discord.Role,
) -> Tuple[bool, str]:
if role >= bot_me.top_role and bot_me.id != mod.guild.owner_id:
return (False, f"I am not higher than `{role}` in hierarchy.")
else:
return (
(mod.top_role > role) or mod.id == mod.guild.owner_id or await bot.is_owner(mod),
f"You are not higher than `{role}` in hierarchy.",
)
def my_role_heirarchy(guild: discord.Guild, role: discord.Role) -> bool:
return guild.me.top_role > role
MENTION_RE = re.compile(r"@(everyone|here|&[0-9]{17,21})")
def escape_mentions(text: str):
return MENTION_RE.sub("@\u200b\\1", text)
def humanize_roles(
roles: List[discord.Role], *, mention: bool = False, bold: bool = True
) -> Optional[str]:
if not roles:
return None
role_strings = []
for role in roles:
role_name = escape_mentions(role.name)
if mention:
role_strings.append(role.mention)
elif bold:
role_strings.append(f"**{role_name}**")
else:
role_strings.append(role_name)
return humanize_list(role_strings)
humanize_members = humanize_roles
async def can_run_command(ctx: commands.Context, command: str) -> bool:
try:
result = await ctx.bot.get_command(command).can_run(ctx, check_all_parents=True)
except commands.CommandError:
result = False
return result
async def delete_quietly(message: discord.Message):
if message.channel.permissions_for(message.guild.me).manage_messages:
try:
await message.delete()
except discord.HTTPException:
pass
def guild_roughly_chunked(guild: discord.Guild) -> bool:
return len(guild.members) / guild.member_count > 0.9
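# A hedged usage sketch (not part of the cog): inside a command, check both the
# bot's and the invoker's positions before assigning a role. `ctx` is a Red
# commands.Context; the surrounding command body is illustrative.
#
# allowed, reason = await is_allowed_by_role_hierarchy(ctx.bot, ctx.me, ctx.author, role)
# if not allowed:
#     await ctx.send(reason)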
|
mmdet/models/dense_heads/anchor_aug.py | Qianna00/InstanceLoc | 120 | 12623161 | import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmdet.core import build_anchor_generator, build_bbox_coder
from mmdet.ops import batched_nms
from ..builder import HEADS
@HEADS.register_module()
class AnchorAugHead(nn.Module):
def __init__(
self,
anchor_generator,
train_cfg=None,
test_cfg=None,
):
super(AnchorAugHead, self).__init__()
self.anchor_generator = build_anchor_generator(anchor_generator)
def init_weights(self, pretrained=None):
pass
def forward_train(
self,
x,
img_metas,
gt_bboxes,
proposal_cfg=None,
):
proposal_list = self.get_fixed_bboxes(
x, img_metas, cfg=proposal_cfg, gt_bboxes=gt_bboxes)
return proposal_list
def get_fixed_bboxes(self,
featmaps,
img_metas,
cfg=None,
rescale=False,
gt_bboxes=None):
if cfg is not None and cfg.get('generate_from_single_level',
None) is not None:
featmaps = tuple(
[featmaps[cfg.get('generate_from_single_level', 2)]])
num_levels = len(featmaps)
num_imgs = len(gt_bboxes)
device = featmaps[0].device
featmap_sizes = [featmaps[i].shape[-2:] for i in range(num_levels)]
mlvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device=device)
proposals = torch.cat(mlvl_anchors, 0)
if not hasattr(self, 'iou_calculator'):
from mmdet.core.bbox.iou_calculators import build_iou_calculator
self.iou_calculator = build_iou_calculator(
dict(type='BboxOverlaps2D'))
overlaps = self.iou_calculator(torch.cat(gt_bboxes, 0), proposals)
iou_thr = cfg.get('iou_thr', 0.5)
nms_cfg = dict(type='nms', iou_thr=cfg.nms_thr)
pos_box_all = []
pos_scores_all = []
pos_idx_all = []
for i in range(num_imgs):
pos_proposals = proposals[overlaps[i] > iou_thr]
            if pos_proposals.shape[0] == 0:
                # no anchors pass the IoU threshold: fall back to the top-overlapping anchors
                ranked_overlaps, ranked_idx = overlaps[i].sort(descending=True)
                pos_proposals = proposals[ranked_idx[:cfg.nms_pre]]
scores = torch.rand(pos_proposals.shape[0], device=device)
IDX = torch.ones(scores.shape[0], dtype=torch.long) * i
pos_box_all.append(pos_proposals)
pos_scores_all.append(scores)
pos_idx_all.append(IDX)
# cat all bboxes across batch to perform nms
pos_box_all = torch.cat(pos_box_all, 0)
pos_scores_all = torch.cat(pos_scores_all, 0)
pos_idx_all = torch.cat(pos_idx_all)
cat_det, cat_keep = batched_nms(pos_box_all, pos_scores_all,
pos_idx_all, nms_cfg)
cat_dets = []
for i in range(num_imgs):
cat_dets_i = cat_det[pos_idx_all[cat_keep] == i]
cat_dets.append(cat_dets_i)
return cat_dets
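# A rough usage sketch (assumption, mmdet-style config): build the head from a
# standard AnchorGenerator config and request fixed proposals around the ground
# truth boxes. The scales/ratios/strides and proposal_cfg values are illustrative;
# proposal_cfg is given as an mmcv ConfigDict so attribute access (cfg.nms_thr) works.
#
# from mmcv.utils import ConfigDict
# head = AnchorAugHead(anchor_generator=dict(
#     type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0],
#     strides=[4, 8, 16, 32, 64]))
# proposals = head.forward_train(feats, img_metas, gt_bboxes,
#                                proposal_cfg=ConfigDict(dict(nms_thr=0.7, nms_pre=100)))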
|
tools/benchmark/matrix_vector_dotproduct.py | sumau/tick | 411 | 12623178 | <filename>tools/benchmark/matrix_vector_dotproduct.py
# License: BSD 3 clause
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tools.benchmark.benchmark_util import (
iter_executables, run_benchmark, default_result_dir, get_last_result_dir,
extract_build_from_name)
BASE_FILE_NAME = os.path.basename(__file__).replace('.py', '')
def run_matrix_vector_dotproduct_benchmarks():
result_dir = default_result_dir(base=BASE_FILE_NAME)
for executable in iter_executables('matrix_vector_dotproduct'):
run_benchmark(executable, [None], result_dir)
return result_dir
def _load_benchmark_data(result_dir=None):
if result_dir is None:
result_dir = get_last_result_dir(BASE_FILE_NAME)
cols = ["time", "iterations", "n_rows", "n_cols", "exectuable", "build"]
df = pd.DataFrame(columns=cols)
for result_file in [f for f in os.listdir(result_dir)
if f.endswith('tsv')]:
result_path = os.path.join(result_dir, result_file)
local_df = pd.read_csv(result_path, sep='\t', names=cols[:-1],
index_col=False)
local_df[cols[-1]] = extract_build_from_name(result_file)
df = df.append(local_df)
    for num_col in [col for col in cols if col not in ['executable', 'build']]:
df[num_col] = pd.to_numeric(df[num_col])
return df, result_dir
def plot_matrix_vector_dotproduct_benchmark(result_dir=None):
df, result_dir = _load_benchmark_data(result_dir)
fig, ax = plt.subplots(1, 1)
grouped_times = df.groupby('build')['time']
mean_times = grouped_times.mean()
confidence_times = grouped_times.std() / np.sqrt(grouped_times.count())
confidence_times *= 1.96
mean_times.plot(kind='bar', yerr=confidence_times, ax=ax,
error_kw={'capsize': 10},
rot=0)
for p in ax.patches:
ax.annotate('{:.4f}'.format(p.get_height()),
(p.get_x() + p.get_width() / 4, p.get_height() / 2))
ax.set_ylabel('Time (s)')
ax.set_title(BASE_FILE_NAME)
fig.tight_layout()
plot_file_path = os.path.abspath(os.path.join(result_dir, 'result.png'))
plt.savefig(plot_file_path)
print('saved figure in {}'.format(plot_file_path))
run_matrix_vector_dotproduct_benchmarks()
plot_matrix_vector_dotproduct_benchmark()
|
tests/implementations/ormar_.py | quaternionmedia/fastapi-crudrouter | 686 | 12623185 | import os
import databases
import ormar
import pytest
import sqlalchemy
from fastapi import FastAPI
from fastapi_crudrouter import OrmarCRUDRouter
from tests import CarrotCreate, CarrotUpdate, PAGINATION_SIZE, CUSTOM_TAGS
DATABASE_URL = "sqlite:///./test.db"
database = databases.Database(DATABASE_URL)
metadata = sqlalchemy.MetaData()
@pytest.fixture(scope="function", autouse=True)
async def cleanup():
async with database:
await PotatoModel.objects.delete(each=True)
await CarrotModel.objects.delete(each=True)
class BaseMeta(ormar.ModelMeta):
metadata = metadata
database = database
def _setup_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.drop_all(engine)
metadata.create_all(engine)
return engine, database
class PotatoModel(ormar.Model):
class Meta(BaseMeta):
pass
id = ormar.Integer(primary_key=True)
thickness = ormar.Float()
mass = ormar.Float()
color = ormar.String(max_length=255)
type = ormar.String(max_length=255)
class CarrotModel(ormar.Model):
class Meta(BaseMeta):
pass
id = ormar.Integer(primary_key=True)
length = ormar.Float()
color = ormar.String(max_length=255)
class PotatoTypeModel(ormar.Model):
class Meta(BaseMeta):
tablename = "potato_type"
name = ormar.String(primary_key=True, max_length=300)
origin = ormar.String(max_length=300)
class CustomPotatoModel(ormar.Model):
class Meta(BaseMeta):
tablename = "custom_potatoes"
potato_id = ormar.Integer(primary_key=True)
thickness = ormar.Float()
mass = ormar.Float()
color = ormar.String(max_length=255)
type = ormar.String(max_length=255)
class UniquePotatoModel(ormar.Model):
class Meta(BaseMeta):
pass
id = ormar.Integer(primary_key=True)
thickness = ormar.Float()
mass = ormar.Float()
color = ormar.String(max_length=255, unique=True)
type = ormar.String(max_length=255)
def get_app():
[
os.remove(f"./db.sqlite3{s}")
for s in ["", "-wal", "-shm"]
if os.path.exists(f"./db.sqlite3{s}")
]
_setup_database()
app = FastAPI()
@app.on_event("startup")
async def startup():
await database.connect()
@app.on_event("shutdown")
async def shutdown():
await database.disconnect()
return app
def ormar_implementation(**kwargs):
app = get_app()
router_settings = [
dict(
schema=PotatoModel,
prefix="potato",
paginate=PAGINATION_SIZE,
),
dict(
schema=CarrotModel,
update_schema=CarrotUpdate,
prefix="carrot",
tags=CUSTOM_TAGS,
),
]
return (
app,
OrmarCRUDRouter,
router_settings,
)
# noinspection DuplicatedCode
def ormar_implementation_custom_ids():
app = get_app()
app.include_router(
OrmarCRUDRouter(
schema=CustomPotatoModel,
prefix="potatoes",
paginate=PAGINATION_SIZE,
)
)
return app
def ormar_implementation_string_pk():
app = get_app()
app.include_router(
OrmarCRUDRouter(
schema=PotatoTypeModel,
prefix="potato_type",
)
)
return app
def ormar_implementation_integrity_errors():
app = get_app()
app.include_router(
OrmarCRUDRouter(
schema=UniquePotatoModel,
prefix="potatoes",
paginate=PAGINATION_SIZE,
)
)
app.include_router(
OrmarCRUDRouter(
schema=CarrotModel,
create_schema=CarrotCreate,
update_schema=CarrotUpdate,
prefix="carrots",
)
)
return app
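# A small wiring sketch (not part of these helpers): the tuple returned by
# ormar_implementation() is normally unpacked by the test harness, roughly:
#
# app, router_cls, settings = ormar_implementation()
# for kwargs in settings:
#     app.include_router(router_cls(**kwargs))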
|
2020/05/16/Adding Extra Fields On Many-To-Many Relationships in Django/many_to_many_extra/many_to_many_extra/example/models.py | kenjitagawa/youtube_video_code | 492 | 12623189 | from django.db import models
class Student(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Course(models.Model):
name = models.CharField(max_length=30)
students = models.ManyToManyField(Student, through='Enrollment')
def __str__(self):
return self.name
class Enrollment(models.Model):
student = models.ForeignKey(Student, on_delete=models.CASCADE)
course = models.ForeignKey(Course, on_delete=models.CASCADE)
date_enrolled = models.DateField()
final_grade = models.CharField(max_length=1, blank=True, null=True)
class Meta:
unique_together = [['student', 'course']] |
sonnet/src/moving_averages_test.py | ScriptBox99/deepmind-sonnet | 10,287 | 12623202 | <reponame>ScriptBox99/deepmind-sonnet
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.v2.src.moving_averages."""
from absl.testing import parameterized
from sonnet.src import moving_averages
from sonnet.src import test_utils
import tensorflow as tf
class ExponentialMovingAverageTest(test_utils.TestCase, parameterized.TestCase):
def testCall(self):
ema = moving_averages.ExponentialMovingAverage(0.50)
self.assertAllClose(ema(3.0).numpy(), 3.0)
self.assertAllClose(ema(6.0).numpy(), 5.0)
def testUpdateAndValue(self):
ema = moving_averages.ExponentialMovingAverage(0.50)
ema.update(3.0)
self.assertAllClose(ema.value.numpy(), 3.0, atol=1e-3, rtol=1e-5)
ema.update(6.0)
self.assertAllClose(ema.value.numpy(), 5.0, atol=1e-3, rtol=1e-5)
def testReset(self):
ema = moving_averages.ExponentialMovingAverage(0.90)
self.assertAllClose(ema(3.0).numpy(), 3.0, atol=1e-3, rtol=1e-5)
ema.reset()
self.assertEqual(ema.value.shape, ())
self.assertEqual(ema.value.numpy(), 0.0)
self.assertAllClose(ema(3.0).numpy(), 3.0, atol=1e-3, rtol=1e-5)
def testResetVector(self):
ema = moving_averages.ExponentialMovingAverage(0.90)
random_input = tf.random.normal((1, 5))
ema(random_input)
ema.reset()
self.assertEqual(ema.value.shape, (1, 5))
self.assertAllClose(ema.value.numpy(), tf.zeros_like(random_input))
self.assertEqual(ema._counter.dtype, tf.int64)
def testValueEqualsLatestUpdate(self):
ema = moving_averages.ExponentialMovingAverage(0.50)
self.assertAllClose(ema(3.0).numpy(), 3.0, atol=1e-3, rtol=1e-5)
self.assertAllClose(ema.value.numpy(), 3.0, atol=1e-3, rtol=1e-5)
self.assertAllClose(ema(6.0).numpy(), 5.0, atol=1e-3, rtol=1e-5)
self.assertAllClose(ema.value.numpy(), 5.0, atol=1e-3, rtol=1e-5)
@parameterized.parameters(True, False)
def testWithTFFunction(self, autograph):
ema_1 = moving_averages.ExponentialMovingAverage(0.95)
ema_2 = moving_averages.ExponentialMovingAverage(0.95)
ema_func = tf.function(ema_2, autograph=autograph)
for _ in range(10):
x = tf.random.uniform((), 0, 10)
self.assertAllClose(
ema_1(x).numpy(), ema_func(x).numpy(), atol=1e-3, rtol=1e-5)
@parameterized.parameters(True, False)
def testResetWithTFFunction(self, autograph):
ema = moving_averages.ExponentialMovingAverage(0.90)
ema_func = tf.function(ema, autograph=autograph)
self.assertAllClose(ema_func(3.0).numpy(), 3.0, atol=1e-3, rtol=1e-5)
ema.reset()
self.assertEqual(ema.value.numpy(), 0.0)
self.assertAllClose(ema_func(3.0).numpy(), 3.0, atol=1e-3, rtol=1e-5)
@parameterized.named_parameters(("2D", [2, 2]), ("3D", [1, 1, 3]))
def testAlternativeShape(self, shape):
ema = moving_averages.ExponentialMovingAverage(0.90)
value = tf.random.uniform(shape)
result = ema(value)
self.assertEqual(value.shape, result.shape)
if __name__ == "__main__":
tf.test.main()
|
corehq/apps/cloudcare/dbaccessors.py | dimagilg/commcare-hq | 471 | 12623206 | <filename>corehq/apps/cloudcare/dbaccessors.py<gh_stars>100-1000
from corehq.apps.app_manager.dbaccessors import get_brief_apps_in_domain
from corehq.apps.cloudcare.models import ApplicationAccess
from corehq.util.quickcache import quickcache
@quickcache(['domain'])
def get_application_access_for_domain(domain):
"""
There should only be one of these per domain,
return it if found, otherwise create it.
"""
return ApplicationAccess.objects.get_or_create(domain=domain)[0]
def get_cloudcare_apps(domain):
apps = get_brief_apps_in_domain(domain, include_remote=False)
return [app for app in apps if app.cloudcare_enabled]
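# A tiny usage sketch (illustrative domain name, not part of the module):
#
# access = get_application_access_for_domain('demo-domain')
# cloudcare_apps = get_cloudcare_apps('demo-domain')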
|
cmt/mapclient_qt.py | nasa/CrisisMappingToolk | 178 | 12623235 | <filename>cmt/mapclient_qt.py<gh_stars>100-1000
# -----------------------------------------------------------------------------
# Copyright (c) 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
"""A simple map GUI.
Implements a tiled map using QT. Displays map tiles using
whatever projection the tiles are in and only knows about tile coordinates,
(as opposed to geospatial coordinates.) This assumes that the tile-space is
organized as a power-of-two pyramid, with the origin in the upper left corner.
This currently has several spots that are hard-coded for 256x256 tiles, even
though TileManager tries to track this.
Supports mouse-based pan and zoom as well as tile upsampling while waiting
for new tiles to load. The map to display is specified by a TileManager, and
added to the GUI on creation or manually using addOverlay()
gui = GuiWrapper(MakeTileManager(mapid))
Tiles are referenced using a key of (level, x, y) throughout.
Several of the functions are named to match the Google Maps Javascript API,
and therefore violate style guidelines.
Based on the TK map interface from Google Earth Engine.
Terminology guide:
- overlay = One of the things that can be displayed on the map.
There is one of these for each "addToMap()" call.
- layer = Short for Layer Number, used for indexing a list of overlays.
This file contains the core GUI implementation. Customized GUI instances are
located in separate files.
"""
import collections
import cStringIO
import functools
import math
import random
import Queue
import sys
import time
import threading
import urllib2
import json
import ee
import os
import zipfile
import cPickle as pickle
# check if the Python imaging libraries used by the mapclient module are installed
try:
from PIL import ImageQt # pylint: disable=g-import-not-at-top
from PIL import Image, ImageChops # pylint: disable=g-import-not-at-top
except ImportError:
print("""
ERROR: A Python library (PILLOW) used by the CMT mapclient_qt module
was not found. Information on PILLOW can be found at:
https://pillow.readthedocs.org/
""")
raise
try:
import PyQt4 # pylint: disable=g-import-not-at-top
from PyQt4 import QtCore, QtGui
except ImportError:
print("""
ERROR: A Python library (PyQt4) used by the CMT mapclient_qt
module was not found.
""")
raise
import cmt.util.miscUtilities
# The default URL to fetch tiles from. We could pull this from the EE library,
# however this doesn't have any other dependencies on that yet, so let's not.
BASE_URL = 'https://earthengine.googleapis.com'
# Default directory to save images to
DEFAULT_SAVE_DIR = os.path.abspath(__file__)
# This is a URL pattern for creating an overlay from the google maps base map.
# The z, x and y arguments at the end correspond to level, x, y here.
DEFAULT_MAP_URL_PATTERN = ('http://mt1.google.com/vt/lyrs=m@176000000&hl=en&'
'src=app&z=%d&x=%d&y=%d')
# Tiles downloaded from Google Maps are cached in this file between sessions
LOCAL_MAP_CACHE_PATH = '/home/smcmich1/repo/earthEngine/gm_tile_cache.dat'
# Text to display in "About" buttons for legal purposes
ABOUT_TEXT = '''Crisis Mapping Toolkit (CMT) v1
A tool for assisting in crisis measurement and detection using Google's Earth Engine.
Copyright (c) 2014, United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All rights reserved.
The Crisis Mapping Toolkit (CMT) v1 framework is licensed under the Apache License, Version 2.0 (the "License"); you may not use this application except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.'''
#================================================================================
# Classes that implement the GUI
class MapViewOverlay(object):
'''Structure that stores all information about a single overlay in a MapViewWidget'''
def __init__(self, tileManager, eeobject, name, show=True, vis_params=dict()):#, opacity=1.0):
self.tileManager = tileManager # A TileManager instance for this overlay
self.eeobject = eeobject # Earth Engine function object which computes the overlay.
self.name = name # Name of the overlay.
self.show = show # True/False if the overlay is currently being displayed.
self.vis_params = vis_params # EE-style visualization parameters string.
self.opacity = 1.0 # Current opacity level for display - starts at 1.0
def __str__(self):
        s = 'MapViewOverlay object: ' + self.name
        return s
# The map will display a stack of these when you right click on it.
class MapViewOverlayInfoWidget(QtGui.QWidget):
'''Displays information for one layer at one location in a small horizontal bar. Easy to stack vertically.
Includes an opacity control and an on/off toggle checkbox.'''
def __init__(self, parent, layer, x, y):
super(MapViewOverlayInfoWidget, self).__init__()
self.parent = parent # The parent is a MapViewWidget object
self.layer = layer # The index of the layer in question
self.x = x # Click location
self.y = y
overlay = self.parent.overlays[self.layer] # This is a MapViewOverlay object
# Constants that define the field size
NAME_WIDTH = 130
ITEM_HEIGHT = 10
INFO_WIDTH = 450
SLIDER_WIDTH = 100
OPACITY_MAX = 100
# Set up the visibility checkbox
self.check_box = QtGui.QCheckBox(self)
self.check_box.setChecked(overlay.show)
self.check_box.stateChanged.connect(self.toggle_visible)
# Set up the opacity slider
self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.slider.setRange(0, OPACITY_MAX) # 0 to 100 percent
self.slider.setValue(int(overlay.opacity * OPACITY_MAX))
self.slider.setTickInterval(25) # Add five tick marks
self.slider.setMinimumSize(SLIDER_WIDTH, ITEM_HEIGHT)
self.slider.valueChanged.connect(self.set_transparency) # Whenever the slider is moved, call set_transparency
# Add the overlay name
self.name = QtGui.QLabel(overlay.name, self)
self.name.setMinimumSize(NAME_WIDTH, ITEM_HEIGHT)
# Add the pixel value
self.value = QtGui.QLabel('...', self) # Display this until the real value is ready
self.value.setMinimumSize(INFO_WIDTH, ITEM_HEIGHT)
def get_pixel():
'''Helper function to retrieve the value of a single pixel in a single layer.'''
try:
return self.parent.getPixel(layer, x, y).getInfo()
except: # features throw ee exception, ignore
return None
self.pixel_loader = cmt.util.miscUtilities.waitForEeResult(get_pixel, self.set_pixel_value)
# Set up all the components in a horizontal box layout
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.check_box)
hbox.addWidget(self.name)
hbox.addWidget(self.slider)
hbox.addWidget(self.value)
self.setLayout(hbox) # Call QT function derived from parent QWidget class
def set_pixel_value(self, value):
'''Generate the text description for the pixel we clicked on'''
# Handle values with not enough data
if value == None:
self.value.setText('')
return
if len(value) <= 1:
self.value.setText('')
return
headers = value[0] # Extract the two parts of 'value'
data = value[1]
names = headers[4:] # Skip id, lon, lat, time
values = data[4:] # Skip id, lon, lat, time
# Get the object which contains information about the bands to display
vis_params = self.parent.overlays[self.layer].vis_params
text = ''
for i in range(len(names)):
# If bands were defined for this layer, only display the names of the selected bands.
if vis_params and ('bands' in vis_params):
if not (names[i] in vis_params['bands']): # WARNING: This parsing could be more robust!
continue
if len(text) > 0: # Add comma after first entry
text += ', '
text += str(names[i]) + ': ' + str(values[i]) # Just keep appending strings
self.value.setText(text)
def toggle_visible(self):
self.parent.overlays[self.layer].show = not self.parent.overlays[self.layer].show
self.parent.reload()
def set_transparency(self, value): # This is called whenever the slider bar is changed
'''Set the layer transparency with the input value'''
self.parent.overlays[self.layer].opacity = value / 100.0
self.parent.reload()
def hideEvent(self, event):
self.parent.setFocus()
class MapViewWidget(QtGui.QWidget):
"""A simple discrete zoom level map viewer.
This class handles user input, coordinate conversion, and image painting.
It requests tiles from the TileManager class when it needs them."""
# Signals are defined here which other widgets can listen in on
mapClickedSignal = QtCore.pyqtSignal(int, int) # x and y click coordinates.
def __init__(self, inputTileManager=None):
super(MapViewWidget, self).__init__()
# for adding new layers to map
self.executing_threads = []
self.thread_lock = threading.Lock()
self.tiles = {} # The cached stack of images at each grid cell.
self.qttiles = {} # The cached PhotoImage at each grid cell.
self.qttiles_lock = threading.RLock()
self.level = 2 # Starting zoom level
self.origin_x = None # The map origin x offset at the current level.
self.origin_y = None # The map origin y offset at the current level.
self.anchor_x = None # Drag anchor.
self.anchor_y = None # Drag anchor.
# Map origin offsets; start at the center of the map.
self.origin_x = (-(2 ** self.level) * 128) + self.width() / 2
self.origin_y = (-(2 ** self.level) * 128) + self.height() / 2
if not inputTileManager:
# Default to a google maps basemap
self.inputTileManager = TileManager(DEFAULT_MAP_URL_PATTERN)
else:
self.inputTileManager = inputTileManager
# The array of overlays are displayed as last on top.
self.overlays = [MapViewOverlay(self.inputTileManager, None, 'Google Maps')]
#print 'Added base overlay!'
def paintEvent(self, event):
'''Rasterize each of the tiles on to the output image display'''
painter = QtGui.QPainter()
with self.qttiles_lock:
painter.begin(self)
for key in self.qttiles.keys():
if key[0] != self.level:
continue
image = self.qttiles[key]
xpos = key[1] * image.width() + self.origin_x
ypos = key[2] * image.height() + self.origin_y
painter.drawImage(QtCore.QPoint(xpos, ypos), image)
painter.end()
def addOverlay(self, inputTileManager, eeobject, name, show, vis_params): # pylint: disable=g-bad-name
"""Add an overlay to the map."""
self.overlays.append(MapViewOverlay(inputTileManager, eeobject, name, show, vis_params))
#print 'Added overlay: ' + name
self.LoadTiles()
def GetViewport(self):
"""Return the visible portion of the map as [xlo, ylo, xhi, yhi] in weird Google coordinates."""
width, height = self.width(), self.height()
return [-self.origin_x, -self.origin_y,
-self.origin_x + width, -self.origin_y + height]
def GetMapBoundingBox(self):
"""Return the bounding box of the current view as [minLon, minLat, maxLon, maxLat]"""
# Just get the coordinates of the pixel corners of the map image
topLeftLonLat = self.pixelCoordToLonLat(0, 0)
botRightLonLat = self.pixelCoordToLonLat(self.width(), self.height())
return [topLeftLonLat[0], botRightLonLat[1], botRightLonLat[0], topLeftLonLat[1]]
def LoadTiles(self):
"""Refresh the entire map."""
#print 'Refreshing the map...'
# Start with the overlay on top.
for i, overlay in reversed(list(enumerate(self.overlays))):
if not overlay.show:
continue
#print 'Refreshing layer = ' + str(i)
tile_list = overlay.tileManager.CalcTiles(self.level, self.GetViewport())
for key in tile_list:
callback = functools.partial(self.AddTile, key=key, overlay=self.overlays[i], layer=i)
overlay.tileManager.getTile(key, callback)
def Flush(self):
"""Empty out all the image fetching queues."""
for overlay in self.overlays:
overlay.tileManager.Flush()
def CompositeTiles(self, key):
"""Composite together all the tiles in this cell into a single image."""
composite = None
numLayers = len(self.tiles[key])
numOverlays = len(self.overlays)
#if numLayers > numOverlays:
# print 'numLayers = ' + str(numLayers)
# print 'numOverlays = ' + str(numOverlays)
for layer in sorted(self.tiles[key]):
image = self.tiles[key][layer]
if not composite:
composite = image.copy() # Create output image buffer
else:
#composite = Image.blend(composite, image, self.overlays[layer].opacity)#composite.paste(image, (0, 0), image)
#if layer >= len(self.overlays):
# print 'Error coming!'
# print key
try:
composite.paste(image, (0, 0),
ImageChops.multiply(image.split()[3],
ImageChops.constant(image, int(self.overlays[layer].opacity * 255))))
except: # TODO: Why do we get errors here after deleting overlays?
pass
#print 'CompositeTiles Exception caught!'
#print image.split()
#print layer
#print self.overlays
#print '========================'
return composite
def AddTile(self, image, key, overlay, layer):
"""Add a tile to the map.
This keeps track of the tiles for each overlay in each grid cell.
As new tiles come in, all the tiles in a grid cell are composited together
into a new tile and any old tile for that spot is replaced.
Args:
image: The image tile to display.
key: A tuple containing the key of the image (level, x, y)
overlay: The overlay this tile belongs to (MapViewOverlay object).
layer: The layer number this overlay corresponds to. Only used
for caching purposes.
"""
# This function is called from multiple threads, and
# could use some synchronization, but it seems to work.
if self.level == key[0] and overlay.show: # Don't add late tiles from another level.
self.tiles[key] = self.tiles.get(key, {})
self.tiles[key][layer] = image
newtile = self.CompositeTiles(key) # Combine all images into a single tile image
newtile = ImageQt.ImageQt(newtile)
with self.qttiles_lock:
self.qttiles[key] = newtile
self.update()
def Zoom(self, event, direction):
"""Zoom the map.
Args:
event: The event that caused this zoom request.
direction: The direction to zoom. +1 for higher zoom, -1 for lower.
"""
if self.level + direction >= 0:
# Discard everything cached in the MapClient, and flush the fetch queues.
self.Flush()
self.tiles = {}
with self.qttiles_lock:
self.qttiles = {}
if direction > 0:
self.origin_x = self.origin_x * 2 - event.x()
self.origin_y = self.origin_y * 2 - event.y()
else:
self.origin_x = (self.origin_x + event.x()) / 2
self.origin_y = (self.origin_y + event.y()) / 2
self.level += direction
self.LoadTiles()
# Notes on level/zoom:
# : pixels_per_lon_degree = (mercator_range / 360.0) * (2**level)
# : Each level of zoom doubles pixels_per_degree
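    # : e.g. at level 10, (256 / 360.0) * 2**10 ~= 728 pixels per degree of longitude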
def wheelEvent(self, event):
self.Zoom(event, 1 if event.delta() > 0 else -1)
event.accept()
def reload(self):
self.Flush()
self.tiles = {}
with self.qttiles_lock:
self.qttiles = {}
self.LoadTiles()
def __showAboutText(self):
'''Pop up a little text box to display legal information'''
QtGui.QMessageBox.about(self, 'about', ABOUT_TEXT)
def __saveCurrentView(self):
'''Saves the current map view to disk as a GeoTIFF'''
# Get the handle of the currently active overlay
# - This is what we will save to disk
overlayToSave = None
for o in self.overlays:
if o.show:
overlayToSave = o
assert(overlayToSave != None) # Should at least be the google base map!
current_view_bbox = self.GetMapBoundingBox()
metersPerPixel = self.getApproxMetersPerPixel()
scale = metersPerPixel
# Pop open a window to get a file name from the user
file_path = str(QtGui.QFileDialog.getSaveFileName(self, 'Save image to', DEFAULT_SAVE_DIR))
## This will be used as a file name so it must be legal
#saveName = overlayToSave.name.replace(' ', '_').replace('/', '-')
#print overlayToSave.eeobject.getInfo()
cmt.util.miscUtilities.downloadEeImage(overlayToSave.eeobject, current_view_bbox, scale, file_path, overlayToSave.vis_params)
def contextMenuEvent(self, event):
menu = QtGui.QMenu(self)
TOP_BUTTON_HEIGHT = 20
TINY_BUTTON_WIDTH = 50
LARGE_BUTTON_WIDTH = 150
# Set up text showing the location which was right-clicked
(lon, lat) = self.pixelCoordToLonLat(event.x(), event.y()) # The event returns pixel coordinates
location_widget = QtGui.QWidgetAction(menu)
location_widget.setDefaultWidget(QtGui.QLabel(" Location: (%g, %g)" % (lon, lat)))
hbox = QtGui.QHBoxLayout()
hbox.addWidget(QtGui.QLabel(" Location: (%g, %g)" % (lon, lat)))
# Add a "save image" button
saveButton = QtGui.QPushButton('Save Current View', self)
saveButton.setMinimumSize(LARGE_BUTTON_WIDTH, TOP_BUTTON_HEIGHT)
saveButton.setMaximumSize(LARGE_BUTTON_WIDTH, TOP_BUTTON_HEIGHT)
saveButton.clicked[bool].connect(self.__saveCurrentView)
hbox.addWidget(saveButton)
# Make a tiny "About" box for legal information
aboutButton = QtGui.QPushButton('About', self)
aboutButton.setMinimumSize(TINY_BUTTON_WIDTH, TOP_BUTTON_HEIGHT)
aboutButton.setMaximumSize(TINY_BUTTON_WIDTH, TOP_BUTTON_HEIGHT)
aboutButton.clicked[bool].connect(self.__showAboutText)
hbox.addWidget(aboutButton)
# Add the location and button to the pop up menu
mainWidget = QtGui.QWidget()
mainWidget.setLayout(hbox)
location_widget.setDefaultWidget(mainWidget)
menu.addAction(location_widget)
# Add a toggle for each layer and put it in the right click menu
for i in range(1, len(self.overlays)):
action = QtGui.QWidgetAction(menu)
item = MapViewOverlayInfoWidget(self, i, event.x(), event.y())
action.setDefaultWidget(item)
menu.addAction(action)
# Now pop up the new window!
menu.popup(QtGui.QCursor.pos())
def getPixel(self, layer, x, y):
collection = ee.ImageCollection([self.overlays[layer].eeobject])
# note: scale likely not correct
(lon, lat) = self.pixelCoordToLonLat(x, y)
point_extracted = collection.getRegion(ee.Geometry.Point(lon, lat), 1)
return point_extracted
def mousePressEvent(self, event):
"""Records the anchor location and sets drag handler."""
self.mapClickedSignal.emit(event.x(), event.y()) # Send out clicked signal
if event.button() == QtCore.Qt.LeftButton: # Now handle locally
self.anchor_x = event.x()
self.anchor_y = event.y()
event.accept()
return
event.ignore()
return
def mouseMoveEvent(self, event):
"""Updates the map position and anchor position."""
    if self.anchor_x is None:
event.ignore()
return
dx = event.x() - self.anchor_x
dy = event.y() - self.anchor_y
if dx or dy:
self.origin_x += dx
self.origin_y += dy
self.anchor_x = event.x()
self.anchor_y = event.y()
self.update()
event.accept()
return
event.ignore()
def mouseReleaseEvent(self, event):
"""Unbind drag handler and redraw."""
if event.button() == QtCore.Qt.LeftButton:
self.anchor_x = None
self.anchor_y = None
self.LoadTiles()
event.accept()
return
event.ignore()
return
def resizeEvent(self, event):
"""Handle resize events."""
self.LoadTiles()
def getApproxMetersPerPixel(self):
'''Returns the approximate meters per pixel at the current location/zoom'''
# The actual value differs in the X and Y direction and across the image
mercator_range = 256.0
scale = 2 ** self.level
pixels_per_degree = (mercator_range / 360.0) * scale
# Get the lat/lon of the center pixel
width, height = self.width(), self.height()
lon, lat = self.pixelCoordToLonLat(width/2, height/2)
# Formula to compute the length of a degree at this latitude
m1 = 111132.92
m2 = -559.82
m3 = 1.175
m4 = -0.0023
p1 = 111412.84
p2 = -93.5
p3 = 0.118
    lat_rad = math.radians(lat)  # the degree-length series below expects latitude in radians
    lat_len_meters = m1 + (m2 * math.cos(2 * lat_rad)) + (m3 * math.cos(4 * lat_rad)) + (m4 * math.cos(6 * lat_rad))
    long_len_meters = (p1 * math.cos(lat_rad)) + (p2 * math.cos(3 * lat_rad)) + (p3 * math.cos(5 * lat_rad))
# Just take the average of the vertical and horizontal size
meters_per_degree = (lat_len_meters + long_len_meters) / 2
# Convert to pixel units
meters_per_pixel = meters_per_degree / pixels_per_degree
return meters_per_pixel
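  # Worked example (illustrative numbers, not part of the original code):
  # at the equator (lat = 0) the series above gives roughly
  #   lat_len_meters  ~ 111132.92 - 559.82 + 1.175 - 0.0023 ~ 110574 m/degree
  #   long_len_meters ~ 111412.84 - 93.5   + 0.118          ~ 111319 m/degree
  # so meters_per_degree ~ 110947. At zoom level 10,
  #   pixels_per_degree = (256 / 360) * 2**10 ~ 728
  # which yields roughly 110947 / 728 ~ 152 meters per pixel.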
def pixelCoordToLonLat(self, column, row):
'''Return the longitude and latitude of a pixel in the map'''
mercator_range = 256.0
scale = 2 ** self.level
origin_x = (mercator_range / 2.0) * scale
origin_y = (mercator_range / 2.0) * scale
pixels_per_lon_degree = (mercator_range / 360.0) * scale
pixels_per_lon_radian = (mercator_range / (2 * math.pi)) * scale
lng = (column - self.origin_x - origin_x) / pixels_per_lon_degree
latRadians = (row - self.origin_y - origin_y) / -pixels_per_lon_radian
lat = (2 * math.atan(math.exp(latRadians)) - math.pi / 2) / (math.pi / 180.0)
return (lng, lat)
def lonLatToPixelCoord(self, lon, lat):
'''Return the pixel coordinate in the map for a given longitude and latitude'''
# From maps/api/javascript/geometry/mercator_projection.js
mercator_range = 256.0
scale = 2 ** self.level
origin_x = (mercator_range / 2.0) * scale
origin_y = (mercator_range / 2.0) * scale
pixels_per_lon_degree = (mercator_range / 360.0) * scale
pixels_per_lon_radian = (mercator_range / (2 * math.pi)) * scale
column = origin_x + (lon * pixels_per_lon_degree)
siny = math.sin(lat * math.pi / 180.0)
# Prevent sin() overflow.
e = 1 - 1e-15
if siny > e:
siny = e
elif siny < -e:
siny = -e
row = origin_y + (0.5 * math.log((1 + siny) / (1 - siny)) *
-pixels_per_lon_radian)
return (column, row)
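  # Quick sanity check of the projection above (assumed values, for illustration):
  # at level 0, mercator_range = 256 and scale = 1, so origin_x = origin_y = 128;
  # lon = 0, lat = 0 gives siny = 0 and therefore (column, row) = (128, 128),
  # i.e. the center of the single 256x256 world tile. pixelCoordToLonLat() maps
  # (128, 128) back to (0, 0) once self.origin_x and self.origin_y are zero.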
def CenterMap(self, lon, lat, opt_zoom=None):
"""Center the map at the given lon, lat and zoom level."""
self.Flush()
self.tiles = {}
with self.qttiles_lock:
self.qttiles = {}
width, height = self.width(), self.height()
if opt_zoom is not None:
self.level = opt_zoom
(column, row) = self.lonLatToPixelCoord(lon, lat)
self.origin_x = -column + width / 2
self.origin_y = -row + height / 2
self.LoadTiles()
def addToMap(self, eeobject, vis_params=None, name="", show=True):
    '''Adds an EE object to the map'''
# Flatten any lists to comma separated strings - needed for eeobject.getMapId() call below!
if vis_params:
vis_params = dict(vis_params)
for key in vis_params.keys():
item = vis_params.get(key)
if (isinstance(item, collections.Iterable) and (not isinstance(item, basestring))):
vis_params[key] = ','.join([str(x) for x in item])
def execute_thread(waiting_threads):
# get thread before starting
with self.thread_lock:
pass
result = eeobject.getMapId(vis_params)
for t in waiting_threads:
t.join()
with self.thread_lock:
self.executing_threads.pop(0)
return result
with self.thread_lock:
self.executing_threads.append(cmt.util.miscUtilities.waitForEeResult(functools.partial(execute_thread, list(self.executing_threads)),
lambda a : self.addOverlay(MakeTileManager(a), eeobject, name, show, vis_params)))
def removeFromMap(self, eeobject):
'''Removes an overlay from the map by matching its EE object'''
self.Flush()
for i in range(len(self.overlays)):
if self.overlays[i].eeobject == eeobject:
#print 'Removing overlay: ' + self.overlays[i].name
del self.overlays[i]
break
self.LoadTiles()
return
class TileManager(object):
"""Retrieves tiles from EE, resizes them, and manages the tile cache.
Each overlay on the map requires its own TileManager instance."""
TILE_WIDTH = 256
TILE_HEIGHT = 256
MAX_CACHE = 1000 # The maximum number of tiles to cache.
_images = {} # The tile cache, keyed by (url, level, x, y). Static class variable.
_lru_keys = [] # Keys to the cached tiles, for cache ejection.
def __init__(self, url):
"""Initialize the TileManager."""
self.url = url
NUM_WORKERS = 10
self.delay = False
# Google's map tile server thinks we are automating queries and blocks us, so we forcibly slow down
if self.url == DEFAULT_MAP_URL_PATTERN:
print('Throttling tile download')
NUM_WORKERS = 1
self.delay = True
    # Make NUM_WORKERS workers, each an instance of the TileFetcher helper class.
self.queue = Queue.Queue()
self.fetchers = [TileManager.TileFetcher(self) for unused_x in range(NUM_WORKERS)]
self.constant = None
def getTile(self, key, callback): # pylint: disable=g-bad-name
"""Get the requested tile.
If the requested tile is already cached, it's returned (sent to the
callback) directly. If it's not cached, a check is made to see if
a lower-res version is cached, and if so that's interpolated up, before
a request for the actual tile is made.
Args:
key: The key of the tile to fetch.
callback: The callback to call when the tile is available. The callback
may be called more than once if a low-res version is available.
"""
result = self.GetCachedTile(key)
if result:
callback(result) # Already have the tile, execute callback
else:
# Interpolate what we have and put the key on the fetch queue.
# - The callback will get called once now and once when we get the tile
self.queue.put((key, callback))
self.Interpolate(key, callback)
def Flush(self):
"""Empty the tile queue."""
while not self.queue.empty():
self.queue.get_nowait()
def CalcTiles(self, level, bbox):
"""Calculate which tiles to load based on the visible viewport.
Args:
level: The level at which to calculate the required tiles.
      bbox: The viewport coordinates as a tuple (xlo, ylo, xhi, yhi)
Returns:
The list of tile keys to fill the given viewport.
"""
tile_list = []
for y in xrange(int(bbox[1] / TileManager.TILE_HEIGHT),
int(bbox[3] / TileManager.TILE_HEIGHT + 1)):
for x in xrange(int(bbox[0] / TileManager.TILE_WIDTH),
int(bbox[2] / TileManager.TILE_WIDTH + 1)):
tile_list.append((level, x, y))
return tile_list
def Interpolate(self, key, callback):
"""Upsample a lower res tile if one is available.
Args:
key: The tile key to upsample.
callback: The callback to call when the tile is ready.
"""
level, x, y = key
delta = 1
result = None
while level - delta > 0 and result is None:
prevkey = (level - delta, x / 2, y / 2)
result = self.GetCachedTile(prevkey)
if not result:
(_, x, y) = prevkey
delta += 1
if result:
px = (key[1] % 2 ** delta) * TileManager.TILE_WIDTH / 2 ** delta
py = (key[2] % 2 ** delta) * TileManager.TILE_HEIGHT / 2 ** delta
image = (result.crop([px, py,
px + TileManager.TILE_WIDTH / 2 ** delta,
py + TileManager.TILE_HEIGHT / 2 ** delta])
.resize((TileManager.TILE_WIDTH, TileManager.TILE_HEIGHT)))
callback(image)
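  # Example of the upsampling above (hypothetical tile keys, for illustration):
  # for key = (level=3, x=5, y=6) with only the level-2 tile (2, 2, 3) cached,
  # delta ends up as 1, so px = (5 % 2) * 256 / 2 = 128 and py = (6 % 2) * 256 / 2 = 0;
  # the cached tile's 128x128 quadrant at (128, 0) is cropped and resized to
  # 256x256 as a stand-in until the real (3, 5, 6) tile arrives.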
def PutCacheTile(self, key, image):
"""Insert a new tile in the cache and eject old ones if it's too big."""
cache_key = (self.url,) + key # Generate key
TileManager._images[cache_key] = image # Store image in cache
TileManager._lru_keys.append(cache_key) # Record the key in insertion order
# When the cache gets too big, clear the oldest tile.
while len(TileManager._lru_keys) > TileManager.MAX_CACHE:
remove_key = TileManager._lru_keys.pop(0) # The first entry is the oldest
try:
TileManager._images.pop(remove_key)
except KeyError:
# Just in case someone removed this before we did, don't die on cache clear!
pass
def GetCachedTile(self, key):
"""Returns the specified tile if it's in the cache."""
cache_key = (self.url,) + key
return TileManager._images.get(cache_key, None)
def SaveCacheToDisk(self, path):
'''Record all tile cache information to a file on disk'''
    def makePickleImage(image):
      # tostring() is the old PIL spelling; newer Pillow releases call it tobytes()
      return {'pixels': image.tostring(),
'size' : image.size,
'mode' : image.mode}
# Prepare the images for pickle one at a time (the in-memory format is incompatible)
pickle_images = []
matched_keys = []
for key in TileManager._lru_keys:
if not (key in TileManager._images):
print('Warning: Key not found in _images: ' + str(key))
continue
pickle_images.append(makePickleImage(TileManager._images[key]))
matched_keys.append(key)
with open(path, 'wb') as f:
pickle.dump( (pickle_images, matched_keys), f)
print('Saved '+str(len(pickle_images))+' tiles from cache to path: ' + path)
def LoadCacheFromDisk(self, path):
'''Read a cache file from disk'''
def readPickleImage(pImage):
return Image.fromstring(pImage['mode'], pImage['size'], pImage['pixels'])
# Load the pickle formatted data
with open(path, 'rb') as f:
(pickle_images, TileManager._lru_keys) = pickle.load(f)
# Unpack images one at a time
TileManager._images = {}
for (pImage, key) in zip(pickle_images, TileManager._lru_keys):
TileManager._images[key] = readPickleImage(pImage)
print('Loaded '+str(len(TileManager._lru_keys))+' tiles to cache from path: ' + path)
class TileFetcher(threading.Thread):
"""A threaded URL fetcher used to retrieve tiles."""
    def __init__(self, parentTileManager):
      threading.Thread.__init__(self)
      self.manager = parentTileManager
self.setDaemon(True)
self.start()
def run(self):
"""Pull URLs off the TileManager's queue and call the callback when done."""
MAX_403_ERRORS = 10
errorCount403 = 0
while True:
(key, callback) = self.manager.queue.get()
# Google tile manager thinks we are automating queries and blocks us, so slow down
if self.manager.delay and not self.manager.GetCachedTile(key):
delayTime = 0.05 + (random.random() * 0.2)
time.sleep(delayTime)
# Check one more time that we don't have this yet.
if not self.manager.GetCachedTile(key):
if errorCount403 > MAX_403_ERRORS:
continue
(level, x, y) = key
if x >= 0 and y >= 0 and x <= 2 ** level-1 and y <= 2 ** level-1:
url = self.manager.url % key
try:
data = urllib2.urlopen(url).read()
except urllib2.HTTPError as e:
print(e, file=sys.stderr)
print(e)
            if 'HTTP Error 403' in str(e):
errorCount403 += 1
if errorCount403 > MAX_403_ERRORS:
print('Maximum HTTP Error 403 count exceeded, tile fetching disabled.')
else:
# PhotoImage can't handle alpha on LA images.
# - The convert command forces the image to be loaded into memory.
image = Image.open(cStringIO.StringIO(data)).convert('RGBA')
callback(image)
self.manager.PutCacheTile(key, image)
def MakeTileManager(mapid, baseurl=BASE_URL):
"""Create a TileManager from a mapid."""
# The url is generated in a particular manner from the map ID.
url = (baseurl + '/map/' + mapid['mapid'] + '/%d/%d/%d?token=' + mapid['token'])
return TileManager(url)
class QtGuiWrapper(object):
'''This class is created as a singleton and wraps the QT GUI.
It offers a few interface functions for manipulating the map.
The class is initialized with the TYPE of GUI class it will wrap.'''
def __init__(self, guiClass):
'''Initialize the class with the type of QT GUI to run'''
self.guiClass = guiClass # Record the class type
self.gui = None # The GUI is not initialized yet
self.ready = False
def run(self):
app = QtGui.QApplication(sys.argv) # Do required QT init
self.gui = self.guiClass() # Instantiate a GUI class object
self.ready = True # Now we are ready to rock
sys.exit(app.exec_())
def __getattr__(self, attr):
'''Forward any function call to the GUI class we instantiated'''
while not self.ready:
time.sleep(0.01) # Don't try anything until we are ready!
try:
return getattr(self.gui, attr) # Forward the call to the GUI class instance
except:
raise AttributeError(attr) # This happens if the GUI class does not support the call
#=================================================================================
# A Generic GUI implementation
class GenericMapGui(QtGui.QMainWindow):
'''This sets up the main viewing window in QT, fills it up with a MapViewWidget,
and then forwards all function calls to it.'''
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.tileManager = TileManager(DEFAULT_MAP_URL_PATTERN)
if os.path.exists(LOCAL_MAP_CACHE_PATH):
self.tileManager.LoadCacheFromDisk(LOCAL_MAP_CACHE_PATH)
#except:
# print 'Unable to load cache information from ' + LOCAL_MAP_CACHE_PATH
self.mapWidget = MapViewWidget(self.tileManager)
# Set up all the components in a vertical layout
vbox = QtGui.QVBoxLayout()
# Add the main map widget
vbox.addWidget(self.mapWidget)
# QMainWindow requires that its layout be set in this manner
mainWidget = QtGui.QWidget()
mainWidget.setLayout(vbox)
self.setCentralWidget(mainWidget)
# This is the initial window size, but the user can resize it.
self.setGeometry(100, 100, 720, 720)
self.setWindowTitle('EE Map View')
self.show()
def closeEvent(self,event):
'''Dump the cache to disk'''
#try:
print('Attempting to save tile cache...')
self.tileManager.SaveCacheToDisk(LOCAL_MAP_CACHE_PATH)
#except:
# print 'Unable to load cache information from ' + LOCAL_MAP_CACHE_PATH
def keyPressEvent(self, event):
"""Handle keypress events."""
if event.key() == QtCore.Qt.Key_Q:
QtGui.QApplication.quit()
def __getattr__(self, attr):
'''Forward any unknown function call to MapViewWidget() widget we created'''
try:
return getattr(self.mapWidget, attr) # Forward the call to the MapViewWidget class
except:
raise AttributeError(attr) # This happens if the MapViewWidget class does not support the call
#=================================================================================
# Global objects and functions for interacting with the GUI
# - These are common operations and every GUI needs to support them.
# - These interfaces match an old deprecated version of the Earth Engine interface.
# A global GuiWrapper instance for addToMap convenience.
map_instance = None
# This is the type of GUI the functions below will create.
# - This defaults to the generic GUI, but it can be overwritten in the importing file.
gui_type = GenericMapGui
def addEmptyGui():
'''Brings up the GUI without adding any new data to it'''
# This just requires map_instance to be constructed
global map_instance
if not map_instance:
map_instance = QtGuiWrapper(gui_type)
def run():
''' Runs the GUI thread (blocking). '''
addEmptyGui()
map_instance.run()
def addToMap(eeobject, vis_params=None, name="", show=True):
"""Adds a layer to the default map instance.
Args:
eeobject: The object to add to the map.
vis_params: A dictionary of visualization parameters. See
ee.data.getMapId().
    name: Name of the layer as displayed in the GUI.
    show: Whether the new layer is initially visible.
This call exists to be an equivalent to the playground addToMap() call.
It uses a global MapInstance to hang on to "the map". If the MapInstance
isn't initialized, this creates a new one.
"""
addEmptyGui()
map_instance.addToMap(eeobject, vis_params, name, show)
def removeFromMap(eeobject):
  """Removes a layer from the default map instance.
  Args:
    eeobject: The object to remove from the map.
This call uses a global MapInstance to hang on to "the map". If the MapInstance
isn't initialized, this creates a new one.
"""
addEmptyGui()
map_instance.removeFromMap(eeobject)
def centerMap(lng, lat, zoom): # pylint: disable=g-bad-name
  """Center the default map instance at the given lon, lat and zoom values."""
addEmptyGui()
map_instance.CenterMap(lng, lat, zoom)
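# A minimal usage sketch of the module-level helpers above (not part of the
# original file; the Earth Engine image ID and coordinates are placeholder
# example values):
#
#   import ee
#   ee.Initialize()
#   image = ee.Image('srtm90_v4')
#   addToMap(image, {'min': 0, 'max': 3000}, name='elevation')
#   centerMap(-112.8598, 36.2841, 9)
#   run()  # blocking; starts the Qt event loop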
|
24-class-metaprog/evaltime/metalib.py | SeirousLee/example-code-2e | 990 | 12623244 | # tag::METALIB_TOP[]
print('% metalib module start')
import collections
class NosyDict(collections.UserDict):
def __setitem__(self, key, value):
args = (self, key, value)
print(f'% NosyDict.__setitem__{args!r}')
super().__setitem__(key, value)
def __repr__(self):
return '<NosyDict instance>'
# end::METALIB_TOP[]
# tag::METALIB_BOTTOM[]
class MetaKlass(type):
print('% MetaKlass body')
@classmethod # <1>
def __prepare__(meta_cls, cls_name, bases): # <2>
args = (meta_cls, cls_name, bases)
print(f'% MetaKlass.__prepare__{args!r}')
return NosyDict() # <3>
def __new__(meta_cls, cls_name, bases, cls_dict): # <4>
args = (meta_cls, cls_name, bases, cls_dict)
print(f'% MetaKlass.__new__{args!r}')
def inner_2(self):
print(f'% MetaKlass.__new__:inner_2({self!r})')
cls = super().__new__(meta_cls, cls_name, bases, cls_dict.data) # <5>
cls.method_c = inner_2 # <6>
return cls # <7>
def __repr__(cls): # <8>
cls_name = cls.__name__
return f"<class {cls_name!r} built by MetaKlass>"
print('% metalib module end')
# end::METALIB_BOTTOM[]
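# Illustrative sketch (not part of the book's example files): a class that uses
# MetaKlass would behave roughly as follows, with NosyDict capturing the class
# body thanks to __prepare__:
#
#   class Klass(metaclass=MetaKlass):
#       attr = 42
#
#   # During the class statement, MetaKlass.__prepare__ returns a NosyDict,
#   # NosyDict.__setitem__ logs the 'attr' assignment, and MetaKlass.__new__
#   # receives that NosyDict and injects method_c into the finished class.
#   Klass().method_c()   # prints "% MetaKlass.__new__:inner_2(...)"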
|
addition_module/DMUE/preprocess/mtcnn/__init__.py | weihaoxie/FaceX-Zoo | 1,329 | 12623245 | from .mtcnn import MTCNN |
photutils/detection/core.py | rosteen/photutils | 167 | 12623256 | <reponame>rosteen/photutils
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements the base class and star finder kernel for
detecting stars in an astronomical image. Each star-finding class should
define a method called ``find_stars`` that finds stars in an image.
"""
import abc
import math
import warnings
from astropy.stats import gaussian_fwhm_to_sigma
import numpy as np
from .peakfinder import find_peaks
from ..utils.exceptions import NoDetectionsWarning
__all__ = ['StarFinderBase']
class StarFinderBase(metaclass=abc.ABCMeta):
"""
Abstract base class for star finders.
"""
def __call__(self, data, mask=None):
return self.find_stars(data, mask=mask)
@staticmethod
def _find_stars(convolved_data, kernel, threshold, *, min_separation=0.0,
mask=None, exclude_border=False):
"""
Find stars in an image.
Parameters
----------
convolved_data : 2D array_like
The convolved 2D array.
kernel : `_StarFinderKernel`
The convolution kernel.
threshold : float
The absolute image value above which to select sources. This
threshold should be the threshold input to the star finder class
multiplied by the kernel relerr.
min_separation : float, optional
The minimum separation for detected objects in pixels.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are ignored when searching for stars.
exclude_border : bool, optional
Set to `True` to exclude sources found within half the size of
the convolution kernel from the image borders. The default is
`False`, which is the mode used by IRAF's `DAOFIND`_ and
`starfind`_ tasks.
Returns
-------
result : Nx2 `~numpy.ndarray`
A Nx2 array containing the (x, y) pixel coordinates.
.. _DAOFIND: https://iraf.net/irafhelp.php?val=daofind
.. _starfind: https://iraf.net/irafhelp.php?val=starfind
"""
# define a local footprint for the peak finder
if min_separation == 0: # daofind
if isinstance(kernel, np.ndarray):
footprint = np.ones(kernel.shape)
else:
footprint = kernel.mask.astype(bool)
else:
# define a local circular footprint for the peak finder
idx = np.arange(-min_separation, min_separation + 1)
xx, yy = np.meshgrid(idx, idx)
footprint = np.array((xx**2 + yy**2) <= min_separation**2,
dtype=int)
# pad the convolved data and mask by half the kernel size (or
# x/y radius) to allow for detections near the edges
if isinstance(kernel, np.ndarray):
ypad = (kernel.shape[0] - 1) // 2
xpad = (kernel.shape[1] - 1) // 2
else:
ypad = kernel.yradius
xpad = kernel.xradius
if not exclude_border:
pad = ((ypad, ypad), (xpad, xpad))
pad_mode = 'constant'
convolved_data = np.pad(convolved_data, pad, mode=pad_mode,
constant_values=0.0)
if mask is not None:
mask = np.pad(mask, pad, mode=pad_mode, constant_values=False)
# find local peaks in the convolved data
# suppress any NoDetectionsWarning from find_peaks
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=NoDetectionsWarning)
tbl = find_peaks(convolved_data, threshold, footprint=footprint,
mask=mask)
if exclude_border:
xmax = convolved_data.shape[1] - xpad
ymax = convolved_data.shape[0] - ypad
mask = ((tbl['x_peak'] > xpad) & (tbl['y_peak'] > ypad)
& (tbl['x_peak'] < xmax) & (tbl['y_peak'] < ymax))
tbl = tbl[mask]
if tbl is None:
return None
xpos, ypos = tbl['x_peak'], tbl['y_peak']
if not exclude_border:
xpos -= xpad
ypos -= ypad
return np.transpose((xpos, ypos))
@abc.abstractmethod
def find_stars(self, data, mask=None):
"""
Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.Table` or `None`
A table of found stars. If no stars are found then `None` is
returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
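# A minimal sketch of a concrete subclass (not part of photutils; the class
# name and the way the threshold is scaled are illustrative only). It convolves
# the data with a _StarFinderKernel and reuses the _find_stars() helper above:
#
#   from astropy.convolution import convolve
#   from astropy.table import Table
#
#   class SimpleStarFinder(StarFinderBase):
#       def __init__(self, threshold, fwhm):
#           self.threshold = threshold
#           self.kernel = _StarFinderKernel(fwhm)
#
#       def find_stars(self, data, mask=None):
#           convolved = convolve(data, self.kernel.data)
#           xypos = self._find_stars(convolved, self.kernel,
#                                    self.threshold * self.kernel.relerr,
#                                    mask=mask)
#           if xypos is None:
#               return None
#           return Table(rows=xypos, names=('xcentroid', 'ycentroid'))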
class _StarFinderKernel:
"""
Container class for a 2D Gaussian density enhancement kernel.
The kernel has negative wings and sums to zero. It is used by both
`DAOStarFinder` and `IRAFStarFinder`.
Parameters
----------
fwhm : float
The full-width half-maximum (FWHM) of the major axis of the
Gaussian kernel in units of pixels.
ratio : float, optional
The ratio of the minor and major axis standard deviations of the
Gaussian kernel. ``ratio`` must be strictly positive and less
than or equal to 1.0. The default is 1.0 (i.e., a circular
Gaussian kernel).
theta : float, optional
The position angle (in degrees) of the major axis of the
Gaussian kernel, measured counter-clockwise from the positive x
axis.
sigma_radius : float, optional
The truncation radius of the Gaussian kernel in units of sigma
(standard deviation) [``1 sigma = FWHM /
2.0*sqrt(2.0*log(2.0))``]. The default is 1.5.
normalize_zerosum : bool, optional
        Whether to normalize the Gaussian kernel to have zero sum. The
default is `True`, which generates a density-enhancement kernel.
Notes
-----
The class attributes include the dimensions of the elliptical kernel
and the coefficients of a 2D elliptical Gaussian function expressed
as:
``f(x,y) = A * exp(-g(x,y))``
where
``g(x,y) = a*(x-x0)**2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)**2``
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
def __init__(self, fwhm, ratio=1.0, theta=0.0, sigma_radius=1.5,
normalize_zerosum=True):
if fwhm < 0:
raise ValueError('fwhm must be positive.')
if ratio <= 0 or ratio > 1:
            raise ValueError('ratio must be positive and less than or '
                             'equal to 1.')
if sigma_radius <= 0:
raise ValueError('sigma_radius must be positive.')
self.fwhm = fwhm
self.ratio = ratio
self.theta = theta
self.sigma_radius = sigma_radius
self.xsigma = self.fwhm * gaussian_fwhm_to_sigma
self.ysigma = self.xsigma * self.ratio
theta_radians = np.deg2rad(self.theta)
cost = np.cos(theta_radians)
sint = np.sin(theta_radians)
xsigma2 = self.xsigma**2
ysigma2 = self.ysigma**2
self.a = (cost**2 / (2.0 * xsigma2)) + (sint**2 / (2.0 * ysigma2))
# CCW
self.b = 0.5 * cost * sint * ((1.0 / xsigma2) - (1.0 / ysigma2))
self.c = (sint**2 / (2.0 * xsigma2)) + (cost**2 / (2.0 * ysigma2))
# find the extent of an ellipse with radius = sigma_radius*sigma;
# solve for the horizontal and vertical tangents of an ellipse
# defined by g(x,y) = f
self.f = self.sigma_radius**2 / 2.0
denom = (self.a * self.c) - self.b**2
# nx and ny are always odd
self.nx = 2 * int(max(2, math.sqrt(self.c * self.f / denom))) + 1
self.ny = 2 * int(max(2, math.sqrt(self.a * self.f / denom))) + 1
self.xc = self.xradius = self.nx // 2
self.yc = self.yradius = self.ny // 2
# define the kernel on a 2D grid
yy, xx = np.mgrid[0:self.ny, 0:self.nx]
self.circular_radius = np.sqrt((xx - self.xc)**2 + (yy - self.yc)**2)
self.elliptical_radius = (self.a * (xx - self.xc)**2
+ 2.0 * self.b * (xx - self.xc)
* (yy - self.yc)
+ self.c * (yy - self.yc)**2)
self.mask = np.where(
(self.elliptical_radius <= self.f)
| (self.circular_radius <= 2.0), 1, 0).astype(int)
self.npixels = self.mask.sum()
# NOTE: the central (peak) pixel of gaussian_kernel has a value of 1.
self.gaussian_kernel_unmasked = np.exp(-self.elliptical_radius)
self.gaussian_kernel = self.gaussian_kernel_unmasked * self.mask
# denom = variance * npixels
denom = ((self.gaussian_kernel**2).sum()
- (self.gaussian_kernel.sum()**2 / self.npixels))
self.relerr = 1.0 / np.sqrt(denom)
# normalize the kernel to zero sum
if normalize_zerosum:
self.data = ((self.gaussian_kernel
- (self.gaussian_kernel.sum() / self.npixels))
/ denom) * self.mask
else:
self.data = self.gaussian_kernel
self.shape = self.data.shape
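# Note on the coefficients above (derived from the docstring's g(x, y) form):
# for a circular kernel (ratio=1, theta=0) with standard deviation sigma,
# cost=1 and sint=0, so a = c = 1 / (2 * sigma**2) and b = 0, reducing g(x, y)
# to the familiar ((x - x0)**2 + (y - y0)**2) / (2 * sigma**2).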
|
src/IDA/grap/idagrap/ui/widgets/PatternGenerationWidget.py | AirbusCyber/grap | 171 | 12623283 | #!/usr/bin/env python
# Inspired by IDAscope.
from pygrap import graph_free
import idagrap.ui.helpers.QtShim as QtShim
import idc
import idaapi
from idagrap.config.General import config
from idagrap.patterns.Modules import MODULES
from idagrap.ui.widgets.EditorWidget import EditorWidget
import idagrap.ui.helpers.QtGrapSyntax as syntax
import os
QMainWindow = QtShim.get_QMainWindow()
class PatternGenerationWidget(QMainWindow):
def __init__(self, parent):
"""Initialization."""
# Initialization
self.cc = parent.cc
self.cc.QMainWindow.__init__(self)
# print "[|] loading PatternGenerationWidget"
# Enable access to shared IDAscope modules
self.parent = parent
self.name = "Pattern Generation"
self.icon = self.cc.QIcon(config['icons_path'] + "icons8-plus.png")
self.color = False
# This widget relies on the crypto identifier
self.central_widget = self.cc.QWidget()
self.setCentralWidget(self.central_widget)
self._createGui()
self.actionsDefined = False
self.real_time_option = True
def _createGui(self):
"""
Setup function for the full GUI of this widget.
"""
# Toolbar
self._createToolbar()
# Quick pattern text
self._createQuickPatternTextWidget()
# Text pattern
self._createTextWidget()
# Options widgets
self._createOptionsWidgets()
# Layout and fill the widget
generation_layout = self.cc.QVBoxLayout()
for options_widget in self.options_widgets:
generation_layout.addWidget(options_widget)
hbox = self.cc.QHBoxLayout()
hbox.addWidget(self.text_qp_widget)
hbox.addWidget(self.toolbar_qp)
generation_layout.addLayout(hbox)
generation_layout.addWidget(self.text_widget)
self.central_widget.setLayout(generation_layout)
def showEvent(self, QShowEvent):
# Update the UI if the graph is defined
if not self.actionsDefined and self.cc.PatternGenerator.graph.graph:
self._createContextActions()
self._updateContextMenus()
def _createToolbar(self):
"""
Creates the toolbar, containing buttons to control the widget.
"""
self.toolbar = self.addToolBar('Pattern Generation Toolbar')
self.toolbar.setMovable(False)
self._createLoadGraphAction()
self.toolbar.addAction(self.loadGraphAction)
self._createGenerateAction()
self.toolbar.addAction(self.generateAction)
self._createFuncAction()
self.toolbar.addAction(self.funcAction)
self._createResetAction()
self.toolbar.addAction(self.resetAction)
self._createSaveAction()
self.toolbar.addAction(self.saveAction)
self._createOpenAction()
self.toolbar.addAction(self.openAction)
def _createQuickPatternTextWidget(self):
self.text_qp_widget = self.cc.QLineEdit()
self.text_qp_widget.setReadOnly(False)
self.toolbar_qp = self.addToolBar('Pattern Generation Toolbar')
self._createGenerateQuickPatternAction()
self.toolbar_qp.addAction(self.generateQuickPatternAction)
self.text_qp_widget.returnPressed.connect(self._onGenerateQuickPatternButtonClicked)
def _createTextWidget(self):
self.text_widget = self.cc.QTextEdit()
self.text_widget.setReadOnly(False)
self.text_widget.setFontFamily("Monospace")
self.highlight = syntax.PythonHighlighter(self.text_widget.document())
def _createOptionsWidgets(self):
self.options_widgets = []
self.real_time_check = self.cc.QCheckBox("Automatically update the pattern")
self.real_time_check.setChecked(True)
self.real_time_check.stateChanged.connect(self._real_time_check_option_trigger)
self.options_widgets.append(self.real_time_check)
self.generic_arguments_check = self.cc.QCheckBox("Generic arguments")
self.generic_arguments_check.stateChanged.connect(self._generic_arguments_option_trigger)
self.options_widgets.append(self.generic_arguments_check)
self.lighten_memory_ops_check = self.cc.QCheckBox("Lighten memory handling operations")
self.lighten_memory_ops_check.stateChanged.connect(self._lighten_memory_ops_option_trigger)
self.options_widgets.append(self.lighten_memory_ops_check)
self.std_jmp_check = self.cc.QCheckBox("Standardize jump operations")
self.std_jmp_check.stateChanged.connect(self._std_jmp_check_option_trigger)
self.options_widgets.append(self.std_jmp_check)
self.factorize_check = self.cc.QCheckBox("Factorize")
self.factorize_check.stateChanged.connect(self._factorize_check_option_trigger)
self.options_widgets.append(self.factorize_check)
def _createOpenAction(self):
"""
Create an action for the open button of the toolbar and connect it.
"""
# Action
self.openAction = self.cc.QAction(
self.cc.QIcon(config['icons_path'] + "icons8-edit-property-52.png"),
"Open pattern file for editing",
self
)
self.openAction.triggered.connect(self._onOpenClicked)
def _onOpenClicked(self):
options = self.cc.QFileDialog.Options()
        filename, _ = self.cc.QFileDialog.getOpenFileName(self, "Open pattern file (.grapp files in %APPDATA%\IDAgrap\patterns will be parsed as patterns)", self.default_filepath(), "Grap pattern (*.grapp)", options=options)
if filename:
editorWidget = EditorWidget(self.parent, filename)
basename=os.path.basename(filename)
self.parent.tabs.addTab(editorWidget, editorWidget.icon, basename)
def _generic_arguments_option_trigger(self, state):
self.cc.PatternGenerator.generic_arguments_option = (state == 2)
self._render()
def _lighten_memory_ops_option_trigger(self, state):
self.cc.PatternGenerator.lighten_memory_ops_option = (state == 2)
self._render()
def _std_jmp_check_option_trigger(self, state):
self.cc.PatternGenerator.std_jmp_option = (state == 2)
self._render()
def _factorize_check_option_trigger(self, state):
self.cc.PatternGenerator.factorize_option = (state == 2)
self._render()
def _real_time_check_option_trigger(self, state):
self.real_time_option = (state == 2)
if self.real_time_option:
self._render()
self._enable_options()
self.generateAction.setEnabled(not self.real_time_option)
def _createLoadGraphAction(self):
"""
Create an action for the load graph button of the toolbar and connect it.
"""
# Action
self.loadGraphAction = self.cc.QAction(
self.cc.QIcon(config['icons_path'] + "icons8-fingerprint-scan.png"),
"Load the Control Flow Graph from IDA (might take some time)",
self
)
self.loadGraphAction.triggered.connect(self._onLoadGraphButtonClickedThread)
def _createGenerateAction(self):
# Action
self.generateAction = self.cc.QAction(
self.cc.QIcon(config['icons_path'] + "icons8-workflow.png"),
"Generate a pattern (enabled only if you disable the \"Auto update\" option)",
self
)
self.generateAction.setEnabled(False)
self.generateAction.triggered.connect(self._onGenerateButtonClicked)
def _createGenerateQuickPatternAction(self):
# Action
self.generateQuickPatternAction = self.cc.QAction(
self.cc.QIcon(config['icons_path'] + "icons8-workflow.png"),
"Generate a pattern from this short pattern field (for instance: xor->add->xor)",
self
)
self.generateQuickPatternAction.triggered.connect(self._onGenerateQuickPatternButtonClicked)
def _createFuncAction(self):
# Action
self.funcAction = self.cc.QAction(
self.cc.QIcon(config['icons_path'] + "icons8-function-mac-32.png"),
"Target whole current function",
self
)
self.funcAction.triggered.connect(self._onFuncButtonClicked)
def _createResetAction(self):
# Action
self.resetAction = self.cc.QAction(
self.cc.QIcon(config['icons_path'] + "icons8-delete.png"),
"Reset the pattern",
self
)
self.resetAction.triggered.connect(self._onResetButtonClicked)
def _createSaveAction(self):
# Action
self.saveAction = self.cc.QAction(
self.cc.QIcon(config['icons_path'] + "icons8-add-file.png"),
"Save the pattern to disk",
self
)
self.saveAction.triggered.connect(self._onSaveButtonClicked)
def _createContextActions(self):
actions = [
("grap:pg:set_root", None, "[grap] Set root node", self._onSetRootNode),
("grap:pg:add_target", None, "[grap] Add target node", self._onAddTargetNode),
("grap:pg:match_default", config['icons_path'] + "icons8-asterisk-24.png", "[grap] Default match (apply options)", self._onSetMatchDefault),
("grap:pg:match_full", None, "[grap] Full match", self._onSetMatchFull),
("grap:pg:match_opcode_arg1", None, "[grap] Opcode+arg1", self._onSetMatchOpcodeArg1),
("grap:pg:match_opcode_arg2", None, "[grap] Opcode+arg2", self._onSetMatchOpcodeArg2),
("grap:pg:match_opcode_arg3", None, "[grap] Opcode+arg3", self._onSetMatchOpcodeArg3),
("grap:pg:match_opcode", None, "[grap] Opcode", self._onSetMatchOpcode),
("grap:pg:match_wildcard", None, "[grap] Wildcard: *", self._onSetMatchWildcard),
("grap:pg:remove_target", config['icons_path'] + "icons8-delete.png", "[grap] Remove target node", self._onRemoveTargetNode)
]
        for actionId, icon_path, text, method in actions:
if icon_path is not None and icon_path != "":
icon_number = idaapi.load_custom_icon(icon_path)
# Describe the action
action_desc = idaapi.action_desc_t(
actionId, # The action name. This acts like an ID and must be unique
text, # The action text.
PatternGenerationHandler(method), # The action handler.
None,
None,
icon_number)
else:
# Describe the action
action_desc = idaapi.action_desc_t(
actionId, # The action name. This acts like an ID and must be unique
text, # The action text.
PatternGenerationHandler(method)) # The action handler.
# Register the action
idaapi.register_action(action_desc)
self.actionsDefined = True
def _updateContextMenus(self):
self.hooks = PatternGenerationHooks(self.cc)
self.hooks.hook()
def _render(self):
self.updateWantedName()
self.text_widget.setText(self.cc.PatternGenerator.generate(auto=True))
def _render_if_real_time(self):
if self.real_time_option:
self._render()
self._enable_options()
def _onSetRootNode(self):
try:
self.cc.PatternGenerator.setRootNode(idc.get_screen_ea())
except:
self.cc.PatternGenerator.setRootNode(idc.ScreenEA())
self._render_if_real_time()
def _onAddTargetNode(self):
try:
self.cc.PatternGenerator.addTargetNode(idc.get_screen_ea())
except:
self.cc.PatternGenerator.addTargetNode(idc.ScreenEA())
self._render_if_real_time()
def setMatchType(self, type):
try:
selection, begin, end = None, None, None
err = idaapi.read_selection(selection, begin, end)
if err and selection:
for ea in range(begin, end+1):
self.cc.PatternGenerator.setMatchType(ea, type)
else:
self.cc.PatternGenerator.setMatchType(idc.get_screen_ea(), type)
except:
self.cc.PatternGenerator.setMatchType(idc.ScreenEA(), type)
self._render_if_real_time()
def _onSetMatchDefault(self):
self.setMatchType("match_default")
def _onSetMatchFull(self):
self.setMatchType("match_full")
def _onSetMatchOpcodeArg1(self):
self.setMatchType("match_opcode_arg1")
def _onSetMatchOpcodeArg2(self):
self.setMatchType("match_opcode_arg2")
def _onSetMatchOpcodeArg3(self):
self.setMatchType("match_opcode_arg3")
def _onSetMatchOpcode(self):
self.setMatchType("match_opcode")
def _onSetMatchWildcard(self):
self.setMatchType("match_wildcard")
def _onRemoveTargetNode(self):
try:
self.cc.PatternGenerator.removeTargetNode(idc.get_screen_ea())
except:
self.cc.PatternGenerator.removeTargetNode(idc.ScreenEA())
self._render_if_real_time()
def _onLoadGraphButtonClickedThread(self):
self._onLoadGraphButtonClicked()
def _onLoadGraphButtonClicked(self):
existing = False
if self.cc.PatternGenerator.graph.graph:
existing = True
# Analyzing
self.cc.PatternGenerator.graph.force_extract()
# Update the UI
if not self.actionsDefined:
self._createContextActions()
self._updateContextMenus()
# UI information
if existing:
print("[I] CFG updated. You can now define your pattern's root node and target nodes (right click on an instruction in IDA View).")
else:
print("[I] CFG loaded. You can now define your pattern's root node and target nodes (right click on an instruction in IDA View).")
def _onGenerateQuickPatternButtonClicked(self):
print("[I] Generation of quick pattern")
self.text_widget.setText(self.cc.PatternGenerator.generate_quick_pattern(self.text_qp_widget.text()))
self.generateAction.setEnabled(True)
self._disable_options()
def _onGenerateButtonClicked(self):
print("[I] Generation of pattern")
self._render()
self._enable_options()
def _onFuncButtonClicked(self):
if not self.cc.PatternGenerator.graph.graph:
print("WARNING: Unloaded CFG. Make sure to first \"Load the CFG\"")
return
ea = idaapi.get_screen_ea()
if ea:
func = idaapi.ida_funcs.get_func(ea)
if func:
if self.cc.PatternGenerator.rootNode is None:
                    print("[I] Adding root node as function entrypoint: %x" % func.start_ea)
self.cc.PatternGenerator.setRootNode(func.start_ea)
print("[I] Adding nodes to cover whole function")
flowchart = idaapi.FlowChart(func)
for bb in flowchart:
last_inst_addr = idc.prev_head(bb.end_ea)
self.cc.PatternGenerator.addTargetNode(last_inst_addr)
self._render_if_real_time()
def _onResetButtonClicked(self):
print("[I] Reset pattern")
self.cc.PatternGenerator.resetPattern()
self.text_widget.clear()
self._enable_options()
def updateWantedName(self):
pattern_text = self.text_widget.toPlainText()
lines = pattern_text.split("\n")
if len(lines) >= 1:
l = lines[0]
s = l.strip().split(" ")
if len(s) >= 2:
if "graph" in s[0].lower():
fn = s[1]
if len(fn) >= 1:
self.cc.PatternGenerator.wantedName = str(s[1])
def default_filepath(self):
if "user_patterns_path" in config:
default_path = config["user_patterns_path"]
else:
default_path = config["patterns_path"] + os.path.sep + "test"+ os.path.sep + "misc" + os.path.sep + "files"
default_filepath = default_path + os.path.sep + self.cc.PatternGenerator.wantedName + ".grapp"
return default_filepath
def _onSaveButtonClicked(self):
self.updateWantedName()
pattern_text = self.text_widget.toPlainText()
if len(pattern_text.strip()) == 0:
print("WARNING: Pattern is empty.")
return
#print("[I] Saving pattern")
options = self.cc.QFileDialog.Options()
#options |= self.cc.QFileDialog.DontUseNativeDialog
filename, _ = self.cc.QFileDialog.getSaveFileName(self, "Save pattern file (.grapp files in %APPDATA%\IDAgrap\patterns will be parsed as patterns)", self.default_filepath(), "Grap pattern (*.grapp)", options=options)
if filename:
try:
f = open(filename, "w")
f.write(pattern_text)
f.close()
except Exception as e:
print("WARNING:", e)
def _disable_options(self):
self.real_time_check.setEnabled(False)
self.generic_arguments_check.setEnabled(False)
self.lighten_memory_ops_check.setEnabled(False)
self.std_jmp_check.setEnabled(False)
self.factorize_check.setEnabled(False)
def _enable_options(self):
self.real_time_check.setEnabled(True)
self.generic_arguments_check.setEnabled(True)
self.lighten_memory_ops_check.setEnabled(True)
self.std_jmp_check.setEnabled(True)
self.factorize_check.setEnabled(True)
class PatternGenerationHandler(idaapi.action_handler_t):
def __init__(self, callback):
idaapi.action_handler_t.__init__(self)
self.callback = callback
def activate(self, ctx):
self.callback()
# This action is always available.
def update(self, ctx):
return idaapi.AST_ENABLE_ALWAYS
class PatternGenerationHooks(idaapi.UI_Hooks):
def __init__(self, cc):
idaapi.UI_Hooks.__init__(self)
self.cc = cc
self.selected_icon_number = idaapi.load_custom_icon(config['icons_path'] + "icons8-asterisk-24.png")
def populating_widget_popup(self, form, popup):
pass
def finish_populating_widget_popup(self, form, popup):
try:
b = idaapi.get_widget_type(form) == idaapi.BWN_DISASM
except:
b = idaapi.get_tform_type(form) == idaapi.BWN_DISASM
if b:
# Add separator
idaapi.attach_action_to_popup(form, popup, None, None)
# Add actions
try:
currentAddress = idc.get_screen_ea()
except:
currentAddress = idc.ScreenEA()
#if currentAddress in [node.node_id for node in self.cc.PatternGenerator.targetNodes]:
if currentAddress in self.cc.PatternGenerator.coloredNodes:
idaapi.attach_action_to_popup(form, popup, "grap:pg:match_default", None)
idaapi.attach_action_to_popup(form, popup, "grap:pg:match_full", None)
idaapi.update_action_label("grap:pg:match_full", self.cc.PatternGenerator.preview_match(currentAddress, "[grap] Full match", "match_full"))
idaapi.attach_action_to_popup(form, popup, "grap:pg:match_opcode_arg1", None)
idaapi.update_action_label("grap:pg:match_opcode_arg1", self.cc.PatternGenerator.preview_match(currentAddress, "[grap] Opcode+arg1", "match_opcode_arg1"))
idaapi.attach_action_to_popup(form, popup, "grap:pg:match_opcode_arg2", None)
idaapi.update_action_label("grap:pg:match_opcode_arg2", self.cc.PatternGenerator.preview_match(currentAddress, "[grap] Opcode+arg2", "match_opcode_arg2"))
idaapi.attach_action_to_popup(form, popup, "grap:pg:match_opcode_arg3", None)
idaapi.update_action_label("grap:pg:match_opcode_arg3", self.cc.PatternGenerator.preview_match(currentAddress, "[grap] Opcode+arg3", "match_opcode_arg3"))
idaapi.attach_action_to_popup(form, popup, "grap:pg:match_opcode", None)
idaapi.update_action_label("grap:pg:match_opcode", self.cc.PatternGenerator.preview_match(currentAddress, "[grap] Opcode", "match_opcode"))
idaapi.attach_action_to_popup(form, popup, "grap:pg:match_wildcard", None)
idaapi.attach_action_to_popup(form, popup, "grap:pg:remove_target", None)
for type in ["match_default", "match_full", "match_opcode_arg1", "match_opcode_arg2", "match_opcode_arg3", "match_opcode", "match_wildcard"]:
idaapi.update_action_icon("grap:pg:"+type, -1)
if currentAddress not in self.cc.PatternGenerator.targetNodeType:
type = "match_default"
else:
type = self.cc.PatternGenerator.targetNodeType[currentAddress]
idaapi.update_action_icon("grap:pg:"+type, self.selected_icon_number)
elif self.cc.PatternGenerator.rootNode is None or currentAddress != self.cc.PatternGenerator.rootNode.node_id:
idaapi.attach_action_to_popup(form, popup, "grap:pg:set_root", None)
idaapi.attach_action_to_popup(form, popup, "grap:pg:add_target", None)
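# Condensed reference for the registration pattern used above (a sketch, not
# extra functionality; "my:action" and do_something are placeholder names):
#
#   class _Handler(idaapi.action_handler_t):
#       def activate(self, ctx):
#           do_something()
#       def update(self, ctx):
#           return idaapi.AST_ENABLE_ALWAYS
#
#   idaapi.register_action(idaapi.action_desc_t("my:action", "My action", _Handler()))
#
#   class _Hooks(idaapi.UI_Hooks):
#       def finish_populating_widget_popup(self, form, popup):
#           idaapi.attach_action_to_popup(form, popup, "my:action", None)
#
#   hooks = _Hooks()
#   hooks.hook()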
|
gui.py | mraza007/videodownloader | 232 | 12623296 | #!/usr/bin/env python3.6
import tkinter as tk
import os.path
from pytube import YouTube
from threading import Thread
from tkinter import filedialog, messagebox, ttk
from download_youtube_video import download_youtube_video
from pytube.exceptions import PytubeError, RegexMatchError
class YouTubeDownloadGUI(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.pack()
self.label_video_title = None
self.btn_download = None
self.btn_output_browse = None
self.btn_check_id = None
self.text_url = None
self.text_output_path = None
self.text_filename_override = None
self.text_proxy = None
self.radio_video_audio = []
self.audio_only = tk.BooleanVar(self)
self.output_path = tk.StringVar(self)
self.filename_override = tk.StringVar(self)
self.proxy = tk.StringVar(self)
self.video = None
self.stream = tk.IntVar(self)
self.streams = []
self.stream_widgets = []
self.file_size = 0
self.progress_bar = None
self.last_row = 0
self.create_widgets()
def create_widgets(self):
tk.Label(self, text='YouTube URL/ID').grid(row=0, column=0)
self.text_url = tk.Entry(self, width=60)
self.text_url.grid(row=0, column=1, columnspan=2)
self.btn_check_id = tk.Button(self, width=10)
self.btn_check_id['text'] = 'Check Video'
self.btn_check_id['command'] = self.check_video
self.btn_check_id.grid(row=0, column=3)
tk.Label(self, text='Output Directory').grid(row=1, column=0)
self.text_output_path = tk.Entry(self, width=60, textvariable=self.output_path)
self.text_output_path.grid(row=1, column=1, columnspan=2)
self.btn_output_browse = tk.Button(self, width=10)
self.btn_output_browse['text'] = 'Browse...'
self.btn_output_browse['command'] = self.browse_output_path
self.btn_output_browse.grid(row=1, column=3)
tk.Label(self, text='Filename Override').grid(row=2, column=0)
self.text_filename_override = tk.Entry(self, width=60, textvariable=self.filename_override)
self.text_filename_override.grid(row=2, column=1, columnspan=2)
tk.Label(self, text='Proxy').grid(row=3, column=0)
self.text_proxy = tk.Entry(self, width=60, textvariable=self.proxy)
self.text_proxy.grid(row=3, column=1, columnspan=2)
tk.Label(self, text='Media Type').grid(row=4, column=0)
self.radio_video_audio.append(tk.Radiobutton(self, text='Video', variable=self.audio_only,
value=False, command=self.check_video))
self.radio_video_audio.append(tk.Radiobutton(self, text='Audio (Takes Longer)', variable=self.audio_only,
value=True, command=self.check_video))
self.radio_video_audio[0].grid(row=4, column=1)
self.radio_video_audio[1].grid(row=4, column=2)
self.label_video_title = tk.Label(self)
self.label_video_title.grid(row=5, column=0, columnspan=4)
self.content = tk.Frame(self, relief='groove', bd=3)
self.canvas = tk.Canvas(self.content, borderwidth=0, height=250, width=600)
self.scrll_bar = tk.Scrollbar(self.content, orient="vertical", command=self.canvas.yview)
self.frame = tk.Frame(self.canvas)
self.canvas.configure(yscrollcommand=self.scrll_bar.set)
self.content.grid_configure(row=6, column=0, rowspan=1, columnspan=4, sticky='NSEW')
self.scrll_bar.pack(side="right", fill="y")
self.canvas.pack(side='left')
self.canvas.create_window((0, 0), window=self.frame, anchor="nw",
tags="self.frame")
self.frame.bind("<Configure>", self.on_frame_configure)
self.progress_bar = ttk.Progressbar(self, orient='horizontal', length=350, mode='determinate')
self.progress_bar.grid(row=7, column=1, columnspan=2)
self.progress_bar['value'] = 0
self.progress_bar['maximum'] = 100
self.btn_download = tk.Button(self)
self.btn_download['text'] = 'Download'
self.btn_download['command'] = self.download
self.btn_download.config(state=tk.NORMAL)
self.btn_download.grid(row=8, column=1, columnspan=2)
def on_frame_configure(self, event):
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
def browse_output_path(self):
self.output_path.set(filedialog.askdirectory(initialdir='/', title='Select Output Folder'))
def check_video(self):
self.btn_check_id['text'] = 'Checking...'
self.btn_check_id.config(state=tk.DISABLED)
Thread(target=self.threaded_check_video).start()
def threaded_check_video(self):
self.last_row = 0
self.stream.set(0)
[radio_button.destroy() for radio_button in self.stream_widgets]
url = self.text_url.get()
if 'https' not in url:
url = 'https://www.youtube.com/watch?v=%s' % url
try:
if self.proxy.get() != '':
self.video = YouTube(url, proxies={self.proxy.get().split(':')[0]: self.proxy.get()})
else:
self.video = YouTube(url)
self.label_video_title['text'] = self.video.title
self.streams = self.video.streams.filter(only_audio=self.audio_only.get()).all()
for stream in self.streams:
if self.audio_only.get():
text = f'Codec: {stream.audio_codec}, ' \
                           f'ABR: {stream.abr}, ' \
f'File Type: {stream.mime_type.split("/")[1]}, Size: {stream.filesize // 1024} KB'
else:
if stream.video_codec is None:
continue
text = f'Res: {stream.resolution}, FPS: {stream.fps},' \
f' Video Codec: {stream.video_codec}, Audio Codec: {stream.audio_codec}, ' \
f'File Type: {stream.mime_type.split("/")[1]}, Size: {stream.filesize // 1024} KB'
radio_button = tk.Radiobutton(self.frame, text=text, variable=self.stream, value=stream.itag)
self.last_row += 1
radio_button.grid(row=self.last_row, column=0, columnspan=4)
self.stream_widgets.append(radio_button)
except PytubeError as e:
messagebox.showerror('Something went wrong...', e)
except RegexMatchError as e:
messagebox.showerror('Something went wrong...', e)
finally:
self.btn_check_id['text'] = 'Check Video'
self.btn_check_id.config(state=tk.NORMAL)
def download(self):
self.btn_download['text'] = 'Downloading...'
self.btn_download.config(state=tk.DISABLED)
self.btn_check_id.config(state=tk.DISABLED)
self.btn_output_browse.config(state=tk.DISABLED)
[radio_button.config(state=tk.DISABLED) for radio_button in self.radio_video_audio]
Thread(target=self.threaded_download).start()
def update_progress_bar(self, stream, chunk, file_handle, bytes_remaining):
percentage = ((self.file_size - bytes_remaining) / self.file_size) * 100
self.progress_bar['value'] = percentage
def threaded_download(self):
try:
if self.proxy.get() != '':
proxy = {self.proxy.get().split(':')[0]: self.proxy.get()}
else:
proxy = None
for search_stream in self.streams:
if int(search_stream.itag) == int(self.stream.get()):
self.file_size = search_stream.filesize
break
filename = download_youtube_video(self.text_url.get(), itag=self.stream.get(),
output_path=self.output_path.get(),
filename=self.filename_override.get()
if self.filename_override.get() != '' else None,
proxies=proxy, progress_callback=self.update_progress_bar)
messagebox.showinfo('Download Complete!', 'Download Complete!\n%s' % filename)
except PytubeError as e:
messagebox.showerror('Something went wrong...', e)
except RegexMatchError as e:
messagebox.showerror('Something went wrong...', e)
except Exception as e:
messagebox.showerror('Something went wrong',
'Something unknown went wrong. Is this a live stream? Wait until the stream ends.'
'\n\n%s' % e)
finally:
self.btn_download['text'] = 'Download'
self.btn_download.config(state=tk.NORMAL)
self.btn_check_id.config(state=tk.NORMAL)
self.btn_output_browse.config(state=tk.NORMAL)
[radio_button.config(state=tk.NORMAL) for radio_button in self.radio_video_audio]
def resource_path(relative_path):
import sys
if hasattr(sys, '_MEIPASS'):
return os.path.join(sys._MEIPASS, relative_path)
return os.path.join(os.path.abspath('.'), relative_path)
if __name__ == '__main__':
root = tk.Tk()
app = YouTubeDownloadGUI(master=root)
app.master.title('YouTube Video/Audio Downloader')
app.master.tk.call('wm', 'iconphoto', app.master._w, tk.PhotoImage(file=resource_path('assets/ytdl.png')))
app.mainloop()
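# Headless usage sketch (not part of the GUI; mirrors the call made in
# threaded_download above, with made-up example values for the URL and itag):
#
#   from download_youtube_video import download_youtube_video
#
#   path = download_youtube_video(
#       'https://www.youtube.com/watch?v=dQw4w9WgXcQ',
#       itag=22,                       # stream itag chosen from YouTube(url).streams
#       output_path='/tmp',
#       filename=None,                 # keep the video's own title
#       proxies=None,
#       progress_callback=None)
#   print(path)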
|
akshare/bond/bond_convert.py | akfamily/akshare | 721 | 12623300 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/4/13 10:50
Desc: Bonds - Jisilu (集思录) - convertible bonds
Jisilu: https://app.jisilu.cn/data/cbnew/#cb
"""
import pandas as pd
import requests
def bond_cov_jsl(cookie: str = None) -> pd.DataFrame:
"""
    Jisilu convertible bond list
    https://app.jisilu.cn/data/cbnew/#cb
    :param cookie: the browser cookie obtained by the user
    :type cookie: str
    :return: Jisilu convertible bond data
:rtype: pandas.DataFrame
"""
url = "https://app.jisilu.cn/data/cbnew/cb_list_new/"
headers = {
'accept': 'application/json, text/javascript, */*; q=0.01',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'content-length': '220',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'cookie': cookie,
'origin': 'https://app.jisilu.cn',
'pragma': 'no-cache',
'referer': 'https://app.jisilu.cn/data/cbnew/',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'sec-ch-ua-mobile': '?0',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Safari/537.36',
'x-requested-with': 'XMLHttpRequest'
}
params = {
"___jsl": "LST___t=1627021692978",
}
payload = {
"fprice": "",
"tprice": "",
"curr_iss_amt": "",
"volume": "",
"svolume": "",
"premium_rt": "",
"ytm_rt": "",
"market": "",
"rating_cd": "",
"is_search": "N",
        # A dict literal cannot hold repeated keys (only the last 'market_cd[]'
        # would survive), so the four market codes are sent as a list instead
        "market_cd[]": ["shmb", "shkc", "szmb", "szcy"],
"btype": "",
"listed": "Y",
'qflag': 'N',
"sw_cd": "",
"bond_ids": "",
"rp": "50",
}
r = requests.post(url, params=params, json=payload, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame([item["cell"] for item in data_json["rows"]])
return temp_df
def bond_conv_adj_logs_jsl(symbol: str = "128013") -> pd.DataFrame:
"""
    Jisilu - convertible bond conversion price - adjustment records
    https://app.jisilu.cn/data/cbnew/#cb
    :param symbol: convertible bond code
    :type symbol: str
    :return: conversion price adjustment records
:rtype: pandas.DataFrame
"""
url = f"https://www.jisilu.cn/data/cbnew/adj_logs/?bond_id={symbol}"
r = requests.get(url)
data_text = r.text
if '</table>' not in data_text:
        # 1. The bond has no conversion price adjustment records; the server returns the text '暂无数据' ("no data yet")
        # 2. The bond code is invalid; the server returns {"timestamp":1639565628,"isError":1,"msg":"无效代码格式"} ("invalid code format")
        # In both cases there is no table to parse, so return None
        return None
else:
temp_df = pd.read_html(data_text, parse_dates=True)[0]
temp_df['股东大会日'] = pd.to_datetime(temp_df['股东大会日']).dt.date
temp_df['下修前转股价'] = pd.to_numeric(temp_df['下修前转股价'])
temp_df['下修后转股价'] = pd.to_numeric(temp_df['下修后转股价'])
temp_df['新转股价生效日期'] = pd.to_datetime(temp_df['新转股价生效日期']).dt.date
temp_df['下修底价'] = pd.to_numeric(temp_df['下修底价'])
return temp_df
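# Example of working with the returned frame (a sketch; assumes the call
# succeeded and returned a non-None DataFrame):
#
#   logs = bond_conv_adj_logs_jsl(symbol="128013")
#   if logs is not None:
#       # keep only the downward adjustments of the conversion price
#       lowered = logs[logs["下修后转股价"] < logs["下修前转股价"]]
#       print(lowered[["股东大会日", "下修前转股价", "下修后转股价"]])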
if __name__ == '__main__':
bond_convert_jsl_df = bond_cov_jsl(cookie='')
print(bond_convert_jsl_df)
bond_conv_adj_logs_jsl_df = bond_conv_adj_logs_jsl(symbol="128013")
print(bond_conv_adj_logs_jsl_df)
|
tensorflow_decision_forests/tensorflow/ops/inference/api.py | Hawk94/decision-forests | 412 | 12623324 | # Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Efficient inference of Yggdrasil models in tensorflow.
This API allows running, in TensorFlow, Yggdrasil models trained without the
TensorFlow Decision Forests API (i.e. generally for advanced users). For models
trained with TensorFlow Decision Forests, you can use the SavedModel API
directly.
Note: An Yggdrasil model is a sub-part of a TensorFlow Decision Forests model.
A Yggdrasil model is always stored in the assets sub-directory
of a TensorFlow Decision Forests model. This API can effectively be used to run
TensorFlow Decision Forests models.
Usage example
=============
# With Model(V1) in TF1
features = {
"a": tf.placeholder(tf.float32, [None]),
"b": tf.placeholder(tf.string, [None])
"c": tf.ragged.placeholder(tf.string, ragged_rank=1, value_shape=None)
}
model = tf_op.Model(model_path)
predictions = model.apply(features)
with self.session() as sess:
# Effectively loads the model.
sess.run(model.init_op())
probabilities, classes = sess.run([
predictions.dense_predictions, model_output.dense_col_representation
], {features["a"] : [1, 2, 3],
features["b"] : ["a", "b", "c"],
features["c"] : tf.ragged.constant_value(
[["x"], ["y"], ["y", "z"], [""]], dtype=tf.string
)})
# With Model(V1) in TF2
model = tf_op.Model(model_path)
@tf.function
def init_model():
# Effectively loads the model.
model.init_op()
@tf.function
def apply_model(features):
return model.apply(features)
init_model()
features = {
features["a"] : [1, 2, 3],
features["b"] : ["a", "b", "c"],
features["c"] : tf.ragged.constant(
[["x"], ["y"], ["y", "z"], [""]], dtype=tf.string
)
}
# Note: "tf.ragged.constant" in TF2 is equivalent to
# "tf.ragged.constant_value" in TF1.
predictions = apply_model(features)
# With ModelV2 in TF2
# The model is loaded in the constructor.
model = tf_op.ModelV2(model_path)
features = {
features["a"] : [1, 2, 3],
features["b"] : ["a", "b", "c"],
features["c"] : tf.ragged.constant(
[["x"], ["y"], ["y", "z"], [""]], dtype=tf.string
)
}
# Eager predictions.
predictions = model.apply(features)
# Non-eager predictions.
@tf.function
def apply_non_eager(features):
return model.apply(features)
predictions_non_eager = apply_non_eager(features)
See :tf_op_test and :tf_op_tf2_test for other usage examples.
Inference OP inputs
===================
Important: Missing values should be provided as missing. A missing value is not
a value. Instead, it is the absence of a value. While missing values are
represented using a special value (which depends on the feature type), they
are handled very differently under the hood.
Note: A "missing value", a "out-of-vocabulary" value and an "empty set" (in the
case of a set-type feature) are three different objects.
- All the input features of the model should be given as input tensors.
- Numerical features are handled as float32, but can be provided as
  float{32,64} or int{32,64}. Missing numerical values should be provided as
"quiet NaN".
- Boolean features are represented as float32, but can also be given as
float{32,64} or int{32,64}. Missing boolean values should be provided as
"quiet NaN".
- Categorical features are handled as int32 (if pre-integerized) or bytes
(if not pre-integerized). Pre-integerized can be provided as int{32,64}.
Missing categorical values should be provided as -1 (for integer
categorical) or "" (empty string; for string categorical). Out of vocabulary
should be provided as 0 (for integer categorical) or any
string-not-int-the-dictionary (for string categorical).
- Numerical, boolean, and categorical features are provided as dense tensors of
shape [batch size] or [batch size, 1].
- CategoricalSet features are handled as int32 (if pre-integerized) or bytes
(if not pre-integerized). Pre-integerized can be provided as int{32,64}.
  Missing categorical-set values should be provided as [-1] (for integer
  categorical) or [""] (for string categorical). Out-of-vocabulary items are
  provided as 0 (for integer categorical) or any string not in the dictionary
  (for string categorical). CategoricalSet features are provided as ragged
  tensors of shape [batch size, num items].
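For example (a sketch; the feature names "a" and "b" below are illustrative and
not part of the API), a batch where the second example has a missing numerical
value and a missing string categorical value could be fed as:
  features = {
      "a": [1.0, float("nan"), 3.0],  # missing numerical value -> quiet NaN
      "b": ["x", "", "y"],  # missing categorical value -> empty string
  }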
Inference OP outputs
====================
Classification:
dense_predictions: float32 tensor of probabilities of shape [batch size,
num classes].
dense_col_representation: string tensor of shape [num classes].
Representation of the classes.
Regression:
dense_predictions: float32 tensor of regressive values of shape [batch size,
1].
dense_col_representation: string tensor of shape [1]. Contains
only empty values.
Categorical features
====================
Unlike the tf.estimator and Keras API, Yggdrasil differentiates between
categorical, categorical-set and categorical-list features. Make sure to use the
correct one for your case. All three types support "missing values" (which is
semantically different from being empty, in the case of categorical-set and
categorical-list).
"""
import abc
import collections
import os
from typing import Text, Dict, List, Any, Optional
import uuid
from absl import logging
import six
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.training.tracking import base as trackable_base
from tensorflow.python.training.tracking import tracking
# pylint: enable=g-direct-tensorflow-import
from tensorflow_decision_forests.tensorflow.ops.inference import op
from yggdrasil_decision_forests.dataset import data_spec_pb2
from yggdrasil_decision_forests.model import abstract_model_pb2
Tensor = Any
InitOp = Tensor
Task = abstract_model_pb2.Task
ColumnType = data_spec_pb2.ColumnType
# Wrapper around the outputs values of the inference op.
ModelOutput = collections.namedtuple(
"ModelOutput",
[
# Predictions of the model e.g. probabilities. See the documentation of
# "dense_predictions" in "inference_interface.cc" for the format
# details (type, shape, semantic).
"dense_predictions",
# String representation of the model output. See the documentation
# of "dense_col_representation" in "inference_interface.cc" for the
# format details (type, shape, semantic).
"dense_col_representation",
])
# Magic value used to indicate a missing value for categorical features stored
# as ints, but that should not be interpreted as an integer directly.
#
# Note: TF estimators don't standardize missing value representation.
MISSING_NON_INTEGERIZED_CATEGORICAL_STORED_AS_INT = 0x7FFFFFFF - 2
class Model(object):
"""Applies a Yggdrasil model.
  For TensorFlow V1 and TensorFlow V2.
  If you are using TensorFlow V2 and you want the model serialization to
  assets to be handled automatically, use "ModelV2" instead.
"""
def __init__(self,
model_path: Text,
tensor_model_path: Optional[Tensor] = None,
verbose: Optional[bool] = True):
"""Initialize the model.
    The Yggdrasil model should be available at the "model_path" location both
    when the "Model" object is created and during the call to "init_op()".
Args:
model_path: Path to the Yggdrasil model.
      tensor_model_path: Path of the model at execution time. If not provided,
        "model_path" is used instead. This argument can be used to load a model
        from SavedModel assets.
verbose: If true, prints information about the model and its integration
in tensorflow.
"""
self._verbose: Optional[bool] = verbose
if self._verbose:
logging.info("Create inference model for %s", model_path)
    # Identifier of the model tf resource. Allows the same model to be applied
    # on separate input tensors.
self.model_identifier = _create_model_identifier()
self.input_builder = _InferenceArgsBuilder(verbose)
self.input_builder.build_from_model_path(model_path)
# Model loading and initialization op.
if tensor_model_path is None:
tensor_model_path = model_path
load_model_op = op.SimpleMLLoadModelFromPath(
model_identifier=self.model_identifier, path=tensor_model_path)
self._init_op = tf.group(self.input_builder.init_op(), load_model_op)
def init_op(self) -> InitOp:
"""Get the model "init_op".
This op initializes the model (effectively loading it in memory). This op
should be called before the model is applied.
Returns:
The init_op.
"""
return self._init_op
def apply(self, features: Dict[Text, Tensor]) -> ModelOutput:
"""Applies the model.
Args:
features: Dictionary of input features of the model. All the input
features of the model should be available. Features not used by the
model are ignored.
Returns:
Predictions of the model.
"""
if self._verbose:
logging.info("Create inference op")
inference_args = self.input_builder.build_inference_op_args(features)
dense_predictions, dense_col_representation = op.SimpleMLInferenceOp(
model_identifier=self.model_identifier, **inference_args)
return ModelOutput(
dense_predictions=dense_predictions,
dense_col_representation=dense_col_representation)
class ModelV2(tracking.AutoTrackable):
"""Applies a Yggdrasil model.
For TensorFlow V2.
"""
def __init__(self,
model_path: Text,
verbose: Optional[bool] = True,
output_types: Optional[List[str]] = []):
"""Initialize the model.
The model content will be serialized as an asset if necessary.
Args:
model_path: Path to the Yggdrasil model.
verbose: Should details about the calls be printed.
output_types: List of special outputs of the model. Can be: LEAVES.
"""
    super(ModelV2, self).__init__()
self._input_builder = _InferenceArgsBuilder(verbose)
self._input_builder.build_from_model_path(model_path)
self._compiled_model = _CompiledSimpleMLModelResource(
_DiskModelLoader(model_path, output_types))
def apply_get_leaves(self, features: Dict[Text, Tensor]) -> Any:
"""Applies the model and returns the active leaves.
Only works with decision tree based models.
Args:
features: Dictionary of input features of the model. All the input
features of the model should be available. Features not used by the
model are ignored.
Returns:
Active leaves of the model in shape [batch_size, num_trees].
"""
inference_args = self._input_builder.build_inference_op_args(
features, output_leaves=True)
leaves = op.SimpleMLInferenceLeafIndexOpWithHandle(
model_handle=self._compiled_model.resource_handle,
name="inference_op_leaves",
**inference_args)
return leaves
def apply(self, features: Dict[Text, Tensor]) -> ModelOutput:
"""Applies the model.
Args:
features: Dictionary of input features of the model. All the input
features of the model should be available. Features not used by the
model are ignored.
Returns:
Predictions of the model.
"""
inference_args = self._input_builder.build_inference_op_args(features)
(dense_predictions,
dense_col_representation) = op.SimpleMLInferenceOpWithHandle(
model_handle=self._compiled_model.resource_handle,
name="inference_op",
**inference_args)
return ModelOutput(
dense_predictions=dense_predictions,
dense_col_representation=dense_col_representation)
def _create_model_identifier() -> Text:
"""Creates a unique identifier for the model.
This identifier is used internally by the library.
Returns:
String identifier.
"""
return "sml_{}".format(uuid.uuid4())
# For each type of features, a map between a feature index (from
# "_feature_name_to_idx") and the input tensor for this feature.
FeatureMaps = collections.namedtuple("FeatureMaps", [
"numerical_features",
"boolean_features",
"categorical_int_features",
"categorical_set_int_features",
])
class _InferenceArgsBuilder(tracking.AutoTrackable):
"""Utility for the creation of the argument of the inference OP."""
def __init__(self, verbose: Optional[bool] = True):
    super(_InferenceArgsBuilder, self).__init__()
self._verbose: bool = verbose
self._header: Optional[abstract_model_pb2.AbstractModel] = None
self._data_spec: Optional[data_spec_pb2.DataSpecification] = None
self._feature_name_to_idx = None
# List of initialization ops.
self._init_ops: List[tf.Operation] = None
    # Number of dimensions in the model predictions.
self._dense_output_dim: Optional[int] = None
super(_InferenceArgsBuilder, self).__init__()
def build_from_model_path(self, model_path: Text):
# Load model meta-data.
header = abstract_model_pb2.AbstractModel()
with tf.io.gfile.GFile(os.path.join(model_path, "header.pb"), "rb") as f:
header.ParseFromString(f.read())
data_spec = data_spec_pb2.DataSpecification()
with tf.io.gfile.GFile(os.path.join(model_path, "data_spec.pb"), "rb") as f:
data_spec.ParseFromString(f.read())
self.build_from_dataspec_and_header(data_spec, header)
def build_from_dataspec_and_header(self,
dataspec: data_spec_pb2.DataSpecification,
header: abstract_model_pb2.AbstractModel):
self._header = header
self._data_spec = dataspec
# Map between the input feature names and their indices.
self._feature_name_to_idx = {
self._data_spec.columns[feature_idx].name: feature_idx
for feature_idx in self._header.input_features
}
self._init_ops = []
self._dense_output_dim = self._get_dense_output_dim()
self._create_str_to_int_tables()
def init_op(self) -> Tensor:
"""Op initializing the processing of the input features."""
if self._init_ops:
return tf.group(*self._init_ops)
else:
return tf.no_op()
def build_inference_op_args(
self,
features: Dict[Text, Tensor],
output_leaves: Optional[bool] = False) -> Dict[Text, Any]:
"""Creates the arguments of the SimpleMLInferenceOp.
Args:
features: Dictionary of input features of the model. All the input
features of the model should be available. Features not used by the
model are ignored.
output_leaves: If true, the model is expected to output leaves. If false,
the model is expected to output predictions.
Returns:
Op constructor arguments.
"""
if self._verbose:
logging.info("\tApply model on features:\n%s", features)
# Extract, clean, check and index the input feature tensors.
feature_maps = FeatureMaps(
numerical_features={},
boolean_features={},
categorical_int_features={},
categorical_set_int_features={})
for feature_name, feature_tensor in features.items():
self._register_input_feature(feature_name, feature_tensor, feature_maps)
self._check_all_input_features_are_provided(feature_maps)
# Pack the input features by type.
# Numerical features.
if feature_maps.numerical_features:
numerical_features = tf.stack(
self._dict_to_list_sorted_by_key(feature_maps.numerical_features),
axis=1)
else:
numerical_features = tf.constant(0, dtype=tf.float32, shape=(0, 0))
# Boolean features.
if feature_maps.boolean_features:
boolean_features = tf.stack(
self._dict_to_list_sorted_by_key(feature_maps.boolean_features),
axis=1)
else:
boolean_features = tf.constant(0, dtype=tf.float32, shape=(0, 0))
# Categorical features.
if feature_maps.categorical_int_features:
categorical_int_features = tf.stack(
self._dict_to_list_sorted_by_key(
feature_maps.categorical_int_features),
axis=1)
else:
categorical_int_features = tf.constant(0, dtype=tf.int32, shape=(0, 0))
# Categorical Set features.
if feature_maps.categorical_set_int_features:
categorical_set_int_features = tf.stack(
self._dict_to_list_sorted_by_key(
feature_maps.categorical_set_int_features),
axis=1)
else:
categorical_set_int_features = tf.ragged.constant([],
dtype=tf.int32,
ragged_rank=2)
args = {
"numerical_features":
numerical_features,
"boolean_features":
boolean_features,
"categorical_int_features":
categorical_int_features,
"categorical_set_int_features_values":
categorical_set_int_features.values.values,
"categorical_set_int_features_row_splits_dim_1":
categorical_set_int_features.values.row_splits,
"categorical_set_int_features_row_splits_dim_2":
categorical_set_int_features.row_splits,
}
if not output_leaves:
args["dense_output_dim"] = self._dense_output_dim
if self._verbose:
logging.info("Inference op arguments:\n%s", args)
return args
def _register_input_feature(self, name: Text, value: Tensor,
feature_maps: FeatureMaps) -> None:
"""Indexes, and optionally pre-computes, the input feature tensors.
Args:
name: Name of the input feature.
value: Tensor value of the input feature.
feature_maps: Output index of input features.
Raises:
      Exception: If the feature is already registered, or has the wrong format.
"""
feature_idx = self._feature_name_to_idx.get(name)
if feature_idx is None:
logging.warning("Registering feature \"%s\" not used by the model.", name)
return
if feature_idx in self._all_feature_idxs(feature_maps):
raise Exception("The feature \"{}\" was already registered.".format(name))
feature_spec = self._data_spec.columns[feature_idx]
if feature_spec.type == ColumnType.NUMERICAL:
value = self._prepare_and_check_numerical_feature(name, value)
feature_maps.numerical_features[feature_idx] = value
elif feature_spec.type == ColumnType.BOOLEAN:
value = self._prepare_and_check_boolean_feature(name, value)
feature_maps.boolean_features[feature_idx] = value
elif feature_spec.type == ColumnType.CATEGORICAL:
value = self._prepare_and_check_categorical_feature(
name, value, feature_spec)
feature_maps.categorical_int_features[feature_idx] = value
elif feature_spec.type == ColumnType.CATEGORICAL_SET:
value = self._prepare_and_check_categorical_set_feature(
name, value, feature_spec)
feature_maps.categorical_set_int_features[feature_idx] = value
else:
raise Exception("No supported type \"{}\" for feature \"{}\"".format(
ColumnType.Name(feature_spec.type), name))
def _create_str_to_int_tables(self):
"""Creates the tables used to convert categorical features into integers."""
# Map from feature index to the string->int hashmap.
self.categorical_str_to_int_hashmaps = {}
for feature_idx in self._header.input_features:
feature_spec = self._data_spec.columns[feature_idx]
if feature_spec.HasField(
"categorical"
) and not feature_spec.categorical.is_already_integerized:
# Extract the vocabulary of the feature.
#
# Note: The item with index "0" is the "out of vocabulary". It is
# handled by the hashmap directly.
vocabulary = [(key, item.index)
for key, item in feature_spec.categorical.items.items()
if item.index != 0]
# Missing value.
# "" (the empty string) is a missing value if it is not a valid value.
if "" not in feature_spec.categorical.items:
vocabulary.append(("", -1))
vocabulary.append(
(str(MISSING_NON_INTEGERIZED_CATEGORICAL_STORED_AS_INT), -1))
vocabulary.sort(key=lambda x: x[1])
        # Create a hashmap table with the vocabulary.
vocabulary_keys = tf.constant(list(zip(*vocabulary))[0])
vocabulary_values = tf.constant(list(zip(*vocabulary))[1])
vocabulary_index = tf.lookup.KeyValueTensorInitializer(
vocabulary_keys, vocabulary_values)
# Note: Value "0" is the out-of-vocabulary.
vocabulary_hashmap = tf.lookup.StaticHashTable(vocabulary_index, 0)
self._init_ops.append(vocabulary_index.initialize(vocabulary_hashmap))
self.categorical_str_to_int_hashmaps[
feature_spec.name] = vocabulary_hashmap
@staticmethod
def _dict_to_list_sorted_by_key(src: Dict[Any, Any]) -> List[Any]:
"""Extracts the values of a dictionary, sorted by key values.
Examples:
{2:"b", 3:"c", 1:"a"} -> ["a", "b", "c"]
Args:
src: Dictionary to process.
Returns:
Input values sorted by key.
"""
return [value[1] for value in sorted(src.items())]
@staticmethod
def _all_feature_idxs(feature_maps: FeatureMaps):
"""Lists all the input feature indices."""
idxs = []
for field_name in feature_maps._fields:
idxs.extend(getattr(feature_maps, field_name).keys())
return idxs
def _check_all_input_features_are_provided(self, feature_maps):
"""Making sure all the input features of the model are provided."""
missing_features = set(self._feature_name_to_idx.values()).difference(
set(self._all_feature_idxs(feature_maps)))
if missing_features:
raise Exception(
"No all input features have been registered. Non registered required "
"input features: {}".format([
self._data_spec.columns[feature_idx].name
for feature_idx in missing_features
]))
def _get_dense_output_dim(self):
"""Gets the dimension of the op output."""
label_spec = self._data_spec.columns[self._header.label_col_idx]
if self._header.task == Task.CLASSIFICATION:
if (label_spec.categorical.number_of_unique_values == 3 and
not self._header.classification_outputs_probabilities):
# Returns a single logit.
return 1
return label_spec.categorical.number_of_unique_values - 1
elif self._header.task == Task.REGRESSION:
return 1
elif self._header.task == Task.RANKING:
return 1
elif self._header.task == Task.CATEGORICAL_UPLIFT:
return label_spec.categorical.number_of_unique_values - 2
else:
raise Exception("Non supported task {}.".format(
Task.Name(self._header.task)))
def _prepare_and_check_numerical_feature(self, name: Text, value: Tensor):
"""Checks and optionally pre-processes a numerical feature."""
extended_name = "Numerical feature \"{}\"".format(name)
if value.dtype not in [tf.float32, tf.int32, tf.int64, tf.float64]:
raise Exception(
"{} is expected to have type float{{32,64}} or int{{32,64}}. Got {} "
"instead".format(extended_name, value.dtype))
if value.dtype != tf.float32:
value = tf.cast(value, tf.float32)
if len(value.shape) == 2:
if value.shape[1] != 1:
raise Exception(
"{} is expected to have shape [None] or [None, 1]. Got {} instead."
.format(extended_name, len(value.shape)))
value = value[:, 0]
elif len(value.shape) != 1:
raise Exception(
"{} is expected to have shape [None] or [None, 1]. Got {} instead."
.format(extended_name, len(value.shape)))
return value
def _prepare_and_check_boolean_feature(self, name: Text, value: Tensor):
"""Checks and optionally pre-processes a boolean feature."""
extended_name = "Boolean feature \"{}\"".format(name)
if value.dtype not in [tf.float32, tf.int32, tf.int64, tf.float64]:
raise Exception(
"{} is expected to have type float{{32,64}} or int{{32,64}}. Got {} "
"instead".format(extended_name, value.dtype))
if value.dtype != tf.float32:
value = tf.cast(value, tf.float32)
if len(value.shape) == 2:
if value.shape[1] != 1:
raise Exception(
"{} is expected to have shape [None] or [None, 1]. Got {} instead."
.format(extended_name, len(value.shape)))
value = value[:, 0]
elif len(value.shape) != 1:
raise Exception(
"{} is expected to have shape [None] or [None, 1]. Got {} instead."
.format(extended_name, len(value.shape)))
return value
def _prepare_and_check_categorical_feature(
self, name: Text, value: Tensor,
feature_spec: data_spec_pb2.Column) -> Tensor:
"""Checks and optionally pre-processes a categorical feature.
Args:
name: Name of the feature.
value: Tensor value of the feature.
feature_spec: Feature spec (e.g. type, dictionary, statistics) of the
feature.
Returns:
The feature value ready to be consumed by the inference op.
Raises:
Exception: In case of unexpected feature type or shape.
"""
extended_name = "Categorical feature \"{}\"".format(name)
if value.dtype in [tf.int32, tf.int64]:
# Native format.
if not feature_spec.categorical.is_already_integerized:
# A categorical feature, stored as integer, but not already integerized.
value = self.categorical_str_to_int_hashmaps[name].lookup(
tf.strings.as_string(value))
if value.dtype != tf.int32:
value = tf.cast(value, tf.int32)
elif value.dtype == tf.string:
if feature_spec.categorical.is_already_integerized:
raise Exception(
"{} was feed as {}. Expecting int32 tensor instead.".format(
extended_name, value))
value = self.categorical_str_to_int_hashmaps[name].lookup(value)
else:
raise Exception(
"{} is expected to have type int32, int64 or string. Got {} instead"
.format(extended_name, value.dtype))
if len(value.shape) == 2:
if value.shape[1] != 1:
raise Exception(
"{} is expected to have shape [None] or [None, 1]. Got {} instead."
.format(extended_name, len(value.shape)))
value = value[:, 0]
elif len(value.shape) != 1:
raise Exception("{} is expected to have rank 1. Got {} instead.".format(
extended_name, len(value.shape)))
return value
def _prepare_and_check_categorical_set_feature(
self, name: Text, value: Tensor,
feature_spec: data_spec_pb2.Column) -> Tensor:
"""Checks and optionally pre-processes a categorical set feature.
Args:
name: Name of the feature.
value: Tensor value of the feature.
feature_spec: Feature spec (e.g. type, dictionary, statistics) of the
feature.
Returns:
The feature value ready to be consumed by the inference op.
Raises:
Exception: In case of unexpected feature type or shape.
"""
extended_name = "Categorical set feature \"{}\"".format(name)
if not isinstance(value, tf.RaggedTensor):
raise Exception(
"{} was feed as {}. Expecting a RaggedTensor instead.".format(
extended_name, value))
if value.dtype in [tf.int32, tf.int64]:
# Native format.
if not feature_spec.categorical.is_already_integerized:
raise Exception(
"{} was feed as {}. Expecting string tensor instead.".format(
extended_name, value))
if value.dtype != tf.int32:
value = tf.cast(value, tf.int32)
elif value.dtype == tf.string:
if feature_spec.categorical.is_already_integerized:
raise Exception(
"{} was feed as {}. Expecting int32 tensor instead.".format(
extended_name, value))
value = tf.ragged.map_flat_values(
self.categorical_str_to_int_hashmaps[name].lookup, value)
else:
raise Exception(
"{} is expected to have type int32, int64 or string. Got {} instead"
.format(extended_name, value.dtype))
return value
class _AbstractModelLoader(six.with_metaclass(abc.ABCMeta, object)):
"""Loads a model in a _CompiledSimpleMLModelResource."""
@abc.abstractmethod
def initialize(self, model: "_CompiledSimpleMLModelResource") -> tf.Operation:
raise NotImplementedError()
class _CompiledSimpleMLModelResource(tracking.TrackableResource):
"""Utility class to handle compiled model resources.
This code is directly copied from StaticHashTable in:
google3/third_party/tensorflow/python/ops/lookup_ops.py
"""
def __init__(self, model_loader: _AbstractModelLoader):
super(_CompiledSimpleMLModelResource, self).__init__()
if isinstance(model_loader, trackable_base.Trackable):
self._model_loader = self._track_trackable(model_loader, "_model_loader")
self._shared_name = "simple_ml_model_%s" % (str(uuid.uuid4()),)
with tf.init_scope():
self._resource_handle = self._create_resource()
if (not context.executing_eagerly() and
tf.compat.v1.get_default_graph()._get_control_flow_context()
is not None): # pylint: disable=protected-access
with tf.init_scope():
self._init_op = self._initialize()
else:
self._init_op = self._initialize()
def _create_resource(self):
table_ref = op.SimpleMLCreateModelResource(shared_name=self._shared_name)
return table_ref
def _initialize(self):
return self._model_loader.initialize(self)
class _DiskModelLoader(_AbstractModelLoader, tracking.AutoTrackable):
"""Loads a model from disk.
This code is directly copied from TextFileInitializer in:
google3/third_party/tensorflow/python/ops/lookup_ops.py
"""
def __init__(self, model_path, output_types: List[str]):
    super(_DiskModelLoader, self).__init__()
if not isinstance(model_path, tf.Tensor) and not model_path:
raise ValueError("Filename required")
self._output_types = output_types
self._all_files = []
self._done_file = None
for directory, _, filenames in tf.io.gfile.walk(model_path):
for filename in filenames:
path = os.path.join(directory, filename)
asset = tf.saved_model.Asset(path)
if filename == "done":
self._done_file = asset
self._all_files.append(asset)
if self._done_file is None:
raise ValueError(f"The model at {model_path} is invalid as it is "
"missing a \"done\" file.")
super(_DiskModelLoader, self).__init__()
def initialize(self, model: _CompiledSimpleMLModelResource) -> tf.Operation:
model_path = tf.strings.regex_replace(self._done_file.asset_path, "done",
"")
with ops.name_scope("simple_ml", "load_model_from_disk",
(model.resource_handle,)):
init_op = op.SimpleMLLoadModelFromPathWithHandle(
model_handle=model.resource_handle,
path=model_path,
output_types=self._output_types)
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
return init_op
def get_model_path(self) -> Tensor:
"""Gets the path to the model on disk."""
return tf.strings.regex_replace(self._done_file.asset_path, "done", "")
|
examples/service/kubernetes/fabfile.py | theoden-dd/fabricio | 291 | 12623331 | <reponame>theoden-dd/fabricio
"""
https://github.com/renskiy/fabricio/blob/master/examples/service/kubernetes
"""
import fabricio
from fabric import api as fab
from fabricio import tasks, kubernetes
from fabricio.misc import AvailableVagrantHosts
from six.moves import filter
hosts = AvailableVagrantHosts(guest_network_interface='eth1')
service = tasks.DockerTasks(
service=kubernetes.Configuration(
name='my-service',
options={
# `kubectl apply` options
'filename': 'configuration.yml',
},
),
hosts=hosts,
# rollback_command=True, # show `rollback` command in the list
# migrate_commands=True, # show `migrate` and `migrate-back` commands in the list
# backup_commands=True, # show `backup` and `restore` commands in the list
# pull_command=True, # show `pull` command in the list
# update_command=True, # show `update` command in the list
# revert_command=True, # show `revert` command in the list
# destroy_command=True, # show `destroy` command in the list
)
@fab.task(name='k8s-init')
@fab.serial
def k8s_init():
"""
create Kubernetes cluster
"""
def init():
if not init.join_command:
initialization = list(filter(None, fabricio.run(
'kubeadm init '
'--apiserver-advertise-address {0} '
'--pod-network-cidr 10.244.0.0/16'
''.format(fab.env.host),
sudo=True,
quiet=False,
).splitlines()))
init.join_command = initialization[-1].strip()
# master setup
fabricio.run('mkdir -p $HOME/.kube')
fabricio.run('cp /etc/kubernetes/admin.conf /home/vagrant/.kube/config', sudo=True)
fabricio.run('chown vagrant /home/vagrant/.kube/config', sudo=True)
# install Kubernetes network plugin
fabricio.run(
'kubectl apply --filename /vagrant/kube-rbac.yml '
'&& kubectl apply --filename /vagrant/kube-canal.yml --validate=false',
quiet=False,
)
else:
fabricio.run(init.join_command, quiet=False, sudo=True)
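    # The first host to run `init` performs `kubeadm init` and stores the
    # resulting join command on the function object; the remaining hosts reuse
    # it to join the cluster (the task runs serially via @fab.serial).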
init.join_command = None
with fab.settings(hosts=hosts):
fab.execute(init)
@fab.task(name='k8s-reset')
def k8s_reset():
"""
reset Kubernetes cluster
"""
def reset():
fabricio.run('kubeadm reset --force', sudo=True, quiet=False)
with fab.settings(hosts=hosts):
fab.execute(reset)
|
test-framework/test-suites/unit/tests/command/stack/commands/sync/vm/test_sync_vm_plugin_hypervisor.py | sammeidinger/stack | 123 | 12623353 | import pytest
from unittest.mock import create_autospec, patch, call, ANY
from stack.commands import DatabaseConnection
from stack.commands.sync.vm.plugin_hypervisor import Plugin, VmException
from stack.commands.sync.vm import Command
from stack.bool import str2bool
from collections import namedtuple
class TestSyncVmHypervisor:
def mock_vm_exception(self, *args):
raise VmException('Oh no something went wrong!')
@pytest.fixture
def mock_sync_hypervisor_plugin(self):
"""
A fixture for mocking Plugin instances
"""
mock_command = create_autospec(
spec = Command,
instance = True
)
mock_command.db = create_autospec(
spec = DatabaseConnection,
spec_set = True,
instance = True
)
return Plugin(mock_command)
# Test various successful input to the plugin
# returns the expected output
# 1. Single host
# 2. Multiple hosts with different statuses
# and disk types
CONFIG_ADD = [
(
{'foo': {
'virtual machine': 'foo',
'hypervisor': 'hypervisor-foo',
'pending deletion': 'False',
'status': 'off'
}
},
{'foo': [{
'Name': 'disk1',
'Type': 'disk',
'Image Name': 'disk_name',
'Location': 'loc',
'Pending Deletion': 'False'
}]
}
),
(
{'foo': {
'virtual machine': 'foo',
'hypervisor': 'hypervisor-foo',
'pending deletion': 'False',
'status': 'off'
},
'bar': {
'virtual machine': 'bar',
'hypervisor': 'hypervisor-bar',
'pending deletion': 'False',
'status': 'off'
},
'baz': {
'virtual machine': 'baz',
'hypervisor': 'hypervisor-baz',
'pending deletion': 'False',
'status': 'off'
}
},
{'foo': [
{
'Name': 'disk1',
'Type': 'disk',
'Image Name': 'disk_name',
'Location': 'loc',
'Pending Deletion': 'False'
}
],
'bar': [{
'Name': 'disk1',
'Type': 'disk',
'Image Name': 'disk_name',
'Location': 'loc',
'Pending Deletion': 'False'
}],
'baz': [{
'Name': 'disk1',
'Type': 'disk',
'Image Name': 'disk_name',
'Location': 'loc',
'Pending Deletion': 'False'
}]
})
]
@patch('stack.commands.sync.vm.plugin_hypervisor.Hypervisor', autospec=True)
def test_sync_vm_hypervisor_add_vm(
self,
mock_hypervisor,
mock_sync_hypervisor_plugin
):
"""
Test adding a vm to a hypervisor
"""
hypervisor = mock_hypervisor.return_value.__enter__.return_value
# Mock output of report vm
mock_vm_config = [{'col-1': 'config_file'}]
mock_sync_hypervisor_plugin.owner.call.return_value = mock_vm_config
output = mock_sync_hypervisor_plugin.add_vm('foo', True, 'hypervisor-foo')
# Check add_domain was called with our mock
# report vm return value
hypervisor.add_domain.assert_called_once_with('config_file')
assert output == []
@patch('stack.commands.sync.vm.plugin_hypervisor.Hypervisor', autospec=True)
def test_sync_vm_hypervisor_add_vm_except(
self,
mock_hypervisor,
mock_sync_hypervisor_plugin
):
"""
Test add_vm outputs the correct
error message when a VmException
is raised
"""
hypervisor = mock_hypervisor.return_value.__enter__.return_value
# Raise a VmException when add_domain is called
hypervisor.add_domain.side_effect = self.mock_vm_exception
# Mock output of report vm
mock_vm_config = [{'col-1': 'config_file'}]
mock_sync_hypervisor_plugin.owner.call.return_value = mock_vm_config
output = mock_sync_hypervisor_plugin.add_vm('foo', True, 'hypervisor-foo')
# Check add_domain was called with our mock
# report vm return value
hypervisor.add_domain.assert_called_once_with('config_file')
assert output == ['Oh no something went wrong!']
@patch('stack.commands.sync.vm.plugin_hypervisor.Hypervisor', autospec=True)
def test_sync_vm_hypervisor_remove_vm(
self,
mock_hypervisor,
mock_sync_hypervisor_plugin
):
"""
		Test removing a vm from a hypervisor
"""
hypervisor = mock_hypervisor.return_value.__enter__.return_value
output = mock_sync_hypervisor_plugin.remove_vm('foo', True, 'hypervisor-foo')
# Check remove_domain was called with our mock
# report vm return value
hypervisor.remove_domain.assert_called_once_with('foo')
assert output == []
@patch('stack.commands.sync.vm.plugin_hypervisor.Hypervisor', autospec=True)
def test_sync_vm_hypervisor_remove_vm_except(
self,
mock_hypervisor,
mock_sync_hypervisor_plugin
):
"""
Test remove_vm outputs the correct
error message when a VmException
is raised
"""
hypervisor = mock_hypervisor.return_value.__enter__.return_value
# Raise a VmException when add_domain is called
hypervisor.remove_domain.side_effect = self.mock_vm_exception
output = mock_sync_hypervisor_plugin.remove_vm('foo', True, 'hypervisor-foo')
# Check add_domain was called with our mock
# report vm return value
hypervisor.remove_domain.assert_called_once_with('foo')
assert output == ['Oh no something went wrong!']
arg_tuple = namedtuple('args', 'hosts disks debug sync force autostart')
RUN_ARGS = [
arg_tuple(
{
'foo': {
'virtual machine': 'foo',
'hypervisor': 'hypervisor-foo',
'status': 'off',
'pending deletion': 'False'
}
},
[],
True,
True,
False,
False,
),
arg_tuple(
{
'foo': {
'virtual machine': 'foo',
'hypervisor': 'hypervisor-foo',
'status': 'off',
'pending deletion': 'True'
},
'bar': {
'virtual machine': 'bar',
'hypervisor': 'hypervisor-bar',
'status': 'undefined',
'pending deletion': 'False'
},
'baz': {
'virtual machine': 'baz',
'hypervisor': 'hypervisor-baz',
'status': 'on',
'pending deletion': 'False'
}
},
[],
True,
True,
False,
True,
),
arg_tuple(
{
'foo': {
'virtual machine': 'foo',
'hypervisor': 'hypervisor-foo',
'status': 'on',
'pending deletion': 'False'
}
},
[],
True,
True,
True,
False,
)
]
@patch.object(Plugin, 'add_vm', autospec=True)
@patch.object(Plugin, 'remove_vm', autospec=True)
@pytest.mark.parametrize('args', RUN_ARGS)
def test_sync_vm_hypervisor_run(
self,
mock_remove_vm,
mock_add_vm,
mock_sync_hypervisor_plugin,
args
):
		# Simulate errors returned from
		# add_vm or remove_vm
mock_remove_vm.return_value = ['remove error']
mock_add_vm.return_value = ['add error']
mock_sync_hypervisor_plugin.owner.str2bool.side_effect = str2bool
# Run the plugin
output = mock_sync_hypervisor_plugin.run(args)
# For each host input check if the correct
# functions were called for the input
for host, values in args.hosts.items():
delete_vm = str2bool(values['pending deletion'])
# Used to check if the current args are called or not
func_call = call(ANY, host, args.debug, values['hypervisor'])
if values['status'] != 'on' or args.force:
if values['status'] != 'undefined':
assert func_call in mock_remove_vm.call_args_list
if delete_vm:
assert func_call not in mock_add_vm.call_args_list
else:
assert func_call in mock_add_vm.call_args_list
else:
assert func_call not in mock_remove_vm.call_args_list
assert func_call not in mock_add_vm.call_args_list
assert output == ['remove error', 'add error']
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/ViewFamily.py | htlcnn/ironpython-stubs | 182 | 12623361 | class ViewFamily(Enum,IComparable,IFormattable,IConvertible):
"""
An enumerated type that corresponds to the type of a Revit view.
enum ViewFamily,values: AreaPlan (110),CeilingPlan (111),CostReport (106),Detail (113),Drafting (108),Elevation (114),FloorPlan (109),GraphicalColumnSchedule (119),ImageView (104),Invalid (101),Legend (117),LoadsReport (115),PanelSchedule (118),PressureLossReport (116),Schedule (105),Section (112),Sheet (107),StructuralPlan (120),ThreeDimensional (102),Walkthrough (103)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
AreaPlan=None
CeilingPlan=None
CostReport=None
Detail=None
Drafting=None
Elevation=None
FloorPlan=None
GraphicalColumnSchedule=None
ImageView=None
Invalid=None
Legend=None
LoadsReport=None
PanelSchedule=None
PressureLossReport=None
Schedule=None
Section=None
Sheet=None
StructuralPlan=None
ThreeDimensional=None
value__=None
Walkthrough=None
|
examples/chat_bot.py | sap2me/steampy | 322 | 12623368 | <reponame>sap2me/steampy
import time
from steampy.client import SteamClient
# Set API key
api_key = ''
# Set path to SteamGuard file
steamguard_path = ''
# Steam username
username = ''
# Steam password
password = ''
def main():
print('This is the chat bot.')
if not are_credentials_filled():
print('You have to fill the credentials to run the example')
print('Terminating bot')
return
client = SteamClient(api_key)
client.login(username, password, steamguard_path)
print('Bot logged in successfully, polling messages every 10 seconds')
while True:
time.sleep(10)
messages = client.chat.fetch_messages()['received']
for message in messages:
client.chat.send_message(message['partner'], "Got your message: " + message['message'])
def are_credentials_filled() -> bool:
return api_key != '' and steamguard_path != '' and username != '' and password != ''
if __name__ == "__main__":
# execute only if run as a script
main()
|
src/england_data/run_dataset_merge.py | charlottestanton/covid-19-open-data | 430 | 12623387 | <reponame>charlottestanton/covid-19-open-data
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Merge England data to the Covid-19 Open Data format."""
import datetime
import os
import pathlib
from absl import app
from absl import flags
from absl import logging
from dm_c19_modelling.england_data import constants
from dm_c19_modelling.england_data import dataset_merge_util
from dm_c19_modelling.england_data import error_reporting_util
import pandas as pd
FLAGS = flags.FLAGS
flags.DEFINE_string(
"scrape_date", None, "Enter in the form YYYYMMDD, eg. "
"November 5, 2020 would be '20201105'. If you want the "
"latest date, enter 'latest'.")
flags.DEFINE_string("input_directory", None,
"The directory to read the standardized data .csvs from.")
flags.DEFINE_string("output_directory", None,
"The directory to write the merged data .csv to.")
flags.DEFINE_list(
"names", None, "List of names: "
f"{', '.join([data_type.value for data_type in constants.DataTypes])}"
)
flags.mark_flag_as_required("scrape_date")
flags.mark_flag_as_required("input_directory")
flags.mark_flag_as_required("output_directory")
@error_reporting_util.report_exception
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
scrape_date = FLAGS.scrape_date
if scrape_date == "latest":
logging.info("Merging data for 'latest' scrape date")
scrape_date_dirname = max(os.listdir(FLAGS.input_directory))
else:
try:
datetime.datetime.strptime(scrape_date, "%Y%m%d")
except ValueError:
raise ValueError("Date must be formatted: YYYYMMDD")
scrape_date_dirname = scrape_date
logging.info("Merging data for '%s' scrape date", scrape_date_dirname)
if FLAGS.names is None:
names = list(constants.DataTypes)
else:
names = [constants.DataTypes(name) for name in FLAGS.names]
# Read standardized datasets.
input_directory = pathlib.Path(FLAGS.input_directory) / scrape_date_dirname
data_dfs = {}
for name in names:
path = input_directory / f"{name.value}.csv"
data_dfs[name] = pd.read_csv(path)
logging.info("Data loaded.")
# Create index for each standardized data dataframe.
dfs = {}
index_dfs = {}
for name, df in data_dfs.items():
dfs[name.value], index_dfs[
name.value] = dataset_merge_util.convert_to_match_cloud(df, name)
if name == constants.DataTypes.POPULATION:
dfs["population_unpooled"], _ = dataset_merge_util.convert_to_match_cloud(
df, name, pool=False)
# Combine index dfs into a single one.
index_df = dataset_merge_util.merge_index_dfs(index_dfs.values())
logging.info("Indices built.")
# Save the output.
output_directory = pathlib.Path(FLAGS.output_directory) / scrape_date_dirname
os.makedirs(output_directory, exist_ok=True)
# Save individual keyed dfs.
for name, df in dfs.items():
path = output_directory / f"{name}.csv"
df.to_csv(path, index=False)
logging.info("Wrote '%s' to '%s'", name, path)
# Save merged index df.
path = output_directory / "index.csv"
index_df.to_csv(path, index=False)
logging.info("Wrote index to '%s'", path)
if __name__ == "__main__":
app.run(main)
|
tests/test_grads.py | ZvonimirBandic/QuCumber | 163 | 12623401 | # Copyright 2019 PIQuIL - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import pickle
from collections import namedtuple
import torch
import pytest
import qucumber
from qucumber.nn_states import PositiveWaveFunction, ComplexWaveFunction, DensityMatrix
from grads_utils import ComplexGradsUtils, PosGradsUtils, DensityGradsUtils
from conftest import all_state_types, assertAlmostEqual, TOL
SEED = 1234
EPS = 1e-6
def positive_wavefunction_data(request, gpu, num_hidden):
with open(
os.path.join(request.fspath.dirname, "data", "test_grad_data.pkl"), "rb"
) as f:
test_data = pickle.load(f)
qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
data = torch.tensor(test_data["tfim1d"]["train_samples"], dtype=torch.double)
target = torch.tensor(test_data["tfim1d"]["target_psi"], dtype=torch.double).t()
num_visible = data.shape[-1]
nn_state = PositiveWaveFunction(num_visible, num_hidden, gpu=gpu)
PGU = PosGradsUtils(nn_state)
data = data.to(device=nn_state.device)
space = nn_state.generate_hilbert_space()
target = target.to(device=nn_state.device)
PositiveWaveFunctionFixture = namedtuple(
"PositiveWaveFunctionFixture",
["data_samples", "target", "grad_utils", "nn_state", "space"],
)
return PositiveWaveFunctionFixture(
data_samples=data, target=target, grad_utils=PGU, nn_state=nn_state, space=space
)
def complex_wavefunction_data(request, gpu, num_hidden):
with open(
os.path.join(request.fspath.dirname, "data", "test_grad_data.pkl"), "rb"
) as f:
test_data = pickle.load(f)
qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
data_bases = test_data["2qubits"]["train_bases"]
data_samples = torch.tensor(
test_data["2qubits"]["train_samples"], dtype=torch.double
)
all_bases = test_data["2qubits"]["bases"]
target_psi_tmp = torch.tensor(
test_data["2qubits"]["target_psi"], dtype=torch.double
).t()
num_visible = data_samples.shape[-1]
nn_state = ComplexWaveFunction(num_visible, num_hidden, gpu=gpu)
unitary_dict = nn_state.unitary_dict
CGU = ComplexGradsUtils(nn_state)
all_bases = CGU.transform_bases(all_bases)
target = CGU.load_target_psi(all_bases, target_psi_tmp)
target = {b: v.to(device=nn_state.device) for b, v in target.items()}
space = nn_state.generate_hilbert_space()
data_samples = data_samples.to(device=nn_state.device)
ComplexWaveFunctionFixture = namedtuple(
"ComplexWaveFunctionFixture",
[
"data_samples",
"data_bases",
"grad_utils",
"all_bases",
"target",
"space",
"nn_state",
"unitary_dict",
],
)
return ComplexWaveFunctionFixture(
data_samples=data_samples,
data_bases=data_bases,
grad_utils=CGU,
all_bases=all_bases,
target=target,
space=space,
nn_state=nn_state,
unitary_dict=unitary_dict,
)
def density_matrix_data(request, gpu, num_hidden):
with open(
os.path.join(request.fspath.dirname, "data", "test_grad_data.pkl"), "rb"
) as f:
test_data = pickle.load(f)
qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
data_bases = test_data["density_matrix"]["train_bases"]
data_samples = torch.tensor(
test_data["density_matrix"]["train_samples"], dtype=torch.double
)
all_bases = test_data["density_matrix"]["bases"]
target = torch.tensor(
test_data["density_matrix"]["density_matrix"], dtype=torch.double
)
num_visible = data_samples.shape[-1]
num_aux = num_visible + 1
nn_state = DensityMatrix(num_visible, num_hidden, num_aux, gpu=gpu)
unitary_dict = nn_state.unitary_dict
DGU = DensityGradsUtils(nn_state)
all_bases = DGU.transform_bases(all_bases)
space = nn_state.generate_hilbert_space()
data_samples = data_samples.to(device=nn_state.device)
target = target.to(device=nn_state.device)
DensityMatrixFixture = namedtuple(
"DensityMatrixFixture",
[
"data_samples",
"data_bases",
"grad_utils",
"all_bases",
"target",
"space",
"nn_state",
"unitary_dict",
],
)
return DensityMatrixFixture(
data_samples=data_samples,
data_bases=data_bases,
grad_utils=DGU,
all_bases=all_bases,
target=target,
space=space,
nn_state=nn_state,
unitary_dict=unitary_dict,
)
hidden_layer_sizes = [pytest.param(9, id="9", marks=[pytest.mark.extra]), 10]
grad_types = ["KL", "NLL"]
@pytest.fixture(scope="module", params=all_state_types)
def quantum_state_constructor(request):
nn_state_type = request.param
if nn_state_type == PositiveWaveFunction:
return positive_wavefunction_data
elif nn_state_type == ComplexWaveFunction:
return complex_wavefunction_data
elif nn_state_type == DensityMatrix:
return density_matrix_data
else:
raise ValueError(
f"invalid test config: {nn_state_type} is not a valid quantum state type"
)
@pytest.fixture(scope="module", params=hidden_layer_sizes)
def quantum_state_data(request, quantum_state_constructor, quantum_state_device):
return quantum_state_constructor(request, quantum_state_device, request.param)
@pytest.fixture(scope="module", params=grad_types)
def quantum_state_graddata(request, quantum_state_data):
grad_type = request.param
nn_state, grad_utils = quantum_state_data.nn_state, quantum_state_data.grad_utils
if grad_type == "KL":
alg_grad_fn = grad_utils.algorithmic_gradKL
num_grad_fn = grad_utils.numeric_gradKL
else:
alg_grad_fn = grad_utils.algorithmic_gradNLL
num_grad_fn = grad_utils.numeric_gradNLL
alg_grads = alg_grad_fn(**quantum_state_data._asdict())
num_grads = [None for _ in nn_state.networks]
for n, net in enumerate(nn_state.networks):
rbm = getattr(nn_state, net)
num_grad = torch.tensor([]).to(device=rbm.device, dtype=torch.double)
for param in rbm.parameters():
num_grad = torch.cat(
(
num_grad,
num_grad_fn(
param=param.view(-1), eps=EPS, **quantum_state_data._asdict()
).to(num_grad),
)
)
num_grads[n] = num_grad
return nn_state, alg_grads, num_grads, grad_type, TOL
def get_param_status(i, param_ranges):
"""Get parameter name of the parameter in param_ranges which contains the index i.
Also return whether i is pointing to the first index of the parameter.
"""
for p, rng in param_ranges.items():
if i in rng:
return p, i == rng[0]
def test_grads(quantum_state_graddata):
nn_state, alg_grads, num_grads, grad_type, test_tol = quantum_state_graddata
print(
"\nTesting {} gradients for {} on {}.".format(
grad_type, nn_state.__class__.__name__, nn_state.device
)
)
for n, net in enumerate(nn_state.networks):
print("\nRBM: %s" % net)
rbm = getattr(nn_state, net)
param_ranges = {}
counter = 0
for param_name, param in rbm.named_parameters():
param_ranges[param_name] = range(counter, counter + param.numel())
counter += param.numel()
for i, grad in enumerate(num_grads[n]):
p_name, at_start = get_param_status(i, param_ranges)
if at_start:
print(f"\nTesting {p_name}...")
print(f"Numerical {grad_type}\tAlg {grad_type}")
print("{: 10.8f}\t{: 10.8f}\t\t".format(grad, alg_grads[n][i].item()))
assertAlmostEqual(
num_grads[n],
alg_grads[n],
test_tol,
msg=f"{grad_type} grads are not close enough for {net}!",
)
|
dataset/utils.py | NotMorven/cavaface.pytorch | 329 | 12623403 | <gh_stars>100-1000
from __future__ import print_function
import os
import numpy as np
import torch
import random
import math
import warnings  # used by Cutout for parameter validation warnings
import PIL.Image  # used by Cutout when pixel_level erasing is enabled
class RandomErasing(object):
"""
Class that performs Random Erasing in Random Erasing Data Augmentation by Zhong et al.
-------------------------------------------------------------------------------------
probability: The probability that the operation will be performed.
sl: min erasing area
sh: max erasing area
r1: min aspect ratio
mean: erasing value
-------------------------------------------------------------------------------------
"""
def __init__(
self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]
):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) > self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1 : x1 + h, y1 : y1 + w] = self.mean[0]
img[1, x1 : x1 + h, y1 : y1 + w] = self.mean[1]
img[2, x1 : x1 + h, y1 : y1 + w] = self.mean[2]
else:
img[0, x1 : x1 + h, y1 : y1 + w] = self.mean[0]
return img
return img
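# Example usage (a sketch; RandomErasing operates on a CHW tensor, so it is
# typically composed after transforms.ToTensor() in a torchvision pipeline):
#   train_transform = transforms.Compose([
#       transforms.ToTensor(),
#       RandomErasing(probability=0.5),
#   ])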
def mixup_data(x, y, gpu, mixup_prob=0.5, alpha=1.0):
"""Returns mixed inputs, pairs of targets, and lambda"""
if random.uniform(0, 1) < mixup_prob:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
index = torch.randperm(batch_size).cuda(gpu)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
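# Example training-loop usage (a sketch; `model`, `criterion`, `images`,
# `labels` and the GPU index `gpu` are assumed to exist in the caller):
#   mixed_x, y_a, y_b, lam = mixup_data(images, labels, gpu, mixup_prob=0.5)
#   outputs = model(mixed_x)
#   loss = mixup_criterion(criterion, outputs, y_a, y_b, lam)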
'''
class Cutout(object):
def __init__(self, n_holes=1, length=112):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
""" cutout_image """
h, w = img.shape[:2]
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
img[y1:y2, x1:x2] = 0
return img
'''
class Cutout(object):
def __init__(
self,
p=0.5,
scale=(0.02, 0.4),
ratio=(0.4, 1 / 0.4),
value=(0, 255),
pixel_level=False,
inplace=False,
):
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if scale[0] < 0 or scale[1] > 1:
raise ValueError("range of scale should be between 0 and 1")
if p < 0 or p > 1:
raise ValueError(
"range of random erasing probability should be between 0 and 1"
)
self.p = p
self.scale = scale
self.ratio = ratio
self.value = value
self.pixel_level = pixel_level
self.inplace = inplace
@staticmethod
def get_params(img, scale, ratio):
if type(img) == np.ndarray:
img_h, img_w, img_c = img.shape
else:
img_h, img_w = img.size
img_c = len(img.getbands())
s = random.uniform(*scale)
# if you img_h != img_w you may need this.
# r_1 = max(r_1, (img_h*s)/img_w)
# r_2 = min(r_2, img_h / (img_w*s))
r = random.uniform(*ratio)
s = s * img_h * img_w
w = int(math.sqrt(s / r))
h = int(math.sqrt(s * r))
left = random.randint(0, img_w - w)
top = random.randint(0, img_h - h)
return left, top, h, w, img_c
def __call__(self, img):
if random.random() < self.p:
left, top, h, w, ch = self.get_params(img, self.scale, self.ratio)
if self.pixel_level:
c = np.random.randint(*self.value, size=(h, w, ch), dtype="uint8")
else:
c = random.randint(*self.value)
if self.pixel_level:
c = PIL.Image.fromarray(c)
img.paste(c, (left, top, left + w, top + h))
return img
return img
def rand_bbox(size, lam):
W = size[2]
H = size[3]
cut_rat = np.sqrt(1.0 - lam)
cut_w = np.int(W * cut_rat)
cut_h = np.int(H * cut_rat)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
def cutmix_data(input, target, gpu, cutmix_prob=0.5, alpha=1.0):
if random.uniform(0, 1) < cutmix_prob:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
rand_index = torch.randperm(input.size()[0]).cuda(gpu)
target_a = target
target_b = target[rand_index]
bbx1, bby1, bbx2, bby2 = rand_bbox(input.size(), lam)
input[:, :, bbx1:bbx2, bby1:bby2] = input[rand_index, :, bbx1:bbx2, bby1:bby2]
# adjust lambda to exactly match pixel ratio
lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (input.size()[-1] * input.size()[-2]))
return input, target_a, target_b, lam
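# Example training-loop usage (a sketch; `model`, `criterion`, `images`,
# `labels` and the GPU index `gpu` are assumed to exist in the caller):
#   mixed_x, y_a, y_b, lam = cutmix_data(images, labels, gpu, cutmix_prob=0.5)
#   outputs = model(mixed_x)
#   loss = lam * criterion(outputs, y_a) + (1 - lam) * criterion(outputs, y_b)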
|
matchzoo/datasets/snli/load_data.py | baajur/MatchZoo | 2,209 | 12623411 | """SNLI data loader."""
import typing
from pathlib import Path
import pandas as pd
import keras
import matchzoo
_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
def load_data(
stage: str = 'train',
task: str = 'classification',
target_label: str = 'entailment',
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load SNLI data.
:param stage: One of `train`, `dev`, and `test`. (default: `train`)
:param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance. (default: `classification`)
    :param target_label: If `ranking`, choose one of `entailment`,
`contradiction`, `neutral`, and `-` as the positive label.
(default: `entailment`)
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f'snli_1.0_{stage}.txt')
data_pack = _read_data(file_path)
if task == 'ranking':
task = matchzoo.tasks.Ranking()
if task == 'classification':
task = matchzoo.tasks.Classification()
if isinstance(task, matchzoo.tasks.Ranking):
if target_label not in ['entailment', 'contradiction', 'neutral', '-']:
raise ValueError(f"{target_label} is not a valid target label."
f"Must be one of `entailment`, `contradiction`, "
f"`neutral` and `-`.")
binary = (data_pack.relation['label'] == target_label).astype(float)
data_pack.relation['label'] = binary
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
classes = ['entailment', 'contradiction', 'neutral', '-']
label = data_pack.relation['label'].apply(classes.index)
data_pack.relation['label'] = label
data_pack.one_hot_encode_label(num_classes=4, inplace=True)
if return_classes:
return data_pack, classes
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def _download_data():
ref_path = keras.utils.data_utils.get_file(
'snli', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='snli'
)
return Path(ref_path).parent.joinpath('snli_1.0')
def _read_data(path):
table = pd.read_csv(path, sep='\t')
df = pd.DataFrame({
'text_left': table['sentence1'],
'text_right': table['sentence2'],
'label': table['gold_label']
})
df = df.dropna(axis=0, how='any').reset_index(drop=True)
return matchzoo.pack(df)
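# Example usage (a sketch; the first call downloads and extracts SNLI):
#   train_pack, classes = load_data('train', task='classification',
#                                   return_classes=True)
#   dev_pack = load_data('dev', task='ranking', target_label='entailment')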
|
src/main/python/rlbot/utils/structures/rigid_body_struct.py | VirxEC/RLBot | 408 | 12623417 | import ctypes
from rlbot.utils.structures.bot_input_struct import PlayerInput
from rlbot.utils.structures.game_data_struct import Vector3
from rlbot.utils.structures.start_match_structures import MAX_PLAYERS
class Quaternion(ctypes.Structure):
_fields_ = [("x", ctypes.c_float),
("y", ctypes.c_float),
("z", ctypes.c_float),
("w", ctypes.c_float)]
class RigidBodyState(ctypes.Structure):
_fields_ = [("frame", ctypes.c_int),
("location", Vector3),
("rotation", Quaternion),
("velocity", Vector3),
("angular_velocity", Vector3)]
class PlayerRigidBodyState(ctypes.Structure):
_fields_ = [("state", RigidBodyState),
("input", PlayerInput)]
class BallRigidBodyState(ctypes.Structure):
_fields_ = [("state", RigidBodyState)]
class RigidBodyTick(ctypes.Structure):
_fields_ = [("ball", BallRigidBodyState),
("players", PlayerRigidBodyState * MAX_PLAYERS),
("num_players", ctypes.c_int)]
|
tests/core/test_hash_files.py | siliconcompiler/siliconcompiler | 424 | 12623428 | # Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import os
import siliconcompiler
def test_hash_files():
chip = siliconcompiler.Chip('top')
chip.load_target("freepdk45_demo")
chip.write_manifest("raw.json")
allkeys = chip.getkeys()
for keypath in allkeys:
if 'file' in chip.get(*keypath, field='type'):
chip.hash_files(*keypath)
chip.write_manifest("hashed.json")
#########################
if __name__ == "__main__":
test_hash_files()
|
flextensor/test/test_tvm_expr/grad/te-padding-case1.py | imxian/FlexTensor | 135 | 12623434 | import tvm
import numpy as np
import torch
N = 2
nC = 16
H = 14
W = 14
K = 16
R = 3
S = 3
padding = 1
P = H + 2 * padding
Q = W + 2 * padding
dtype = "float32"
A = tvm.te.placeholder([N, nC, H, W], dtype=dtype, name="A")
C = tvm.te.compute([N, K, P, Q],
lambda n, k, h, w :
tvm.tir.if_then_else(
tvm.tir.all(h >= padding, h < P-padding, w >= padding, w < Q-padding),
A[n, k, h-padding, w-padding], 0.0),
name="C")
dC = tvm.te.placeholder([N, K, P, Q], dtype=dtype, name="dC")
print(C.op.body[0].name)
print(type(C.op.body[0].args[1]))
dA = tvm.te.grad_op(A, C, dC)
s = tvm.te.create_schedule(dA.op)
print(tvm.lower(s, [A, dC, dA], simple_mode=True))
func = tvm.build(s, [A, dC, dA], target="llvm")
A_np = np.random.uniform(-10, 10, [N, nC, H, W]).astype("float32")
dC_np = np.random.uniform(-10, 10, [N, K, P, Q]).astype("float32")
dA_np = np.zeros([N, nC, H, W]).astype("float32")
ctx = tvm.context("llvm", 0)
A_tvm = tvm.nd.array(A_np, ctx)
dC_tvm = tvm.nd.array(dC_np, ctx)
dA_tvm = tvm.nd.array(dA_np, ctx)
func(A_tvm, dC_tvm, dA_tvm)
print(dA_tvm)
# =======>
# compare the results with numpy
golden_np = dC_np[:,:, padding:P-padding, padding:Q-padding]
tvm.testing.assert_allclose(dA_tvm.asnumpy(), golden_np, rtol=1e-30)
print("Compare with Numpy success!") |
src/datadog/azext_datadog/generated/_params.py | haroonf/azure-cli-extensions | 207 | 12623453 |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
tags_type,
get_three_state_flag,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azext_datadog.action import (
AddMarketplaceagreementsProperties,
AddDatadogOrganizationProperties,
AddUserInfo,
AddFilteringTags,
AddLogRulesFilteringTags,
AddSinglesignonconfigurationsProperties
)
def load_arguments(self, _):
with self.argument_context('datadog terms create') as c:
c.argument('properties', action=AddMarketplaceagreementsProperties, nargs='+', help='Represents the properties '
'of the resource.')
with self.argument_context('datadog terms update') as c:
c.argument('properties', action=AddMarketplaceagreementsProperties, nargs='+', help='Represents the properties '
'of the resource.')
with self.argument_context('datadog monitor list') as c:
c.argument('resource_group_name', resource_group_name_type)
with self.argument_context('datadog monitor show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name', id_part='name')
with self.argument_context('datadog monitor create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name')
c.argument('tags', tags_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
validator=get_default_location_from_resource_group)
c.argument('type_', options_list=['--type'], arg_type=get_enum_type(['SystemAssigned', 'UserAssigned']),
help='Identity type', arg_group='Identity')
c.argument('datadog_organization_properties', action=AddDatadogOrganizationProperties, nargs='+',
help='Datadog organization properties')
c.argument('user_info', action=AddUserInfo, nargs='+', help='User info')
c.argument('sku_name', type=str, help='Name of the SKU.', arg_group='Sku')
with self.argument_context('datadog monitor update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name', id_part='name')
c.argument('tags', tags_type)
c.argument('monitoring_status', type=str, help='Flag specifying if the resource monitoring is enabled or '
'disabled. Allowed values: "Enabled", "Disabled".')
c.argument('sku_name', type=str, help='Name of the SKU.', arg_group='Sku')
with self.argument_context('datadog monitor delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name', id_part='name')
with self.argument_context('datadog monitor get-default-key') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name', id_part='name')
with self.argument_context('datadog monitor list-api-key') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name')
with self.argument_context('datadog monitor list-host') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name')
with self.argument_context('datadog monitor list-linked-resource') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name')
with self.argument_context('datadog monitor list-monitored-resource') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name')
with self.argument_context('datadog monitor refresh-set-password-link') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name', id_part='name')
with self.argument_context('datadog monitor set-default-key') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name', id_part='name')
c.argument('created_by', type=str, help='The user that created the API key.')
c.argument('name', type=str, help='The name of the API key.')
c.argument('key', type=str, help='The value of the API key.')
c.argument('created', type=str, help='The time of creation of the API key.')
with self.argument_context('datadog monitor wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', options_list=['--name', '-n', '--monitor-name'], type=str, help='Monitor resource '
'name', id_part='name')
with self.argument_context('datadog tag-rule list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name')
with self.argument_context('datadog tag-rule show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name', id_part='name')
c.argument('rule_set_name', type=str, help='Rule set name', id_part='child_name_1')
with self.argument_context('datadog tag-rule create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name')
c.argument('rule_set_name', type=str, help='Rule set name')
c.argument('filtering_tags', action=AddFilteringTags, nargs='+', help='List of filtering tags to be used for '
'capturing metrics. If empty, all resources will be captured. If only Exclude action is specified, '
'the rules will apply to the list of all available resources. If Include actions are specified, the '
'rules will only include resources with the associated tags.', arg_group='Metric Rules')
c.argument('send_aad_logs', arg_type=get_three_state_flag(), help='Flag specifying if AAD logs should be sent '
'for the Monitor resource.', arg_group='Log Rules')
c.argument('send_subscription_logs', arg_type=get_three_state_flag(), help='Flag specifying if Azure '
'subscription logs should be sent for the Monitor resource.', arg_group='Log Rules')
c.argument('send_resource_logs', arg_type=get_three_state_flag(), help='Flag specifying if Azure resource logs '
'should be sent for the Monitor resource.', arg_group='Log Rules')
c.argument('log_rules_filtering_tags', action=AddLogRulesFilteringTags, nargs='+', help='List of filtering '
'tags to be used for capturing logs. This only takes effect if SendResourceLogs flag is enabled. If '
'empty, all resources will be captured. If only Exclude action is specified, the rules will apply '
'to the list of all available resources. If Include actions are specified, the rules will only '
'include resources with the associated tags.', arg_group='Log Rules')
with self.argument_context('datadog tag-rule update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name', id_part='name')
c.argument('rule_set_name', type=str, help='Rule set name', id_part='child_name_1')
c.argument('filtering_tags', action=AddFilteringTags, nargs='+', help='List of filtering tags to be used for '
'capturing metrics. If empty, all resources will be captured. If only Exclude action is specified, '
'the rules will apply to the list of all available resources. If Include actions are specified, the '
'rules will only include resources with the associated tags.', arg_group='Metric Rules')
c.argument('send_aad_logs', arg_type=get_three_state_flag(), help='Flag specifying if AAD logs should be sent '
'for the Monitor resource.', arg_group='Log Rules')
c.argument('send_subscription_logs', arg_type=get_three_state_flag(), help='Flag specifying if Azure '
'subscription logs should be sent for the Monitor resource.', arg_group='Log Rules')
c.argument('send_resource_logs', arg_type=get_three_state_flag(), help='Flag specifying if Azure resource logs '
'should be sent for the Monitor resource.', arg_group='Log Rules')
c.argument('log_rules_filtering_tags', action=AddLogRulesFilteringTags, nargs='+', help='List of filtering '
'tags to be used for capturing logs. This only takes effect if SendResourceLogs flag is enabled. If '
'empty, all resources will be captured. If only Exclude action is specified, the rules will apply '
'to the list of all available resources. If Include actions are specified, the rules will only '
'include resources with the associated tags.', arg_group='Log Rules')
c.ignore('body')
with self.argument_context('datadog sso-config list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name')
with self.argument_context('datadog sso-config show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name', id_part='name')
c.argument('configuration_name', type=str, help='Configuration name', id_part='child_name_1')
with self.argument_context('datadog sso-config create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name')
c.argument('configuration_name', type=str, help='Configuration name')
c.argument('properties', action=AddSinglesignonconfigurationsProperties, nargs='+', help='')
with self.argument_context('datadog sso-config update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name', id_part='name')
c.argument('configuration_name', type=str, help='Configuration name', id_part='child_name_1')
c.argument('properties', action=AddSinglesignonconfigurationsProperties, nargs='+', help='')
c.ignore('body')
with self.argument_context('datadog sso-config wait') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('monitor_name', type=str, help='Monitor resource name', id_part='name')
c.argument('configuration_name', type=str, help='Configuration name', id_part='child_name_1')
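# --- Illustrative CLI calls wired to the arguments registered above ---
# (resource names and values are made up; flag spellings follow azure-cli's usual
#  snake_case-to-kebab-case conversion)
# az datadog monitor show --name MyMonitor --resource-group MyGroup
# az datadog tag-rule create --monitor-name MyMonitor --resource-group MyGroup \
#     --rule-set-name default --send-resource-logs true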
|
srcs/python/kungfu/tensorflow/optimizers/async_sgd.py | Pandinosaurus/KungFu | 291 | 12623454 |
import tensorflow as tf
from kungfu.tensorflow.compat import _tf_assign, _tf_mod
from kungfu.tensorflow.ops import (barrier, counter, current_cluster_size,
current_rank, defuse, fuse,
request_variable,
request_variable_with_template,
save_variable)
from .core import (_create_kungfu_keras_optimizer, _create_kungfu_optimizer,
_KungFuAlgorithm)
def PairAveragingOptimizer(optimizer,
fuse_requests=True,
fused_model_name=None,
name=None,
use_locking=False,
with_keras=False):
"""PairAveragingOptimizer implements the [AD-PSGD]_ algorithm.
Every iteration of training, this optimizer:
1. Randomly selects a peer in the current cluster.
2. Pulls the selected peer's model
3. Performs model averaging with the local model.
4. Applies local gradients
    5. Saves the model to a local store from which other peers can pull.
.. [AD-PSGD] Asynchronous Decentralized Parallel Stochastic Gradient Descent, ICML 2018, `AD-PSGD Paper <https://arxiv.org/abs/1710.06952>`_
Arguments:
optimizer {tf.train.Optimizer, tf.keras.optimizers.Optimizer} -- Optimizer to use for computing gradients and applying updates.
Keyword Arguments:
- fuse_requests {bool} -- Fusing requests to amortise communication cost at the cost of extra GPU memory and cycles. (default: {True})
- fused_model_name {str} -- The unique name for the fused model kept in a local store. (default: {None})
- name {str} -- name prefix for the operations created when applying gradients. Defaults to "KungFu" followed by the provided optimizer type. (default: {None})
- use_locking {bool} -- Whether to use locking when updating variables. (default: {False})
- with_keras {bool} -- Runs with pure Keras or not (default: {False})
Raises:
TypeError: Wrapped optimizer is not a subclass of tf.train.Optimizer or tf.keras.optimizers.Optimizer
Returns:
optimizer {tf.train.Optimizer, tf.keras.optimizers.Optimizer} -- KungFu distributed optimizer
"""
if fused_model_name is None:
if hasattr(optimizer, 'get_name'):
# tf.train.Optimizer
fused_model_name = optimizer.get_name()
else:
try:
# tf.keras.optimizers.Optimizer has name since tf1.15
fused_model_name = optimizer.get_config()['name']
except:
# keras optimizer does not have name
fused_model_name = 'PairAveragingOptimizer'
print(
'WARNING: You must give a unique name if using parallel PairAveragingOptimizers.'
)
pair_avg = _PairAveraging(fuse_requests, fused_model_name=fused_model_name)
if not with_keras:
return _create_kungfu_optimizer(optimizer, pair_avg, name, use_locking)
else:
return _create_kungfu_keras_optimizer(optimizer, pair_avg)
def get_random_peer(cluster_size, self_rank):
t = tf.random.uniform([], minval=0, maxval=cluster_size, dtype=tf.int32)
return tf.cond(tf.equal(t,
self_rank), lambda: _tf_mod(t + 1, cluster_size),
lambda: tf.identity(t))
class _PairAveraging(_KungFuAlgorithm):
def __init__(self, fuse_requests, fused_model_name=None):
self._step = counter()
self._fuse_requests = fuse_requests
self._fused_model_name = fused_model_name
def _build_request_ops(self, target, variables):
if self._fuse_requests:
var_fused = fuse(variables)
other_peer_var_fused = request_variable(
target,
version=None,
name=self._fused_model_name,
shape=var_fused.shape,
dtype=var_fused.dtype)
return defuse(other_peer_var_fused, [v.shape for v in variables])
else:
return [
request_variable_with_template(target, v) for v in variables
]
def _build_save_op(self, variables):
if self._fuse_requests:
var_fused = fuse(variables)
return save_variable(var_fused, name=self._fused_model_name)
else:
return tf.group([save_variable(v) for v in variables])
def init_store(self, variables):
with tf.control_dependencies([self._build_save_op(variables)]):
return barrier()
def apply_gradients(self, apply_grads_func, grads_and_vars, **kwargs):
np, rank = current_cluster_size(), current_rank()
target = get_random_peer(np, rank)
gradients, variables = list(zip(*grads_and_vars))
# filter out grad == None
filtered_variables = [
var for (grad, var) in list(zip(gradients, variables))
if grad is not None
]
init_store_op = tf.cond(tf.equal(self._step, 0),
lambda: self.init_store(filtered_variables),
tf.no_op)
with tf.control_dependencies([init_store_op]):
other_peer_vars = self._build_request_ops(target,
filtered_variables)
save_model_op = self._build_save_op(filtered_variables)
assign_ops = [
_tf_assign(v, 0.5 * (v + other_v))
for v, other_v in zip(filtered_variables, other_peer_vars)
]
# We need to re-zip gradients and variables as grads_and_vars can be only unzipped once.
new_grads_and_vars = zip(gradients, variables)
apply_op = apply_grads_func(new_grads_and_vars, **kwargs)
with tf.control_dependencies(assign_ops):
with tf.control_dependencies([apply_op]):
with tf.control_dependencies([save_model_op]):
return tf.group(apply_op)
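# --- Hypothetical usage sketch (not part of the original module) ---
# Wraps a plain TensorFlow optimizer; actually running it requires launching the
# process through KungFu (e.g. kungfu-run) so that peers exist to average with,
# and a user-defined `loss` tensor.
# opt = PairAveragingOptimizer(tf.train.AdamOptimizer(0.001))
# train_op = opt.minimize(loss)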
|
src/transformers/convert_slow_tokenizers_checkpoints_to_fast.py | dctelus/transformers | 8,028 | 12623465 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Convert slow tokenizers checkpoints in fast (serialization format of the `tokenizers` library)"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
if tokenizer_name is None:
tokenizer_names = TOKENIZER_CLASSES
else:
tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
logger.info(f"Loading tokenizer classes: {tokenizer_names}")
for tokenizer_name in tokenizer_names:
tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
add_prefix = True
if checkpoint_name is None:
checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
else:
checkpoint_names = [checkpoint_name]
logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
for checkpoint in checkpoint_names:
logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
# Load tokenizer
tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
# Save fast tokenizer
logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
# For organization names we create sub-directories
if "/" in checkpoint:
checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
dump_path_full = os.path.join(dump_path, checkpoint_directory)
elif add_prefix:
checkpoint_prefix_name = checkpoint
dump_path_full = dump_path
else:
checkpoint_prefix_name = None
dump_path_full = dump_path
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
next_char = file_path.split(checkpoint)[-1][0]
if next_char == "/":
dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
checkpoint_prefix_name = None
logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
file_names = tokenizer.save_pretrained(
dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
)
logger.info(f"=> File names {file_names}")
for file_name in file_names:
if not file_name.endswith("tokenizer.json"):
os.remove(file_name)
logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
"download and convert all the checkpoints from AWS.",
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
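# Example invocation (tokenizer/checkpoint/output names are illustrative):
#   python -m transformers.convert_slow_tokenizers_checkpoints_to_fast \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers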
|
pwndbg/commands/vmmap.py | R2S4X/pwndbg | 287 | 12623467 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Command to print the virtual memory map a la /proc/self/maps.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gdb
import six
import pwndbg.color.memory as M
import pwndbg.commands
import pwndbg.compat
import pwndbg.vmmap
@pwndbg.commands.QuietSloppyParsedCommand
@pwndbg.commands.OnlyWhenRunning
def vmmap(map=None):
"""
    Print the virtual memory map, or the specific mapping for the
provided address / module name.
"""
int_map = None
str_map = None
if isinstance(map, six.string_types):
str_map = map
elif isinstance(map, six.integer_types + (gdb.Value,)):
int_map = int(map)
print(M.legend())
for page in pwndbg.vmmap.get():
if str_map and str_map not in page.objfile:
continue
if int_map and int_map not in page:
continue
print(M.get(page.vaddr, text=str(page)))
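# Example invocations inside a pwndbg-enabled GDB session (address is illustrative):
#   pwndbg> vmmap                  # print the whole map
#   pwndbg> vmmap libc             # only mappings whose objfile contains "libc"
#   pwndbg> vmmap 0x7ffff7dd5000   # only the mapping containing this address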
|
landlab/graph/radial/dual_radial.py | amanaster2/landlab | 257 | 12623496 |
import numpy as np
from ..dual import DualGraph
from ..voronoi.dual_voronoi import DualVoronoiGraph
from .radial import RadialGraph, RadialGraphLayout
class DualRadialGraph(DualGraph, RadialGraph):
"""Graph of a series of points on concentric circles.
Examples
--------
>>> from landlab.graph import DualRadialGraph
>>> graph = DualRadialGraph((1, 4), sort=True)
>>> graph.number_of_corners
4
>>> graph.y_of_corner
array([-0.5, -0.5, 0.5, 0.5])
>>> graph.x_of_corner
array([-0.5, 0.5, -0.5, 0.5])
"""
def __init__(self, shape, spacing=1.0, xy_of_center=(0.0, 0.0), sort=False):
"""Create a structured grid of triangles arranged radially.
Parameters
----------
shape : tuple of int
Shape of the graph as number of rings and number of points
in the first ring.
spacing : float, optional
Spacing between rings.
xy_of_center : tuple of float, optional
Coordinates of the center of the grid.
"""
try:
spacing = float(spacing)
except TypeError:
raise TypeError("spacing must be a float")
xy_of_center = tuple(np.broadcast_to(xy_of_center, 2))
x_of_node, y_of_node = RadialGraphLayout.xy_of_node(
shape, spacing=spacing, xy_of_center=xy_of_center
)
self._ring_spacing = spacing
self._shape = tuple(shape)
self._xy_of_center = xy_of_center
DualVoronoiGraph.__init__(self, (y_of_node, x_of_node), sort=False)
if sort:
self.sort()
@property
def shape(self):
return self._shape
@property
def spacing(self):
        return self._ring_spacing
@property
def origin(self):
return self._xy_of_center
@property
def xy_of_center(self):
return self._xy_of_center
|
Geometry/VeryForwardGeometryBuilder/test/print_geometry_info_geomFromDB_cfg.py | Purva-Chaudhari/cmssw | 852 | 12623525 |
import FWCore.ParameterSet.Config as cms
process = cms.Process("GeometryInfo")
# minimum of logs
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO')
)
)
# geometry
process.load("CondCore.CondDB.CondDB_cfi")
# input database (in this case local sqlite file)
#process.CondDB.connect = 'sqlite_file:../../CondTools/Geometry/PPSGeometry_oldDD_multiIOV.db'
process.CondDB.connect = cms.string( 'frontier://FrontierPrep/CMS_CONDITIONS' )
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDB,
DumpStat=cms.untracked.bool(True),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('VeryForwardIdealGeometryRecord'),
tag = cms.string("PPS_RecoGeometry_test_v1")
)
)
)
process.ctppsGeometryESModule = cms.ESProducer("CTPPSGeometryESModule",
fromPreprocessedDB = cms.untracked.bool(True),
fromDD4hep = cms.untracked.bool(False),
verbosity = cms.untracked.uint32(1),
)
# load alignment correction
process.load("CalibPPS.ESProducers.ctppsRPAlignmentCorrectionsDataESSourceXML_cfi")
process.ctppsRPAlignmentCorrectionsDataESSourceXML.RealFiles = cms.vstring(
"Geometry/VeryForwardGeometryBuilder/test/alignment_file_1.xml",
"Geometry/VeryForwardGeometryBuilder/test/alignment_file_2.xml",
)
process.ctppsRPAlignmentCorrectionsDataESSourceXML.verbosity = 1
# no events to process
process.source = cms.Source("EmptySource",
# firstRun = cms.untracked.uint32(273725), # start run for 2016-2017
firstRun = cms.untracked.uint32(314747), # start run for 2018
firstLuminosityBlock = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(1),
numberEventsInLuminosityBlock = cms.untracked.uint32(3),
numberEventsInRun = cms.untracked.uint32(30)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.ctppsGeometryInfo = cms.EDAnalyzer("CTPPSGeometryInfo",
geometryType = cms.untracked.string("real"),
printRPInfo = cms.untracked.bool(True),
printSensorInfo = cms.untracked.bool(True)
)
process.p = cms.Path(
process.ctppsGeometryInfo
)
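# To run this configuration (illustrative; assumes a CMSSW environment is set up):
#   cmsRun Geometry/VeryForwardGeometryBuilder/test/print_geometry_info_geomFromDB_cfg.py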
|
system/shui/dummyui.py | mrshu/stash | 1,822 | 12623622 |
# -*- coding: utf-8 -*-
"""
Stub ui to allow debug on PC
"""
AUTOCAPITALIZE_NONE = 0
def measure_string(*args, **kwargs):
return 12.0
def in_background(func):
return func
def get_screen_size():
return 100, 100
class View(object):
def __init__(self, *args, **kwargs):
self.on_screen = True
self.width = 100
self.height = 100
self.content_size = (100, 100)
self.content_offset = (0, 0)
self.superview = None
self.subviews = []
self.delegate = None
def add_subview(self, v):
self.subviews.append(v)
v.superview = self
def remove_subview(self, v):
self.subviews.remove(v)
def present(self, style='popover'):
pass
def wait_modal(self):
pass
def size_to_fit(self):
pass
def send_to_back(self):
pass
def bring_to_front(self):
pass
class TextField(View):
def __init__(self, *args, **kwargs):
super(TextField, self).__init__(*args, **kwargs)
self.text = ''
class TextView(View):
def __init__(self, *args, **kwargs):
super(TextView, self).__init__(*args, **kwargs)
self.text = ''
self.selected_range = (0, 0)
def replace_range(self, rng, s):
self.text = self.text[:rng[0]] + s + self.text[rng[1]:]
tot_len = len(self.text)
self.selected_range = (tot_len, tot_len)
def begin_editing(self):
pass
def end_editing(self):
pass
class ScrollView(View):
pass
class Button(View):
def __init__(self, *args, **kwargs):
super(Button, self).__init__(*args, **kwargs)
class TableView(View):
def __init__(self, *args, **kwargs):
super(TableView, self).__init__(*args, **kwargs)
class ListDataSource(object):
def __init__(self, lst):
pass
|
resource/param/vmaf_v3.py | elam03/vmaf | 2,874 | 12623627 | feature_dict = {
'VMAF_feature': ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3', 'adm2', 'motion', ],
}
model_type = "LIBSVMNUSVR"
model_param_dict = {
# ==== preprocess: normalize each feature ==== #
# 'norm_type': 'none', # default: do nothing
'norm_type': 'clip_0to1', # rescale to within [0, 1]
# 'norm_type': 'clip_minus1to1', # rescale to within [-1, 1]
# 'norm_type': 'normalize', # rescale to mean zero and std one
# ==== postprocess: clip final quality score ==== #
# 'score_clip': None, # default: do nothing
'score_clip': [0.0, 100.0], # clip to within [0, 100]
# ==== libsvmnusvr parameters ==== #
# 'gamma': 0.0, # default
'gamma': 0.05, # vmafv3
# 'C': 1.0, # default
'C': 4.0, # vmafv3
# 'nu': 0.5, # default
'nu': 0.9, # vmafv3
}
|
automig/lib/diffing.py | abe-winter/automigrate | 336 | 12623640 |
"diffing.py -- sql-diffing"
import collections
from . import wrappers
class DiffError(Exception): pass
def group_by_table(stmts):
"take list of WrappedStatement"
groups = collections.defaultdict(list)
for stmt in stmts:
if isinstance(stmt, (wrappers.CreateTable, wrappers.CreateIndex)):
groups[stmt.table].append(stmt)
elif isinstance(stmt, (wrappers.CreateEnum, wrappers.CreateExtension)):
groups[stmt.unique].append(stmt)
elif stmt is None:
            pass  # unclear where these come from; it happens even in the test suite
else:
raise DiffError("unhandled type", type(stmt))
return groups
class UnsupportedChange(Exception):
"returned in place of a migration string when there's an error"
def diff_column(table, colname, left, right):
"return list of stmts to alter column, or UnsupportedChange if we're confused"
if not left.success or not right.success:
# todo: add details (column name and rendered old/new)
return [UnsupportedChange(f"the column parser failed on new or old change for col {colname}")]
assert left.name == right.name
ret = []
prefix = f"alter table {table} alter column {left.name}"
if left.type != right.type:
ret.append(f"{prefix} type {right.type};")
if left.default != right.default:
if right.default is None:
ret.append(UnsupportedChange("can't unset default, file a bug and workaround by setting `default null` for now"))
else:
ret.append(f"{prefix} set default {right.default};")
if left.unique != right.unique:
constraint_name = f'{table}_{colname}_key' # warning: this is postgres-specific; support with docs and check target dialect
if not left.unique and not right.unique:
raise NotImplementedError("is this case default <-> explicit 'not unique'? is there such a thing?")
elif left.unique and not right.unique:
ret.append(f"alter table {table} drop constraint {constraint_name};")
elif not left.unique and right.unique:
ret.append(UnsupportedChange("can't add unique constraint, file a bug"))
else:
raise NotImplementedError("unexpected case in uniqueness permutations")
if left.not_null != right.not_null:
# note: I think the possible values here are (None | True), shouldn't ever be False
# todo: refactor this to `null` and include null / not_null as sources
# todo: this is assuming that 'not specified' is nullable -- link to DB docs supporting this and figure out the pkey case
# todo: not allowed under sqlite dialect
ret.append(f"{prefix} {'set' if right.not_null else 'drop'} not null;")
return ret
# todo: break out per-type diffing, this is too complicated
# pylint: disable=too-many-branches
def diff_stmt(args, left, right):
"diff two WrappedStmt with same unique key. return list of statements to run."
assert left.unique == right.unique
table = left.table
if isinstance(left, wrappers.CreateTable):
left_cols = {col.name: col for col in left.columns()}
right_cols = {col.name: col for col in right.columns()}
added_cols = [k for k in right_cols if k not in left_cols]
changes = [
f'alter table {table} add column {right_cols[k].render()};'
for k in added_cols
]
changed = {
k: (left_cols[k], right_cols[k])
for k in right_cols
if k in left_cols and left_cols[k] != right_cols[k]
}
if changed:
for left_col, right_col in changed.values():
changes.extend(diff_column(table, left_col.name, left_col.parse(), right_col.parse()))
for k in left_cols:
if k not in right_cols:
edit = f'alter table {table} drop column {k};'
if args.dialect == 'sqlite':
changes.append(f"-- {edit} -- sqlite doesn't drop columns")
else:
changes.append(edit)
if left.tail() != right.tail():
change = ' '.join([expr.value for expr in left.tail() or right.tail()])
changes.append(UnsupportedChange(f"can't modify table suffix: `{change}`"))
if left.pkey_fields() != right.pkey_fields():
# note: order matters here too; don't compare sets
if left.pkey_fields():
# this gets inserted at the beginning because need to drop constraint before dropping column
changes.insert(0, f'alter table {table} drop constraint {table}_pkey;')
new_pkey = ', '.join(right.pkey_fields())
if new_pkey:
# note: ParsedColumn.pkey means that this is inline pkey stmt, no need to add constraint
if set(added_cols) >= set(right.pkey_fields()) and any(right_cols[k].parse().pkey for k in added_cols):
pass # adding a pkey column will add the constraint
else:
changes.append(f'alter table {table} add primary key ({new_pkey});')
return changes
elif isinstance(left, wrappers.CreateIndex):
return [
# note: 'if exists' because other changes can sometimes automatically destroy an index
# todo: sqlite probably doesn't support dropping
f'drop index if exists {left.index_name};',
str(right.stmt)
]
elif isinstance(left, wrappers.CreateEnum):
old_vals = set(left.values)
new_vals = set(right.values)
changes = []
if old_vals - new_vals:
return [UnsupportedChange("removing enum values not supported yet -- file a bug")]
for val in new_vals - old_vals:
# warning: if enum vals have quotes in them this fails probably
changes.append(f"alter type {left.name} add value '{val}';")
return changes
else:
raise DiffError("unhandled type", type(left))
def diff_stmts(args, left, right):
"takes WrappedStmt lists, all for same table, and compares them"
key_l = {stmt.unique: stmt for stmt in left}
key_r = {stmt.unique: stmt for stmt in right}
output = []
for k in key_r:
if k not in key_l:
output.append(str(key_r[k].stmt).strip())
elif key_l[k] == key_r[k]:
pass # not relevant to diff
else:
output.extend(diff_stmt(args, key_l[k], key_r[k]))
return output
def diff(args, left, right):
"""take two lists of statements.
return dict of {tablename: list of migration statements or errors}
"""
# todo: figure out some way to apply statements in order even across tables
# realistically this needs to be a database, not a nested dict -- it gets queried in a lot of ways
groups_l = group_by_table(map(wrappers.wrap, left))
groups_r = group_by_table(map(wrappers.wrap, right))
output = collections.OrderedDict()
for key, stmts in groups_r.items():
if key in groups_l:
changes = diff_stmts(args, groups_l[key], stmts)
if changes:
output[key] = changes
else:
output[key] = [str(wrapped.stmt).strip() for wrapped in stmts]
return output
def get_errors(table_stmt_dict):
return {
table: [stmt for stmt in stmts if isinstance(stmt, Exception)]
for table, stmts in table_stmt_dict.items()
if any(isinstance(stmt, Exception) for stmt in stmts)
}
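# --- Hypothetical usage sketch (not part of the original module) ---
# `diff()` expects already-parsed SQL statement lists from the package's own loaders;
# its output is a plain dict, which `get_errors` filters as shown (names are made up).
# migrations = diff(args, old_stmts, new_stmts)   # {'table': ['alter table ...', ...]}
# problems = get_errors(migrations)               # {'table': [UnsupportedChange(...)]} if any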
|
orbitdeterminator/tests/test_ellipse_fit.py | DewanshiDewan/orbitdeterminator | 158 | 12623644 | """Tests ellipse_fit with satellites. Compatible with pytest."""
import pytest
import numpy as np
import sys
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from util.new_tle_kep_state import tle_to_state
from util.rkf5 import rkf5
from kep_determination.ellipse_fit import determine_kep
def test_ellipse_fit():
"""Tests ellipse fit with 8 satellites:
* NOAA-1
* GPS-23
* Cryosat-2
* NOAA-15
* NOAA-18
* NOAA-19
* MOLNIYA 2-10
* ISS
    To add your own test, copy the template and put the 2nd row of the TLE of the satellite
    in place of tle. In the rkf5 line, put the final time and time step such that 700±200
points are generated. Now, put the actual orbital parameters in the assert statements.
Args:
NIL
Returns:
NIL
"""
#noaa-1
tle = np.array([101.7540, 195.7370, 0.0031531, 352.8640, 117.2610, 12.53984625169364])
r = tle_to_state(tle)
_,vecs = rkf5(0,7200,10,r)
r = np.reshape(r,(1,6))
vecs = np.insert(vecs,0,r,axis=0)
vecs = vecs[:,0:3]
kep,_ = determine_kep(vecs)
assert kep[0] == pytest.approx(7826.006538, 0.1) # sma
assert kep[1] == pytest.approx(0.0031531, 0.01) # ecc
assert kep[2] == pytest.approx(101.7540, 0.1) # inc
assert kep[3] == pytest.approx(352.8640, 1.0) # argp
assert kep[4] == pytest.approx(195.7370, 0.1) # raan
assert kep[5] == pytest.approx(117.2610, 0.5) # true_anom
#gps-23
tle = np.array([54.4058, 84.8417, 0.0142955, 74.4543, 193.5934, 2.00565117179872])
r = tle_to_state(tle)
_,vecs = rkf5(0,43080,50,r)
r = np.reshape(r,(1,6))
vecs = np.insert(vecs,0,r,axis=0)
vecs = vecs[:,0:3]
kep,_ = determine_kep(vecs)
assert kep[0] == pytest.approx(26560.21419, 0.1) # sma
assert kep[1] == pytest.approx(0.0142955, 0.01) # ecc
assert kep[2] == pytest.approx(54.4058, 0.1) # inc
assert kep[3] == pytest.approx(74.4543, 1.0) # argp
assert kep[4] == pytest.approx(84.8417, 0.1) # raan
assert kep[5] == pytest.approx(193.5934, 0.5) # true_anom
#cryosat-2
tle = np.array([92.0287, 282.8216, 0.0005088, 298.0188, 62.0505, 14.52172969429489])
r = tle_to_state(tle)
_,vecs = rkf5(0,5950,10,r)
r = np.reshape(r,(1,6))
vecs = np.insert(vecs,0,r,axis=0)
vecs = vecs[:,0:3]
kep,_ = determine_kep(vecs)
assert kep[0] == pytest.approx(7096.69719, 0.1) # sma
assert kep[1] == pytest.approx(0.0005088, 0.01) # ecc
assert kep[2] == pytest.approx(92.0287, 0.1) # inc
assert kep[3] == pytest.approx(298.0188, 1.0) # argp
assert kep[4] == pytest.approx(282.8216, 0.1) # raan
assert kep[5] == pytest.approx(62.0505, 0.5) # true_anom
#noaa-15
tle = np.array([98.7705, 158.2195, 0.0009478, 307.8085, 52.2235, 14.25852803])
r = tle_to_state(tle)
_,vecs = rkf5(0,6120,10,r)
r = np.reshape(r,(1,6))
vecs = np.insert(vecs,0,r,axis=0)
vecs = vecs[:,0:3]
kep,_ = determine_kep(vecs)
assert kep[0] == pytest.approx(7183.76381, 0.1) # sma
assert kep[1] == pytest.approx(0.0009478, 0.01) # ecc
assert kep[2] == pytest.approx(98.7705, 0.1) # inc
assert kep[3] == pytest.approx(307.8085, 1.0) # argp
assert kep[4] == pytest.approx(158.2195, 0.1) # raan
assert kep[5] == pytest.approx(52.2235, 0.5) # true_anom
#noaa-18
tle = np.array([99.1472, 176.6654, 0.0014092, 197.4778, 162.5909, 14.12376102669957])
r = tle_to_state(tle)
_,vecs = rkf5(0,6120,10,r)
r = np.reshape(r,(1,6))
vecs = np.insert(vecs,0,r,axis=0)
vecs = vecs[:,0:3]
kep,_ = determine_kep(vecs)
assert kep[0] == pytest.approx(7229.38911, 0.1) # sma
assert kep[1] == pytest.approx(0.0014092, 0.01) # ecc
assert kep[2] == pytest.approx(99.1472, 0.1) # inc
assert kep[3] == pytest.approx(197.4778, 1.0) # argp
assert kep[4] == pytest.approx(176.6654, 0.1) # raan
assert kep[5] == pytest.approx(162.5909, 0.5) # true_anom
#noaa-19
tle = np.array([99.1401, 119.3629, 0.0014753, 44.0001, 316.2341, 14.12279464478196])
r = tle_to_state(tle)
_,vecs = rkf5(0,6120,10,r)
r = np.reshape(r,(1,6))
vecs = np.insert(vecs,0,r,axis=0)
vecs = vecs[:,0:3]
kep,_ = determine_kep(vecs)
assert kep[0] == pytest.approx(7229.71889, 0.1) # sma
assert kep[1] == pytest.approx(0.0014753, 0.01) # ecc
assert kep[2] == pytest.approx(99.1401, 0.1) # inc
assert kep[3] == pytest.approx(44.0001, 1.0) # argp
assert kep[4] == pytest.approx(119.3629, 0.1) # raan
assert kep[5] == pytest.approx(316.2341, 0.5) # true_anom
#molniya 2-10
tle = np.array([63.2749, 254.2968, 0.7151443, 294.4926, 9.2905, 2.01190064320534])
r = tle_to_state(tle)
_,vecs = rkf5(0,43000,50,r)
r = np.reshape(r,(1,6))
vecs = np.insert(vecs,0,r,axis=0)
vecs = vecs[:,0:3]
kep,_ = determine_kep(vecs)
assert kep[0] == pytest.approx(26505.1836, 0.1) # sma
assert kep[1] == pytest.approx(0.7151443, 0.01) # ecc
assert kep[2] == pytest.approx(63.2749, 0.1) # inc
assert kep[3] == pytest.approx(294.4926, 1.0) # argp
assert kep[4] == pytest.approx(254.2968, 0.1) # raan
assert kep[5] == pytest.approx(65.56742, 0.5) # true_anom
#ISS
tle = np.array([51.6402, 150.4026, 0.0004084, 108.2140, 238.0528, 15.54082454114406])
r = tle_to_state(tle)
_,vecs = rkf5(0,5560,10,r)
r = np.reshape(r,(1,6))
vecs = np.insert(vecs,0,r,axis=0)
vecs = vecs[:,0:3]
kep,_ = determine_kep(vecs)
assert kep[0] == pytest.approx(6782.95812, 0.1) # sma
assert kep[1] == pytest.approx(0.0004084, 0.01) # ecc
assert kep[2] == pytest.approx(51.6402, 0.1) # inc
assert kep[3] == pytest.approx(108.2140, 1.0) # argp
assert kep[4] == pytest.approx(150.4026, 0.1) # raan
assert kep[5] == pytest.approx(238.0528, 0.5) # true_anom
|
checkov/common/checks_infra/solvers/connections_solvers/and_connection_solver.py | niradler/checkov | 4,013 | 12623646 |
from typing import Optional, List, Tuple, Dict, Any
from networkx.classes.digraph import DiGraph
from checkov.common.graph.checks_infra.enums import Operators
from checkov.common.graph.checks_infra.solvers.base_solver import BaseSolver
from checkov.common.checks_infra.solvers.connections_solvers.complex_connection_solver import ComplexConnectionSolver
from checkov.terraform.graph_builder.graph_components.attribute_names import CustomAttributes
class AndConnectionSolver(ComplexConnectionSolver):
operator = Operators.AND
def __init__(self, solvers: Optional[List[BaseSolver]], operator: str) -> None:
super().__init__(solvers, operator)
def get_operation(self, graph_connector: DiGraph) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
passed, failed = self.run_attribute_solvers(graph_connector)
failed_ids = [f[CustomAttributes.ID] for f in failed]
passed = [p for p in passed if p[CustomAttributes.ID] not in failed_ids]
for connection_solver in self.get_sorted_connection_solvers():
connection_solver.set_vertices(graph_connector, failed)
passed_solver, failed_solver = connection_solver.get_operation(graph_connector)
passed.extend(passed_solver)
failed.extend(failed_solver)
failed_ids.extend([f[CustomAttributes.ID] for f in failed_solver])
passed = [p for p in passed if p[CustomAttributes.ID] not in failed_ids]
return self.filter_results(passed, failed)
def _get_operation(self, *args: Any, **kwargs: Any) -> None:
pass
|
sqlalchemy_continuum/relationship_builder.py | vhermecz/sqlalchemy-continuum | 193 | 12623660 | import sqlalchemy as sa
from .exc import ClassNotVersioned
from .expression_reflector import VersionExpressionReflector
from .operation import Operation
from .table_builder import TableBuilder
from .utils import adapt_columns, version_class, option
class RelationshipBuilder(object):
def __init__(self, versioning_manager, model, property_):
self.manager = versioning_manager
self.property = property_
self.model = model
def one_to_many_subquery(self, obj):
tx_column = option(obj, 'transaction_column_name')
remote_alias = sa.orm.aliased(self.remote_cls)
primary_keys = [
getattr(remote_alias, column.name) for column
in sa.inspect(remote_alias).mapper.columns
if column.primary_key and column.name != tx_column
]
return sa.exists(
sa.select(
[1]
).where(
sa.and_(
getattr(remote_alias, tx_column) <=
getattr(obj, tx_column),
*[
getattr(remote_alias, pk.name) ==
getattr(self.remote_cls, pk.name)
for pk in primary_keys
]
)
).group_by(
*primary_keys
).having(
sa.func.max(getattr(remote_alias, tx_column)) ==
getattr(self.remote_cls, tx_column)
).correlate(self.local_cls, self.remote_cls)
)
def many_to_one_subquery(self, obj):
tx_column = option(obj, 'transaction_column_name')
reflector = VersionExpressionReflector(obj, self.property)
return getattr(self.remote_cls, tx_column) == (
sa.select(
[sa.func.max(getattr(self.remote_cls, tx_column))]
).where(
sa.and_(
getattr(self.remote_cls, tx_column) <=
getattr(obj, tx_column),
reflector(self.property.primaryjoin)
)
)
)
def query(self, obj):
session = sa.orm.object_session(obj)
return (
session.query(self.remote_cls)
.filter(
self.criteria(obj)
)
)
def process_query(self, query):
"""
Process given SQLAlchemy Query object depending on the associated
RelationshipProperty object.
:param query: SQLAlchemy Query object
"""
if self.property.lazy == 'dynamic':
return query
if self.property.uselist is False:
return query.first()
return query.all()
def criteria(self, obj):
direction = self.property.direction
if self.versioned:
if direction.name == 'ONETOMANY':
return self.one_to_many_criteria(obj)
elif direction.name == 'MANYTOMANY':
return self.many_to_many_criteria(obj)
elif direction.name == 'MANYTOONE':
return self.many_to_one_criteria(obj)
else:
reflector = VersionExpressionReflector(obj, self.property)
return reflector(self.property.primaryjoin)
def many_to_many_criteria(self, obj):
"""
Returns the many-to-many query.
        Looks up remote items through associations and for each item
        returns the last version with a transaction less than or equal to the
transaction of `obj`. This must hold true for both the association and
the remote relation items.
Example
-------
Select all tags of article with id 3 and transaction 5
.. code-block:: sql
SELECT tags_version.*
FROM tags_version
WHERE EXISTS (
SELECT 1
FROM article_tag_version
WHERE article_id = 3
AND tag_id = tags_version.id
AND operation_type != 2
AND EXISTS (
SELECT 1
FROM article_tag_version as article_tag_version2
WHERE article_tag_version2.tag_id = article_tag_version.tag_id
AND article_tag_version2.tx_id <= 5
GROUP BY article_tag_version2.tag_id
HAVING
MAX(article_tag_version2.tx_id) =
article_tag_version.tx_id
)
)
AND EXISTS (
SELECT 1
FROM tags_version as tags_version_2
WHERE tags_version_2.id = tags_version.id
AND tags_version_2.tx_id <= 5
GROUP BY tags_version_2.id
HAVING MAX(tags_version_2.tx_id) = tags_version.tx_id
)
AND operation_type != 2
"""
return sa.and_(
self.association_subquery(obj),
self.one_to_many_subquery(obj),
self.remote_cls.operation_type != Operation.DELETE
)
def many_to_one_criteria(self, obj):
"""Returns the many-to-one query.
Returns the item on the 'one' side with the highest transaction id
        as long as it is less than or equal to the transaction id of the `obj`.
Example
-------
Look up the Article of a Tag with article_id = 4 and
transaction_id = 5
.. code-block:: sql
SELECT *
FROM articles_version
WHERE id = 4
AND transaction_id = (
SELECT max(transaction_id)
FROM articles_version
WHERE transaction_id <= 5
AND id = 4
)
AND operation_type != 2
"""
reflector = VersionExpressionReflector(obj, self.property)
return sa.and_(
reflector(self.property.primaryjoin),
self.many_to_one_subquery(obj),
self.remote_cls.operation_type != Operation.DELETE
)
def one_to_many_criteria(self, obj):
"""
Returns the one-to-many query.
For each item on the 'many' side, returns its latest version as long as
        the transaction of that version is less than or equal to the transaction
of `obj`.
Example
-------
Using the Article-Tags relationship, where we look for tags of
article_version with id = 3 and transaction = 5 the sql produced is
.. code-block:: sql
SELECT tags_version.*
FROM tags_version
WHERE tags_version.article_id = 3
AND tags_version.operation_type != 2
AND EXISTS (
SELECT 1
FROM tags_version as tags_version_last
WHERE tags_version_last.transaction_id <= 5
AND tags_version_last.id = tags_version.id
GROUP BY tags_version_last.id
HAVING
MAX(tags_version_last.transaction_id) =
tags_version.transaction_id
)
"""
reflector = VersionExpressionReflector(obj, self.property)
return sa.and_(
reflector(self.property.primaryjoin),
self.one_to_many_subquery(obj),
self.remote_cls.operation_type != Operation.DELETE
)
@property
def reflected_relationship(self):
"""
Builds a reflected one-to-many, one-to-one and many-to-one
relationship between two version classes.
"""
@property
def relationship(obj):
query = self.query(obj)
return self.process_query(query)
return relationship
def association_subquery(self, obj):
"""
Returns an EXISTS clause that checks if an association exists for given
SQLAlchemy declarative object. This query is used by
many_to_many_criteria method.
Example query:
.. code-block:: sql
EXISTS (
SELECT 1
FROM article_tag_version
WHERE article_id = 3
AND tag_id = tags_version.id
AND operation_type != 2
AND EXISTS (
SELECT 1
FROM article_tag_version as article_tag_version2
WHERE article_tag_version2.tag_id = article_tag_version.tag_id
AND article_tag_version2.tx_id <=5
GROUP BY article_tag_version2.tag_id
HAVING
MAX(article_tag_version2.tx_id) =
article_tag_version.tx_id
)
)
:param obj: SQLAlchemy declarative object
"""
tx_column = option(obj, 'transaction_column_name')
reflector = VersionExpressionReflector(obj, self.property)
association_table_alias = self.association_version_table.alias()
association_cols = [
association_table_alias.c[association_col.name]
for _, association_col
in self.remote_to_association_column_pairs
]
association_exists = sa.exists(
sa.select(
[1]
).where(
sa.and_(
association_table_alias.c[tx_column] <=
getattr(obj, tx_column),
*[association_col ==
self.association_version_table.c[association_col.name]
for association_col
in association_cols]
)
).group_by(
*association_cols
).having(
sa.func.max(association_table_alias.c[tx_column]) ==
self.association_version_table.c[tx_column]
).correlate(self.association_version_table)
)
return sa.exists(
sa.select(
[1]
).where(
sa.and_(
reflector(self.property.primaryjoin),
association_exists,
self.association_version_table.c.operation_type !=
Operation.DELETE,
adapt_columns(self.property.secondaryjoin),
)
).correlate(self.local_cls, self.remote_cls)
)
def build_association_version_tables(self):
"""
Builds many-to-many association version table for given property.
Association version tables are used for tracking change history of
many-to-many associations.
"""
column = list(self.property.remote_side)[0]
self.manager.association_tables.add(column.table)
builder = TableBuilder(
self.manager,
column.table
)
metadata = column.table.metadata
if builder.parent_table.schema:
table_name = builder.parent_table.schema + '.' + builder.table_name
elif metadata.schema:
table_name = metadata.schema + '.' + builder.table_name
else:
table_name = builder.table_name
if table_name not in metadata.tables:
self.association_version_table = table = builder()
self.manager.association_version_tables.add(table)
else:
# may have already been created if we visiting the 'other' side of
# a self-referential many-to-many relationship
self.association_version_table = metadata.tables[table_name]
def __call__(self):
"""
Builds reflected relationship between version classes based on given
parent object's RelationshipProperty.
"""
self.local_cls = version_class(self.model)
self.versioned = False
try:
self.remote_cls = version_class(self.property.mapper.class_)
self.versioned = True
except (AttributeError, KeyError):
return
except ClassNotVersioned:
self.remote_cls = self.property.mapper.class_
if (self.property.secondary is not None and
not self.property.viewonly and
not self.manager.is_excluded_property(
self.model, self.property.key)):
self.build_association_version_tables()
# store remote cls to association table column pairs
self.remote_to_association_column_pairs = []
for column_pair in self.property.local_remote_pairs:
if column_pair[0] in self.property.target.c.values():
self.remote_to_association_column_pairs.append(column_pair)
setattr(
self.local_cls,
self.property.key,
self.reflected_relationship
)
|
dme.py | CKylinMC/PagerMaid_Plugins | 153 | 12623701 | """ Module to automate message deletion. """
from asyncio import sleep
from os import path, remove
from os.path import exists
from PIL import Image, UnidentifiedImageError
from pagermaid import redis, log, redis_status
from pagermaid.listener import listener
from pagermaid.utils import alias_command
@listener(is_plugin=True, outgoing=True, command=alias_command("dme"),
description="编辑并删除当前对话您发送的特定数量的消息。限制:基于消息 ID 的 1000 条消息,大于 1000 "
"条可能会触发删除消息过快限制。入群消息非管理员无法删除。(倒序)当数字足够大时即可实现删除所有消息。",
parameters="<数量> [文本]")
async def dme(context):
""" Deletes specific amount of messages you sent. """
reply = await context.get_reply_message()
if reply and reply.photo:
if exists('plugins/dme.jpg'):
remove('plugins/dme.jpg')
target_file = reply.photo
await context.client.download_media(
await context.get_reply_message(), file="plugins/dme.jpg"
)
await context.edit("替换图片设置完成。")
elif reply and reply.sticker:
if exists('plugins/dme.jpg'):
remove('plugins/dme.jpg')
await context.client.download_media(reply.media.document, file="plugins/dme.webp")
try:
im = Image.open("plugins/dme.webp")
except UnidentifiedImageError:
await context.edit("替换图片设置发生错误。")
return
im.save('plugins/dme.png', "png")
remove('plugins/dme.webp')
target_file = await context.client.upload_file('plugins/dme.png')
await context.edit("替换图片设置完成。")
elif path.isfile("plugins/dme.jpg"):
target_file = await context.client.upload_file('plugins/dme.jpg')
elif path.isfile("plugins/dme.png"):
target_file = await context.client.upload_file('plugins/dme.png')
else:
target_file = False
await context.edit("注意:没有图片进行替换。")
try:
count = int(context.parameter[0]) + 1
except ValueError:
await context.edit("出错了呜呜呜 ~ 无效的参数。")
return
except IndexError:
await context.edit("出错了呜呜呜 ~ 无效的参数。")
return
dme_msg = "别搁这防撤回了。。。"
if len(context.parameter) == 1:
if not redis_status():
pass
else:
try:
dme_msg = redis.get("dme_msg").decode()
except:
pass
elif len(context.parameter) == 2:
dme_msg = context.parameter[1]
if not redis_status():
pass
elif not dme_msg == str(count):
try:
redis.set("dme_msg", dme_msg)
except:
pass
count_buffer = 0
try:
async for message in context.client.iter_messages(context.chat_id, from_user="me"):
if count_buffer == count:
break
if message.forward or message.via_bot or message.sticker or message.contact or message.poll or message.game or message.geo:
pass
elif message.text or message.voice:
if not message.text == dme_msg:
try:
await message.edit(dme_msg)
except:
pass
elif message.document or message.photo or message.file or message.audio or message.video or message.gif:
if target_file:
if not message.text == dme_msg:
try:
await message.edit(dme_msg, file=target_file)
except:
pass
else:
if not message.text == dme_msg:
try:
await message.edit(dme_msg)
except:
pass
else:
pass
await message.delete()
count_buffer += 1
except ValueError:
try:
            await context.edit('Something went wrong ~ unrecognized chat')
except:
pass
return
count -= 1
count_buffer -= 1
await log(f"批量删除了自行发送的 {str(count_buffer)} / {str(count)} 条消息。")
try:
notification = await send_prune_notify(context, count_buffer, count)
except:
return
await sleep(.5)
await notification.delete()
async def send_prune_notify(context, count_buffer, count):
return await context.client.send_message(
context.chat_id,
"删除了 "
+ str(count_buffer) + " / " + str(count)
+ " 条消息。"
)
|
tests/stress/dict_copy.py | sebastien-riou/micropython | 13,648 | 12623711 |
# copying a large dictionary
a = {i: 2 * i for i in range(1000)}
b = a.copy()
for i in range(1000):
print(i, b[i])
print(len(b))
|
scattertext/test/test_gensimPhraseAdder.py | shettyprithvi/scattertext | 1,823 | 12623727 | from unittest import TestCase
import pandas as pd
from scattertext.CorpusFromParsedDocuments import CorpusFromParsedDocuments
from scattertext.WhitespaceNLP import whitespace_nlp
from scattertext.representations.Word2VecFromParsedCorpus import GensimPhraseAdder
from scattertext.test.test_corpusFromPandas import get_docs_categories
class TestGensimPhraseAdder(TestCase):
@classmethod
def setUp(cls):
cls.categories, cls.documents = get_docs_categories()
cls.parsed_docs = []
for doc in cls.documents:
cls.parsed_docs.append(whitespace_nlp(doc))
cls.df = pd.DataFrame({'category': cls.categories,
'author': ['a', 'a', 'c', 'c', 'c',
'c', 'd', 'd', 'e', 'e'],
'parsed': cls.parsed_docs,
'document_lengths': [len(doc) for doc in cls.documents]})
cls.corpus = CorpusFromParsedDocuments(cls.df, 'category', 'parsed').build()
def test_add_phrase(self):
adder = GensimPhraseAdder()
# to do
#res = adder.add_phrases(self.corpus)
# self.fail()
|
fun/feudal_batch_processor.py | gooooloo/DLIB | 141 | 12623731 |
import numpy as np
from collections import namedtuple
def cosine_similarity(u, v):
return np.dot(np.squeeze(u),np.squeeze(v)) / (np.linalg.norm(u) * np.linalg.norm(v))
Batch = namedtuple("Batch", ["obs", "a", "returns", "s_diff", "ri", "gsum", "features"])
class FeudalBatch(object):
def __init__(self):
self.obs = []
self.a = []
self.returns = []
self.s_diff = []
self.ri = []
self.gsum = []
self.features = None
def add(self, obs, a, returns, s_diff, ri, gsum, features):
self.obs += [obs]
self.a += [a]
self.returns += [returns]
self.s_diff += [s_diff]
self.ri += [ri]
self.gsum += [gsum]
if not self.features:
self.features = features
def get_batch(self):
batch_obs = np.asarray(self.obs)
batch_a = np.asarray(self.a)
batch_r = np.asarray(self.returns)
batch_sd = np.squeeze(np.asarray(self.s_diff))
batch_ri = np.asarray(self.ri)
batch_gs = np.asarray(self.gsum)
return Batch(batch_obs,batch_a,batch_r,batch_sd,batch_ri,batch_gs,self.features)
class FeudalBatchProcessor(object):
"""
This class adapts the batch of PolicyOptimizer to a batch useable by
the FeudalPolicy.
"""
def __init__(self, c):
self.c = c
self.last_terminal = True
def _extend(self, batch):
if self.last_terminal:
self.last_terminal = False
self.s = [batch.s[0] for _ in range(self.c)]
self.g = [batch.g[0] for _ in range(self.c)]
# prepend with dummy values so indexing is the same
self.obs = [None for _ in range(self.c)]
self.a = [None for _ in range(self.c)]
self.returns = [None for _ in range(self.c)]
self.features = [None for _ in range(self.c)]
# extend with the actual values
self.obs.extend(batch.obs)
self.a.extend(batch.a)
self.returns.extend(batch.returns)
self.s.extend(batch.s)
self.g.extend(batch.g)
self.features.extend(batch.features)
# if this is a terminal batch, then append the final s and g c times
# note that both this and the above case can occur at the same time
if batch.terminal:
self.s.extend([batch.s[-1] for _ in range(self.c)])
self.g.extend([batch.g[-1] for _ in range(self.c)])
def process_batch(self, batch):
"""
Converts a normal batch into one used by the FeudalPolicy update.
FeudalPolicy requires a batch of the form:
c previous timesteps - batch size timesteps - c future timesteps
This class handles the tracking the leading and following timesteps over
time. Additionally, it also computes values across timesteps from the
batch to provide to FeudalPolicy.
"""
# extend with current batch
self._extend(batch)
# unpack and compute bounds
length = len(self.obs)
c = self.c
# normally we cannot compute samples for the last c elements, but
        # in the terminal case, we hallucinate values where necessary
end = length if batch.terminal else length - c
# collect samples to return in a FeudalBatch
feudal_batch = FeudalBatch()
for t in range(c, end):
# state difference
s_diff = self.s[t + c] - self.s[t]
# intrinsic reward
ri = 0
# note that this for loop considers s and g values
# 1 timestep to c timesteps (inclusively) ago
for i in range(1, c + 1):
ri_s_diff = self.s[t] - self.s[t - i]
if np.linalg.norm(ri_s_diff) != 0:
ri += cosine_similarity(ri_s_diff, self.g[t - i])
ri /= c
# sum of g values used to derive w, input to the linear transform
gsum = np.zeros_like(self.g[t - c])
for i in range(t - c, t + 1):
gsum += self.g[i]
# add to the batch
feudal_batch.add(self.obs[t], self.a[t], self.returns[t], s_diff,
ri, gsum, self.features[t])
# in the terminal case, set reset flag
if batch.terminal:
self.last_terminal = True
# in the general case, forget all but the last 2 * c elements
# reason being that the first c of those we have already computed
# a batch for, and the second c need those first c
else:
twoc = 2 * self.c
self.obs = self.obs[-twoc:]
self.a = self.a[-twoc:]
self.returns = self.returns[-twoc:]
self.s = self.s[-twoc:]
self.g = self.g[-twoc:]
self.features = self.features[-twoc:]
return feudal_batch.get_batch()
|
begin/cmdline.py | jamezpolley/begins | 120 | 12623737 | "Generate command line parsers and apply options using function signatures"
import argparse
import os
import sys
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from inspect import signature
except ImportError:
from funcsigs import signature
from begin import context, extensions, subcommands, utils
__all__ = ['create_parser', 'populate_parser',
'apply_options', 'call_function']
NODEFAULT = object()
class CommandLineError(ValueError):
"""Error in command line processing"""
class DefaultsManager(object):
"""Manage default values for command line options
Inspects environment variables and default argument values to determine the
correct default for command line option.
"""
def __init__(self, env_prefix=None, config_file=None, config_section=None):
self._use_env = env_prefix is not None
self._prefix = '' if not self._use_env else env_prefix
self._parser = configparser.ConfigParser()
self._section = config_section
if config_file is not None:
self._parser.read([config_file,
os.path.join(os.path.expanduser('~'), config_file)])
def metavar(self, name):
"Generate meta variable name for parameter"
metavar = (self._prefix + name).upper()
return metavar
def from_param(self, param, default=NODEFAULT):
"Get default value from signature paramater"
if param.default is not param.empty:
default = param.default
default = self.from_name(param.name, default)
return default
def from_name(self, name, default=NODEFAULT, section=None):
"Get default value from argument name"
if len(self._parser.sections()) > 0:
section = self._section if section is None else section
try:
default = self._parser.get(section, name)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
if self._use_env:
default = os.environ.get(self.metavar(name), default)
return default
def set_config_section(self, section):
self._section = section
def program_name(filename, func):
"""Choose program name for application
Iterate backwards through absolute path for application looking for first
name that is not a magic variable.
"""
fullpath = os.path.abspath(filename)
basename, filename = os.path.split(fullpath)
while len(basename) > 0:
if not filename.startswith('__'):
return filename
basename, filename = os.path.split(basename)
return func.__name__
def populate_flag(parser, param, defaults):
"""Add a flag option to the parser"""
default = defaults.from_param(param)
if not isinstance(default, bool):
default = utils.tobool(default)
help = ''
if param.annotation is not param.empty:
help = param.annotation + ' '
parser.add_argument('--' + param.name.replace('_', '-'),
action='store_true', default=default, dest=param.name,
help=(help + '(default: %(default)s)'if not default else ''))
parser.add_argument('--no-' + param.name.replace('_', '-'),
action='store_false', default=default, dest=param.name,
help=(help + '(default: %(default)s)' if default else ''))
def populate_option(parser, param, defaults, short_args):
"""Add a regulre option to the parser"""
kwargs = {'default': defaults.from_param(param)}
kwargs['metavar'] = defaults.metavar(param.name)
if param.annotation is not param.empty:
kwargs['help'] = param.annotation
args = []
if kwargs['default'] is NODEFAULT:
args.append(param.name)
else:
args.append('--' + param.name.replace('_', '-'))
if short_args:
args.append('-' + param.name[0])
if 'help' not in kwargs:
kwargs['help'] = '(default: %(default)s)'
else:
kwargs['help'] += ' (default: %(default)s)'
parser.add_argument(*args, **kwargs)
def populate_parser(parser, defaults, funcsig, short_args, lexical_order):
"""Populate parser according to function signature
Use the parameters accepted by the source function, according to the
    function signature provided, to populate a corresponding command line
argument parser.
"""
params = funcsig.parameters.values()
if lexical_order:
params = sorted(params, key=lambda p: p.name)
for param in params:
if param.kind == param.POSITIONAL_OR_KEYWORD or \
param.kind == param.KEYWORD_ONLY or \
param.kind == param.POSITIONAL_ONLY:
if isinstance(param.default, bool):
populate_flag(parser, param, defaults)
else:
populate_option(parser, param, defaults, short_args)
elif param.kind == param.VAR_POSITIONAL:
kwargs = {'nargs': '*'}
if param.annotation is not param.empty:
kwargs['help'] = param.annotation
parser.add_argument(param.name, **kwargs)
elif param.kind == param.VAR_KEYWORD:
msg = 'Variable length keyword arguments not supported'
raise ValueError(msg)
return parser
def create_parser(func, env_prefix=None, config_file=None, config_section=None,
short_args=True, lexical_order=False, sub_group=None, plugins=None,
collector=None, formatter_class=argparse.HelpFormatter):
"""Create and OptionParser object from a function definition.
Use the function's signature to generate an OptionParser object. Default
values are honoured, argument annotations are used as help strings and the
functions docstring becomes the parser description. Environment variables
can alter the default values of options. Variable positional arguments are
ingored but will alter the program's usage string. Variable keyword
arguments will raise a ValueError exception. A prefix on expected
environment variables can be added using the env_prefix argument.
"""
defaults = DefaultsManager(env_prefix, config_file, func.__name__)
parser = argparse.ArgumentParser(
prog=program_name(sys.argv[0], func),
argument_default=NODEFAULT,
conflict_handler='resolve',
description = func.__doc__,
formatter_class=formatter_class
)
collector = collector if collector is not None else subcommands.COLLECTORS[sub_group]
if plugins is not None:
collector.load_plugins(plugins)
if len(collector) > 0:
subparsers = parser.add_subparsers(title='Available subcommands',
dest='_subcommand')
for subfunc in collector.commands():
funcsig = signature(subfunc)
help = None
if subfunc.__doc__ is not None:
help = subfunc.__doc__.splitlines()[0]
subparser = subparsers.add_parser(subfunc.__name__, help=help,
conflict_handler='resolve', description=subfunc.__doc__,
formatter_class=formatter_class)
defaults.set_config_section(subfunc.__name__)
populate_parser(subparser, defaults, funcsig, short_args, lexical_order)
have_extensions = False
while hasattr(func, '__wrapped__') and not hasattr(func, '__signature__'):
if isinstance(func, extensions.Extension):
func.add_arguments(parser, defaults)
have_extensions = True
func = getattr(func, '__wrapped__')
funcsig = signature(func)
populate_parser(parser, defaults, funcsig, short_args, lexical_order)
return parser
def call_function(func, funcsig, opts):
"""Call function using command line options and arguments
Use the function's signature to extract expected values from the provided
    command line options. The extracted values are used to call the function.
    The presence of variable keyword arguments, missing attributes from the
    options object or failure to use the command line arguments list will
result in a CommandLineError being raised.
"""
def getoption(opts, name, default=None):
if not hasattr(opts, name):
msg = "Missing command line options '{0}'".format(name)
raise CommandLineError(msg)
value = getattr(opts, name)
if value is NODEFAULT:
if default is None:
msg = "'{0}' is a required option{1}".format(name, os.linesep)
sys.stderr.write(msg)
sys.exit(1)
else:
value = default
return value
pargs = []
kwargs = {}
for param in funcsig.parameters.values():
if param.kind == param.POSITIONAL_OR_KEYWORD or \
param.kind == param.POSITIONAL_ONLY:
pargs.append(getoption(opts, param.name))
elif param.kind == param.VAR_POSITIONAL:
pargs.extend(getoption(opts, param.name, []))
elif param.kind == param.KEYWORD_ONLY:
kwargs[param.name] = getoption(opts, param.name)
elif param.kind == param.VAR_KEYWORD:
msg = 'Variable length keyword arguments not supported'
raise CommandLineError(msg)
return func(*pargs, **kwargs)
def apply_options(func, opts, run_main=True, sub_group=None, collector=None):
"""Apply command line options to function and subcommands
Call the target function, and any chosen subcommands, using the parsed
command line arguments.
"""
ext = func
collector = collector if collector is not None else subcommands.COLLECTORS[sub_group]
while hasattr(ext, '__wrapped__') and not hasattr(ext, '__signature__'):
if isinstance(ext, extensions.Extension):
ext.run(opts)
ext = getattr(ext, '__wrapped__')
if run_main:
with context:
return_value = call_function(func, signature(ext), opts)
context.return_values += (return_value,)
if hasattr(opts, '_subcommand'):
subfunc = collector.get(opts._subcommand)
with context:
return_value = call_function(subfunc, signature(subfunc), opts)
context.return_values += (return_value,)
return context.last_return
|
atc/atcd/atcd/AtcdDBQueueTask.py | KeleiAzz/augmented-traffic-control | 4,319 | 12623747 | #
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
#
from sqlite3 import OperationalError
from atcd.db_manager import SQLiteManager
from sparts.sparts import option
from sparts.tasks.queue import QueueTask
class AtcdDBQueueTask(QueueTask):
OPT_PREFIX = 'sqlite'
workers = 1
DEFAULT_SQLITE_FILE = '/var/lib/atcd.db'
sqlite_file = option(
default=DEFAULT_SQLITE_FILE,
metavar='SQLITE_FILE',
help='Location to store the sqlite3 db [%(default)s]',
name='file',
)
def initTask(self):
super(AtcdDBQueueTask, self).initTask()
try:
self.sqlite_manager = SQLiteManager(self.sqlite_file, self.logger)
except OperationalError:
self.logger.exception(
'Unable to initialize DB from file "{0}"'.format(
self.sqlite_file
)
)
raise
def execute(self, item, context):
try:
obj, action = item
except ValueError:
self.logger.exception('Error executing on item: {0}'.format(item))
return
try:
func = getattr(self.sqlite_manager, action)
except AttributeError:
self.logger.exception(
'unable to run action, {0}, no such method'.format(action)
)
raise
try:
if isinstance(obj, tuple):
func(*obj)
else:
func(obj)
except OperationalError:
self.logger.exception("Unsupported operation")
return
def get_saved_shapings(self):
return self.sqlite_manager.get_saved_shapings()
|
openvim/test/test_openvim.py | acasana/openmano_movilnet | 204 | 12623774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##
# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
# This file is part of openmano
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: <EMAIL>
##
'''
This is a client tester for openvim.
It is almost DEPRECATED by the openvim client.
It is kept because some scripts still use it and because it provides the
-r option (recursive delete), which is very useful for deleting the
content of the database.
Another difference from the openvim client is that it is more verbose and
therefore more suitable for developers.
'''
__author__="<NAME>"
__date__ ="$5-oct-2014 11:09:29$"
import requests
import json
import yaml
import sys
import getopt
from jsonschema import validate as js_v, exceptions as js_e
version="0.0.2"
global global_config
def get_elements(url):
headers_req = {'content-type': 'application/json'}
try:
vim_response = requests.get(url, headers = headers_req)
#print vim_response
#print vim_response.status_code
if vim_response.status_code == 200:
#print vim_response.json()
#print json.dumps(vim_response.json(), indent=4)
content = vim_response.json()
return 1, content
#print http_content
else:
text = " Error. VIM response '%s': not possible to GET %s" % (vim_response.status_code, url)
text += "\n " + vim_response.text
#print text
return -vim_response.status_code,text
except requests.exceptions.RequestException, e:
return -1, " Exception "+ str(e.message)
def delete_elements(url):
headers_req = {'content-type': 'application/json'}
try:
vim_response = requests.delete(url, headers = headers_req)
#print vim_response
#print vim_response.status_code
if vim_response.status_code == 200:
pass
#print vim_response.json()
#print json.dumps(vim_response.json(), indent=4)
else:
#print vim_response.text
text = " Error. VIM response '%s': not possible to DELETE %s" % (vim_response.status_code, url)
text += "\n " + vim_response.text
#print text
return -vim_response.status_code,text
except requests.exceptions.RequestException, e:
return -1, " Exception "+ str(e.message)
return 1, None
def new_elements(url, payload):
headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
#print str(payload)
try:
vim_response = requests.post(url, data=json.dumps(payload), headers=headers_req)
#print vim_response
#print vim_response.status_code
if vim_response.status_code == 200:
#print vim_response.json()
#print json.dumps(vim_response.json(), indent=4)
return 1, vim_response.text
else:
#print vim_response.text
text = "Error. VIM response '%s': not possible to ADD %s" % (vim_response.status_code, url)
text += "\n" + vim_response.text
#print text
return -vim_response.status_code,text
except requests.exceptions.RequestException, e:
return -1, " Exception "+ str(e.message)
def get_details(url, what, c):
item_list = []
return_dict = {what+'s': []}
item = c.get(what,None)
if item is None: item = c.get(what+'s',None)
if item is None:
error_text= " Internal error, not found '" + what +"[s]' in content"
print 'get_details()', error_text, c
return -1, error_text
if type(item) is list:
item_list = item
else:
item_list.append(item)
if len(item_list)==0:
print what, "not found"
return 1
for item in item_list:
uuid = item.get('id',None)
if uuid is None: uuid = item.get('uuid',None)
if uuid is None:
error_text= " Internal error, not found 'id/uuid' in item"
print 'get_details()', error_text, item
return -1, error_text
#print " get", what, uuid, " >>>>>>>> ",
r,c = get_elements(url + "/" + uuid)
if r<0:
# print "fail"
print " get", what, uuid, "fail", c
return -1, c
#else:
# print 'ok'
return_dict[what+'s'].append(c[what])
return 1, return_dict
def action_details(url, what, c, force, payload):
item_list = []
return_dict = {what+'s': []}
headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
fail=0
ok=0
    #The payload may have the keypairs either inside a 'server','port' ... object or directly. In the latter case, put the keypairs inside 'what'
item = c.get(what,None)
if item is None: item = c.get(what+'s',None)
if item is None:
error_text= " Internal error, not found '" + what +"[s]' in content"
print 'get_details()', error_text, c
return -1, error_text
if type(item) is list:
item_list = item
else:
item_list.append(item)
if len(item_list)==0:
print what, "not found"
return 1
for item in item_list:
name = item.get('name',None)
uuid = item.get('id',None)
if uuid is None: uuid = item.get('uuid',None)
if uuid is None:
error_text= " Internal error, not found 'id/uuid' in item"
print 'get_details()', error_text, item
return -1, error_text
if not force:
r = raw_input("Action on " + what + " " + uuid + " " + name + " (y/N)? ")
if len(r)>0 and r[0].lower()=="y":
print " put", what, uuid, " >>>>>>>> ",
else:
continue
#print str(payload)
try:
vim_response = requests.post(url + "/" + uuid + "/action", data=json.dumps(payload), headers=headers_req)
if vim_response.status_code == 200:
print 'ok'
ok += 1
return_dict[what+'s'].append(vim_response.json())
return_dict[what+'s'][-1]['uuid'] = uuid
return_dict[what+'s'][-1]['name'] = name
else:
fail += 1
print "fail"
#print vim_response.text
#text = "Error. VIM response '%s': not possible to PUT %s" % (vim_response.status_code, url)
#text += "\n" + vim_response.text
#print text
error_dict = vim_response.json()
error_dict['error']['uuid']=uuid
error_dict['error']['name']=name
return_dict[what+'s'].append(error_dict)
except requests.exceptions.RequestException, e:
return -1, " Exception "+ str(e.message)
if ok>0 and fail>0: return 0, return_dict
elif fail==0 : return 1, return_dict
else: return -1, return_dict
def edit_details(url, what, c, force, payload):
item_list = []
return_dict = {what+'s': []}
headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
fail=0
ok=0
    #The payload may have the keypairs either inside a 'server','port' ... object or directly. In the latter case, wrap the keypairs inside 'what'
if what not in payload:
payload = {what:payload}
item = c.get(what,None)
if item is None: item = c.get(what+'s',None)
if item is None:
error_text= " Internal error, not found '" + what +"[s]' in content"
print 'get_details()', error_text, c
return -1, error_text
if type(item) is list:
item_list = item
else:
item_list.append(item)
if len(item_list)==0:
print what, "not found"
return 1
for item in item_list:
name = item.get('name',None)
uuid = item.get('id',None)
if uuid is None: uuid = item.get('uuid',None)
if uuid is None:
error_text= " Internal error, not found 'id/uuid' in item"
print 'get_details()', error_text, item
return -1, error_text
if not force:
r = raw_input("Edit " + what + " " + uuid + " " + name + " (y/N)? ")
if len(r)>0 and r[0].lower()=="y":
print " put", what, uuid, " >>>>>>>> ",
else:
continue
#print str(payload)
try:
vim_response = requests.put(url + "/" + uuid, data=json.dumps(payload), headers=headers_req)
if vim_response.status_code == 200:
print 'ok'
ok += 1
return_dict[what+'s'].append( vim_response.json()[what] )
else:
fail += 1
print "fail"
#print vim_response.text
#text = "Error. VIM response '%s': not possible to PUT %s" % (vim_response.status_code, url)
#text += "\n" + vim_response.text
#print text
error_dict = vim_response.json()
error_dict['error']['uuid']=uuid
error_dict['error']['name']=name
return_dict[what+'s'].append(error_dict)
except requests.exceptions.RequestException, e:
return -1, " Exception "+ str(e.message)
if ok>0 and fail>0: return 0, return_dict
elif fail==0 : return 1, return_dict
else: return -1, return_dict
def get_del_recursive(url, what, url_suffix, force=False, recursive=False):
#print
#print " get", what, a, " >>>>>>>> ",
r,c = get_elements(url + what + 's' + url_suffix)
if r<0:
print c, "when getting", what, url_suffix
return -1
# print "ok"
list_todelete = c.get(what, None)
if list_todelete is None: list_todelete = c.get(what+'s', None)
if list_todelete is None:
print " Internal error, not found '" + what +"[s]' in", c
return -3, " Internal error, not found a valid dictionary"
if type(list_todelete) == dict:
list_todelete = (list_todelete, )
if len(list_todelete)==0:
print what, url_suffix, "not found"
return 1
for c in list_todelete:
uuid=c.get('id', None)
if uuid is None:
uuid=c.get('uuid', None)
if uuid is None:
print "Id not found"
continue
name = c.get("name","")
if recursive:
if what=='tenant' :
get_del_recursive(url + uuid + "/", 'server', "", force, recursive)
get_del_recursive(url + uuid + "/", 'flavor', "", force, recursive)
get_del_recursive(url + uuid + "/", 'image', "", force, recursive)
get_del_recursive(url, 'network', "?tenant_id="+uuid, force, recursive)
elif what=='flavors' :
#get_del_recursive(url, 'servers', "?flavorRef="+uuid, force, recursive)
pass
elif what=='image' :
get_del_recursive(url, 'server', "?imageRef="+uuid, force, recursive)
elif what=='hosts' :
get_del_recursive(url, 'server', "?hostId="+uuid, force, recursive)
if not force:
r = raw_input("Delete " + what + " " + uuid + " " + name + " (y/N)? ")
if len(r)>0 and r[0].lower()=="y":
pass
else:
continue
r,c = delete_elements(url + what + "s/" + uuid)
if r<0:
#print "Error deleting", vimURI, -r
print c
else:
print what, uuid, name, "deleted"
return 1
def check_valid_uuid(uuid):
id_schema = {"type" : "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
try:
js_v(uuid, id_schema)
return True
except js_e.ValidationError:
return False
def change_string(text, var_list):
end=0
type_=None
while True:
ini = text.find("${", end)
if ini<0: return text
end = text.find("}", ini)
if end<0: return text
end+=1
var = text[ini:end]
if ' ' in var:
kk=var.split(" ")
var=kk[0]+"}"
type_=kk[-1][:-1]
var = var_list.get(var, None)
if var==None: return text
text = text[:ini] + var + text[end:]
if type_ != None:
if 'null' in type_ and text=="null":
return None
if 'int' in type_ : #and text.isnumeric():
return int(text)
return text
def chage_var_recursively(data, var_list):
    '''Recursively check the content of data, looking for "*${*}*" variables and changing them
    It assumes that these variables are not used as dictionary keys.
    Attributes:
        'data': dictionary or list. None or empty is considered valid
'var_list': dictionary (name:change) pairs
Return:
None, data is modified
'''
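    # Illustrative example (hypothetical values): with
    # var_list = {"${port}": "80"}, a nested string "tcp:${port}" becomes
    # "tcp:80", and a value "${port int}" is substituted and converted to the
    # integer 80 by change_string() above.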
if type(data) is dict:
for k in data.keys():
if type(data[k]) is dict or type(data[k]) is tuple or type(data[k]) is list:
chage_var_recursively(data[k], var_list)
elif type(data[k]) is str:
data[k] = change_string(data[k], var_list)
if type(data) is list:
for k in range(0,len(data)):
if type(data[k]) is dict or type(data[k]) is list:
chage_var_recursively(data[k], var_list)
elif type(data[k]) is str:
data[k] = change_string(data[k], var_list)
def change_var(data):
if type(data) is not dict:
return -1, "Format error, not a object (dictionary)"
if "${}" not in data:
return 0, data
var_list={}
for var in data["${}"]:
r = var.find("}",) + 1
if r<=2 or var[:2] != '${':
return -1, "Format error at '${}':" + var
#change variables inside description text
if "${" in var[r:]:
var = var[:r] + change_string(var[r:], var_list)
d_start = var.rfind("(",) + 1
d_end = var.rfind(")",)
if d_start>0 and d_end>=d_start:
default = var[d_start:d_end]
else: default=None
v = raw_input(var[r:] + "? ")
if v=="":
if default != None:
v = default
else:
v = raw_input(" empty string? try again: ")
var_list[ var[:r] ] = str(v)
del data["${}"]
chage_var_recursively(data, var_list)
return 0, data
def parse_yaml_json(text):
try:
data = yaml.load(text)
return 0, data
except yaml.YAMLError, exc:
error_pos = ""
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
return -1, " Error yaml/json format error at " + error_pos
def load_file(file_, parse=False):
try:
f = open(file_, 'r')
read_data = f.read()
f.close()
if not parse:
return 0, read_data
except IOError, e:
return -1, " Error opening file '" + file_ + "': " + e.args[1]
try:
data = yaml.load(read_data)
return change_var(data)
except yaml.YAMLError, exc:
error_pos = ""
if hasattr(exc, 'problem_mark'):
mark = exc.problem_mark
error_pos = " at position: (%s:%s)" % (mark.line+1, mark.column+1)
return -2, " Error yaml/json format error at '"+ file_ +"'"+error_pos
def load_configuration(configuration_file):
default_tokens ={'http_port':8080, 'http_host':'localhost', 'test_mode':False, 'of_controller_nets_with_same_vlan':True}
r, config = load_file(configuration_file, parse=True)
if r < 0:
return False, config
#Check default values tokens
for k,v in default_tokens.items():
if k not in config: config[k]=v
return (True, config)
items_list = ('server','host','tenant','image','flavor','network','port')
action_list = ('list','get','new','del','edit','action')
def usage(complete=False):
global items_list
global action_list
print "Usage: ", sys.argv[0], "[options]", " [" + ",".join(action_list) +"] ", "<item> [<other>] "
print " Perform an test action over openvim"
print " "+",".join(action_list)+": List (by default), GET detais, Creates, Deletes, Edit"
print " <item>: can be one of " + ",".join(items_list)
print " <other>: list of uuid|name for 'get|del'; list of json/yaml files for 'new' or 'edit'"
if not complete:
print " Type -h or --help for a complete list of options"
return
print " Options:"
print " -v|--version: prints current version"
print " -c|--config [configuration_file]: loads the configuration file (default: openvimd.cfg)"
print " -h|--help: shows this help"
print " -u|--url [URL]: url to use instead of the one loaded from configuration file"
print " -t|--tenant [tenant uuid]: tenant to be used for some comands. IF mising it will use the default obtained in configuration file"
print " -F|--filter [A=B[&C=D...]: URL query string used for 'get' or 'del' commands"
print " -f|--force : Do not ask for confirmation when deleting. Also remove dependent objects."
print " -r|--recursive : Delete also dependency elements, (from tenants: images, flavors,server; from hosts: instances; ..."
print " Examples:"
print " ",sys.argv[0]," tenant #list tenants "
print " ",sys.argv[0]," -F'device_owner=external' get port #get details of all external ports"
print " ",sys.argv[0]," del server ses pan #delete server names 'ses' and 'pan'. Do not ask for confirmation"
print " ",sys.argv[0]," -r -f del host #delete all host and all the dependencies "
print " ",sys.argv[0]," new host ./Host/nfv100.json #add a host which information is in this file"
print " ",sys.argv[0]," edit network f348faf8-59ef-11e4-b4c7-52540030594e '{\"network\":{\"admin_state_up\":false}}'"
print " #change the admin status of this network"
return
if __name__=="__main__":
global vimURI
global vimURI_admin
global what
global query_string
#init variables
action="list"
what=None
url=None
query_string = ""
force = False
recursive = False
tenant = None
additional = []
#look for parent dir
config_file = '../openvimd.cfg'
pos = sys.argv[0].rfind("/")
if pos<0:
base_dir="./"
else:
base_dir = sys.argv[0] [:pos+1]
if pos>=0:
config_file = base_dir + config_file
#get params
try:
opts, args = getopt.getopt(sys.argv[1:], "hvrfc:u:t:F:",
["config", "help", "version", "force", "filter","tenant","url","recursive"])
except getopt.GetoptError, err:
print " Error:", err # will print something like "option -a not recognized"
usage()
sys.exit(-2)
for o, a in opts:
if o in ("-v", "--version"):
print "test_openvim version", version, "Oct 2014"
print "(c) Copyright Telefonica"
sys.exit(0)
elif o in ("-h", "--help"):
usage(True)
sys.exit(0)
elif o in ("-c", "--config"): config_file = a
elif o in ("-f", "--force"): force = True
elif o in ("-r", "--recursive"): recursive = True
elif o in ("-F", "--filter"): query_string = "?"+a
elif o in ("-u", "--url"): url = a
elif o in ("-t", "--tenant"): tenant = a
else:
assert False, "Unhandled option"
for a in args:
if len(a) == 0:
print " Warning!!! Found an empty parameter?"
elif a[0]=="-":
print " Error!!! Put options parameter at the beginning"
sys.exit(-2)
elif what is not None:
additional.append(a)
elif a in items_list:
what=a
elif a[:-1] in items_list and a[-1]=='s':
what=a[:-1]
elif a in action_list:
action=a
else:
print " Missing <item>", ",".join(items_list)
sys.exit(-2)
if what is None:
usage()
sys.exit(-1)
#Load configuration file
r, config_dic = load_configuration(config_file)
#print config_dic
if not r:
print config_dic
config_dic={}
#exit(-1)
#override parameters obtained by command line
try:
if url is not None:
vimURI = vimURI_admin = url
else:
vimURI = "http://" + config_dic['http_host'] +":"+ str(config_dic['http_port']) + "/openvim/"
if 'http_admin_port' in config_dic:
vimURI_admin = "http://" + config_dic['http_host'] +":"+ str(config_dic['http_admin_port']) + "/openvim/"
except: #key error
print " Error: can not get URL; neither option --u,-url, nor reading configuration file"
exit(-1)
if tenant is None:
tenant = config_dic.get('tenant_id', None)
#check enough parameters
URI=vimURI
if (what in ('host','port') and action in ('del','new')) or (what=='host' and action=='edit' ):
if vimURI_admin is None:
print " Error: Can not get admin URL; neither option -t,--tenant, nor reading configuration file"
exit(-1)
else:
URI=vimURI_admin
if URI[-1] != "/": URI+="/"
if what in ('server','image','flavor'):
if tenant is None:
print " Error: Can not get tenant; neither option -t,--tenant, nor reading configuration file"
exit(-1)
URI += tenant + "/"
exit_code=0
try:
#load file for new/edit
payload_list=[]
if action=='new' or action=='edit' or action=='action':
if len(additional)==0:
if action=='new' :
additional.append(base_dir+what+"s/new_"+what+".yaml")
#print " New what? Missing additional parameters to complete action"
else:
print " What must be edited? Missing additional parameters to complete action"
exit(-1)
if action=='edit'or action=='action':
#obtain only last element
additional_temp = additional[:-1]
additional = additional[-1:]
for a in additional:
r,payload = load_file(a, parse=True)
if r<0:
if r==-1 and "{" in a or ":" in a:
#try to parse directly
r,payload = parse_yaml_json(a)
if r<0:
print payload
exit (-1)
else:
print payload
exit (-1)
payload_list.append(payload)
if action=='edit'or action=='action':
additional = additional_temp
#perform actions NEW
if action=='new':
for payload in payload_list:
print "\n new", what, a, " >>>>>>>> ",
r,c = new_elements(URI+what+'s', payload)
if r>0:
print "ok"
else:
print "fail"
exit_code = -1
print c
#try to decode
exit(exit_code)
#perform actions GET LIST EDIT DEL
if len(additional)==0:
additional=[""]
for a in additional:
filter_qs = query_string
if a != "" :
if check_valid_uuid(a):
if len(filter_qs) > 0: filter_qs += "&" + "id=" + str(a)
else: filter_qs += "?" + "id=" + str(a)
else:
if len(filter_qs) > 0: filter_qs += "&" + "name=" + str(a)
else: filter_qs += "?" + "name=" + str(a)
if action=='list' or action=='get' or action=='edit'or action=='action':
url = URI + what+'s'
print url + filter_qs
#print " get", what, a, " >>>>>>>> ",
r,c = get_elements(url + filter_qs)
if r<0:
#print "fail"
exit_code = -1
print c
else:
#print "ok"
if action=='list':
print json.dumps(c, indent=4)
continue
if action=='get':
r1,c1 = get_details(url, what, c)
elif action=='action':
r1,c1 = action_details(url, what, c, force, payload_list[0])
else: # action=='edit':
r1,c1 = edit_details(url, what, c, force, payload_list[0])
if r1<0:
exit_code = -1
else:
if r>0: print "ok"
else: print "ok with some fails"
print json.dumps(c1, indent=4)
elif action=='del':
r = get_del_recursive(URI, what, filter_qs, force, recursive)
if r<0:
exit_code = -1
exit(exit_code)
except KeyboardInterrupt:
print " Canceled"
|
causalimpact/tests/test_inferences.py | cdutr/causalimpact-1 | 152 | 12623791 | """Unit Tests for inferences module"""
import pytest
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from statsmodels.tsa.statespace.structural import UnobservedComponents
from statsmodels.tsa.arima_process import ArmaProcess
import causalimpact
compile_posterior = causalimpact.inferences.compile_posterior_inferences
np.random.seed(1)
@pytest.fixture
def data():
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 1 + arma_process.generate_sample(nsample=100)
X = X.reshape(-1, 1)
y = 1.2 * X + np.random.normal(size=(100, 1))
data = np.concatenate((y, X), axis=1)
data = pd.DataFrame(data)
return data
def test_compile_posterior_inferences_w_data(data):
pre_period = [0, 70]
post_period = [71, 100]
df_pre = data.loc[pre_period[0]: pre_period[1], :]
df_post = data.loc[post_period[0]: post_period[1], :]
post_period_response = None
alpha = 0.05
orig_std_params = (0., 1.)
model = UnobservedComponents(
endog=df_pre.iloc[:, 0].values,
level='llevel',
exog=df_pre.iloc[:, 1:].values
)
trained_model = model.fit()
inferences = compile_posterior(
trained_model,
data,
df_pre,
df_post,
post_period_response,
alpha,
orig_std_params
)
expected_response = pd.Series(data.iloc[:, 0], name='response')
assert_series_equal(expected_response, inferences['series']['response'])
expected_cumsum = pd.Series(
np.cumsum(expected_response),
name='cum_response'
)
assert_series_equal(expected_cumsum, inferences['series']['cum_response'])
predictor = trained_model.get_prediction()
forecaster = trained_model.get_forecast(
steps=len(df_post),
exog=df_post.iloc[:, 1].values.reshape(-1, 1),
alpha=alpha
)
pre_pred = predictor.predicted_mean
post_pred = forecaster.predicted_mean
point_pred = np.concatenate([pre_pred, post_pred])
expected_point_pred = pd.Series(point_pred, name='point_pred')
assert_series_equal(
expected_point_pred,
inferences['series']['point_pred']
)
pre_ci = pd.DataFrame(predictor.conf_int(alpha=alpha))
pre_ci.index = df_pre.index
post_ci = pd.DataFrame(forecaster.conf_int(alpha=alpha))
post_ci.index = df_post.index
ci = pd.concat([pre_ci, post_ci])
expected_pred_upper = ci.iloc[:, 1]
expected_pred_upper = expected_pred_upper.rename('point_pred_upper')
expected_pred_lower = ci.iloc[:, 0]
expected_pred_lower = expected_pred_lower.rename('point_pred_lower')
assert_series_equal(
expected_pred_upper,
inferences['series']['point_pred_upper']
)
assert_series_equal(
expected_pred_lower,
inferences['series']['point_pred_lower']
)
expected_cum_pred = pd.Series(
np.cumsum(point_pred),
name='cum_pred'
)
assert_series_equal(
expected_cum_pred,
inferences['series']['cum_pred']
)
expected_cum_pred_lower = pd.Series(
np.cumsum(expected_pred_lower),
name='cum_pred_lower'
)
assert_series_equal(
expected_cum_pred_lower,
inferences['series']['cum_pred_lower']
)
expected_cum_pred_upper = pd.Series(
np.cumsum(expected_pred_upper),
name='cum_pred_upper'
)
assert_series_equal(
expected_cum_pred_upper,
inferences['series']['cum_pred_upper']
)
expected_point_effect = pd.Series(
expected_response - expected_point_pred,
name='point_effect'
)
assert_series_equal(
expected_point_effect,
inferences['series']['point_effect']
)
expected_point_effect_lower = pd.Series(
expected_response - expected_pred_lower,
name='point_effect_lower'
)
assert_series_equal(
expected_point_effect_lower,
inferences['series']['point_effect_lower']
)
expected_point_effect_upper = pd.Series(
expected_response - expected_pred_upper,
name='point_effect_upper'
)
assert_series_equal(
expected_point_effect_upper,
inferences['series']['point_effect_upper']
)
expected_cum_effect = pd.Series(
np.concatenate((np.zeros(len(df_pre)),
np.cumsum(expected_point_effect.iloc[len(df_pre):]))),
name='cum_effect'
)
assert_series_equal(
expected_cum_effect,
inferences['series']['cum_effect']
)
expected_cum_effect_lower = pd.Series(
np.concatenate(
(np.zeros(len(df_pre)),
np.cumsum(expected_point_effect_lower.iloc[len(df_pre):]))),
name='cum_effect_lower'
)
assert_series_equal(
expected_cum_effect_lower,
inferences['series']['cum_effect_lower']
)
expected_cum_effect_upper = pd.Series(
np.concatenate((
np.zeros(len(df_pre)),
np.cumsum(expected_point_effect_upper.iloc[len(df_pre):])
)),
name='cum_effect_upper'
)
assert_series_equal(
expected_cum_effect_upper,
inferences['series']['cum_effect_upper']
)
def test_compile_posterior_inferences_w_post_period_response(data):
pre_period = [0, 70]
post_period = [71, 100]
df_pre = data.loc[pre_period[0]: pre_period[1], :]
df_post = data.loc[post_period[0]: post_period[1], :]
post_period_response = df_post.loc[post_period[0]: post_period[1]]
X = df_post.iloc[:, 1:]
y = X.copy()
y[:] = np.nan
df_post = pd.DataFrame(np.concatenate([y, X], axis=1))
data_index = data.index
data = pd.concat([df_pre, df_post], axis=0)
data.index = data_index
alpha = 0.05
orig_std_params = (0., 1.)
model = UnobservedComponents(
endog=data.iloc[:, 0].values,
level='llevel',
exog=data.iloc[:, 1:].values
)
trained_model = model.fit()
inferences = compile_posterior(
trained_model,
data,
df_pre,
None,
post_period_response,
alpha,
orig_std_params
)
expected_response = pd.Series(data.iloc[:, 0], name='response')
assert_series_equal(expected_response, inferences['series']['response'])
expected_cumsum = pd.Series(
np.cumsum(expected_response),
name='cum_response'
)
assert_series_equal(expected_cumsum, inferences['series']['cum_response'])
predictor = trained_model.get_prediction(end=len(df_pre) - 1)
forecaster = trained_model.get_prediction(start=len(df_pre))
pre_pred = predictor.predicted_mean
post_pred = forecaster.predicted_mean
point_pred = np.concatenate([pre_pred, post_pred])
expected_point_pred = pd.Series(point_pred, name='point_pred')
assert_series_equal(
expected_point_pred,
inferences['series']['point_pred']
)
pre_ci = pd.DataFrame(predictor.conf_int(alpha=alpha))
pre_ci.index = df_pre.index
post_ci = pd.DataFrame(forecaster.conf_int(alpha=alpha))
post_ci.index = df_post.index
ci = pd.concat([pre_ci, post_ci])
expected_pred_upper = ci.iloc[:, 1]
expected_pred_upper = expected_pred_upper.rename('point_pred_upper')
expected_pred_upper.index = data.index
expected_pred_lower = ci.iloc[:, 0]
expected_pred_lower = expected_pred_lower.rename('point_pred_lower')
expected_pred_lower.index = data.index
assert_series_equal(
expected_pred_upper,
inferences['series']['point_pred_upper']
)
assert_series_equal(
expected_pred_lower,
inferences['series']['point_pred_lower']
)
expected_cum_pred = pd.Series(
np.cumsum(point_pred),
name='cum_pred'
)
assert_series_equal(
expected_cum_pred,
inferences['series']['cum_pred']
)
expected_cum_pred_lower = pd.Series(
np.cumsum(expected_pred_lower),
name='cum_pred_lower'
)
assert_series_equal(
expected_cum_pred_lower,
inferences['series']['cum_pred_lower']
)
expected_cum_pred_upper = pd.Series(
np.cumsum(expected_pred_upper),
name='cum_pred_upper'
)
assert_series_equal(
expected_cum_pred_upper,
inferences['series']['cum_pred_upper']
)
expected_point_effect = pd.Series(
expected_response - expected_point_pred,
name='point_effect'
)
assert_series_equal(
expected_point_effect,
inferences['series']['point_effect']
)
expected_point_effect_lower = pd.Series(
expected_response - expected_pred_lower,
name='point_effect_lower'
)
assert_series_equal(
expected_point_effect_lower,
inferences['series']['point_effect_lower']
)
expected_point_effect_upper = pd.Series(
expected_response - expected_pred_upper,
name='point_effect_upper'
)
assert_series_equal(
expected_point_effect_upper,
inferences['series']['point_effect_upper']
)
expected_cum_effect = pd.Series(
np.concatenate((
np.zeros(len(df_pre)),
np.cumsum(expected_point_effect.iloc[len(df_pre):])
)),
name='cum_effect'
)
assert_series_equal(
expected_cum_effect,
inferences['series']['cum_effect']
)
expected_cum_effect_lower = pd.Series(
np.concatenate((
np.zeros(len(df_pre)),
np.cumsum(expected_point_effect_lower.iloc[len(df_pre):])
)),
name='cum_effect_lower'
)
assert_series_equal(
expected_cum_effect_lower,
inferences['series']['cum_effect_lower']
)
expected_cum_effect_upper = pd.Series(
np.concatenate((
np.zeros(len(df_pre)),
np.cumsum(expected_point_effect_upper.iloc[len(df_pre):])
)),
name='cum_effect_upper'
)
assert_series_equal(
expected_cum_effect_upper,
inferences['series']['cum_effect_upper']
)
|
example/tests/checkout/test_checkout_utils.py | icvntechstudio/django-salesman | 222 | 12623802 | import pytest
from django.core.exceptions import ValidationError
from salesman.checkout import utils
def test_validate_address():
with pytest.raises(ValidationError):
assert utils.validate_address('', context={})
assert utils.validate_address('Test', context={}) == 'Test'
|
opem/Test/test_Padulles_Hauer.py | Martenet/opem | 173 | 12623829 | <reponame>Martenet/opem<filename>opem/Test/test_Padulles_Hauer.py
# -*- coding: utf-8 -*-
'''
>>> from opem.Dynamic.Padulles_Hauer import *
>>> import shutil
>>> Test_Vector={"T":343,"E0":0.6,"N0":5,"KO2":0.0000211,"KH2":0.0000422,"KH2O":0.000007716,"tH2":3.37,"tO2":6.74,"t1":2,"t2":2,"tH2O":18.418,"B":0.04777,"C":0.0136,"Rint":0.00303,"rho":1.168,"qMethanol":0.0002,"CV":2,"i-start":0.1,"i-stop":4,"i-step":0.1,"Name":"test3"}
>>> Padulles_Hauer_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True)
###########
Padulles-Hauer-Model Simulation
###########
Analyzing . . .
I : 0.1
E : 2.9234154992732004 V
FC Efficiency : 0.41518043908246366
FC Power : 0.3238407424843217 W
FC Voltage : 3.2384074248432166 V
PH2 : 0.19717074233280188 atm
PH2O : 0.2426831613626925 atm
PO2 : 0.1906263686382979 atm
Power-Thermal : 0.2911592575156784 W
###########
I : 0.2
E : 2.9234139617015558 V
FC Efficiency : 0.4108963136482338
FC Power : 0.6409982492912448 W
FC Voltage : 3.204991246456224 V
PH2 : 0.1971566919511875 atm
PH2O : 0.24266586776736396 atm
PO2 : 0.1906184358000996 atm
Power-Thermal : 0.5890017507087553 W
###########
I : 0.3
E : 2.9234124240659227 V
FC Efficiency : 0.4083740564879825
FC Power : 0.955595292181879 W
FC Voltage : 3.1853176406062635 V
PH2 : 0.19714264156957312 atm
PH2O : 0.24264857417203542 atm
PO2 : 0.1906105029619013 atm
Power-Thermal : 0.889404707818121 W
###########
I : 0.4
E : 2.9234108863662946 V
FC Efficiency : 0.4065731449109761
FC Power : 1.2685082121222457 W
FC Voltage : 3.171270530305614 V
PH2 : 0.19712859118795872 atm
PH2O : 0.24263128057670688 atm
PO2 : 0.19060257012370302 atm
Power-Thermal : 1.1914917878777547 W
###########
I : 0.5
E : 2.9234093486026658 V
FC Efficiency : 0.4051674903968853
FC Power : 1.5801532125478528 W
FC Voltage : 3.1603064250957056 V
PH2 : 0.19711454080634436 atm
PH2O : 0.24261398698137834 atm
PO2 : 0.1905946372855047 atm
Power-Thermal : 1.4948467874521474 W
###########
I : 0.6
E : 2.923407810775032 V
FC Efficiency : 0.4040118444230801
FC Power : 1.8907754319000147 W
FC Voltage : 3.1512923865000246 V
PH2 : 0.19710049042472996 atm
PH2O : 0.2425966933860498 atm
PO2 : 0.1905867044473064 atm
Power-Thermal : 1.7992245680999854 W
###########
I : 0.7
E : 2.923406272883388 V
FC Efficiency : 0.4030287270042349
FC Power : 2.2005368494431226 W
FC Voltage : 3.1436240706330323 V
PH2 : 0.19708644004311557 atm
PH2O : 0.24257939979072127 atm
PO2 : 0.19057877160910808 atm
Power-Thermal : 2.1044631505568776 W
###########
I : 0.8
E : 2.9234047349277277 V
FC Efficiency : 0.4021718894938075
FC Power : 2.509552590441359 W
FC Voltage : 3.1369407380516985 V
PH2 : 0.19707238966150117 atm
PH2O : 0.24256210619539273 atm
PO2 : 0.1905708387709098 atm
Power-Thermal : 2.4104474095586417 W
###########
I : 0.9
E : 2.9234031969080454 V
FC Efficiency : 0.4014115005665013
FC Power : 2.81790873397684 W
FC Voltage : 3.1310097044187106 V
PH2 : 0.19705833927988675 atm
PH2O : 0.24254481260006414 atm
PO2 : 0.19056290593271147 atm
Power-Thermal : 2.7170912660231608 W
###########
I : 1.0
E : 2.9234016588243374 V
FC Efficiency : 0.40072719160282416
FC Power : 3.1256720945020287 W
FC Voltage : 3.1256720945020287 V
PH2 : 0.19704428889827239 atm
PH2O : 0.2425275190047356 atm
PO2 : 0.1905549730945132 atm
Power-Thermal : 3.0243279054979717 W
###########
I : 1.1
E : 2.9234001206765963 V
FC Efficiency : 0.40010443449551725
FC Power : 3.4328960479715387 W
FC Voltage : 3.1208145890650347 V
PH2 : 0.197030238516658 atm
PH2O : 0.24251022540940706 atm
PO2 : 0.19054704025631486 atm
Power-Thermal : 3.3321039520284623 W
###########
I : 1.2
E : 2.9233985824648183 V
FC Efficiency : 0.39953250222749515
FC Power : 3.7396242208493544 W
FC Voltage : 3.116353517374462 V
PH2 : 0.1970161881350436 atm
PH2O : 0.24249293181407852 atm
PO2 : 0.19053910741811658 atm
Power-Thermal : 3.640375779150646 W
###########
I : 1.3
E : 2.923397044188998 V
FC Efficiency : 0.3990032485837277
FC Power : 4.045892940639 W
FC Voltage : 3.1122253389530767 V
PH2 : 0.19700213775342923 atm
PH2O : 0.24247563821874998 atm
PO2 : 0.19053117457991825 atm
Power-Thermal : 3.9491070593610007 W
###########
I : 1.4
E : 2.923395505849129 V
FC Efficiency : 0.3985103413824903
FC Power : 4.351732927896794 W
FC Voltage : 3.1083806627834245 V
PH2 : 0.19698808737181484 atm
PH2O : 0.24245834462342142 atm
PO2 : 0.19052324174171997 atm
Power-Thermal : 4.258267072103206 W
###########
I : 1.5
E : 2.923393967445207 V
FC Efficiency : 0.3980487608857143
FC Power : 4.657170502362857 W
FC Voltage : 3.1047803349085714 V
PH2 : 0.19697403699020044 atm
PH2O : 0.24244105102809288 atm
PO2 : 0.19051530890352164 atm
Power-Thermal : 4.567829497637144 W
###########
I : 1.6
E : 2.923392428977226 V
FC Efficiency : 0.39761446042126253
FC Power : 4.962228466057358 W
FC Voltage : 3.101392791285848 V
PH2 : 0.19695998660858605 atm
PH2O : 0.24242375743276434 atm
PO2 : 0.19050737606532336 atm
Power-Thermal : 4.877771533942644 W
###########
I : 1.7
E : 2.9233908904451815 V
FC Efficiency : 0.3972041300730298
FC Power : 5.2669267647683755 W
FC Voltage : 3.098192214569633 V
PH2 : 0.19694593622697168 atm
PH2O : 0.2424064638374358 atm
PO2 : 0.19049944322712503 atm
Power-Thermal : 5.188073235231625 W
###########
I : 1.8
E : 2.9233893518490675 V
FC Efficiency : 0.39681502801851076
FC Power : 5.571282993379892 W
FC Voltage : 3.0951572185443843 V
PH2 : 0.19693188584535729 atm
PH2O : 0.24238917024210727 atm
PO2 : 0.19049151038892673 atm
Power-Thermal : 5.498717006620109 W
###########
I : 1.9
E : 2.9233878131888784 V
FC Efficiency : 0.3964448575287326
FC Power : 5.875312788575817 W
FC Voltage : 3.0922698887241142 V
PH2 : 0.1969178354637429 atm
PH2O : 0.24237187664677873 atm
PO2 : 0.19048357755072845 atm
Power-Thermal : 5.809687211424183 W
###########
I : 2.0
E : 2.9233862744646095 V
FC Efficiency : 0.3960916755547374
FC Power : 6.1790301386539035 W
FC Voltage : 3.0895150693269517 V
PH2 : 0.19690378508212852 atm
PH2O : 0.2423545830514502 atm
PO2 : 0.19047564471253012 atm
Power-Thermal : 6.120969861346097 W
###########
I : 2.1
E : 2.923384735676255 V
FC Efficiency : 0.39575382364054146
FC Power : 6.482447631232071 W
FC Voltage : 3.086879824396224 V
PH2 : 0.19688973470051413 atm
PH2O : 0.24233728945612165 atm
PO2 : 0.19046771187433184 atm
Power-Thermal : 6.432552368767931 W
###########
I : 2.2
E : 2.92338319682381 V
FC Efficiency : 0.3954298749226794
FC Power : 6.78557665367318 W
FC Voltage : 3.0843530243968997 V
PH2 : 0.19687568431889974 atm
PH2O : 0.2423199958607931 atm
PO2 : 0.1904597790361335 atm
Power-Thermal : 6.744423346326822 W
###########
I : 2.3
E : 2.923381657907269 V
FC Efficiency : 0.39511859292081414
FC Power : 7.088427556999405 W
FC Voltage : 3.0819250247823504 V
PH2 : 0.19686163393728537 atm
PH2O : 0.24230270226546458 atm
PO2 : 0.19045184619793523 atm
Power-Thermal : 7.0565724430005945 W
###########
I : 2.4
E : 2.9233801189266266 V
FC Efficiency : 0.39481889910524637
FC Power : 7.391009791250212 W
FC Voltage : 3.079587413020922 V
PH2 : 0.19684758355567097 atm
PH2O : 0.242285408670136 atm
PO2 : 0.1904439133597369 atm
Power-Thermal : 7.368990208749787 W
###########
I : 2.5
E : 2.923378579881877 V
FC Efficiency : 0.39452984708947947
FC Power : 7.693332018244849 W
FC Voltage : 3.07733280729794 V
PH2 : 0.19683353317405658 atm
PH2O : 0.24226811507480747 atm
PO2 : 0.19043598052153862 atm
Power-Thermal : 7.681667981755151 W
###########
I : 2.6
E : 2.923377040773016 V
FC Efficiency : 0.39425060188740335
FC Power : 7.995402206276542 W
FC Voltage : 3.0751546947217467 V
PH2 : 0.19681948279244216 atm
PH2O : 0.2422508214794789 atm
PO2 : 0.1904280476833403 atm
Power-Thermal : 7.99459779372346 W
###########
I : 2.7
E : 2.923375501600037 V
FC Efficiency : 0.3939804230873111
FC Power : 8.297227710218774 W
FC Voltage : 3.073047300081027 V
PH2 : 0.19680543241082776 atm
PH2O : 0.24223352788415034 atm
PO2 : 0.190420114845142 atm
Power-Thermal : 8.307772289781228 W
###########
I : 2.8
E : 2.923373962362936 V
FC Efficiency : 0.3937186510874208
FC Power : 8.59881533974927 W
FC Voltage : 3.0710054784818825 V
PH2 : 0.1967913820292134 atm
PH2O : 0.2422162342888218 atm
PO2 : 0.19041218200694368 atm
Power-Thermal : 8.62118466025073 W
###########
I : 2.9
E : 2.9233724230617057 V
FC Efficiency : 0.3934646957478549
FC Power : 8.900171417816479 W
FC Voltage : 3.0690246268332686 V
PH2 : 0.196777331647599 atm
PH2O : 0.24219894069349326 atm
PO2 : 0.1904042491687454 atm
Power-Thermal : 8.934828582183522 W
###########
I : 3.0
E : 2.923370883696343 V
FC Efficiency : 0.39321802696722546
FC Power : 9.201301831033076 W
FC Voltage : 3.0671006103443585 V
PH2 : 0.1967632812659846 atm
PH2O : 0.24218164709816473 atm
PO2 : 0.19039631633054707 atm
Power-Thermal : 9.248698168966925 W
###########
I : 3.1
E : 2.9233693442668414 V
FC Efficiency : 0.39297816680494896
FC Power : 9.502212073343667 W
FC Voltage : 3.0652297010786023 V
PH2 : 0.19674923088437024 atm
PH2O : 0.2421643535028362 atm
PO2 : 0.1903883834923488 atm
Power-Thermal : 9.562787926656334 W
###########
I : 3.2
E : 2.9233678047731946 V
FC Efficiency : 0.39274468285467545
FC Power : 9.8029072840527 W
FC Voltage : 3.0634085262664685 V
PH2 : 0.19673518050275585 atm
PH2O : 0.24214705990750765 atm
PO2 : 0.19038045065415046 atm
Power-Thermal : 9.877092715947303 W
###########
I : 3.3
E : 2.9233662652153996 V
FC Efficiency : 0.3925171826377131
FC Power : 10.103392281094736 W
FC Voltage : 3.0616340245741624 V
PH2 : 0.19672113012114145 atm
PH2O : 0.2421297663121791 atm
PO2 : 0.19037251781595219 atm
Power-Thermal : 10.191607718905265 W
###########
I : 3.4
E : 2.923364725593449 V
FC Efficiency : 0.39229530883366054
FC Power : 10.403671590268678 W
FC Voltage : 3.059903408902552 V
PH2 : 0.19670707973952706 atm
PH2O : 0.24211247271685057 atm
PO2 : 0.19036458497775385 atm
Power-Thermal : 10.506328409731324 W
###########
I : 3.5
E : 2.923363185907339 V
FC Efficiency : 0.39207873520256487
FC Power : 10.703749471030022 W
FC Voltage : 3.0582141345800062 V
PH2 : 0.1966930293579127 atm
PH2O : 0.24209517912152204 atm
PO2 : 0.19035665213955555 atm
Power-Thermal : 10.82125052896998 W
###########
I : 3.6
E : 2.923361646157063 V
FC Efficiency : 0.3918671630816706
FC Power : 11.003629939333312 W
FC Voltage : 3.0565638720370307 V
PH2 : 0.1966789789762983 atm
PH2O : 0.2420778855261935 atm
PO2 : 0.19034871930135727 atm
Power-Thermal : 11.13637006066669 W
###########
I : 3.7
E : 2.923360106342616 V
FC Efficiency : 0.3916603183622587
FC Power : 11.303316787934786 W
FC Voltage : 3.054950483225618 V
PH2 : 0.1966649285946839 atm
PH2O : 0.24206059193086493 atm
PO2 : 0.19034078646315894 atm
Power-Thermal : 11.451683212065214 W
###########
I : 3.8
E : 2.9233585664639925 V
FC Efficiency : 0.3914579488697281
FC Power : 11.602813604498742 W
FC Voltage : 3.0533720011838796 V
PH2 : 0.19665087821306954 atm
PH2O : 0.2420432983355364 atm
PO2 : 0.19033285362496066 atm
Power-Thermal : 11.767186395501259 W
###########
I : 3.9
E : 2.9233570265211877 V
FC Efficiency : 0.3912598220840501
FC Power : 11.902123787796803 W
FC Voltage : 3.051826612255591 V
PH2 : 0.19663682783145514 atm
PH2O : 0.24202600474020786 atm
PO2 : 0.19032492078676233 atm
Power-Thermal : 12.082876212203196 W
###########
Report is generating ...
Done!
>>> Padulles_Hauer_Data["Status"]
True
>>> Padulles_Hauer_Data["P"][5]
1.8907754319000147
>>> Padulles_Hauer_Data["I"][5]
0.6
>>> Padulles_Hauer_Data["V"][5]
3.1512923865000246
>>> Padulles_Hauer_Data["EFF"][5]
0.4040118444230801
>>> Padulles_Hauer_Data["PO2"][5]
0.1905867044473064
>>> Padulles_Hauer_Data["PH2"][5]
0.19710049042472996
>>> Padulles_Hauer_Data["PH2O"][5]
0.2425966933860498
>>> Padulles_Hauer_Data["Ph"][5]
1.7992245680999854
>>> Padulles_Hauer_Data["V0"]
3.1748727715256186
>>> Padulles_Hauer_Data["K"]
-0.03643090556526363
>>> Padulles_Hauer_Data["VE"][5]
3.1530142281864606
>>> Padulles_Hauer_Data=Dynamic_Analysis(InputMethod={}, TestMode=True,PrintMode=False)
>>> Padulles_Hauer_Data["Status"]
False
>>> qH2_Calc(qMethanol=None,CV=2,t1=2,t2=2)
[Error] qH2 Calculation Failed (qMethanol:None, CV:2, t1:2, t2:2)
>>> Test_Vector={"T":2,"E0":-0.6,"N0":5,"KO2":0.0000211,"KH2":0.0000422,"KH2O":0.000007716,"tH2":3.37,"tO2":6.74,"t1":2,"t2":2,"tH2O":18.418,"B":0.04777,"C":0.0136,"Rint":0.00303,"rho":1.168,"qMethanol":0.0002,"CV":2,"i-start":4,"i-stop":0.1,"i-step":-2,"Name":"test3"}
>>> Padulles_Hauer_Data=Dynamic_Analysis(InputMethod=Test_Vector, TestMode=True)
###########
Padulles-Hauer-Model Simulation
###########
Analyzing . . .
I : 0.1
E : -3.00044655685555 V
FC Efficiency : -0.3442890552930171
FC Power : -0.2685454631285534 W
FC Voltage : -2.6854546312855336 V
PH2 : 0.19717074233280188 atm
PH2O : 0.2426831613626925 atm
PO2 : 0.1906263686382979 atm
Power-Thermal : 0.8835454631285535 W
###########
I : 2.0
E : -3.000446727262597 V
FC Efficiency : -0.3633740938974685
FC Power : -5.6686358648005095 W
FC Voltage : -2.8343179324002548 V
PH2 : 0.19690378508212852 atm
PH2O : 0.2423545830514502 atm
PO2 : 0.19047564471253012 atm
Power-Thermal : 17.96863586480051 W
###########
Report is generating ...
Warning : The value of I(>0.1) leads to minus amount of V, please check your inputs
Done!
>>> shutil.rmtree("Padulles-Hauer")
'''
|
slybot/slybot/pageactions.py | rmdes/portia-dashboard | 223 | 12623867 | import json
import re
LUA_SOURCE = """
function main(splash)
assert(splash:go(splash.args.url))
splash:runjs(splash.args.js_source)
splash:wait_for_resume(splash.args.slybot_actions_source)
splash:set_result_content_type("text/html")
return splash.html()
end
"""
JS_SOURCE = """
function main(splash) {
var events = (%s);
try{
__slybot__performEvents(events, function(){
splash.resume();
});
}catch(e){
splash.error(e);
}
}
"""
def filter_for_url(url):
def _filter(page_action):
accept = page_action.get('accept')
reject = page_action.get('reject')
if reject and re.search(reject, url):
return False
if accept and not re.search(accept, url):
return False
return True
return _filter
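# Illustrative (hypothetical page action): given
#   action = {'accept': r'/product/', 'reject': r'\.pdf$'}
# filter_for_url('http://example.com/product/1')(action) is True, while a URL
# ending in '.pdf' is rejected before its events are passed to the Lua script.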
class PageActionsMiddleware(object):
def process_request(self, request, spider):
splash_options = request.meta.get('splash', None)
if not splash_options: # Already processed or JS disabled
return
splash_args = splash_options.get('args', {})
events = spider.page_actions
url = splash_args['url']
events = list(filter(filter_for_url(url), events))
if len(events):
splash_options['endpoint'] = 'execute'
splash_args.update({
"lua_source": LUA_SOURCE,
"slybot_actions_source": (JS_SOURCE % json.dumps(events)),
})
__all__ = ['PageActionsMiddleware']
|
robomimic/algo/td3_bc.py | akolobov/robomimic | 107 | 12623871 | """
Implementation of TD3-BC.
Based on https://github.com/sfujim/TD3_BC
(Paper - https://arxiv.org/abs/2106.06860).
Note that several parts are exactly the same as the BCQ implementation,
such as @_create_critics, @process_batch_for_training, and
@_train_critic_on_batch. They are replicated here (instead of subclassing
from the BCQ algo class) to be explicit and have implementation details
self-contained in this file.
"""
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import robomimic.models.obs_nets as ObsNets
import robomimic.models.policy_nets as PolicyNets
import robomimic.models.value_nets as ValueNets
import robomimic.models.vae_nets as VAENets
import robomimic.utils.tensor_utils as TensorUtils
import robomimic.utils.torch_utils as TorchUtils
import robomimic.utils.obs_utils as ObsUtils
import robomimic.utils.loss_utils as LossUtils
from robomimic.algo import register_algo_factory_func, PolicyAlgo, ValueAlgo
@register_algo_factory_func("td3_bc")
def algo_config_to_class(algo_config):
"""
Maps algo config to the TD3_BC algo class to instantiate, along with additional algo kwargs.
Args:
algo_config (Config instance): algo config
Returns:
algo_class: subclass of Algo
algo_kwargs (dict): dictionary of additional kwargs to pass to algorithm
"""
# only one variant of TD3_BC for now
return TD3_BC, {}
class TD3_BC(PolicyAlgo, ValueAlgo):
"""
Default TD3_BC training, based on https://arxiv.org/abs/2106.06860 and
https://github.com/sfujim/TD3_BC.
"""
def __init__(self, **kwargs):
PolicyAlgo.__init__(self, **kwargs)
        # save the discount factor - it may be overridden later
self.set_discount(self.algo_config.discount)
# initialize actor update counter. This is used to train the actor at a lower freq than critic
self.actor_update_counter = 0
def _create_networks(self):
"""
Creates networks and places them into @self.nets.
"""
self.nets = nn.ModuleDict()
self._create_critics()
self._create_actor()
# sync target networks at beginning of training
with torch.no_grad():
for critic_ind in range(len(self.nets["critic"])):
TorchUtils.hard_update(
source=self.nets["critic"][critic_ind],
target=self.nets["critic_target"][critic_ind],
)
TorchUtils.hard_update(
source=self.nets["actor"],
target=self.nets["actor_target"],
)
self.nets = self.nets.float().to(self.device)
def _create_critics(self):
"""
Called in @_create_networks to make critic networks.
Exactly the same as BCQ.
"""
critic_class = ValueNets.ActionValueNetwork
critic_args = dict(
obs_shapes=self.obs_shapes,
ac_dim=self.ac_dim,
mlp_layer_dims=self.algo_config.critic.layer_dims,
value_bounds=self.algo_config.critic.value_bounds,
goal_shapes=self.goal_shapes,
encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder),
)
# Q network ensemble and target ensemble
self.nets["critic"] = nn.ModuleList()
self.nets["critic_target"] = nn.ModuleList()
for _ in range(self.algo_config.critic.ensemble.n):
critic = critic_class(**critic_args)
self.nets["critic"].append(critic)
critic_target = critic_class(**critic_args)
self.nets["critic_target"].append(critic_target)
def _create_actor(self):
"""
Called in @_create_networks to make actor network.
"""
actor_class = PolicyNets.ActorNetwork
actor_args = dict(
obs_shapes=self.obs_shapes,
goal_shapes=self.goal_shapes,
ac_dim=self.ac_dim,
mlp_layer_dims=self.algo_config.actor.layer_dims,
encoder_kwargs=ObsUtils.obs_encoder_kwargs_from_config(self.obs_config.encoder),
)
self.nets["actor"] = actor_class(**actor_args)
self.nets["actor_target"] = actor_class(**actor_args)
def _check_epoch(self, net_name, epoch):
"""
Helper function to check whether backprop should happen this epoch.
Args:
net_name (str): name of network in @self.nets and @self.optim_params
epoch (int): epoch number
"""
epoch_start_check = (self.optim_params[net_name]["start_epoch"] == -1) or (epoch >= self.optim_params[net_name]["start_epoch"])
epoch_end_check = (self.optim_params[net_name]["end_epoch"] == -1) or (epoch < self.optim_params[net_name]["end_epoch"])
return (epoch_start_check and epoch_end_check)
def set_discount(self, discount):
"""
Useful function to modify discount factor if necessary (e.g. for n-step returns).
"""
self.discount = discount
def process_batch_for_training(self, batch):
"""
Processes input batch from a data loader to filter out
relevant information and prepare the batch for training.
Exactly the same as BCQ.
Args:
batch (dict): dictionary with torch.Tensors sampled
from a data loader
Returns:
input_batch (dict): processed and filtered batch that
will be used for training
"""
input_batch = dict()
# n-step returns (default is 1)
n_step = self.algo_config.n_step
assert batch["actions"].shape[1] >= n_step
# remove temporal batches for all
input_batch["obs"] = {k: batch["obs"][k][:, 0, :] for k in batch["obs"]}
input_batch["next_obs"] = {k: batch["next_obs"][k][:, n_step - 1, :] for k in batch["next_obs"]}
input_batch["goal_obs"] = batch.get("goal_obs", None) # goals may not be present
input_batch["actions"] = batch["actions"][:, 0, :]
# note: ensure scalar signals (rewards, done) retain last dimension of 1 to be compatible with model outputs
# single timestep reward is discounted sum of intermediate rewards in sequence
reward_seq = batch["rewards"][:, :n_step]
discounts = torch.pow(self.algo_config.discount, torch.arange(n_step).float()).unsqueeze(0)
input_batch["rewards"] = (reward_seq * discounts).sum(dim=1).unsqueeze(1)
# discount rate will be gamma^N for computing n-step returns
new_discount = (self.algo_config.discount ** n_step)
self.set_discount(new_discount)
        # consider this n-step sequence done if any intermediate dones are present
done_seq = batch["dones"][:, :n_step]
input_batch["dones"] = (done_seq.sum(dim=1) > 0).float().unsqueeze(1)
if self.algo_config.infinite_horizon:
# scale terminal rewards by 1 / (1 - gamma) for infinite horizon MDPs
done_inds = input_batch["dones"].round().long().nonzero(as_tuple=False)[:, 0]
if done_inds.shape[0] > 0:
input_batch["rewards"][done_inds] = input_batch["rewards"][done_inds] * (1. / (1. - self.discount))
return TensorUtils.to_device(TensorUtils.to_float(input_batch), self.device)
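    # Worked example of the n-step processing above (not part of the original
    # implementation; numbers chosen only for illustration): with n_step = 3,
    # discount = 0.99 and per-step rewards [1.0, 0.0, 2.0], the single-step
    # reward becomes 1.0 + 0.99 * 0.0 + 0.99**2 * 2.0 = 2.9602, and the
    # effective discount used for bootstrapping becomes 0.99**3 ~= 0.9703.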
def _train_critic_on_batch(self, batch, epoch, no_backprop=False):
"""
A modular helper function that can be overridden in case
subclasses would like to modify training behavior for the
critics.
Exactly the same as BCQ (except for removal of @action_sampler_outputs and @critic_outputs)
Args:
batch (dict): dictionary with torch.Tensors sampled
from a data loader and filtered by @process_batch_for_training
epoch (int): epoch number - required by some Algos that need
to perform staged training and early stopping
no_backprop (bool): if True, don't perform any learning updates.
Returns:
info (dict): dictionary of relevant inputs, outputs, and losses
that might be relevant for logging
"""
info = OrderedDict()
# batch variables
s_batch = batch["obs"]
a_batch = batch["actions"]
r_batch = batch["rewards"]
ns_batch = batch["next_obs"]
goal_s_batch = batch["goal_obs"]
# 1 if not done, 0 otherwise
done_mask_batch = 1. - batch["dones"]
info["done_masks"] = done_mask_batch
# Bellman backup for Q-targets
q_targets = self._get_target_values(
next_states=ns_batch,
goal_states=goal_s_batch,
rewards=r_batch,
dones=done_mask_batch,
)
info["critic/q_targets"] = q_targets
# Train all critics using this set of targets for regression
for critic_ind, critic in enumerate(self.nets["critic"]):
critic_loss = self._compute_critic_loss(
critic=critic,
states=s_batch,
actions=a_batch,
goal_states=goal_s_batch,
q_targets=q_targets,
)
info["critic/critic{}_loss".format(critic_ind + 1)] = critic_loss
if not no_backprop:
critic_grad_norms = TorchUtils.backprop_for_loss(
net=self.nets["critic"][critic_ind],
optim=self.optimizers["critic"][critic_ind],
loss=critic_loss,
max_grad_norm=self.algo_config.critic.max_gradient_norm,
)
info["critic/critic{}_grad_norms".format(critic_ind + 1)] = critic_grad_norms
return info
def _train_actor_on_batch(self, batch, epoch, no_backprop=False):
"""
A modular helper function that can be overridden in case
subclasses would like to modify training behavior for the
actor.
Args:
batch (dict): dictionary with torch.Tensors sampled
from a data loader and filtered by @process_batch_for_training
epoch (int): epoch number - required by some Algos that need
to perform staged training and early stopping
no_backprop (bool): if True, don't perform any learning updates.
Returns:
info (dict): dictionary of relevant inputs, outputs, and losses
that might be relevant for logging
"""
info = OrderedDict()
# Actor loss (update with mixture of DDPG loss and BC loss)
s_batch = batch["obs"]
a_batch = batch["actions"]
goal_s_batch = batch["goal_obs"]
# lambda mixture weight is combination of hyperparameter (alpha) and Q-value normalization
actor_actions = self.nets["actor"](s_batch, goal_s_batch)
Q_values = self.nets["critic"][0](s_batch, actor_actions, goal_s_batch)
lam = self.algo_config.alpha / Q_values.abs().mean().detach()
actor_loss = -lam * Q_values.mean() + nn.MSELoss()(actor_actions, a_batch)
info["actor/loss"] = actor_loss
if not no_backprop:
actor_grad_norms = TorchUtils.backprop_for_loss(
net=self.nets["actor"],
optim=self.optimizers["actor"],
loss=actor_loss,
)
info["actor/grad_norms"] = actor_grad_norms
return info
def _get_target_values(self, next_states, goal_states, rewards, dones):
"""
Helper function to get target values for training Q-function with TD-loss.
Args:
next_states (dict): batch of next observations
goal_states (dict): if not None, batch of goal observations
rewards (torch.Tensor): batch of rewards - should be shape (B, 1)
            dones (torch.Tensor): batch of "not done" masks (1 if not done, 0 if done), as passed in from @_train_critic_on_batch - should be shape (B, 1)
Returns:
q_targets (torch.Tensor): target Q-values to use for TD loss
"""
with torch.no_grad():
# get next actions via target actor and noise
next_target_actions = self.nets["actor_target"](next_states, goal_states)
noise = (
torch.randn_like(next_target_actions) * self.algo_config.actor.noise_std
).clamp(-self.algo_config.actor.noise_clip, self.algo_config.actor.noise_clip)
next_actions = (next_target_actions + noise).clamp(-1.0, 1.0)
# TD3 trick to combine max and min over all Q-ensemble estimates into single target estimates
all_value_targets = self.nets["critic_target"][0](next_states, next_actions, goal_states).reshape(-1, 1)
max_value_targets = all_value_targets
min_value_targets = all_value_targets
for critic_target in self.nets["critic_target"][1:]:
all_value_targets = critic_target(next_states, next_actions, goal_states).reshape(-1, 1)
max_value_targets = torch.max(max_value_targets, all_value_targets)
min_value_targets = torch.min(min_value_targets, all_value_targets)
value_targets = self.algo_config.critic.ensemble.weight * min_value_targets + \
(1. - self.algo_config.critic.ensemble.weight) * max_value_targets
q_targets = rewards + dones * self.discount * value_targets
return q_targets
def _compute_critic_loss(self, critic, states, actions, goal_states, q_targets):
"""
Helper function to compute loss between estimated Q-values and target Q-values.
Nearly the same as BCQ (return type slightly different).
Args:
critic (torch.nn.Module): critic network
states (dict): batch of observations
actions (torch.Tensor): batch of actions
goal_states (dict): if not None, batch of goal observations
q_targets (torch.Tensor): batch of target q-values for the TD loss
Returns:
critic_loss (torch.Tensor): critic loss
"""
q_estimated = critic(states, actions, goal_states)
if self.algo_config.critic.use_huber:
critic_loss = nn.SmoothL1Loss()(q_estimated, q_targets)
else:
critic_loss = nn.MSELoss()(q_estimated, q_targets)
return critic_loss
def train_on_batch(self, batch, epoch, validate=False):
"""
Training on a single batch of data.
Args:
batch (dict): dictionary with torch.Tensors sampled
from a data loader and filtered by @process_batch_for_training
epoch (int): epoch number - required by some Algos that need
to perform staged training and early stopping
validate (bool): if True, don't perform any learning updates.
Returns:
info (dict): dictionary of relevant inputs, outputs, and losses
that might be relevant for logging
"""
with TorchUtils.maybe_no_grad(no_grad=validate):
info = PolicyAlgo.train_on_batch(self, batch, epoch, validate=validate)
# Critic training
no_critic_backprop = validate or (not self._check_epoch(net_name="critic", epoch=epoch))
with TorchUtils.maybe_no_grad(no_grad=no_critic_backprop):
critic_info = self._train_critic_on_batch(
batch=batch,
epoch=epoch,
no_backprop=no_critic_backprop,
)
info.update(critic_info)
# update actor and target networks at lower frequency
if not no_critic_backprop:
# update counter only on critic training gradient steps
self.actor_update_counter += 1
do_actor_update = (self.actor_update_counter % self.algo_config.actor.update_freq == 0)
# Actor training
no_actor_backprop = validate or (not self._check_epoch(net_name="actor", epoch=epoch))
no_actor_backprop = no_actor_backprop or (not do_actor_update)
with TorchUtils.maybe_no_grad(no_grad=no_actor_backprop):
actor_info = self._train_actor_on_batch(
batch=batch,
epoch=epoch,
no_backprop=no_actor_backprop,
)
info.update(actor_info)
if not no_actor_backprop:
# to match original implementation, only update target networks on
# actor gradient steps
with torch.no_grad():
# update the target critic networks
for critic_ind in range(len(self.nets["critic"])):
TorchUtils.soft_update(
source=self.nets["critic"][critic_ind],
target=self.nets["critic_target"][critic_ind],
tau=self.algo_config.target_tau,
)
# update target actor network
TorchUtils.soft_update(
source=self.nets["actor"],
target=self.nets["actor_target"],
tau=self.algo_config.target_tau,
)
return info
def log_info(self, info):
"""
Process info dictionary from @train_on_batch to summarize
information to pass to tensorboard for logging.
Args:
info (dict): dictionary of info
Returns:
loss_log (dict): name -> summary statistic
"""
loss_log = OrderedDict()
# record current optimizer learning rates
for k in self.optimizers:
keys = [k]
optims = [self.optimizers[k]]
if k == "critic":
# account for critic having one optimizer per ensemble member
keys = ["{}{}".format(k, critic_ind) for critic_ind in range(len(self.nets["critic"]))]
optims = self.optimizers[k]
for kp, optimizer in zip(keys, optims):
for i, param_group in enumerate(optimizer.param_groups):
loss_log["Optimizer/{}{}_lr".format(kp, i)] = param_group["lr"]
# extract relevant logs for critic, and actor
loss_log["Loss"] = 0.
for loss_logger in [self._log_critic_info, self._log_actor_info]:
this_log = loss_logger(info)
if "Loss" in this_log:
# manually merge total loss
loss_log["Loss"] += this_log["Loss"]
del this_log["Loss"]
loss_log.update(this_log)
return loss_log
def _log_critic_info(self, info):
"""
Helper function to extract critic-relevant information for logging.
"""
loss_log = OrderedDict()
if "done_masks" in info:
loss_log["Critic/Done_Mask_Percentage"] = 100. * torch.mean(info["done_masks"]).item()
if "critic/q_targets" in info:
loss_log["Critic/Q_Targets"] = info["critic/q_targets"].mean().item()
loss_log["Loss"] = 0.
for critic_ind in range(len(self.nets["critic"])):
loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)] = info["critic/critic{}_loss".format(critic_ind + 1)].item()
if "critic/critic{}_grad_norms".format(critic_ind + 1) in info:
loss_log["Critic/Critic{}_Grad_Norms".format(critic_ind + 1)] = info["critic/critic{}_grad_norms".format(critic_ind + 1)]
loss_log["Loss"] += loss_log["Critic/Critic{}_Loss".format(critic_ind + 1)]
return loss_log
def _log_actor_info(self, info):
"""
Helper function to extract actor-relevant information for logging.
"""
loss_log = OrderedDict()
loss_log["Actor/Loss"] = info["actor/loss"].item()
if "actor/grad_norms" in info:
loss_log["Actor/Grad_Norms"] = info["actor/grad_norms"]
loss_log["Loss"] = loss_log["Actor/Loss"]
return loss_log
def set_train(self):
"""
        Prepare networks for training. Overrides the base class to make sure
        target networks stay in evaluation mode all the time.
"""
self.nets.train()
# target networks always in eval
for critic_ind in range(len(self.nets["critic_target"])):
self.nets["critic_target"][critic_ind].eval()
self.nets["actor_target"].eval()
def on_epoch_end(self, epoch):
"""
Called at the end of each epoch.
"""
# LR scheduling updates
for lr_sc in self.lr_schedulers["critic"]:
if lr_sc is not None:
lr_sc.step()
if self.lr_schedulers["actor"] is not None:
self.lr_schedulers["actor"].step()
def get_action(self, obs_dict, goal_dict=None):
"""
Get policy action outputs.
Args:
obs_dict (dict): current observation
goal_dict (dict): (optional) goal
Returns:
action (torch.Tensor): action tensor
"""
assert not self.nets.training
return self.nets["actor"](obs_dict=obs_dict, goal_dict=goal_dict)
def get_state_value(self, obs_dict, goal_dict=None):
"""
Get state value outputs.
Args:
obs_dict (dict): current observation
goal_dict (dict): (optional) goal
Returns:
value (torch.Tensor): value tensor
"""
assert not self.nets.training
actions = self.nets["actor"](obs_dict=obs_dict, goal_dict=goal_dict)
return self.nets["critic"][0](obs_dict, actions, goal_dict)
def get_state_action_value(self, obs_dict, actions, goal_dict=None):
"""
Get state-action value outputs.
Args:
obs_dict (dict): current observation
actions (torch.Tensor): action
goal_dict (dict): (optional) goal
Returns:
value (torch.Tensor): value tensor
"""
assert not self.nets.training
return self.nets["critic"][0](obs_dict, actions, goal_dict)
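# --- Standalone sketch of the TD3+BC actor objective (not part of robomimic) ---
# This mirrors the lambda-weighted loss used in _train_actor_on_batch above with
# plain tensors so the normalization can be inspected in isolation; the alpha
# value and tensor shapes below are arbitrary assumptions for illustration.
if __name__ == "__main__":
    torch.manual_seed(0)
    alpha = 2.5
    q_values = torch.randn(8, 1) * 10.0      # stand-in critic outputs
    actor_actions = torch.randn(8, 3)        # stand-in policy actions
    dataset_actions = torch.randn(8, 3)      # stand-in demonstration actions
    lam = alpha / q_values.abs().mean().detach()
    actor_loss = -lam * q_values.mean() + nn.MSELoss()(actor_actions, dataset_actions)
    print("lambda:", lam.item(), "actor loss:", actor_loss.item())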
|
clearly/utils/colors.py | lowercase00/clearly | 344 | 12623890 | from typing import List, TypeVar
C = TypeVar('C') # how to constrain to only the closure below?
def color_factory(color_code: str) -> C:
def apply(text: str, format_spec: str = '') -> str:
return color_code + format(text, format_spec) + '\033[0m'
def mix(*colors: C) -> List[C]:
return [color_factory(c.color_code + color_code) for c in colors]
apply.mix, apply.color_code = mix, color_code
return apply
class Colors:
BLUE = color_factory('\033[94m')
GREEN = color_factory('\033[92m')
YELLOW = color_factory('\033[93m')
RED = color_factory('\033[91m')
MAGENTA = color_factory('\033[95m')
CYAN = color_factory('\033[96m')
ORANGE = color_factory('\033[38;5;208m')
BOLD = color_factory('\033[1m')
DIM = color_factory('\033[2m')
BLUE_BOLD, BLUE_DIM = BLUE.mix(BOLD, DIM)
GREEN_BOLD, GREEN_DIM = GREEN.mix(BOLD, DIM)
YELLOW_BOLD, YELLOW_DIM = YELLOW.mix(BOLD, DIM)
RED_BOLD, RED_DIM = RED.mix(BOLD, DIM)
MAGENTA_BOLD, MAGENTA_DIM = MAGENTA.mix(BOLD, DIM)
CYAN_BOLD, CYAN_DIM = CYAN.mix(BOLD, DIM)
ORANGE_BOLD, ORANGE_DIM = ORANGE.mix(BOLD, DIM)
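# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of applying the color callables above; the optional second
# argument is passed straight through to format(), as in color_factory().
if __name__ == '__main__':
    print(Colors.GREEN('ok'))
    print(Colors.RED_BOLD('failed', '>10'))   # right-aligned in a 10-char field
    print(Colors.CYAN_DIM(3.14159, '.2f'))    # '3.14' wrapped in dim cyan codes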
|
concepts/listSlicingListOfLists.py | sixtysecondrevit/dynamoPython | 114 | 12623895 | """
PYTHON LIST SLICING: LIST OF LISTS
"""
__author__ = '<NAME> - <EMAIL>'
__twitter__ = '@solamour'
__version__ = '1.0.0'
# SYNTAX: [ startCut : endCut ]
# startCut = This is the first item included in the Slice
# endCut = This is the index where the Slice stops - the item at this index is not included
# NOTES:
# All parameters have to be integers
# A colon is required to demarcate slicing
# The first value is the start point. If left empty, it
# starts at the beginning
# The second value is the end point. If left empty, it takes
# the entire list from the chosen start point
# Using -1 as the end will stop the Slice at the second to last
# item in a list (the final item is excluded). Using -X counts
# backwards from the end of the list (i.e -2 finishes the Slice
# at the third to last item)
# A slice of list[ : ] will clone the list
# A multi-tiered numbers list
numbers = [ [ 0, 1, 2, 3 ], [ 4, 5, 6, 7, 8 ] ]
chosenRangeInSublists = [ n[ 1 : -1 ] for n in numbers ] # Using
# a List Comprehension to get all items from the 1st index to
# the second to last index in all sublists
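# For the 'numbers' list above this yields [ [ 1, 2 ], [ 5, 6, 7 ] ]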
# The out port using our List Slices
OUT = chosenRangeInSublists
|
Tests/SSL/test_ssl_containers.py | microsoft/InnerEye-DeepLearning | 402 | 12623947 | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
from pathlib import Path
from unittest import mock
import math
import numpy as np
import pandas as pd
import pytest
import torch
from pl_bolts.models.self_supervised.resnets import ResNet
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.nn import Module
from torch.optim.lr_scheduler import _LRScheduler
from typing import Dict
from InnerEye.Common import fixed_paths
from InnerEye.Common.common_util import is_windows
from InnerEye.Common.fixed_paths import repository_root_directory
from InnerEye.Common.fixed_paths_for_tests import full_ml_test_data_path
from InnerEye.Common.output_directories import OutputFolderForTests
from InnerEye.ML.SSL.lightning_containers.ssl_container import EncoderName, SSLDatasetName
from InnerEye.ML.SSL.lightning_modules.byol.byol_module import BYOLInnerEye
from InnerEye.ML.SSL.lightning_modules.simclr_module import SimCLRInnerEye
from InnerEye.ML.SSL.lightning_modules.ssl_classifier_module import SSLClassifier
from InnerEye.ML.SSL.lightning_modules.ssl_online_evaluator import SSLOnlineEvaluatorInnerEye
from InnerEye.ML.SSL.utils import SSLDataModuleType, SSLTrainingType
from InnerEye.ML.common import BEST_CHECKPOINT_FILE_NAME_WITH_SUFFIX
from InnerEye.ML.configs.ssl.CXR_SSL_configs import CXRImageClassifier
from InnerEye.ML.runner import Runner
from Tests.ML.configs.lightning_test_containers import DummyContainerWithModel
from Tests.ML.utils.test_io_util import write_test_dicom
path_to_test_dataset = full_ml_test_data_path("cxr_test_dataset")
def _create_test_cxr_data(path_to_test_dataset: Path) -> None:
"""
    Creates a fake dataset dataframe and DICOM images mimicking the expected structure of the
    NIHCXR and RSNAKaggleCXR datasets.
:param path_to_test_dataset: folder to which we want to save the mock data.
"""
if path_to_test_dataset.exists():
return
path_to_test_dataset.mkdir(exist_ok=True)
df = pd.DataFrame({"Image Index": np.repeat("1.dcm", 200)})
df.to_csv(path_to_test_dataset / "Data_Entry_2017.csv", index=False)
df = pd.DataFrame({"subject": np.repeat("1", 300),
"label": np.random.RandomState(42).binomial(n=1, p=0.2, size=300)})
df.to_csv(path_to_test_dataset / "dataset.csv", index=False)
write_test_dicom(array=np.ones([256, 256], dtype="uint16"), path=path_to_test_dataset / "1.dcm")
def default_runner() -> Runner:
"""
Create an InnerEye Runner object with the default settings, pointing to the repository root and
default settings files.
"""
return Runner(project_root=repository_root_directory(),
yaml_config_file=fixed_paths.SETTINGS_YAML_FILE)
common_test_args = ["", "--is_debug_model=True", "--num_epochs=1", "--ssl_training_batch_size=10",
"--linear_head_batch_size=5",
"--num_workers=0"]
def _compare_stored_metrics(runner: Runner, expected_metrics: Dict[str, float], abs: float = 1e-5) -> None:
"""
Checks if the StoringLogger in the given runner holds all the expected metrics as results of training
epoch 0, up to a given absolute precision.
    :param runner: The InnerEye runner.
:param expected_metrics: A dictionary with all metrics that are expected to be present.
"""
assert runner.ml_runner is not None
assert runner.ml_runner.storing_logger is not None
print(f"Actual metrics in epoch 0: {runner.ml_runner.storing_logger.results_per_epoch[0]}")
print(f"Expected metrics: {expected_metrics}")
for metric, expected in expected_metrics.items():
actual = runner.ml_runner.storing_logger.results_per_epoch[0][metric]
if isinstance(actual, float):
if math.isnan(expected):
assert math.isnan(actual), f"Metric {metric}: Expected NaN, but got: {actual}"
else:
assert actual == pytest.approx(expected, abs=abs), f"Mismatch for metric {metric}"
else:
assert actual == expected, f"Mismatch for metric {metric}"
@pytest.mark.skipif(is_windows(), reason="Too slow on windows")
def test_innereye_ssl_container_cifar10_resnet_simclr() -> None:
"""
Tests:
- training of SSL model on cifar10 for one epoch
- checkpoint saving
- checkpoint loading and ImageClassifier module creation
- training of image classifier for one epoch.
"""
args = common_test_args + ["--model=CIFAR10SimCLR"]
runner = default_runner()
with mock.patch("sys.argv", args):
loaded_config, actual_run = runner.run()
assert loaded_config is not None
assert isinstance(loaded_config.model, SimCLRInnerEye)
assert loaded_config.encoder_output_dim == 2048
assert loaded_config.l_rate == 1e-4
assert loaded_config.num_epochs == 1
assert loaded_config.recovery_checkpoint_save_interval == 200
assert loaded_config.ssl_training_type == SSLTrainingType.SimCLR
assert loaded_config.online_eval.num_classes == 10
assert loaded_config.online_eval.dataset == SSLDatasetName.CIFAR10.value
assert loaded_config.ssl_training_dataset_name == SSLDatasetName.CIFAR10
assert not loaded_config.use_balanced_binary_loss_for_linear_head
assert isinstance(loaded_config.model.encoder.cnn_model, ResNet)
# Check the metrics that were recorded during training
expected_metrics = {
'simclr/train/loss': 3.423144578933716,
'simclr/learning_rate': 0.0,
'ssl_online_evaluator/train/loss': 2.6143882274627686,
'ssl_online_evaluator/train/online_AccuracyAtThreshold05': 0.0,
'epoch_started': 0.0,
'simclr/val/loss': 2.886892795562744,
'ssl_online_evaluator/val/loss': 2.2472469806671143,
'ssl_online_evaluator/val/AccuracyAtThreshold05': 0.20000000298023224
}
_compare_stored_metrics(runner, expected_metrics, abs=5e-5)
# Check that the checkpoint contains both the optimizer for the embedding and for the linear head
checkpoint_path = loaded_config.outputs_folder / "checkpoints" / "best_checkpoint.ckpt"
checkpoint = torch.load(checkpoint_path)
assert len(checkpoint["optimizer_states"]) == 1
assert len(checkpoint["lr_schedulers"]) == 1
assert "callbacks" in checkpoint
callback_name = SSLOnlineEvaluatorInnerEye.__name__
assert callback_name in checkpoint["callbacks"]
callback_state = checkpoint["callbacks"][callback_name]
assert SSLOnlineEvaluatorInnerEye.OPTIMIZER_STATE_NAME in callback_state
assert SSLOnlineEvaluatorInnerEye.EVALUATOR_STATE_NAME in callback_state
# Now run the actual SSL classifier off the stored checkpoint
args = common_test_args + ["--model=SSLClassifierCIFAR", f"--local_ssl_weights_path={checkpoint_path}"]
with mock.patch("sys.argv", args):
loaded_config, actual_run = default_runner().run()
assert loaded_config is not None
assert isinstance(loaded_config.model, SSLClassifier)
assert loaded_config.model.class_weights is None
assert loaded_config.model.num_classes == 10
@pytest.mark.skipif(is_windows(), reason="Too slow on windows")
def test_load_innereye_ssl_container_cifar10_cifar100_resnet_byol() -> None:
"""
    Tests that the parameters fed into the BYOL model and online evaluator are
    indeed the ones we passed through our command line args
"""
args = common_test_args + ["--model=CIFAR10CIFAR100BYOL"]
runner = default_runner()
with mock.patch("sys.argv", args):
runner.parse_and_load_model()
loaded_config = runner.lightning_container
assert loaded_config is not None
assert loaded_config.linear_head_dataset_name == SSLDatasetName.CIFAR100
assert loaded_config.ssl_training_dataset_name == SSLDatasetName.CIFAR10
assert loaded_config.ssl_training_type == SSLTrainingType.BYOL
@pytest.mark.skipif(is_windows(), reason="Too slow on windows")
def test_innereye_ssl_container_rsna() -> None:
"""
Test if we can get the config loader to load a Lightning container model, and then train locally.
"""
runner = default_runner()
_create_test_cxr_data(path_to_test_dataset)
# Test training of SSL model
args = common_test_args + ["--model=NIH_RSNA_BYOL",
f"--local_dataset={str(path_to_test_dataset)}",
f"--extra_local_dataset_paths={str(path_to_test_dataset)}",
"--use_balanced_binary_loss_for_linear_head=True",
f"--ssl_encoder={EncoderName.densenet121.value}"]
with mock.patch("sys.argv", args):
loaded_config, actual_run = runner.run()
assert loaded_config is not None
assert isinstance(loaded_config.model, BYOLInnerEye)
assert loaded_config.online_eval.dataset == SSLDatasetName.RSNAKaggleCXR.value
assert loaded_config.online_eval.num_classes == 2
assert loaded_config.ssl_training_dataset_name == SSLDatasetName.NIHCXR
assert loaded_config.ssl_training_type == SSLTrainingType.BYOL
assert loaded_config.encoder_output_dim == 1024 # DenseNet output size
# Check model params
assert isinstance(loaded_config.model.hparams, Dict)
assert loaded_config.model.hparams["batch_size"] == 10
assert loaded_config.model.hparams["use_7x7_first_conv_in_resnet"]
assert loaded_config.model.hparams["encoder_name"] == EncoderName.densenet121.value
assert loaded_config.model.hparams["learning_rate"] == 1e-4
assert loaded_config.model.hparams["num_samples"] == 180
# Check some augmentation params
assert loaded_config.datamodule_args[
SSLDataModuleType.ENCODER].augmentation_params.preprocess.center_crop_size == 224
assert loaded_config.datamodule_args[SSLDataModuleType.ENCODER].augmentation_params.augmentation.use_random_crop
assert loaded_config.datamodule_args[SSLDataModuleType.ENCODER].augmentation_params.augmentation.use_random_affine
expected_metrics = {
'byol/train/loss': 0.00401744619011879,
'byol/tau': 0.9899999499320984,
'byol/learning_rate/0/0': 0.0,
'byol/learning_rate/0/1': 0.0,
'ssl_online_evaluator/train/loss': 0.685592532157898,
'ssl_online_evaluator/train/online_AreaUnderRocCurve': 0.5,
'ssl_online_evaluator/train/online_AreaUnderPRCurve': 0.699999988079071,
'ssl_online_evaluator/train/online_AccuracyAtThreshold05': 0.4000000059604645,
'epoch_started': 0.0,
'byol/val/loss': -0.07644838094711304,
'ssl_online_evaluator/val/loss': 0.6965796947479248,
'ssl_online_evaluator/val/AreaUnderRocCurve': math.nan,
'ssl_online_evaluator/val/AreaUnderPRCurve': math.nan,
'ssl_online_evaluator/val/AccuracyAtThreshold05': 0.0
}
_compare_stored_metrics(runner, expected_metrics)
# Check that we are able to load the checkpoint and create classifier model
checkpoint_path = loaded_config.checkpoint_folder / BEST_CHECKPOINT_FILE_NAME_WITH_SUFFIX
args = common_test_args + ["--model=CXRImageClassifier",
f"--local_dataset={str(path_to_test_dataset)}",
"--use_balanced_binary_loss_for_linear_head=True",
f"--local_ssl_weights_path={checkpoint_path}"]
with mock.patch("sys.argv", args):
loaded_config, actual_run = runner.run()
assert loaded_config is not None
assert isinstance(loaded_config, CXRImageClassifier)
assert loaded_config.model.freeze_encoder
assert torch.isclose(loaded_config.model.class_weights, torch.tensor([0.21, 0.79]), atol=1e-6).all() # type: ignore
assert loaded_config.model.num_classes == 2
def test_simclr_lr_scheduler() -> None:
"""
Test if the LR scheduler has the expected warmup behaviour.
"""
num_samples = 100
batch_size = 20
gpus = 1
max_epochs = 10
warmup_epochs = 2
model = SimCLRInnerEye(encoder_name="resnet18", dataset_name="CIFAR10",
gpus=gpus, num_samples=num_samples, batch_size=batch_size,
max_epochs=max_epochs, warmup_epochs=warmup_epochs)
    # The LR scheduler used here works per step. The scheduler needs the number of training iterations per epoch - in this example that's 100 / (20 * 1) = 5
train_iters_per_epoch = num_samples / (batch_size * gpus)
assert model.train_iters_per_epoch == train_iters_per_epoch
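    # For these illustrative numbers that means 10 * 5 = 50 scheduler steps in
    # total, of which the first 2 * 5 = 10 form the warmup ramp, so the peak
    # learning rate is expected at step index 9 (checked further below).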
# Mock a second optimizer that is normally created in the SSL container
linear_head_optimizer = mock.MagicMock()
model.online_eval_optimizer = linear_head_optimizer
# Retrieve the scheduler and iterate it
_, scheduler_list = model.configure_optimizers()
assert isinstance(scheduler_list[0], dict)
assert scheduler_list[0]["interval"] == "step"
scheduler = scheduler_list[0]["scheduler"]
assert isinstance(scheduler, _LRScheduler)
lr = []
for i in range(0, int(max_epochs * train_iters_per_epoch)):
scheduler.step()
lr.append(scheduler.get_last_lr()[0])
# The highest learning rate is expected after the warmup epochs
highest_lr = np.argmax(lr)
assert highest_lr == int(warmup_epochs * train_iters_per_epoch - 1)
for i in range(0, highest_lr):
assert lr[i] < lr[i + 1], f"Not strictly monotonically increasing at index {i}"
for i in range(highest_lr, len(lr) - 1):
assert lr[i] > lr[i + 1], f"Not strictly monotonically decreasing at index {i}"
def test_online_evaluator_recovery(test_output_dirs: OutputFolderForTests) -> None:
"""
Test checkpoint recovery for the online evaluator in an end-to-end training run.
"""
container = DummyContainerWithModel()
model = container.create_model()
data = container.get_data_module()
checkpoint_folder = test_output_dirs.create_file_or_folder_path("checkpoints")
checkpoint_folder.mkdir(exist_ok=True)
checkpoints = ModelCheckpoint(dirpath=checkpoint_folder,
every_n_val_epochs=1,
save_last=True)
# Create a first callback, that will be used in training.
callback1 = SSLOnlineEvaluatorInnerEye(class_weights=None,
z_dim=1,
num_classes=2,
dataset="foo",
drop_p=0.2,
learning_rate=1e-5)
    # To simplify the test setup, do not run any actual training (this would
    # require a complicated dataset with a combined loader)
with mock.patch(
"InnerEye.ML.SSL.lightning_modules.ssl_online_evaluator.SSLOnlineEvaluatorInnerEye.on_train_batch_end",
return_value=None) as mock_train:
with mock.patch(
"InnerEye.ML.SSL.lightning_modules.ssl_online_evaluator.SSLOnlineEvaluatorInnerEye"
".on_validation_batch_end",
return_value=None):
trainer = Trainer(default_root_dir=str(test_output_dirs.root_dir),
callbacks=[checkpoints, callback1],
max_epochs=10)
trainer.fit(model, datamodule=data)
# Check that the callback was actually used
mock_train.assert_called()
# Now read out the parameters of the callback.
# We will then run a second training job, with a new callback object, that will be initialized randomly,
# and should have different parameters initially. After checkpoint recovery, it should have exactly the
# same parameters as the first callback.
parameters1 = list(callback1.evaluator.parameters())
callback2 = SSLOnlineEvaluatorInnerEye(class_weights=None,
z_dim=1,
num_classes=2,
dataset="foo",
drop_p=0.2,
learning_rate=1e-5)
# Ensure that the parameters are really different initially
parameters2_before_training = list(callback2.evaluator.parameters())
assert not torch.allclose(parameters2_before_training[0], parameters1[0])
# Start a second training run with recovery
last_checkpoint = checkpoints.last_model_path
trainer2 = Trainer(default_root_dir=str(test_output_dirs.root_dir),
callbacks=[callback2],
max_epochs=20,
resume_from_checkpoint=last_checkpoint)
trainer2.fit(model, datamodule=data)
# Read the parameters and check if they are the same as what was stored in the first callback.
parameters2_after_training = list(callback2.evaluator.parameters())
assert torch.allclose(parameters2_after_training[0], parameters1[0])
# It's somewhat obsolete, but we can now check that the checkpoint file really contained the optimizer and weights
checkpoint = torch.load(last_checkpoint)
assert "callbacks" in checkpoint
callback_name = SSLOnlineEvaluatorInnerEye.__name__
assert callback_name in checkpoint["callbacks"]
callback_state = checkpoint["callbacks"][callback_name]
assert SSLOnlineEvaluatorInnerEye.OPTIMIZER_STATE_NAME in callback_state
assert SSLOnlineEvaluatorInnerEye.EVALUATOR_STATE_NAME in callback_state
@pytest.mark.gpu
def test_online_evaluator_not_distributed() -> None:
"""
    Check if the online evaluator uses the DDP flag correctly when not running distributed
"""
with mock.patch("InnerEye.ML.SSL.lightning_modules.ssl_online_evaluator.DistributedDataParallel") as mock_ddp:
callback = SSLOnlineEvaluatorInnerEye(class_weights=None,
z_dim=1,
num_classes=2,
dataset="foo",
drop_p=0.2,
learning_rate=1e-5)
mock_ddp.assert_not_called()
# Standard trainer without DDP
trainer = Trainer()
# Test the flag that the internal logic of on_pretrain_routine_start uses
assert hasattr(trainer, "_accelerator_connector")
assert not trainer._accelerator_connector.is_distributed
mock_module = mock.MagicMock(device=torch.device("cpu"))
callback.on_pretrain_routine_start(trainer, mock_module)
assert isinstance(callback.evaluator, Module)
mock_ddp.assert_not_called()
@pytest.mark.gpu
def test_online_evaluator_distributed() -> None:
"""
Check if the online evaluator uses the DDP flag correctly when running distributed.
"""
mock_ddp_result = "mock_ddp_result"
mock_sync_result = "mock_sync_result"
with mock.patch("InnerEye.ML.SSL.lightning_modules.ssl_online_evaluator.SyncBatchNorm.convert_sync_batchnorm",
return_value=mock_sync_result) as mock_sync:
with mock.patch("InnerEye.ML.SSL.lightning_modules.ssl_online_evaluator.DistributedDataParallel",
return_value=mock_ddp_result) as mock_ddp:
callback = SSLOnlineEvaluatorInnerEye(class_weights=None,
z_dim=1,
num_classes=2,
dataset="foo",
drop_p=0.2,
learning_rate=1e-5)
# Trainer with DDP
device = torch.device("cuda:0")
mock_module = mock.MagicMock(device=device)
trainer = Trainer(accelerator="ddp", gpus=2)
# Test the two flags that the internal logic of on_pretrain_routine_start uses
assert trainer._accelerator_connector.is_distributed
assert trainer._accelerator_connector.use_ddp
original_evaluator = callback.evaluator
callback.on_pretrain_routine_start(trainer, mock_module)
# Check that SyncBatchNorm has been turned on
mock_sync.assert_called_once_with(original_evaluator)
# Check that the evaluator has been turned into a DDP object
# We still need to mock DDP here because the constructor relies on having a process group available
mock_ddp.assert_called_once_with(mock_sync_result, device_ids=[device])
assert callback.evaluator == mock_ddp_result
|
torch/csrc/jit/tensorexpr/codegen_external.py | xiaohanhuang/pytorch | 183 | 12623966 | #!/usr/bin/env python3
import argparse
from tools.codegen.gen import parse_native_yaml, FileManager
import tools.codegen.model as model
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())
def deindent(code: str) -> str:
lines = code.split('\n')
min_leading_spaces = min(map(num_leading_spaces, lines))
lines = [line[min_leading_spaces:] for line in lines]
return '\n'.join(lines)
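# Illustrative example (not part of the original script): deindent("    a\n      b")
# returns "a\n  b", i.e. the smallest common leading indentation (4 spaces here)
# is stripped from every line.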
def gen_external(native_functions_path, external_path):
native_functions = parse_native_yaml(native_functions_path)
func_decls = []
func_registrations = []
for func in native_functions:
schema = func.func
name = schema.name.name.base
args = schema.arguments
# Only supports extern calls for functions with out variants
if not schema.is_out_fn():
continue
# Doesn't currently support functions with more than one out parameter
if len(args.out) > 1:
continue
# Doesn't currently support kwarg arguments
if len(args.pre_tensor_options_kwarg_only) > 0 or len(args.post_tensor_options_kwarg_only) > 0:
continue
self_arg = [args.self_arg.argument] if args.self_arg is not None else []
args = list(args.pre_self_positional) + self_arg + list(args.post_self_positional)
tensor_args = [arg for arg in args if isinstance(arg.type, model.BaseType) and arg.type.name == model.BaseTy.Tensor]
if len(tensor_args) != len(args):
continue
arg_names = [None] * len(args)
tensor_decls = []
for idx, arg in enumerate(tensor_args):
s = f"const at::Tensor& {arg.name} = tensors[{idx + 1}];"
tensor_decls.append(s)
arg_names[idx] = arg.name
nl = '\n'
# print(tensor_decls, name, arg_names)
func_decl = f"""\
void nnc_aten_{name}(
int64_t bufs_num,
void** buf_data,
int64_t* buf_ranks,
int64_t* buf_dims,
int64_t* buf_strides,
int8_t* buf_dtypes,
int64_t args_num,
int64_t* extra_args) {{
std::vector<at::Tensor> tensors =
constructTensors(bufs_num, buf_data, buf_ranks, buf_dims, buf_strides, buf_dtypes);
at::Tensor& r = tensors[0];
{nl.join(tensor_decls)}
try {{
at::{name}_out({', '.join(['r'] + arg_names)});
}} catch (...) {{
}}
}}"""
func_registration = f"""\
const static RegisterNNCExternalFunction nnc_{name}(
"nnc_aten_{name}",
nnc_aten_{name});"""
func_decls.append(func_decl)
func_registrations.append(func_registration)
fm = FileManager(install_dir='.', template_dir='.', dry_run=False)
fm.write_with_template('external_functions_codegen.cpp', external_path,
lambda: {'external_registrations': func_registrations, 'external_functions': func_decls})
def main() -> None:
parser = argparse.ArgumentParser(
description='Generate annotated_fn_args script')
parser.add_argument('--native_functions',
help='path to native_functions.yaml',
default='../../../../aten/src/ATen/native/native_functions.yaml')
parser.add_argument('--template_path',
help='path to external_functions_codegen_template.cpp',
default='../../../../tools/jit/templates/external_functions_codegen_template.cpp')
args = parser.parse_args()
gen_external(args.native_functions, args.template_path)
if __name__ == '__main__':
main()
|
Contents/Libraries/Shared/guessit/test/test_options.py | jippo015/Sub-Zero.bundle | 1,553 | 12623983 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name, pointless-string-statement
import os
import pytest
from ..options import get_config_file_locations, merge_configurations, load_config_file, ConfigurationException, \
load_config
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def test_config_locations():
homedir = '/root'
cwd = '/root/cwd'
locations = get_config_file_locations(homedir, cwd, True)
assert len(locations) == 9
assert '/root/.guessit/options.json' in locations
assert '/root/.guessit/options.yml' in locations
assert '/root/.guessit/options.yaml' in locations
assert '/root/.config/guessit/options.json' in locations
assert '/root/.config/guessit/options.yml' in locations
assert '/root/.config/guessit/options.yaml' in locations
assert '/root/cwd/guessit.options.json' in locations
assert '/root/cwd/guessit.options.yml' in locations
assert '/root/cwd/guessit.options.yaml' in locations
def test_merge_configurations():
c1 = {'param1': True, 'param2': True, 'param3': False}
c2 = {'param1': False, 'param2': True, 'param3': False}
c3 = {'param1': False, 'param2': True, 'param3': False}
merged = merge_configurations(c1, c2, c3)
assert not merged['param1']
assert merged['param2']
assert not merged['param3']
merged = merge_configurations(c3, c2, c1)
assert merged['param1']
assert merged['param2']
assert not merged['param3']
def test_merge_configurations_lists():
c1 = {'param1': [1], 'param2': True, 'param3': False}
c2 = {'param1': [2], 'param2': True, 'param3': False}
c3 = {'param1': [3], 'param2': True, 'param3': False}
merged = merge_configurations(c1, c2, c3)
assert merged['param1'] == [1, 2, 3]
assert merged['param2']
assert not merged['param3']
merged = merge_configurations(c3, c2, c1)
assert merged['param1'] == [3, 2, 1]
assert merged['param2']
assert not merged['param3']
def test_merge_configurations_pristine_all():
c1 = {'param1': [1], 'param2': True, 'param3': False}
c2 = {'param1': [2], 'param2': True, 'param3': False, 'pristine': True}
c3 = {'param1': [3], 'param2': True, 'param3': False}
merged = merge_configurations(c1, c2, c3)
assert merged['param1'] == [2, 3]
assert merged['param2']
assert not merged['param3']
merged = merge_configurations(c3, c2, c1)
assert merged['param1'] == [2, 1]
assert merged['param2']
assert not merged['param3']
def test_merge_configurations_pristine_properties():
c1 = {'param1': [1], 'param2': False, 'param3': True}
c2 = {'param1': [2], 'param2': True, 'param3': False, 'pristine': ['param2', 'param3']}
c3 = {'param1': [3], 'param2': True, 'param3': False}
merged = merge_configurations(c1, c2, c3)
assert merged['param1'] == [1, 2, 3]
assert merged['param2']
assert not merged['param3']
def test_merge_configurations_pristine_properties2():
c1 = {'param1': [1], 'param2': False, 'param3': True}
c2 = {'param1': [2], 'param2': True, 'param3': False, 'pristine': ['param1', 'param2', 'param3']}
c3 = {'param1': [3], 'param2': True, 'param3': False}
merged = merge_configurations(c1, c2, c3)
assert merged['param1'] == [2, 3]
assert merged['param2']
assert not merged['param3']
def test_load_config_file():
json_config = load_config_file(os.path.join(__location__, 'config', 'test.json'))
yml_config = load_config_file(os.path.join(__location__, 'config', 'test.yml'))
yaml_config = load_config_file(os.path.join(__location__, 'config', 'test.yaml'))
assert json_config['expected_title'] == ['The 100', 'OSS 117']
assert yml_config['expected_title'] == ['The 100', 'OSS 117']
assert yaml_config['expected_title'] == ['The 100', 'OSS 117']
assert json_config['yaml'] is False
assert yml_config['yaml'] is True
assert yaml_config['yaml'] is True
with pytest.raises(ConfigurationException) as excinfo:
load_config_file(os.path.join(__location__, 'config', 'dummy.txt'))
assert excinfo.match('Configuration file extension is not supported for ".*?dummy.txt" file\\.')
def test_load_config():
config = load_config({'no_embedded_config': True, 'param1': 'test',
'config': [os.path.join(__location__, 'config', 'test.yml')]})
assert config['param1'] == 'test'
assert config['expected_title'] == ['The 100', 'OSS 117']
assert config['yaml'] is True
config = load_config({'no_embedded_config': True, 'param1': 'test'})
assert config['param1'] == 'test'
assert 'expected_title' not in config
assert 'yaml' not in config
config = load_config({'no_embedded_config': True, 'param1': 'test', 'config': ['false']})
assert config['param1'] == 'test'
assert 'expected_title' not in config
assert 'yaml' not in config
|
vel/rl/models/backbone/nature_cnn_rnn.py | galatolofederico/vel | 273 | 12623990 | from vel.api import RnnLinearBackboneModel, ModelFactory
from vel.rl.models.backbone.nature_cnn import NatureCnn
from vel.modules.rnn_cell import RnnCell
class NatureCnnRnnBackbone(RnnLinearBackboneModel):
"""
    Long Short-Term Memory RNN cell together with DeepMind-style 'Nature' CNN preprocessing
"""
def __init__(self, input_width: int, input_height: int, input_channels: int, rnn_type='lstm',
cnn_output_dim: int=512, hidden_units: int=128):
super().__init__()
self.hidden_units = hidden_units
self.nature_cnn = NatureCnn(input_width, input_height, input_channels, cnn_output_dim)
self.rnn_cell = RnnCell(input_size=self.nature_cnn.output_dim, hidden_size=self.hidden_units, rnn_type=rnn_type)
def reset_weights(self):
""" Call proper initializers for the weights """
self.nature_cnn.reset_weights()
self.rnn_cell.reset_weights()
@property
def output_dim(self) -> int:
return self.rnn_cell.output_dim
@property
def state_dim(self) -> int:
""" Initial state of the network """
return self.rnn_cell.state_dim
def forward(self, input_image, state):
cnn_output = self.nature_cnn(input_image)
hidden_state, new_state = self.rnn_cell(cnn_output, state)
return hidden_state, new_state
def create(input_width, input_height, input_channels=1, rnn_type='lstm', cnn_output_dim=512, hidden_units=128):
""" Vel factory function """
def instantiate(**_):
return NatureCnnRnnBackbone(
input_width=input_width, input_height=input_height, input_channels=input_channels,
rnn_type=rnn_type, cnn_output_dim=cnn_output_dim, hidden_units=hidden_units
)
return ModelFactory.generic(instantiate)
# Add this to make nicer scripting interface
NatureCnnFactory = create
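# --- Illustrative construction (not part of the original module) ---
# A minimal sketch of building the backbone directly; the Atari-style 84x84x4
# input shape is an arbitrary assumption chosen only for this example, and
# running it requires vel's NatureCnn/RnnCell dependencies to be installed.
if __name__ == '__main__':
    backbone = NatureCnnRnnBackbone(input_width=84, input_height=84, input_channels=4)
    print(backbone.output_dim, backbone.state_dim)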
|
tests/test_project_conf.py | mubashshirjamal/code | 1,582 | 12624005 | from tests.base import TestCase
from vilya.models.project import CodeDoubanProject
from vilya.models.project_conf import PROJECT_CONF_FILE
from nose.tools import raises
class TestProjectConf(TestCase):
def test_create_project_without_conf(self):
self.clean_up()
project = CodeDoubanProject.add(
'tp', owner_id="test1", create_trac=False)
assert project.conf['docs'], "enabled by default"
def test_conf_add_wrong_keys(self):
self.clean_up()
project = CodeDoubanProject.add(
'tp', owner_id="test1", create_trac=False)
u = self.addUser()
project.git.commit_one_file(
PROJECT_CONF_FILE,
'unexisting_key_argl1: 1\nunexisting_key_argl2: 2', 'm', u)
assert 'unexisting_key_argl1' not in project.conf
def test_conf(self):
self.clean_up()
project = CodeDoubanProject.add(
'tp', owner_id="test1", create_trac=False)
u = self.addUser()
project.git.commit_one_file(PROJECT_CONF_FILE,
'docs: {Docs: {dir: other_dir}}', 'm', u)
assert project.conf['docs']['Docs']['dir'] == 'other_dir'
@raises(Exception)
def test_broken_conf(self):
self.clean_up()
project = CodeDoubanProject.add(
'tp', owner_id="test1", create_trac=False)
u = self.addUser()
project.git.commit_one_file(PROJECT_CONF_FILE,
'docs {dir: other_dir', 'm', u)
assert project.conf['docs']['dir'] == 'other_dir'
def test_cannot_set_undefined_first_level_entry(self):
self.clean_up()
project = CodeDoubanProject.add(
'tp', owner_id="test1", create_trac=False)
u = self.addUser()
project.git.commit_one_file(PROJECT_CONF_FILE,
'unexisting_key: 123', 'm', u)
# First level key need to be defined in default_code_config.yaml
assert 'unexisting_key' not in project.conf
def test_can_set_undefined_second_level_entry(self):
self.clean_up()
project = CodeDoubanProject.add(
'tp', owner_id="test1", create_trac=False)
u = self.addUser()
project.git.commit_one_file(PROJECT_CONF_FILE,
'docs: {unexisting_key: aaa}', 'm', u)
assert project.conf['docs']['unexisting_key'] == 'aaa'
def clean_up(self):
prj = CodeDoubanProject.get_by_name('tp')
if prj:
prj.delete()
|
tests/test_subscriptions.py | sguzman/castero | 483 | 12624012 | import os
from unittest import mock
from lxml import etree
import pytest
from castero.feed import Feed, FeedDownloadError, FeedStructureError, FeedParseError
from castero.subscriptions import (
Subscriptions,
SubscriptionsLoadError,
SubscriptionsParseError,
SubscriptionsStructureError,
SubscriptionsError,
)
my_dir = os.path.dirname(os.path.realpath(__file__))
def test_subscriptions_valid_complete():
mysubscriptions = Subscriptions()
Feed.__init__ = mock.MagicMock(return_value=None)
mysubscriptions.load(my_dir + "/subscriptions/valid_complete.xml")
for generated in mysubscriptions.parse():
pass
assert isinstance(mysubscriptions, Subscriptions)
Feed.__init__.assert_any_call(url="http://feed1")
Feed.__init__.assert_any_call(url="http://feed2")
assert Feed.__init__.call_count == 2
assert len(mysubscriptions.feeds) == 2
def test_subscriptions_valid_base():
mysubscriptions = Subscriptions()
Feed.__init__ = mock.MagicMock(return_value=None)
mysubscriptions.load(my_dir + "/subscriptions/valid_base.xml")
for generated in mysubscriptions.parse():
pass
assert isinstance(mysubscriptions, Subscriptions)
Feed.__init__.assert_any_call(url="http://feed1")
Feed.__init__.assert_any_call(url="http://feed2")
assert Feed.__init__.call_count == 2
assert len(mysubscriptions.feeds) == 2
def test_subscriptions_valid_no_head():
mysubscriptions = Subscriptions()
Feed.__init__ = mock.MagicMock(return_value=None)
mysubscriptions.load(my_dir + "/subscriptions/valid_no_head.xml")
for generated in mysubscriptions.parse():
pass
assert isinstance(mysubscriptions, Subscriptions)
Feed.__init__.assert_any_call(url="http://feed1")
Feed.__init__.assert_any_call(url="http://feed2")
assert Feed.__init__.call_count == 2
assert len(mysubscriptions.feeds) == 2
def test_subscriptions_valid_minimal():
mysubscriptions = Subscriptions()
Feed.__init__ = mock.MagicMock(return_value=None)
mysubscriptions.load(my_dir + "/subscriptions/valid_minimal.xml")
assert isinstance(mysubscriptions, Subscriptions)
assert len(mysubscriptions.feeds) == 0
def test_subscriptions_broken_nonexistant():
mysubscriptions = Subscriptions()
Feed.__init__ = mock.MagicMock(return_value=None)
with pytest.raises(SubscriptionsLoadError):
mysubscriptions.load(my_dir + "/subscriptions/doesnt_exist")
def test_subscriptions_broken_parse():
mysubscriptions = Subscriptions()
Feed.__init__ = mock.MagicMock(return_value=None)
with pytest.raises(SubscriptionsParseError):
mysubscriptions.load(my_dir + "/subscriptions/broken_parse.xml")
def test_subscriptions_broken_no_body():
mysubscriptions = Subscriptions()
Feed.__init__ = mock.MagicMock(return_value=None)
with pytest.raises(SubscriptionsStructureError):
mysubscriptions.load(my_dir + "/subscriptions/broken_no_body.xml")
for generated in mysubscriptions.parse():
pass
def test_subscriptions_broken_no_outline():
mysubscriptions = Subscriptions()
Feed.__init__ = mock.MagicMock(return_value=None)
mysubscriptions.load(my_dir + "/subscriptions/broken_no_outline.xml")
count = 0
for generated in mysubscriptions.parse():
count += 1
assert count == 0
def test_subscriptions_generate():
feed1 = mock.MagicMock()
feed1.key = "feed1key"
feed2 = mock.MagicMock()
feed2.key = "feed2key"
mysubscriptions = Subscriptions()
mysubscriptions.generate([feed1, feed2])
Feed.__init__ = mock.MagicMock(return_value=None)
for generated in mysubscriptions.parse():
pass
assert len(mysubscriptions.feeds) == 2
def test_subscriptions_save():
temp_fname = my_dir + "/subscriptions/saved_temp.xml"
Feed.__init__ = mock.MagicMock(return_value=None)
mysubscriptions1 = Subscriptions()
mysubscriptions1.load(my_dir + "/subscriptions/valid_complete.xml")
mysubscriptions1.save(temp_fname)
mysubscriptions2 = Subscriptions()
mysubscriptions2.load(my_dir + "/subscriptions/saved_temp.xml")
os.remove(temp_fname)
tree1 = etree.tostring(mysubscriptions1._tree.getroot())
tree2 = etree.tostring(mysubscriptions2._tree.getroot())
assert tree1 == tree2
def test_subscriptions_save_before_create():
mysubscriptions = Subscriptions()
with pytest.raises(SubscriptionsError):
mysubscriptions.save(my_dir + "/subscriptions/saved_bad_temp.xml")
def test_subscriptions_parse_feeddownloaderror():
Feed.__init__ = mock.MagicMock(return_value=None)
Feed.__init__.side_effect = FeedDownloadError()
mysubscriptions = Subscriptions()
mysubscriptions.load(my_dir + "/subscriptions/valid_complete.xml")
for generated in mysubscriptions.parse():
assert isinstance(generated[1], FeedDownloadError)
def test_subscriptions_parse_feedstructureerror():
Feed.__init__ = mock.MagicMock(return_value=None)
Feed.__init__.side_effect = FeedStructureError()
mysubscriptions = Subscriptions()
mysubscriptions.load(my_dir + "/subscriptions/valid_complete.xml")
for generated in mysubscriptions.parse():
assert isinstance(generated[1], FeedStructureError)
def test_subscriptions_parse_feedparseerror():
Feed.__init__ = mock.MagicMock(return_value=None)
Feed.__init__.side_effect = FeedParseError()
mysubscriptions = Subscriptions()
mysubscriptions.load(my_dir + "/subscriptions/valid_complete.xml")
for generated in mysubscriptions.parse():
assert isinstance(generated[1], FeedParseError)
|
cinder/api/contrib/snapshot_manage.py | cloudification-io/cinder | 571 | 12624042 | # Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from oslo_log import log as logging
from cinder.api.contrib import resource_common_manage
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshot_manage
from cinder.api import validation
from cinder.api.views import manageable_snapshots as list_manageable_view
from cinder.api.views import snapshots as snapshot_views
from cinder.policies import manageable_snapshots as policy
from cinder import volume as cinder_volume
LOG = logging.getLogger(__name__)
class SnapshotManageController(wsgi.Controller):
"""The /os-snapshot-manage controller for the OpenStack API."""
_view_builder_class = snapshot_views.ViewBuilder
def __init__(self, *args, **kwargs):
super(SnapshotManageController, self).__init__(*args, **kwargs)
self.volume_api = cinder_volume.API()
self._list_manageable_view = list_manageable_view.ViewBuilder()
@wsgi.response(HTTPStatus.ACCEPTED)
@validation.schema(snapshot_manage.create)
def create(self, req, body):
"""Instruct Cinder to manage a storage snapshot object.
Manages an existing backend storage snapshot object (e.g. a Linux
logical volume or a SAN disk) by creating the Cinder objects required
to manage it, and possibly renaming the backend storage snapshot object
(driver dependent).
From an API perspective, this operation behaves very much like a
snapshot creation operation.
Required HTTP Body:
.. code-block:: json
{
"snapshot":
{
"volume_id": "<Cinder volume already exists in volume backend>",
"ref":
"<Driver-specific reference to the existing storage object>"
}
}
See the appropriate Cinder drivers' implementations of the
manage_snapshot method to find out the accepted format of 'ref'.
        For example, in the LVM driver, it will be the logical volume name of the
        snapshot which you want to manage.
This API call will return with an error if any of the above elements
are missing from the request, or if the 'volume_id' element refers to
a cinder volume that could not be found.
The snapshot will later enter the error state if it is discovered that
'ref' is bad.
Optional elements to 'snapshot' are::
name A name for the new snapshot.
description A description for the new snapshot.
metadata Key/value pairs to be associated with the new snapshot.
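        A request body that also uses the optional elements could look like the
        following (the values shown are illustrative only, and the exact 'ref'
        format remains driver dependent):
        .. code-block:: json
            {
                "snapshot":
                {
                    "volume_id": "5caa1076-b8f4-4704-bd08-a55f96eba3f8",
                    "ref": "existing-backend-snapshot-name",
                    "name": "managed-snap-01",
                    "description": "snapshot brought under Cinder management",
                    "metadata": {"tier": "gold"}
                }
            }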
"""
context = req.environ['cinder.context']
snapshot = body['snapshot']
# Check whether volume exists
volume_id = snapshot['volume_id']
# Not found exception will be handled at the wsgi level
volume = self.volume_api.get(context, volume_id)
context.authorize(policy.MANAGE_POLICY, target_obj=volume)
LOG.debug('Manage snapshot request body: %s', body)
snapshot_parameters = {}
snapshot_parameters['metadata'] = snapshot.get('metadata', None)
snapshot_parameters['description'] = snapshot.get('description', None)
snapshot_parameters['name'] = snapshot.get('name')
# Not found exception will be handled at the wsgi level
new_snapshot = self.volume_api.manage_existing_snapshot(
context,
snapshot['ref'],
volume,
**snapshot_parameters)
return self._view_builder.detail(req, new_snapshot)
@wsgi.extends
def index(self, req):
"""Returns a summary list of snapshots available to manage."""
context = req.environ['cinder.context']
context.authorize(policy.LIST_MANAGEABLE_POLICY)
return resource_common_manage.get_manageable_resources(
req, False, self.volume_api.get_manageable_snapshots,
self._list_manageable_view)
@wsgi.extends
def detail(self, req):
"""Returns a detailed list of snapshots available to manage."""
context = req.environ['cinder.context']
context.authorize(policy.LIST_MANAGEABLE_POLICY)
return resource_common_manage.get_manageable_resources(
req, True, self.volume_api.get_manageable_snapshots,
self._list_manageable_view)
class Snapshot_manage(extensions.ExtensionDescriptor):
"""Allows existing backend storage to be 'managed' by Cinder."""
name = 'SnapshotManage'
alias = 'os-snapshot-manage'
updated = '2014-12-31T00:00:00+00:00'
def get_resources(self):
controller = SnapshotManageController()
return [extensions.ResourceExtension(Snapshot_manage.alias,
controller,
collection_actions=
{'detail': 'GET'})]
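# --- Added usage sketch (not part of the original module) ---
# Rough illustration of the request this controller handles. The URL prefix,
# project id, volume UUID and the 'source-name' ref key below are illustrative
# assumptions only; the accepted 'ref' format is driver dependent (see the
# create() docstring above).
#
#   POST /v3/{project_id}/os-snapshot-manage
#   {
#       "snapshot": {
#           "volume_id": "<uuid-of-existing-cinder-volume>",
#           "ref": {"source-name": "lv-snapshot-to-manage"}
#       }
#   }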
|
src/badgr/envs/env.py | KaiW-53/badgr | 110 | 12624051 | import numpy as np
from badgr.utils.np_utils import imresize
from badgr.utils.python_utils import AttrDict
class EnvSpec(object):
def __init__(self, names_shapes_limits_dtypes):
names_shapes_limits_dtypes = list(names_shapes_limits_dtypes)
        names_shapes_limits_dtypes += [('done', (1,), (0, 1), bool)]  # np.bool was an alias for bool and is removed in newer NumPy
self._names_to_shapes = AttrDict()
self._names_to_limits = AttrDict()
self._names_to_dtypes = AttrDict()
for name, shape, limit, dtype in names_shapes_limits_dtypes:
self._names_to_shapes.add_recursive(name, shape)
self._names_to_limits.add_recursive(name, limit)
self._names_to_dtypes.add_recursive(name, dtype)
@property
def observation_names(self):
raise NotImplementedError
@property
def output_observation_names(self):
return self.observation_names
@property
def action_names(self):
raise NotImplementedError
@property
def names(self):
return self.observation_names + self.action_names
@property
def names_to_shapes(self):
return self._names_to_shapes
@property
def names_to_limits(self):
return self._names_to_limits
@property
def names_to_dtypes(self):
return self._names_to_dtypes
def dims(self, names):
return np.array([np.sum(self.names_to_shapes.get_recursive(name)) for name in names])
def dim(self, names):
return np.sum(self.dims(names))
def normalize(self, inputs):
"""
:param inputs (AttrDict):
:return: AttrDict
"""
inputs_normalized = AttrDict()
for key, value in inputs.get_leaf_items():
lower, upper = self.names_to_limits.get_recursive(key)
lower, upper = np.array(lower), np.array(upper)
mean = 0.5 * (lower + upper)
std = 0.5 * (upper - lower)
value_normalized = (value - mean) / std
inputs_normalized.add_recursive(key, value_normalized)
return inputs_normalized
def denormalize(self, inputs):
"""
:param inputs (AttrDict):
:return: AttrDict
"""
inputs_denormalized = AttrDict()
for key, value in inputs.get_leaf_items():
lower, upper = self.names_to_limits.get_recursive(key)
lower, upper = np.array(lower), np.array(upper)
mean = 0.5 * (lower + upper)
std = 0.5 * (upper - lower)
value_denormalized = value * std + mean
inputs_denormalized.add_recursive(key, value_denormalized)
return inputs_denormalized
def process_image(self, name, image):
"""
Default behavior: resize the image
"""
if len(image.shape) == 4:
return np.array([self.process_image(name, im_i) for im_i in image])
return imresize(image, self.names_to_shapes.get_recursive(name))
class Env(object):
def __init__(self, env_spec, params):
self.spec = env_spec
def step(self, get_action):
        raise NotImplementedError
        # unreachable; kept only to document the expected return signature
        return obs, goal, done
    def reset(self):
        raise NotImplementedError
        # unreachable; kept only to document the expected return signature
        return obs, goal
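# --- Added usage sketch (not part of the original module) ---
# Minimal illustration of EnvSpec.normalize/denormalize. It assumes AttrDict
# behaves as used above (add_recursive / get_recursive with '/'-nested keys);
# the spec entry name and values are made up for demonstration.
if __name__ == '__main__':
    spec = EnvSpec([('commands/steer', (1,), (-0.5, 0.5), np.float32)])
    obs = AttrDict()
    obs.add_recursive('commands/steer', np.array([0.25]))
    normalized = spec.normalize(obs)
    # (0.25 - midpoint 0.0) / half-range 0.5 == 0.5
    print(normalized.get_recursive('commands/steer'))
    restored = spec.denormalize(normalized)
    print(restored.get_recursive('commands/steer'))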
|
gammapy/scripts/analysis.py | Rishank2610/gammapy | 155 | 12624076 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import click
from gammapy.analysis import Analysis, AnalysisConfig
log = logging.getLogger(__name__)
@click.command(name="config")
@click.option(
"--filename",
default="config.yaml",
help="Filename to store the default configuration values.",
show_default=True,
)
@click.option(
"--overwrite", default=False, is_flag=True, help="Overwrite existing file."
)
def cli_make_config(filename, overwrite):
"""Writes default configuration file."""
config = AnalysisConfig()
config.write(filename, overwrite=overwrite)
log.info(f"Configuration file produced: {filename}")
@click.command(name="run")
@click.option(
"--filename",
default="config.yaml",
help="Filename with default configuration values.",
show_default=True,
)
@click.option(
"--out",
default="datasets",
help="Output folder where reduced datasets are stored.",
show_default=True,
)
@click.option(
"--overwrite", default=False, is_flag=True, help="Overwrite existing datasets."
)
def cli_run_analysis(filename, out, overwrite):
"""Performs automated data reduction process."""
config = AnalysisConfig.read(filename)
config.datasets.background.method = "reflected"
analysis = Analysis(config)
analysis.get_observations()
analysis.get_datasets()
analysis.datasets.write(out, overwrite=overwrite)
log.info(f"Datasets stored in {out} folder.")
|
api/src/opentrons/config/types.py | knownmed/opentrons | 235 | 12624109 |
from dataclasses import dataclass
from typing import Dict, Tuple
from typing_extensions import TypedDict
class AxisDict(TypedDict):
X: float
Y: float
Z: float
A: float
B: float
C: float
class CurrentDictDefault(TypedDict):
default: AxisDict
CurrentDictModelEntries = TypedDict(
"CurrentDictModelEntries",
{"2.1": AxisDict, "A": AxisDict, "B": AxisDict, "C": AxisDict},
total=False,
)
class CurrentDict(CurrentDictDefault, CurrentDictModelEntries):
pass
@dataclass
class RobotConfig:
name: str
version: int
gantry_steps_per_mm: Dict[str, float]
acceleration: Dict[str, float]
serial_speed: int
default_pipette_configs: Dict[str, float]
default_current: CurrentDict
low_current: CurrentDict
high_current: CurrentDict
default_max_speed: AxisDict
log_level: str
z_retract_distance: float
left_mount_offset: Tuple[float, float, float]
|
scripts/multiprocessing_performance_plot.py | ptallada/pysparkling | 260 | 12624115 |
import matplotlib.pyplot as plt
import numpy as np
import pysparkling.tests.test_multiprocessing as test_mp
def plot(has_hyperthreading=True):
n_cpu, r = test_mp.test_performance()
r = {n: 1.0 / (v[0] / r[1][0]) for n, v in r.items()}
if has_hyperthreading:
        n_cpu //= 2  # integer division keeps n_cpu usable as a count below
x, y = zip(*sorted(r.items()))
x_left = np.array(x) - 0.5
fig, ax = plt.subplots()
# ideal line
# line = ax.plot((1, n_cpu), (1.0, n_cpu),
# linewidth=2, linestyle='dashed', color='grey')
# ax.plot((n_cpu, max(x)+0.5), (n_cpu, n_cpu),
# linewidth=2, linestyle='dashed', color='grey')
n_threads = n_cpu * 2 if has_hyperthreading else n_cpu
bars_ideal = ax.bar(
x_left,
        list(range(n_threads)) + [n_threads] * (len(x) - n_threads),
1.0, color='lightgrey', linewidth=0,
)
# measured
bars = ax.bar(x_left, y, 1.0, color='y')
# divide with cpu cores
ax.plot((n_cpu + 0.5, n_cpu + 0.5), (0, n_threads + 1),
linewidth=2, linestyle='solid', color='black')
ax.text(n_cpu + 0.4, n_threads + 1,
f'{n_cpu} CPU cores',
ha='right', va='top')
# divide with cpu threads
if has_hyperthreading:
ax.plot((n_cpu * 2 + 0.5, n_cpu * 2 + 0.5), (0, n_threads + 1),
linewidth=2, linestyle='solid', color='black')
ax.text(n_cpu * 2 + 0.4, n_threads + 1,
f'{n_cpu * 2} CPU threads',
ha='right', va='top')
# add some text for labels, title and axes ticks
ax.set_xlabel('n processes')
ax.set_ylabel('speedup')
ax.set_xticks(x)
ax.set_xticklabels(['no\nserialization\n(single process)']
+ [str(s) for s in x[1:]])
ax.set_xlim(-0.5, max(x) + 0.5)
ax.set_ylim(0, max(x))
ax.legend((bars[0], bars_ideal[0]), ('measured', 'ideal'),
loc='upper left')
for rect in bars:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., height - 0.05,
f'{height:.2f}',
ha='center', va='top')
fig.tight_layout()
# plt.show()
fig.savefig('tests/multiprocessing_performance_plot.pdf')
fig.savefig('tests/multiprocessing_performance_plot.png', dpi=300)
if __name__ == '__main__':
plot()
|
demo/cict_demo/camera/coordinate_transformation.py | timothijoe/DI-drive | 219 | 12624118 | import numpy as np
def rotationMatrix3D(roll, pitch, yaw):
# RPY <--> XYZ, roll first, picth then, yaw final
si, sj, sk = np.sin(roll), np.sin(pitch), np.sin(yaw)
ci, cj, ck = np.cos(roll), np.cos(pitch), np.cos(yaw)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
R = np.identity(3)
R[0, 0] = cj * ck
R[0, 1] = sj * sc - cs
R[0, 2] = sj * cc + ss
R[1, 0] = cj * sk
R[1, 1] = sj * ss + cc
R[1, 2] = sj * cs - sc
R[2, 0] = -sj
R[2, 1] = cj * si
R[2, 2] = cj * ci
return R
def rotationMatrixRoll(roll):
R = np.identity(3)
R[1, 1] = np.cos(roll)
R[2, 2] = np.cos(roll)
R[2, 1] = np.sin(roll)
R[1, 2] = -np.sin(roll)
return R
def rotarotationMatrixPitch(pitch):
R = np.identity(3)
R[0, 0] = np.cos(pitch)
R[2, 2] = np.cos(pitch)
R[2, 0] = -np.sin(pitch)
R[0, 2] = np.sin(pitch)
return R
def rotarotationMatrixYaw(yaw):
R = np.identity(3)
R[0, 0] = np.cos(yaw)
R[1, 1] = np.cos(yaw)
R[1, 0] = np.sin(yaw)
R[0, 1] = -np.sin(yaw)
return R
def rotationMatrix3DYPR(roll, pitch, yaw):
return np.dot(np.dot(rotationMatrixRoll(roll), rotarotationMatrixPitch(pitch)), rotarotationMatrixYaw(yaw))
def reverseX():
I = np.identity(3)
I[0, 0] = -1
return I
def reverseY():
I = np.identity(3)
I[1, 1] = -1
return I
def intrinsicMatrix(fx, fy, u0, v0):
K = np.array([[fx, 0, u0], [0, fy, v0], [0, 0, 1]])
return K
class CoordinateTransformation(object):
I = np.dot(np.dot(reverseX(), reverseY()), rotationMatrix3DYPR(np.pi / 2, 0, -np.pi / 2))
@staticmethod
def world3DToCamera3D(world_vec, R, t):
camera_vec = np.dot(R.T, world_vec - t)
return camera_vec
@staticmethod
def camera3DToWorld3D(camera_vec, R, t):
world_vec = np.dot(R, camera_vec) + t
return world_vec
@staticmethod
def camera3DToImage2D(camera_vec, K, eps=1e-24):
image_vec = np.dot(np.dot(K, CoordinateTransformation.I), camera_vec)
return image_vec[:2, :] / (image_vec[2, :] + eps)
@staticmethod
def world3DToImage2D(world_vec, K, R, t):
camera_vec = CoordinateTransformation.world3DToCamera3D(world_vec, R, t)
image_vec = CoordinateTransformation.camera3DToImage2D(camera_vec, K)
return image_vec
@staticmethod
def world3DToImagePixel2D(world_vec, K, R, t):
image_vec = CoordinateTransformation.world3DToImage2D(world_vec, K, R, t)
x_pixel, y_pixel = round(image_vec[0, 0]), round(image_vec[1, 0])
return np.array([x_pixel, y_pixel]).reshape(2, 1)
@staticmethod
def image2DToWorld3D(image_vec, K, R, t):
r = np.vstack((image_vec, 1))
b = np.vstack((np.dot(np.dot(K, CoordinateTransformation.I), t), 0))
temp1 = np.dot(np.dot(K, CoordinateTransformation.I), R.T)
temp2 = np.hstack((temp1, -r))
A = np.vstack((temp2, np.array([[0, 0, 1, 0]])))
world_vec = np.dot(np.linalg.inv(A), b)
return world_vec[:3]
@staticmethod
def image2DToWorld3D2(image_vec, K, R, t):
r = np.vstack((image_vec, np.ones((1, image_vec.shape[1]))))
b = np.vstack((np.dot(np.dot(K, CoordinateTransformation.I), t), 0))
temp1 = np.dot(np.dot(K, CoordinateTransformation.I), R.T)
temp1 = np.expand_dims(temp1, axis=2).repeat(image_vec.shape[1], axis=2)
r = np.expand_dims(r, axis=1)
temp1 = np.transpose(temp1, (2, 0, 1))
r = np.transpose(r, (2, 0, 1))
temp2 = np.concatenate((temp1, -r), axis=2)
temp3 = np.array([[0, 0, 1, 0]])
temp3 = np.expand_dims(temp3, axis=2).repeat(image_vec.shape[1], axis=2)
temp3 = np.transpose(temp3, (2, 0, 1))
A = np.concatenate((temp2, temp3), axis=1)
world_vec = np.dot(np.linalg.inv(A), b)
return world_vec[:, :3]
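# --- Added usage sketch (not part of the original module) ---
# Projects a single 3D world point to pixel coordinates. The intrinsics
# (fx, fy, u0, v0) and the camera pose (R, t) below are arbitrary example
# values chosen only to exercise the call chain.
if __name__ == '__main__':
    K = intrinsicMatrix(fx=320.0, fy=320.0, u0=320.0, v0=240.0)
    R = rotationMatrix3D(roll=0.0, pitch=0.0, yaw=0.0)
    t = np.array([0.0, 0.0, 1.5]).reshape(3, 1)             # camera 1.5 m above the origin
    world_point = np.array([5.0, 0.0, 0.0]).reshape(3, 1)   # point 5 m ahead along x
    pixel = CoordinateTransformation.world3DToImagePixel2D(world_point, K, R, t)
    print('projected pixel:', pixel.ravel())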
|
prerender/cache/__init__.py | bosondata/chrome-prerender | 169 | 12624185 | import os
from .base import CacheBackend
CACHE_BACKEND = os.environ.get('CACHE_BACKEND', 'dummy')
cache: CacheBackend = None
if CACHE_BACKEND == 'disk':
from .disk import DiskCache
cache = DiskCache()
elif CACHE_BACKEND == 's3':
from .s3 import S3Cache
cache = S3Cache()
else:
from .dummy import DummyCache
cache = DummyCache()
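# --- Added usage note (not part of the original module) ---
# The backend is selected once at import time from the CACHE_BACKEND
# environment variable ('disk', 's3', anything else falls back to the dummy
# backend); callers then simply do `from prerender.cache import cache`.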
|
fexm/docker_scripts/afl_base_image/afl_utils/tests/test_afl_sync.py | fgsect/fexm | 105 | 12624196 | from afl_utils import afl_sync
from afl_utils.afl_sync import AflRsync
import os
import shutil
import unittest
class AflSyncTestCase(unittest.TestCase):
def setUp(self):
# Use to set up test environment prior to test case
# invocation
os.makedirs('testdata/rsync_tmp_store', exist_ok=True)
os.makedirs('testdata/sync/fuzz000/crashes', exist_ok=True)
os.makedirs('testdata/sync/fuzz000/hangs', exist_ok=True)
os.makedirs('testdata/sync/fuzz000/.cur_input', exist_ok=True)
os.makedirs('testdata/sync/fuzz001/.cur_input', exist_ok=True)
os.makedirs('testdata/sync/fuzz002.sync', exist_ok=True)
os.makedirs('testdata/sync/invalid_fuzz000', exist_ok=True)
os.makedirs('testdata/sync/invalid_fuzz001', exist_ok=True)
# push
os.makedirs('testdata/rsync_output_push', exist_ok=True)
# pull
os.makedirs('testdata/rsync_output_pull/fuzz000.sync', exist_ok=True)
os.makedirs('testdata/rsync_output_pull/fuzz001.sync', exist_ok=True)
os.makedirs('testdata/rsync_output_pull/other_fuzz000.sync', exist_ok=True)
os.makedirs('testdata/rsync_output_pull/other_fuzz000.sync/.cur_input', exist_ok=True)
os.makedirs('testdata/rsync_output_pull/other_fuzz000.sync/crashes', exist_ok=True)
os.makedirs('testdata/rsync_output_pull/other_fuzz001.sync', exist_ok=True)
os.makedirs('testdata/rsync_output_pull/other_fuzz001.sync/.cur_input', exist_ok=True)
os.makedirs('testdata/rsync_output_pull/other_invalid_fuzz000.sync', exist_ok=True)
# sync
os.makedirs('testdata/rsync_output_sync/other_fuzz000.sync', exist_ok=True)
os.makedirs('testdata/rsync_output_sync/other_fuzz001.sync', exist_ok=True)
os.makedirs('testdata/rsync_output_sync/other_invalid_fuzz000.sync', exist_ok=True)
def tearDown(self):
# Use for clean up after tests have run
self.clean_remove_dir('testdata/rsync_tmp_store')
self.clean_remove_dir('testdata/sync/fuzz000/crashes')
self.clean_remove_dir('testdata/sync/fuzz000/hangs')
self.clean_remove_dir('testdata/sync/fuzz000/.cur_input')
self.clean_remove_dir('testdata/sync/fuzz001/.cur_input')
self.clean_remove_dir('testdata/sync/fuzz002.sync')
self.clean_remove_dir('testdata/sync/invalid_fuzz000')
self.clean_remove_dir('testdata/sync/invalid_fuzz001')
self.clean_remove_dir('testdata/sync/fuzz000.sync')
self.clean_remove_dir('testdata/sync/fuzz001.sync')
self.clean_remove_dir('testdata/sync/other_fuzz000.sync')
self.clean_remove_dir('testdata/sync/other_fuzz001.sync')
self.clean_remove_dir('testdata/sync/other_invalid_fuzz000.sync')
self.clean_remove_dir('testdata/rsync_output_push')
self.clean_remove_dir('testdata/rsync_output_pull')
self.clean_remove_dir('testdata/rsync_output_sync')
self.clean_remove_dir('testdata/new_sync')
def clean_remove(self, file):
if os.path.exists(file):
os.remove(file)
def clean_remove_dir(self, dir):
if os.path.exists(dir):
shutil.rmtree(dir)
def test_show_info(self):
self.assertIsNone(afl_sync.show_info())
def test_afl_rsync_init(self):
server_config = {
'remote_path': 'testdata/rsync_output',
}
fuzzer_config = {
'sync_dir': 'testdata/sync',
'session': 'fuzz',
'exclude_crashes': True,
'exclude_hangs': True,
}
afl_rsync = AflRsync(server_config, fuzzer_config)
self.assertDictEqual(server_config, afl_rsync.server_config)
self.assertDictEqual(fuzzer_config, afl_rsync.fuzzer_config)
def test_afl_rsync_prepare_sync_command(self):
afl_rsync = AflRsync(None, None)
expected_put_cmdline = [
'rsync',
afl_sync._rsync_default_options[0],
'--exclude=\"exclude\"',
'src/',
'dst.sync/'
]
expected_get_cmdline = [
'rsync',
afl_sync._rsync_default_options[0],
'--exclude=\"exclude\"',
'dst/*',
'src/'
]
self.assertListEqual(expected_put_cmdline, afl_rsync._AflRsync__prepare_rsync_commandline('src', 'dst',
rsync_excludes=[
'exclude']))
self.assertListEqual(expected_get_cmdline, afl_rsync._AflRsync__prepare_rsync_commandline('src', 'dst',
rsync_excludes=[
'exclude'],
rsync_get=True))
def test_afl_rsync_invoke_rsync(self):
rsync_cmdline = ['rsync', '--help']
afl_rsync = AflRsync(None, None)
self.assertTrue(afl_rsync._AflRsync__invoke_rsync(rsync_cmdline))
self.assertFalse(afl_rsync._AflRsync__invoke_rsync(['rsync']))
def test_afl_rsync_get_fuzzers(self):
fuzzer_config = {
'sync_dir': 'testdata/sync',
'session': 'fuzz',
'exclude_crashes': True,
'exclude_hangs': True,
}
expected_fuzzers = [
'fuzz000',
'fuzz001',
'invalid_fuzz000',
'invalid_fuzz001'
]
afl_rsync = AflRsync(None, fuzzer_config)
self.assertListEqual(sorted(expected_fuzzers), sorted(afl_rsync._AflRsync__get_fuzzers()))
def test_afl_rsync_put(self):
local_path = 'testdata/sync/fuzz000'
remote_path = 'testdata/rsync_tmp_store/fuzz000'
excludes = ['crashes*/', 'hangs*/']
afl_rsync = AflRsync(None, None)
self.assertTrue(afl_rsync.rsync_put(local_path, remote_path, rsync_excludes=excludes))
self.assertTrue(os.path.exists(remote_path + '.sync/fuzzer_stats'))
self.assertTrue(os.path.exists(remote_path + '.sync/.cur_input'))
self.assertFalse(os.path.exists(remote_path + '.sync/crashes'))
self.assertFalse(os.path.exists(remote_path + '.sync/hangs'))
def test_afl_rsync_get(self):
local_path = 'testdata/rsync_tmp_store/fuzz000_get'
remote_path = 'testdata/sync/fuzz000'
excludes = ['crashes*/', 'hangs*/']
afl_rsync = AflRsync(None, None)
self.assertTrue(afl_rsync.rsync_get(remote_path, local_path, rsync_excludes=excludes))
self.assertTrue(os.path.exists(local_path + '/fuzzer_stats'))
self.assertFalse(os.path.exists(local_path + '/crashes'))
self.assertFalse(os.path.exists(local_path + '/hangs'))
def test_afl_rsync_push(self):
server_config = {
'remote_path': 'testdata/rsync_output_push',
}
fuzzer_config = {
'sync_dir': 'testdata/sync',
'session': 'fuzz',
'exclude_crashes': True,
'exclude_hangs': True,
}
afl_rsync = AflRsync(server_config, fuzzer_config)
self.assertIsNone(afl_rsync.push())
self.assertTrue(os.path.exists('testdata/rsync_output_push/fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_push/fuzz000.sync/.cur_input'))
self.assertTrue(os.path.exists('testdata/rsync_output_push/fuzz001.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_push/fuzz000.sync/.cur_input'))
self.assertFalse(os.path.exists('testdata/rsync_output_push/fuzz002.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_push/fuzz002.sync.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_push/invalid_fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_push/invalid_fuzz001.sync'))
def test_afl_rsync_pull_session(self):
server_config = {
'remote_path': 'testdata/rsync_output_pull',
}
fuzzer_config = {
'sync_dir': 'testdata/sync',
'session': 'other_fuzz',
'exclude_crashes': True,
'exclude_hangs': True,
}
afl_rsync = AflRsync(server_config, fuzzer_config)
self.assertIsNone(afl_rsync.pull())
self.assertTrue(os.path.exists('testdata/sync/other_fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/sync/other_fuzz000.sync/crashes'))
self.assertFalse(os.path.exists('testdata/sync/other_fuzz000.sync/.cur_input'))
self.assertTrue(os.path.exists('testdata/sync/other_fuzz001.sync'))
self.assertFalse(os.path.exists('testdata/sync/other_fuzz001.sync/.cur_input'))
self.assertFalse(os.path.exists('testdata/sync/other_invalid_fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz001.sync'))
def test_afl_rsync_pull_all(self):
server_config = {
'remote_path': 'testdata/rsync_output_pull',
}
fuzzer_config = {
'sync_dir': 'testdata/sync',
'session': None,
'exclude_crashes': True,
'exclude_hangs': True,
}
afl_rsync = AflRsync(server_config, fuzzer_config)
self.assertIsNone(afl_rsync.pull())
self.assertTrue(os.path.exists('testdata/sync/other_fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/sync/other_fuzz001.sync'))
self.assertFalse(os.path.exists('testdata/sync/other_fuzz000.sync/.cur_input'))
self.assertFalse(os.path.exists('testdata/sync/other_fuzz001.sync/.cur_input'))
self.assertTrue(os.path.exists('testdata/sync/other_invalid_fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz001.sync'))
def test_afl_rsync_sync(self):
server_config = {
'remote_path': 'testdata/rsync_output_sync',
}
fuzzer_config = {
'sync_dir': 'testdata/sync',
'session': None,
'exclude_crashes': True,
'exclude_hangs': True,
}
afl_rsync = AflRsync(server_config, fuzzer_config)
self.assertIsNone(afl_rsync.sync())
# pull assertions
self.assertTrue(os.path.exists('testdata/sync/other_fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/sync/other_fuzz001.sync'))
self.assertTrue(os.path.exists('testdata/sync/other_invalid_fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz001.sync'))
# push assertions
self.assertTrue(os.path.exists('testdata/rsync_output_sync/fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_sync/fuzz001.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_sync/fuzz002.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_sync/fuzz002.sync.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_sync/invalid_fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_sync/invalid_fuzz001.sync'))
def test_main(self):
argv = [
'afl-sync'
]
with self.assertRaises(SystemExit):
self.assertIsNone(afl_sync.main(argv))
argv = [
'afl-sync',
'put',
'src',
'dst'
]
with self.assertRaises(SystemExit) as e:
afl_sync.main(argv)
self.assertEqual(1, e.exception.code)
argv = [
'afl-sync',
'push',
'testdata/new_sync',
'testdata/rsync_output_push'
]
with self.assertRaises(SystemExit) as e:
afl_sync.main(argv)
self.assertEqual(1, e.exception.code)
argv = [
'afl-sync',
'pull',
'testdata/new_sync',
'testdata/rsync_output_pull'
]
self.assertIsNone(afl_sync.main(argv))
argv = [
'afl-sync',
'push',
'testdata/sync',
'testdata/rsync_output_push'
]
self.assertIsNone(afl_sync.main(argv))
self.assertTrue(os.path.exists('testdata/rsync_output_push/fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_push/fuzz001.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_push/fuzz002.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_push/fuzz002.sync.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_push/invalid_fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_push/invalid_fuzz001.sync'))
argv = [
'afl-sync',
'pull',
'testdata/sync',
'testdata/rsync_output_pull'
]
self.assertIsNone(afl_sync.main(argv))
self.assertTrue(os.path.exists('testdata/sync/other_fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/sync/other_fuzz001.sync'))
self.assertTrue(os.path.exists('testdata/sync/other_invalid_fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz001.sync'))
argv = [
'afl-sync',
'sync',
'testdata/sync',
'testdata/rsync_output_sync'
]
self.assertIsNone(afl_sync.main(argv))
# pull assertions
self.assertTrue(os.path.exists('testdata/sync/other_fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/sync/other_fuzz001.sync'))
self.assertTrue(os.path.exists('testdata/sync/other_invalid_fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz000.sync'))
self.assertFalse(os.path.exists('testdata/sync/fuzz001.sync'))
# push assertions
self.assertTrue(os.path.exists('testdata/rsync_output_sync/fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_sync/fuzz001.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_sync/fuzz002.sync'))
self.assertFalse(os.path.exists('testdata/rsync_output_sync/fuzz002.sync.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_sync/invalid_fuzz000.sync'))
self.assertTrue(os.path.exists('testdata/rsync_output_sync/invalid_fuzz001.sync'))
|
examples/parallel/interengine/bintree_script.py | chebee7i/ipython | 748 | 12624204 | #!/usr/bin/env python
"""
Script for setting up and using [all]reduce with a binary-tree engine interconnect.
usage: `python bintree_script.py`
This spanning tree strategy ensures that a single node node mailbox will never
receive more that 2 messages at once. This is very important to scale to large
clusters (e.g. 1000 nodes) since if you have many incoming messages of a couple
of megabytes you might saturate the network interface of a single node and
potentially its memory buffers if the messages are not consumed in a streamed
manner.
Note that the AllReduce scheme implemented with the spanning tree strategy
requires the aggregation function to be commutative and associative. It might
not be the case if you implement the naive gather / reduce / broadcast strategy
where you can reorder the partial data before performing the reduce.
"""
from IPython.parallel import Client, Reference
# connect client and create views
rc = Client()
rc.block=True
ids = rc.ids
root_id = ids[0]
root = rc[root_id]
view = rc[:]
# run bintree.py script defining bintree functions, etc.
execfile('bintree.py')
# generate binary tree of parents
btree = bintree(ids)
print "setting up binary tree interconnect:"
print_bintree(btree)
view.run('bintree.py')
view.scatter('id', ids, flatten=True)
view['root_id'] = root_id
# create the Communicator objects on the engines
view.execute('com = BinaryTreeCommunicator(id, root = id==root_id )')
pub_url = root.apply_sync(lambda : com.pub_url)
# gather the connection information into a dict
ar = view.apply_async(lambda : com.info)
peers = ar.get_dict()
# this is a dict, keyed by engine ID, of the connection info for the EngineCommunicators
# connect the engines to each other:
def connect(com, peers, tree, pub_url, root_id):
"""this function will be called on the engines"""
com.connect(peers, tree, pub_url, root_id)
view.apply_sync(connect, Reference('com'), peers, btree, pub_url, root_id)
# functions that can be used for reductions
# max and min builtins can be used as well
def add(a,b):
"""cumulative sum reduction"""
return a+b
def mul(a,b):
"""cumulative product reduction"""
return a*b
view['add'] = add
view['mul'] = mul
# scatter some data
data = range(1000)
view.scatter('data', data)
# perform cumulative sum via allreduce
view.execute("data_sum = com.allreduce(add, data, flat=False)")
print "allreduce sum of data on all engines:", view['data_sum']
# perform cumulative sum *without* final broadcast
# when not broadcasting with allreduce, the final result resides on the root node:
view.execute("ids_sum = com.reduce(add, id, flat=True)")
print "reduce sum of engine ids (not broadcast):", root['ids_sum']
print "partial result on each engine:", view['ids_sum']
|
src/ch6/3-caricature/silhouette-better.py | ssloy/least-squares-course | 129 | 12624231 | import numpy as np
import matplotlib.pyplot as plt
def amplify(x):
n = len(x)
A = np.matrix(np.zeros((2*n,n)))
b = np.matrix(np.zeros((2*n,1)))
for i in range(n):
A[i, i] = 1. # amplify the curvature
A[i, (i+1)%n] = -1.
b[i, 0] = (x[i] - x[(i+1)%n])*1.9
A[n+i, i] = 1*.3 # light data fitting term
b[n+i, 0] = x[i]*.3
return (np.linalg.inv(A.T*A)*A.T*b).tolist()
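    # Added commentary (not part of the original script): the closed-form solve
    # above is the least-squares normal equation x = (A^T A)^{-1} A^T b, i.e. it
    # minimises ||A x - b||^2, where the first n rows ask each difference
    # x[i] - x[(i+1) % n] (what the script calls curvature) to be amplified by
    # 1.9 and the last n rows softly pin the result to the original silhouette
    # with weight 0.3.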
x = [100,100,97,93,91,87,84,83,85,87,88,89,90,90,90,88,87,86,84,82,80,
77,75,72,69,66,62,58,54,47,42,38,34,32,28,24,22,20,17,15,13,12,9,
7,8,9,8,6,0,0,2,0,0,2,3,2,0,0,1,4,8,11,14,19,24,27,25,23,21,19]
y = [0,25,27,28,30,34,37,41,44,47,51,54,59,64,66,70,74,78,80,83,86,90,93,
95,96,98,99,99,100,99,99,99,98,98,96,94,93,91,90,87,85,79,75,70,65,
62,60,58,52,49,46,44,41,37,34,30,27,20,17,15,16,17,17,19,18,14,11,6,4,1]
plt.plot(x+[x[0]], y+[y[0]], 'g--')
x = amplify(x)
y = amplify(y)
plt.plot(x+[x[0]], y+[y[0]], 'k-', linewidth=3)
plt.axis('off')
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
|
panoramix/contract.py | git-github-com-warren1990-Github-git/panoramix | 259 | 12624232 |
import collections
import logging
import panoramix.folder as folder
import panoramix.sparser as sparser
from panoramix.matcher import Any, match
from panoramix.prettify import pprint_ast, pprint_trace, prettify, pretty_stor
from panoramix.utils.helpers import (
COLOR_GREEN,
ENDC,
find_f_list,
opcode,
replace_f,
replace_lines,
to_exp2,
tuplify,
)
from panoramix.function import Function
from panoramix.sparser import get_loc, get_name
logger = logging.getLogger(__name__)
def deserialize(trace):
res = []
for line in trace:
line_t = tuple(line)
if opcode(line_t) == "while":
_, cond, path, lid, setvars = line_t
cond = tuplify(cond)
setvars = tuplify(setvars)
assert type(lid) == str
path = deserialize(path)
res.append(("while", cond, path, lid, setvars))
elif opcode(line_t) == "if":
_, cond, if_true, if_false = line_t
cond = tuplify(cond)
if_true = deserialize(if_true)
if_false = deserialize(if_false)
res.append(("if", cond, if_true, if_false))
else:
res.append(tuplify(line))
return res
class Contract:
def __init__(self, functions, problems):
self.problems = problems
self.functions = []
for func in functions.values():
self.functions.append(func)
self.stor_defs = {}
def json(self) -> dict:
return {
"problems": self.problems,
"stor_defs": self.stor_defs,
"functions": [f.serialize() for f in self.functions],
}
def load(self, data):
self.problems = data["problems"]
self.functions = []
self.stor_defs = data["stor_defs"] if "stor_defs" in data else {}
for func in data["functions"]:
self.functions.append(
Function(hash=func["hash"], trace=deserialize(func["trace"]))
)
return self
def postprocess(self):
try:
self.stor_defs = sparser.rewrite_functions(self.functions)
except Exception:
# this is critical, because it causes full contract to display very
# badly, and cannot be limited in scope to just one affected function
logger.exception("Storage postprocessing failed. This is very bad!")
self.stor_defs = {}
for func in self.functions:
def replace_names(exp):
if (m := match(exp, ("cd", ":int:idx"))) and m.idx in func.params:
return ("param", func.params[m.idx][1])
return exp
func.trace = replace_f(func.trace, replace_names)
# const list, sort by putting all-caps consts at the end - looks way better this way
self.consts = [
f for f in self.functions if f.const and f.name.upper() != f.name
] + [f for f in self.functions if f.const and f.name.upper() == f.name]
self.make_asts()
def make_asts(self):
"""
we need to do ast creation from the contract, not function level,
        because some simplifications (type/field removal) require insight into all the functions,
not just a single one
"""
for func in self.functions:
func.ast = self.make_ast(func.trace)
def find_stor_masks(exp):
if opcode(exp) == "type":
return [exp]
else:
return []
stor_masks = frozenset(
find_f_list([f.ast for f in self.functions], find_stor_masks)
)
stor_loc_to_masks = collections.defaultdict(set)
stor_name_to_masks = collections.defaultdict(set)
for mask in stor_masks:
stor_loc_to_masks[get_loc(mask)].add(mask)
stor_name_to_masks[get_name(mask)].add(mask)
def cleanup(exp):
if m := match(exp, ("field", 0, ("stor", ("length", ":idx")))):
return ("stor", ("length", m.idx))
if m := match(
exp, ("type", 256, ("field", 0, ("stor", ("length", ":idx"))))
):
return ("stor", ("length", m.idx))
if m := match(exp, ("type", 256, ("stor", ("length", ":idx")))):
return ("stor", ("length", m.idx))
if m := match(
exp,
(
"type",
":e_type",
("field", ":e_field", ("stor", ("name", ":e_name", ":loc"))),
),
):
e_type, e_field, e_name, loc = m.e_type, m.e_field, m.e_name, m.loc
for mask in stor_name_to_masks[e_name]:
assert (
get_loc(mask) == loc
) # otherwise, two locs with the same name?
assert (
m := match(
mask, ("type", ":m_type", ("field", ":m_field", Any))
)
)
if m.m_field != e_field or m.m_type != e_type:
return exp
return ("stor", ("name", e_name, loc))
if m := match(exp, ("type", ":e_type", ":stor")):
e_type, stor = m.e_type, m.stor
e_loc = get_loc(stor)
for mask in stor_loc_to_masks[e_loc]:
if not match(
mask, ("type", 256, ("field", 0, ("stor", ("length", Any))))
):
assert (m := match(mask, ("type", ":m_type", Any)))
if m.m_type != e_type:
return exp
return stor
if m := match(exp, ("field", ":e_off", ":stor")):
e_off, stor = m.e_off, m.stor
e_loc = get_loc(stor)
for mask in stor_loc_to_masks[e_loc]:
if not match(
mask, ("type", 256, ("field", 0, ("stor", ("length", Any))))
):
assert (
m := match(mask, ("type", Any, ("field", ":m_off", Any)))
)
if m.m_off != e_off:
return exp
return stor
return exp
for f in self.functions:
f.ast = replace_f(f.ast, cleanup)
def make_ast(self, trace):
trace = folder.fold(trace)
def store_to_set(line):
if m := match(line, ("store", ":size", ":off", ":idx", ":val")):
return ("set", ("stor", m.size, m.off, m.idx), m.val)
else:
return line
def loc_to_name(exp):
if m := match(exp, ("loc", ":int:num")):
num = m.num
if num < 1000:
return ("name", "stor" + str(num), num)
else:
return ("name", "stor" + hex(num)[2:6].upper(), num)
if m := match(exp, ("loc", ":num")):
return (
"name",
"stor" + prettify(m.num, add_color=False, parentheses=True),
m.num,
)
return exp
def arr_rem_mul(exp):
if m := match(
exp,
("array", ("mask_shl", ":size", ":off", ":int:shl", ":idx"), ":loc"),
):
size, off, shl, idx, loc = m.size, m.off, m.shl, m.idx, m.loc
r = 2 ** shl
e_loc = get_loc(loc)
for s in self.stor_defs:
assert match(s, ("def", Any, ":d_loc", ":d_def"))
if match(s, ("def", Any, e_loc, ("array", ("struct", r)))):
return ("array", ("mask_shl", size, off, 0, idx), loc)
elif m := match(exp, ("array", ("mul", ":int:r", ":idx"), ":loc")):
r, idx, loc = m.r, m.idx, m.loc
e_loc = get_loc(loc)
for s in self.stor_defs:
assert match(s, ("def", Any, ":d_loc", ":d_def"))
if match(s, ("def", Any, e_loc, ("array", ("struct", r)))):
return ("array", idx, loc)
return exp
def mask_storage(exp):
if m := match(exp, ("stor", ":size", ":off", ":idx")):
size, off, idx = m.size, m.off, m.idx
if isinstance(off, int) and off < 0:
off = 0
return ("type", size, ("field", off, ("stor", idx)))
else:
return exp
def other_1(exp):
if (
(
m := match(
exp, ("mask_shl", ":int:size", ":n_size", ":size_n", ":str:val")
)
)
and 256 - m.size == m.n_size
and m.size - 256 == m.size_n
and m.size + 16 == len(m.val) * 8
and len(m.val) > 0
and m.val[0] == m.val[-1] == "'"
): # +16 because '' in strings
return m.val
else:
return exp
def other_2(exp):
if (
m := match(exp, ("if", ("eq", ":a", ":b"), ":if_true"))
) and m.if_true == [("return", ("eq", m.a, m.b))]:
return ("if", ("eq", m.a, m.b), [("return", ("bool", 1))])
elif (m := match(exp, ("mask_shl", 160, 0, 0, ":str:e"))) and m.e in (
"address",
"coinbase",
"caller",
"origin",
):
return m.e
elif (
(
m := match(
exp, ("mask_shl", ":int:size", ":int:off", ":int:m_off", ":e")
)
)
and m.m_off == -m.off
and m.off in range(1, 9)
and m.size + m.off in [8, 16, 32, 64, 128, 256]
):
return ("div", ("mask", m.size + m.off, 0, m.e), 2 ** m.off)
elif exp == ("mask_shl", 32, 224, 0, ("cd", 0)):
return ("cd", 0)
elif m := match(exp, ("mask_shl", 160, 0, 96, ":val")):
# nasty hack for stuff like 0xF8DFaC6CAe56736FD2a05e45108490C6Cb40147D approve
return ("mask_shl", 160, 0, 0, m.val)
else:
return exp
trace = replace_f(trace, store_to_set)
trace = replace_f(trace, loc_to_name)
trace = replace_f(trace, arr_rem_mul)
trace = replace_f(trace, mask_storage)
trace = replace_f(trace, other_1)
trace = replace_f(trace, other_2)
return trace
|
fhir/resources/familymemberhistory.py | cstoltze/fhir.resources | 144 | 12624235 |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/FamilyMemberHistory
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
import typing
from pydantic import Field, root_validator
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.errors import MissingError, NoneIsNotAllowedError
from . import backboneelement, domainresource, fhirtypes
class FamilyMemberHistory(domainresource.DomainResource):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Information about patient's relatives, relevant for patient.
Significant health conditions for a person related to the patient relevant
in the context of care for the patient.
"""
resource_type = Field("FamilyMemberHistory", const=True)
ageAge: fhirtypes.AgeType = Field(
None,
alias="ageAge",
title="(approximate) age",
description=(
"The age of the relative at the time the family member history is "
"recorded."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e age[x]
one_of_many="age",
one_of_many_required=False,
)
ageRange: fhirtypes.RangeType = Field(
None,
alias="ageRange",
title="(approximate) age",
description=(
"The age of the relative at the time the family member history is "
"recorded."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e age[x]
one_of_many="age",
one_of_many_required=False,
)
ageString: fhirtypes.String = Field(
None,
alias="ageString",
title="(approximate) age",
description=(
"The age of the relative at the time the family member history is "
"recorded."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e age[x]
one_of_many="age",
one_of_many_required=False,
)
ageString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_ageString", title="Extension field for ``ageString``."
)
bornDate: fhirtypes.Date = Field(
None,
alias="bornDate",
title="(approximate) date of birth",
description="The actual or approximate date of birth of the relative.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e born[x]
one_of_many="born",
one_of_many_required=False,
)
bornDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_bornDate", title="Extension field for ``bornDate``."
)
bornPeriod: fhirtypes.PeriodType = Field(
None,
alias="bornPeriod",
title="(approximate) date of birth",
description="The actual or approximate date of birth of the relative.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e born[x]
one_of_many="born",
one_of_many_required=False,
)
bornString: fhirtypes.String = Field(
None,
alias="bornString",
title="(approximate) date of birth",
description="The actual or approximate date of birth of the relative.",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e born[x]
one_of_many="born",
one_of_many_required=False,
)
bornString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_bornString", title="Extension field for ``bornString``."
)
condition: typing.List[fhirtypes.FamilyMemberHistoryConditionType] = Field(
None,
alias="condition",
title="Condition that the related person had",
description=(
"The significant Conditions (or condition) that the family member had. "
"This is a repeating section to allow a system to represent more than "
"one condition per resource, though there is nothing stopping multiple "
"resources - one per condition."
),
# if property is element of this resource.
element_property=True,
)
dataAbsentReason: fhirtypes.CodeableConceptType = Field(
None,
alias="dataAbsentReason",
title="subject-unknown | withheld | unable-to-obtain | deferred",
description="Describes why the family member's history is not available.",
# if property is element of this resource.
element_property=True,
)
date: fhirtypes.DateTime = Field(
None,
alias="date",
title="When history was recorded or last updated",
description=(
"The date (and possibly time) when the family member history was "
"recorded or last updated."
),
# if property is element of this resource.
element_property=True,
)
date__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_date", title="Extension field for ``date``."
)
deceasedAge: fhirtypes.AgeType = Field(
None,
alias="deceasedAge",
title="Dead? How old/when?",
description=(
"Deceased flag or the actual or approximate age of the relative at the "
"time of death for the family member history record."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e deceased[x]
one_of_many="deceased",
one_of_many_required=False,
)
deceasedBoolean: bool = Field(
None,
alias="deceasedBoolean",
title="Dead? How old/when?",
description=(
"Deceased flag or the actual or approximate age of the relative at the "
"time of death for the family member history record."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e deceased[x]
one_of_many="deceased",
one_of_many_required=False,
)
deceasedBoolean__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_deceasedBoolean", title="Extension field for ``deceasedBoolean``."
)
deceasedDate: fhirtypes.Date = Field(
None,
alias="deceasedDate",
title="Dead? How old/when?",
description=(
"Deceased flag or the actual or approximate age of the relative at the "
"time of death for the family member history record."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e deceased[x]
one_of_many="deceased",
one_of_many_required=False,
)
deceasedDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_deceasedDate", title="Extension field for ``deceasedDate``."
)
deceasedRange: fhirtypes.RangeType = Field(
None,
alias="deceasedRange",
title="Dead? How old/when?",
description=(
"Deceased flag or the actual or approximate age of the relative at the "
"time of death for the family member history record."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e deceased[x]
one_of_many="deceased",
one_of_many_required=False,
)
deceasedString: fhirtypes.String = Field(
None,
alias="deceasedString",
title="Dead? How old/when?",
description=(
"Deceased flag or the actual or approximate age of the relative at the "
"time of death for the family member history record."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e deceased[x]
one_of_many="deceased",
one_of_many_required=False,
)
deceasedString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_deceasedString", title="Extension field for ``deceasedString``."
)
estimatedAge: bool = Field(
None,
alias="estimatedAge",
title="Age is estimated?",
description="If true, indicates that the age value specified is an estimated value.",
# if property is element of this resource.
element_property=True,
)
estimatedAge__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_estimatedAge", title="Extension field for ``estimatedAge``."
)
identifier: typing.List[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="External Id(s) for this record",
description=(
"Business identifiers assigned to this family member history by the "
"performer or other systems which remain constant as the resource is "
"updated and propagates from server to server."
),
# if property is element of this resource.
element_property=True,
)
instantiatesCanonical: typing.List[fhirtypes.Canonical] = Field(
None,
alias="instantiatesCanonical",
title="Instantiates FHIR protocol or definition",
description=(
"The URL pointing to a FHIR-defined protocol, guideline, orderset or "
"other definition that is adhered to in whole or in part by this "
"FamilyMemberHistory."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"PlanDefinition",
"Questionnaire",
"ActivityDefinition",
"Measure",
"OperationDefinition",
],
)
instantiatesCanonical__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(
None,
alias="_instantiatesCanonical",
title="Extension field for ``instantiatesCanonical``.",
)
instantiatesUri: typing.List[fhirtypes.Uri] = Field(
None,
alias="instantiatesUri",
title="Instantiates external protocol or definition",
description=(
"The URL pointing to an externally maintained protocol, guideline, "
"orderset or other definition that is adhered to in whole or in part by"
" this FamilyMemberHistory."
),
# if property is element of this resource.
element_property=True,
)
instantiatesUri__ext: typing.List[
typing.Union[fhirtypes.FHIRPrimitiveExtensionType, None]
] = Field(
None, alias="_instantiatesUri", title="Extension field for ``instantiatesUri``."
)
name: fhirtypes.String = Field(
None,
alias="name",
title="The family member described",
description=(
'This will either be a name or a description; e.g. "<NAME>", "my '
'cousin with the red hair".'
),
# if property is element of this resource.
element_property=True,
)
name__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_name", title="Extension field for ``name``."
)
note: typing.List[fhirtypes.AnnotationType] = Field(
None,
alias="note",
title="General note about related person",
description=(
"This property allows a non condition-specific note to the made about "
"the related person. Ideally, the note would be in the condition "
"property, but this is not always possible."
),
# if property is element of this resource.
element_property=True,
)
patient: fhirtypes.ReferenceType = Field(
...,
alias="patient",
title="Patient history is about",
description="The person who this history concerns.",
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Patient"],
)
reasonCode: typing.List[fhirtypes.CodeableConceptType] = Field(
None,
alias="reasonCode",
title="Why was family member history performed?",
description=(
"Describes why the family member history occurred in coded or textual "
"form."
),
# if property is element of this resource.
element_property=True,
)
reasonReference: typing.List[fhirtypes.ReferenceType] = Field(
None,
alias="reasonReference",
title="Why was family member history performed?",
description=(
"Indicates a Condition, Observation, AllergyIntolerance, or "
"QuestionnaireResponse that justifies this family member history event."
),
# if property is element of this resource.
element_property=True,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=[
"Condition",
"Observation",
"AllergyIntolerance",
"QuestionnaireResponse",
"DiagnosticReport",
"DocumentReference",
],
)
relationship: fhirtypes.CodeableConceptType = Field(
...,
alias="relationship",
title="Relationship to the subject",
description=(
"The type of relationship this person has to the patient (father, "
"mother, brother etc.)."
),
# if property is element of this resource.
element_property=True,
)
sex: fhirtypes.CodeableConceptType = Field(
None,
alias="sex",
title="male | female | other | unknown",
description="The birth sex of the family member.",
# if property is element of this resource.
element_property=True,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="partial | completed | entered-in-error | health-unknown",
description=(
"A code specifying the status of the record of the family history of a "
"specific family member."
),
# if property is element of this resource.
element_property=True,
element_required=True,
# note: Enum values can be used in validation,
# but use in your own responsibilities, read official FHIR documentation.
enum_values=["partial", "completed", "entered-in-error", "health-unknown"],
)
status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_status", title="Extension field for ``status``."
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``FamilyMemberHistory`` according specification,
with preserving original sequence order.
"""
return [
"id",
"meta",
"implicitRules",
"language",
"text",
"contained",
"extension",
"modifierExtension",
"identifier",
"instantiatesCanonical",
"instantiatesUri",
"status",
"dataAbsentReason",
"patient",
"date",
"name",
"relationship",
"sex",
"bornPeriod",
"bornDate",
"bornString",
"ageAge",
"ageRange",
"ageString",
"estimatedAge",
"deceasedBoolean",
"deceasedAge",
"deceasedRange",
"deceasedDate",
"deceasedString",
"reasonCode",
"reasonReference",
"note",
"condition",
]
@root_validator(pre=True, allow_reuse=True)
def validate_required_primitive_elements_2155(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/extensibility.html#Special-Case
In some cases, implementers might find that they do not have appropriate data for
an element with minimum cardinality = 1. In this case, the element must be present,
but unless the resource or a profile on it has made the actual value of the primitive
data type mandatory, it is possible to provide an extension that explains why
the primitive value is not present.
"""
required_fields = [("status", "status__ext")]
_missing = object()
def _fallback():
return ""
errors: typing.List["ErrorWrapper"] = []
for name, ext in required_fields:
field = cls.__fields__[name]
ext_field = cls.__fields__[ext]
value = values.get(field.alias, _missing)
if value not in (_missing, None):
continue
ext_value = values.get(ext_field.alias, _missing)
missing_ext = True
if ext_value not in (_missing, None):
if isinstance(ext_value, dict):
missing_ext = len(ext_value.get("extension", [])) == 0
elif (
getattr(ext_value.__class__, "get_resource_type", _fallback)()
== "FHIRPrimitiveExtension"
):
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
else:
validate_pass = True
for validator in ext_field.type_.__get_validators__():
try:
ext_value = validator(v=ext_value)
except ValidationError as exc:
errors.append(ErrorWrapper(exc, loc=ext_field.alias))
validate_pass = False
if not validate_pass:
continue
if ext_value.extension and len(ext_value.extension) > 0:
missing_ext = False
if missing_ext:
if value is _missing:
errors.append(ErrorWrapper(MissingError(), loc=field.alias))
else:
errors.append(
ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)
)
if len(errors) > 0:
raise ValidationError(errors, cls) # type: ignore
return values
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_2155(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"age": ["ageAge", "ageRange", "ageString"],
"born": ["bornDate", "bornPeriod", "bornString"],
"deceased": [
"deceasedAge",
"deceasedBoolean",
"deceasedDate",
"deceasedRange",
"deceasedString",
],
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class FamilyMemberHistoryCondition(backboneelement.BackboneElement):
"""Disclaimer: Any field name ends with ``__ext`` doesn't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Condition that the related person had.
The significant Conditions (or condition) that the family member had. This
is a repeating section to allow a system to represent more than one
condition per resource, though there is nothing stopping multiple resources
- one per condition.
"""
resource_type = Field("FamilyMemberHistoryCondition", const=True)
code: fhirtypes.CodeableConceptType = Field(
...,
alias="code",
title="Condition suffered by relation",
description=(
"The actual condition specified. Could be a coded condition (like MI or"
" Diabetes) or a less specific string like 'cancer' depending on how "
"much is known about the condition and the capabilities of the creating"
" system."
),
# if property is element of this resource.
element_property=True,
)
contributedToDeath: bool = Field(
None,
alias="contributedToDeath",
title="Whether the condition contributed to the cause of death",
description=(
"This condition contributed to the cause of death of the related "
"person. If contributedToDeath is not populated, then it is unknown."
),
# if property is element of this resource.
element_property=True,
)
contributedToDeath__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_contributedToDeath",
title="Extension field for ``contributedToDeath``.",
)
note: typing.List[fhirtypes.AnnotationType] = Field(
None,
alias="note",
title="Extra information about condition",
description=(
"An area where general notes can be placed about this specific "
"condition."
),
# if property is element of this resource.
element_property=True,
)
onsetAge: fhirtypes.AgeType = Field(
None,
alias="onsetAge",
title="When condition first manifested",
description=(
"Either the age of onset, range of approximate age or descriptive "
"string can be recorded. For conditions with multiple occurrences, "
"this describes the first known occurrence."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e onset[x]
one_of_many="onset",
one_of_many_required=False,
)
onsetPeriod: fhirtypes.PeriodType = Field(
None,
alias="onsetPeriod",
title="When condition first manifested",
description=(
"Either the age of onset, range of approximate age or descriptive "
"string can be recorded. For conditions with multiple occurrences, "
"this describes the first known occurrence."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e onset[x]
one_of_many="onset",
one_of_many_required=False,
)
onsetRange: fhirtypes.RangeType = Field(
None,
alias="onsetRange",
title="When condition first manifested",
description=(
"Either the age of onset, range of approximate age or descriptive "
"string can be recorded. For conditions with multiple occurrences, "
"this describes the first known occurrence."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e onset[x]
one_of_many="onset",
one_of_many_required=False,
)
onsetString: fhirtypes.String = Field(
None,
alias="onsetString",
title="When condition first manifested",
description=(
"Either the age of onset, range of approximate age or descriptive "
"string can be recorded. For conditions with multiple occurrences, "
"this describes the first known occurrence."
),
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e onset[x]
one_of_many="onset",
one_of_many_required=False,
)
onsetString__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_onsetString", title="Extension field for ``onsetString``."
)
outcome: fhirtypes.CodeableConceptType = Field(
None,
alias="outcome",
title="deceased | permanent disability | etc.",
description=(
"Indicates what happened following the condition. If the condition "
"resulted in death, deceased date is captured on the relation."
),
# if property is element of this resource.
element_property=True,
)
@classmethod
def elements_sequence(cls):
"""returning all elements names from
``FamilyMemberHistoryCondition`` according specification,
with preserving original sequence order.
"""
return [
"id",
"extension",
"modifierExtension",
"code",
"outcome",
"contributedToDeath",
"onsetAge",
"onsetRange",
"onsetPeriod",
"onsetString",
"note",
]
@root_validator(pre=True, allow_reuse=True)
def validate_one_of_many_3079(
cls, values: typing.Dict[str, typing.Any]
) -> typing.Dict[str, typing.Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"onset": ["onsetAge", "onsetPeriod", "onsetRange", "onsetString"]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
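    # Illustrative sketch (assumption: the usual pydantic keyword construction for
    # these generated models; placeholder values are hypothetical): the validator
    # above allows at most one onset[x] variant per instance.
    #
    #     FamilyMemberHistoryCondition(code=some_code, onsetString="childhood")   # ok
    #     FamilyMemberHistoryCondition(code=some_code,
    #                                  onsetAge=some_age,
    #                                  onsetString="childhood")                   # raises ValueError
    #
    # Populating none of the onset[x] fields also validates, since
    # ``one_of_many_required`` is False.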
|
documentation/test_python/inspect_underscored/inspect_underscored/__init__.py | Ryan-rsm-McKenzie/m.css | 367 | 12624237 | """..
:data _DATA_IN_MODULE: In-module documented underscored data. This won't be
picked up by the initial crawl, unfortunately, as the docstrings are
processed much later.
"""
import enum
from . import _submodule, _submodule_external, _submodule_undocumented
class _Class:
"""Documented underscored class"""
class _ClassExternal: pass
class _ClassUndocumented: pass
class _Enum(enum.Enum):
"""Documented underscored enum"""
class _EnumExternal(enum.Enum): pass
class _EnumUndocumented(enum.Enum): pass
def _function():
"""Documented undercored function"""
def _function_external(): pass
def _function_undocumented(): pass
_DATA_IN_MODULE: int = 0
_DATA_EXTERNAL: int = 1
_DATA_EXTERNAL_IN_MODULE: int = 2
_DATA_UNDOCUMENTED: int = 3
class Class:
"""..
:property _property_in_class: In-class documented underscored property.
This won't be picked up by the initial crawl, unfortunately, as the
docstrings are processed much later.
:data _DATA_IN_CLASS: In-class documented underscored data. This won't be
picked up by the initial crawl, unfortunately, as the docstrings are
processed much later.
:data _DATA_DECLARATION_IN_CLASS: In-class documented underscored data.
This won't be picked up by the initial crawl, unfortunately, as the
docstrings are processed much later.
"""
def _function(self):
"""Documented underscored function"""
def _function_external(self): pass
def _function_undocumented(self): pass
@property
def _property(self):
"""Documented underscored property"""
@property
def _property_in_class(self): pass
@property
def _property_external(self): pass
@property
def _property_external_in_class(self): pass
@property
def _property_undocumented(self): pass
_DATA_IN_CLASS: int = 4
_DATA_EXTERNAL: int = 5
_DATA_EXTERNAL_IN_CLASS: int = 6
_DATA_UNDOCUMENTED: int = 7
_DATA_DECLARATION_IN_CLASS: float
_DATA_DECLARATION_EXTERNAL: float
_DATA_DECLARATION_EXTERNAL_IN_CLASS: float
_DATA_DECLARATION_UNDOCUMENTED: float
|
icevision/models/ross/efficientdet/fastai/__init__.py | ai-fast-track/mantisshrimp | 580 | 12624242 | from icevision.models.ross.efficientdet.fastai.callbacks import *
from icevision.models.ross.efficientdet.fastai.learner import *
|
tests/t-mime-type.py | kurtace72/lighttpd2 | 395 | 12624263 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from base import *
from requests import *
class TestMimeType1(CurlRequest):
URL = "/test.txt"
EXPECT_RESPONSE_BODY = ""
EXPECT_RESPONSE_CODE = 200
EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/plain; charset=utf-8") ]
class TestMimeType2(CurlRequest):
URL = "/test.xt"
EXPECT_RESPONSE_BODY = ""
EXPECT_RESPONSE_CODE = 200
EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/plain") ]
class TestMimeType3(CurlRequest):
URL = "/test.rxt"
EXPECT_RESPONSE_BODY = ""
EXPECT_RESPONSE_CODE = 200
EXPECT_RESPONSE_HEADERS = [ ("Content-Type", "text/strange") ]
class Test(GroupTest):
group = [TestMimeType1,TestMimeType2,TestMimeType3]
def Prepare(self):
self.PrepareVHostFile("test.txt", "")
self.PrepareVHostFile("test.xt", "")
self.PrepareVHostFile("test.rxt", "")
self.config = """
mime_types (
".txt" => "text/plain; charset=utf-8",
".xt" => "text/plain",
".rxt" => "text/strange",
"xt" => "should-not-trigger"
);
"""
|
caffe2/python/layers/merge_id_lists.py | KevinKecc/caffe2 | 585 | 12624276 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
IdList
)
import numpy as np
class MergeIdLists(ModelLayer):
"""Merge multiple ID_LISTs into a single ID_LIST
Arguments:
model: A layer model instance
input_record: Tuple (Struct) of ID_LIST features to be
merged
Returns:
the merged ID_LIST feature
"""
def __init__(self, model, input_record, name='merged'):
super(MergeIdLists, self).__init__(model, name, input_record)
assert all(schema.equal_schemas(x, IdList) for x in input_record), \
"Inputs to MergeIdLists should all be IdLists."
assert all(record.items.metadata is not None
for record in self.input_record), \
"Features without metadata are not supported"
merge_dim = max(get_categorical_limit(record)
for record in self.input_record)
assert merge_dim is not None, "Unbounded features are not supported"
self.output_schema = schema.NewRecord(
model.net, schema.List(
schema.Scalar(
np.int64,
blob=model.net.NextBlob(name),
metadata=schema.Metadata(categorical_limit=merge_dim)
)))
def add_ops(self, net):
return net.MergeIdLists(self.input_record.field_blobs(),
self.output_schema.field_blobs())
|
Geometry/CMSCommonData/python/cmsRecoIdealGeometryXML_cff.py | ckamtsikis/cmssw | 852 | 12624277 | import FWCore.ParameterSet.Config as cms
from Geometry.CMSCommonData.cmsRecoIdealGeometryXML_cfi import *
from Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi import *
from Geometry.EcalCommonData.ecalSimulationParameters_cff import *
from Geometry.HcalCommonData.hcalDDDSimConstants_cff import *
from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *
from Geometry.MuonNumbering.muonNumberingInitialization_cfi import *
|
dirigible/fts/tests/test_2799_FillDownDuringPaste.py | EnoX1/dirigible-spreadsheet | 168 | 12624293 | # Copyright (c) 2010 Resolver Systems Ltd.
# All Rights Reserved
#
from functionaltest import FunctionalTest
import key_codes
class Test_2799_fill_down_during_paste(FunctionalTest):
def wait_for_cell_to_contain_formula(self, column, row, formula):
self.open_cell_for_editing(column, row)
self.wait_for_cell_editor_content(formula)
self.human_key_press(key_codes.ENTER)
def test_fill_down_formula_with_copy_and_paste(self):
# * Harold logs in to dirigible, his favourite cloud computing tool
self.login_and_create_new_sheet()
# * Harold populates a table full of data
self.enter_cell_text(1, 1, '1')
self.enter_cell_text(2, 1, '1')
self.enter_cell_text(1, 2, '2')
self.enter_cell_text(2, 2, '2')
self.enter_cell_text(1, 3, '3')
self.enter_cell_text(2, 3, '3')
# * He writes a function to sum the values in a row
self.enter_cell_text(3, 1, '=A1+B1')
# * He uses copy & paste to 'fill down' his formula
self.copy_range((3, 1), (3, 1))
self.paste_range((3, 2), (3, 3))
self.wait_for_cell_to_contain_formula(3, 2, '=A2+B2')
self.wait_for_cell_to_contain_formula(3, 3, '=A3+B3')
def test_fill_down_2x2_clipboard_into_3x5_selection(self):
# * Harold logs in to dirigible, his favourite cloud computing tool
self.login_and_create_new_sheet()
# * Harold populates a table full of data
self.enter_cell_text(1, 1, '=C1')
self.enter_cell_text(2, 1, '=D1')
self.enter_cell_text(1, 2, '=C2')
self.enter_cell_text(2, 2, '=D2')
# * He uses copy & paste to 'fill down' his formula
self.copy_range((1, 1), (2, 2))
self.paste_range((3, 3), (5, 7))
self.wait_for_cell_to_contain_formula(3, 3, '=E3')
self.wait_for_cell_to_contain_formula(4, 3, '=F3')
self.wait_for_cell_to_contain_formula(5, 3, '=G3')
self.wait_for_cell_to_contain_formula(3, 4, '=E4')
self.wait_for_cell_to_contain_formula(4, 4, '=F4')
self.wait_for_cell_to_contain_formula(5, 4, '=G4')
self.wait_for_cell_to_contain_formula(3, 5, '=E5')
self.wait_for_cell_to_contain_formula(4, 5, '=F5')
self.wait_for_cell_to_contain_formula(5, 5, '=G5')
self.wait_for_cell_to_contain_formula(3, 6, '=E6')
self.wait_for_cell_to_contain_formula(4, 6, '=F6')
self.wait_for_cell_to_contain_formula(5, 6, '=G6')
self.wait_for_cell_to_contain_formula(3, 7, '=E7')
self.wait_for_cell_to_contain_formula(4, 7, '=F7')
self.wait_for_cell_to_contain_formula(5, 7, '=G7')
self.wait_for_cell_to_contain_formula(3, 8, '')
self.wait_for_cell_to_contain_formula(4, 8, '')
self.wait_for_cell_to_contain_formula(5, 8, '')
self.wait_for_cell_to_contain_formula(6, 3, '')
self.wait_for_cell_to_contain_formula(6, 4, '')
self.wait_for_cell_to_contain_formula(6, 5, '')
self.wait_for_cell_to_contain_formula(6, 6, '')
self.wait_for_cell_to_contain_formula(6, 7, '')
|
dynaconf/vendor/click/_unicodefun.py | sephiartlist/dynaconf | 2,293 | 12624294 | import codecs,os
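# Minified vendored copy of Click's ``_unicodefun`` module: ``_verify_python_env``
# raises a RuntimeError with locale hints when Python is configured to use an
# ASCII encoding for the environment.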
def _verify_python_env():
M='.utf8';L='.utf-8';J=None;I='ascii'
try:import locale as A;G=codecs.lookup(A.getpreferredencoding()).name
except Exception:G=I
if G!=I:return
B=''
if os.name=='posix':
import subprocess as D
try:C=D.Popen(['locale','-a'],stdout=D.PIPE,stderr=D.PIPE).communicate()[0]
except OSError:C=b''
E=set();H=False
if isinstance(C,bytes):C=C.decode(I,'replace')
for K in C.splitlines():
A=K.strip()
if A.lower().endswith((L,M)):
E.add(A)
if A.lower()in('c.utf8','c.utf-8'):H=True
B+='\n\n'
if not E:B+='Additional information: on this system no suitable UTF-8 locales were discovered. This most likely requires resolving by reconfiguring the locale system.'
elif H:B+='This system supports the C.UTF-8 locale which is recommended. You might be able to resolve your issue by exporting the following environment variables:\n\n export LC_ALL=C.UTF-8\n export LANG=C.UTF-8'
else:B+=f"This system lists some UTF-8 supporting locales that you can pick from. The following suitable locales were discovered: {', '.join(sorted(E))}"
F=J
for A in (os.environ.get('LC_ALL'),os.environ.get('LANG')):
if A and A.lower().endswith((L,M)):F=A
if A is not J:break
if F is not J:B+=f"\n\nClick discovered that you exported a UTF-8 locale but the locale system could not pick up from it because it does not exist. The exported locale is {F!r} but it is not supported"
raise RuntimeError(f"Click will abort further execution because Python was configured to use ASCII as encoding for the environment. Consult https://click.palletsprojects.com/unicode-support/ for mitigation steps.{B}") |
thrift/lib/py3/test/interactions/interaction_test.py | sakibguy/fbthrift | 2,112 | 12624330 | <filename>thrift/lib/py3/test/interactions/interaction_test.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import unittest
from blank_interaction.services import BlankServiceInterface
from interaction.clients import Calculator
from interaction.types import Point
from thrift.py3.client import get_client, ClientType
from .run_interaction import run_interaction
class InteractionTest(unittest.TestCase):
def setUp(self) -> None:
self.interaction = run_interaction()
def init_client(self) -> Calculator:
return get_client(
Calculator,
port=self.interaction.getPort(),
host="::1",
client_type=ClientType.THRIFT_ROCKET_CLIENT_TYPE,
)
def tearDown(self) -> None:
self.interaction.reset()
def test_basic(self) -> None:
async def inner_test() -> None:
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 0), 0)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(1)
self.assertEqual(await add.getPrimitive(), 1)
point = await add.getPoint()
self.assertEqual(point.x, 0)
self.assertEqual(point.y, 0)
newPoint = Point(x=2, y=3)
await add.accumulatePoint(newPoint)
point = await add.getPoint()
self.assertEqual(point.x, 2)
self.assertEqual(point.y, 3)
await add.noop()
asyncio.run(inner_test())
def test_multiple_interactions(self) -> None:
async def inner_test() -> None:
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 0), 0)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(1)
self.assertEqual(await add.getPrimitive(), 1)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(2)
self.assertEqual(await add.getPrimitive(), 2)
asyncio.run(inner_test())
def test_multiple_clients(self) -> None:
async def inner_test() -> None:
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 0), 0)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(1)
self.assertEqual(await add.getPrimitive(), 1)
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 1), 1)
async with calc.createAddition() as add:
self.assertEqual(await add.getPrimitive(), 0)
add.accumulatePrimitive(2)
self.assertEqual(await add.getPrimitive(), 2)
asyncio.run(inner_test())
def test_terminate_unused(self) -> None:
async def inner_test() -> None:
async with self.init_client() as calc:
async with calc.createAddition() as _:
pass
asyncio.run(inner_test())
def test_terminate_client_error(self) -> None:
class SpecificError(Exception):
pass
async def inner_test() -> None:
try:
async with self.init_client() as calc:
self.assertEqual(await calc.addPrimitive(0, 0), 0)
async with calc.createAddition() as add:
add.accumulatePrimitive(1)
raise SpecificError("Generic error")
except SpecificError:
pass
else:
self.fail("Didn't throw SpecificError")
asyncio.run(inner_test())
|
ml-agents-envs/mlagents_envs/registry/base_registry_entry.py | bobcy2015/ml-agents | 13,653 | 12624360 | from abc import abstractmethod
from typing import Any, Optional
from mlagents_envs.base_env import BaseEnv
class BaseRegistryEntry:
def __init__(
self,
identifier: str,
expected_reward: Optional[float],
description: Optional[str],
):
"""
BaseRegistryEntry allows launching a Unity Environment with its make method.
:param identifier: The name of the Unity Environment.
:param expected_reward: The cumulative reward that an Agent must receive
for the task to be considered solved.
:param description: A description of the Unity Environment. Contains human
readable information about potential special arguments that the make method can
take as well as information regarding the observation, reward, actions,
behaviors and number of agents in the Environment.
"""
self._identifier = identifier
self._expected_reward = expected_reward
self._description = description
@property
def identifier(self) -> str:
"""
The unique identifier of the entry
"""
return self._identifier
@property
def expected_reward(self) -> Optional[float]:
"""
The cumulative reward that an Agent must receive for the task to be considered
solved.
"""
return self._expected_reward
@property
def description(self) -> Optional[str]:
"""
A description of the Unity Environment the entry can make.
"""
return self._description
@abstractmethod
def make(self, **kwargs: Any) -> BaseEnv:
"""
This method creates a Unity BaseEnv (usually a UnityEnvironment).
"""
raise NotImplementedError(
f"The make() method not implemented for entry {self.identifier}"
)
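# Illustrative sketch (not part of the original module): a minimal concrete entry.
# ``_ExampleEntry`` and ``_launch_example_env`` are hypothetical names used only to
# show how ``make`` is meant to be overridden; a real entry would return a
# ``UnityEnvironment`` or another ``BaseEnv`` implementation here.
#
#     class _ExampleEntry(BaseRegistryEntry):
#         def __init__(self) -> None:
#             super().__init__(
#                 "ExampleEnv",
#                 expected_reward=0.9,
#                 description="Example registry entry",
#             )
#
#         def make(self, **kwargs: Any) -> BaseEnv:
#             return _launch_example_env(**kwargs)  # hypothetical factory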
|
flask_aiohttp/__init__.py | Hardtack/Flask-aiohttp | 142 | 12624366 | """:mod:`flask_aiohttp` --- Asynchronous Flask with aiohttp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides Flask extension for asynchronous I/O.
With this extension, we can use `asyncio.coroutine` as Flask's view function.
So, we can add
`asyncio-redis <https://github.com/jonathanslenders/asyncio-redis>`_, or
websocket support to your application.
To make a view asynchronous, simply add the :func:`helper.async` decorator to
your view function ::
@app.route('/foo')
@async
def lazy():
yield from asyncio.sleep(3)
return 'Done'
You have to run your flask application with :class:`AioHTTP` ::
aio = AioHTTP(app)
aio.run(app)
And you can also use gunicorn ::
aio = AioHTTP(flask_app)
app = aio.create_aiohttp_app(flask_app)
# Run gunicorn by
#
# gunicorn your_module:app -k aiohttp.worker.GunicornWebWorker
# -b localhost:8080
You can even use aiohttp's websocket in your Flask application using
:func:`helper.websocket` ::
aio = AioHTTP(flask_app)
@app.route('echo')
@websocket
def echo():
while True:
msg = yield from aio.ws.receive_msg()
if msg.tp == aiohttp.MsgType.text:
aio.ws.send_str(msg.data)
elif msg.tp == aiohttp.MsgType.close:
print('websocket connection closed')
break
elif msg.tp == aiohttp.MsgType.error:
print('ws connection closed with exception %s',
aio.ws.exception())
break
"""
import os
import asyncio
import logging
import flask
import aiohttp.web
from flask import request
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_with_reloader
from .helper import async, websocket, has_websocket, wrap_wsgi_middleware
from .handler import WSGIHandlerBase, WSGIWebSocketHandler
__all__ = ['AioHTTP', 'async', 'websocket', 'has_websocket',
'wrap_wsgi_middleware']
class AioHTTP(object):
"""Flask middleware for aiohttp"""
def __init__(self, app: flask.Flask=None, *,
handler_factory=WSGIWebSocketHandler):
"""
:param app:
Flask application
:param handler_factory:
aiohttp request handler factory. Factory should accept a single
flask application.
"""
self.handler_factory = handler_factory
if app is not None:
self.init_app(app)
def init_app(self, app: flask.Flask):
"""Init Flask app
:param app: Flask application
"""
app.aiohttp_app = self.create_aiohttp_app(app)
def create_aiohttp_app(self, app: flask.Flask) -> aiohttp.web.Application:
"""Create aiohttp web application from Flask application
:param app: Flask application
:returns: aiohttp web application
"""
# aiohttp web application instance
aio_app = aiohttp.web.Application()
# WSGI handler for aiohttp
wsgi_handler = self.handler_factory(app)
# aiohttp's router should accept any possible HTTP method of request.
aio_app.router.add_route('*', r'/{path:.*}', wsgi_handler)
return aio_app
@staticmethod
def run(app: flask.Flask, *,
host='127.0.0.1', port=None, debug=False, loop=None):
"""Run Flask application on aiohttp
:param app: Flask application
:param host: host name or ip
:param port: port (default is 5000)
:param debug: debug?
"""
# Check initialization status of flask app.
if getattr(app, 'aiohttp_app', None) is None:
raise RuntimeError(
"This application is not initialized for Flask-aiohttp. "
"Please initialize the app by `aio.init_app(app)`.")
# Configure args
if port is None:
server_name = app.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[-1])
else:
port = 5000
loop = loop or asyncio.get_event_loop()
# Define run_server
def run_server():
# run_server can be called in another thread
asyncio.set_event_loop(loop)
coroutine = loop.create_server(
app.aiohttp_app.make_handler(), host, port)
loop.run_until_complete(coroutine)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Configure logging
file_handler = logging.StreamHandler()
app.logger.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
if debug:
# Logging
app.logger.setLevel(logging.DEBUG)
# Wrap WSGI app with werkzeug debugger.
app.wsgi_app = wrap_wsgi_middleware(DebuggedApplication)(
app.wsgi_app)
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
app.logger.info(' * Running on http://{}:{}/'
.format(host, port))
# Run with reloader
run_with_reloader(run_server)
else:
app.logger.info(' * Running on http://{}:{}/'.format(host, port))
run_server()
@property
def ws(self) -> aiohttp.web.WebSocketResponse:
"""Websocket response of aiohttp"""
ws = request.environ.get('wsgi.websocket', None)
if ws is None:
raise RuntimeError('Request context is not a WebSocket context.')
return ws
|
tino/server.py | NotSoSmartDev/Tino | 143 | 12624383 | import uvicorn
class Server(uvicorn.Server):
async def startup(self, sockets=None):
await super().startup(sockets=sockets)
for f in self.config.loaded_app.startup_funcs:
await f()
async def shutdown(self, sockets=None):
await super().shutdown(sockets=sockets)
for f in self.config.loaded_app.shutdown_funcs:
await f()
|
IO/Exodus/Testing/Python/TestExodusWithNaN.py | satya-arjunan/vtk8 | 1,755 | 12624418 | #!/usr/bin/env python
# This test loads an Exodus file containing NaNs and checks that the
# vtkDataArray returns a correct range for the array, i.e. a range that
# excludes the NaNs.
from vtk import *
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
rdr = vtkExodusIIReader()
rdr.SetFileName(str(VTK_DATA_ROOT) + "/Data/cyl_with_NaN.g")
rdr.UpdateInformation()
rdr.SetPointResultArrayStatus("dist_from_origin", 1);
rdr.Update()
data = rdr.GetOutput().GetBlock(0).GetBlock(0)
# Test that this dataset with NaNs gets a correct range i.e. range without NaNs
# in it.
drange = data.GetPointData().GetArray("dist_from_origin").GetRange()[:]
print("'dist_from_origin' Range: ", drange)
assert (abs(drange[0] - 0.5) < 1e-3) and (abs(drange[1] - 1.118) < 1e-3)
|
backend/services/task_annotations_service.py | majdal/tasking-manager | 421 | 12624423 | from backend.models.postgis.task_annotation import TaskAnnotation
from backend.models.postgis.utils import timestamp
class TaskAnnotationsService:
@staticmethod
def add_or_update_annotation(annotation, project_id, annotation_type):
""" Takes a json of tasks and create annotations in the db """
task_id = annotation["taskId"]
source = annotation.get("annotationSource", None)
markdown = annotation.get("annotationMarkdown", None)
task_annotation = TaskAnnotation(
task_id,
project_id,
annotation_type,
annotation["properties"],
source,
markdown,
)
# check if the task has this annotation_type
existing_annotation = TaskAnnotation.get_task_annotation(
task_id, project_id, annotation_type
)
if existing_annotation:
# update this annotation
existing_annotation.properties = task_annotation.properties
existing_annotation.updated_timestamp = timestamp()
existing_annotation.update()
else:
# add this annotation
task_annotation.create()
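    # Illustrative sketch (hypothetical values, not from the original module):
    # the expected payload shape follows the keys read above.
    #
    #     TaskAnnotationsService.add_or_update_annotation(
    #         {"taskId": 1, "properties": {"building_area_diff": 12.5}},
    #         project_id=1234,
    #         annotation_type="BUILDING_AREA",
    #     )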
|
plasmapy/formulary/tests/test_magnetostatics.py | seanjunheng2/PlasmaPy | 429 | 12624510 | <filename>plasmapy/formulary/tests/test_magnetostatics.py<gh_stars>100-1000
import numpy as np
import pytest
from astropy import constants
from astropy import units as u
from plasmapy.formulary.magnetostatics import (
CircularWire,
FiniteStraightWire,
GeneralWire,
InfiniteStraightWire,
MagneticDipole,
)
mu0_4pi = constants.mu0 / 4 / np.pi
class Test_MagneticDipole:
def setup_method(self):
self.moment = np.array([0, 0, 1]) * u.A * u.m * u.m
self.p0 = np.array([0, 0, 0]) * u.m
def test_value1(self):
"Test a known solution"
p = np.array([1, 0, 0])
B1 = MagneticDipole(self.moment, self.p0).magnetic_field(p)
B1_expected = np.array([0, 0, -1]) * 1e-7 * u.T
assert np.all(np.isclose(B1.value, B1_expected.value))
assert B1.unit == u.T
def test_value2(self):
"Test a known solution"
p = np.array([0, 0, 1])
B2 = MagneticDipole(self.moment, self.p0).magnetic_field(p)
B2_expected = np.array([0, 0, 2]) * 1e-7 * u.T
assert np.all(np.isclose(B2.value, B2_expected.value))
assert B2.unit == u.T
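    # For reference, both expectations follow from the point-dipole field
    # B = mu0/(4*pi) * (3*(m.rhat)*rhat - m) / r**3: at the equatorial point
    # (1, 0, 0) this gives -mu0*|m|/(4*pi) = -1e-7 T along z, and on the axis
    # at (0, 0, 1) it gives +2e-7 T along z.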
def test_repr(self):
"Test __repr__ function"
B1 = MagneticDipole(self.moment, self.p0)
assert repr(B1) == r"MagneticDipole(moment=[0. 0. 1.]A m2, p0=[0. 0. 0.]m)"
class Test_GeneralWire:
def setup_method(self):
self.cw = CircularWire(
np.array([0, 0, 1]), np.array([0, 0, 0]) * u.m, 1 * u.m, 1 * u.A
)
p1 = np.array([0.0, 0.0, 0.0]) * u.m
p2 = np.array([0.0, 0.0, 1.0]) * u.m
self.fw = FiniteStraightWire(p1, p2, 1 * u.A)
def test_not_callable(self):
"Test that `GeneralWire` raises `ValueError` if its first argument is not callale"
with pytest.raises(ValueError):
GeneralWire("wire", 0, 1, 1 * u.A)
def test_close_cw(self):
"Test if the GeneralWire is close to the CircularWire it converted from"
gw_cw = self.cw.to_GeneralWire()
p = np.array([0, 0, 0])
B_cw = self.cw.magnetic_field(p)
B_gw_cw = gw_cw.magnetic_field(p)
assert np.all(np.isclose(B_cw.value, B_gw_cw.value))
assert B_cw.unit == B_gw_cw.unit
def test_repr(self):
"Test __repr__ function"
gw_cw = self.cw.to_GeneralWire()
# round numbers to avoid calculation accuracy mismatch
gw_cw.t1 = -3.1516
gw_cw.t2 = +3.1516
assert (
repr(gw_cw)
== r"GeneralWire(parametric_eq=curve, t1=-3.1516, t2=3.1516, current=1.0A)"
)
def test_close_fw(self):
"Test if the GeneralWire is close to the FiniteWire it converted from"
gw_fw = self.fw.to_GeneralWire()
p = np.array([1, 0, 0])
B_fw = self.fw.magnetic_field(p)
B_gw_fw = gw_fw.magnetic_field(p)
assert np.all(np.isclose(B_fw.value, B_gw_fw.value))
assert B_fw.unit == B_gw_fw.unit
def test_value_error(self):
"Test GeneralWire raise ValueError when argument t1>t2"
with pytest.raises(ValueError):
gw_cw = GeneralWire(lambda t: [0, 0, t], 2, 1, 1.0 * u.A)
class Test_FiniteStraightWire:
def setup_method(self):
self.p1 = np.array([0.0, 0.0, -1.0]) * u.m
self.p2 = np.array([0.0, 0.0, 1.0]) * u.m
self.current = 1 * u.A
def test_same_point(self):
"Test that `FintiteStraightWire` raises `ValueError` if p1 == p2"
with pytest.raises(ValueError):
FiniteStraightWire(self.p1, self.p1, self.current)
def test_value1(self):
"Test a known solution"
fw = FiniteStraightWire(self.p1, self.p2, self.current)
B1 = fw.magnetic_field([1, 0, 0])
B1_expected = np.array([0, np.sqrt(2), 0]) * 1e-7 * u.T
assert np.all(np.isclose(B1.value, B1_expected.value))
assert B1.unit == u.T
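    # For reference, the expected value follows from the Biot-Savart result for
    # a finite segment, B = mu0*I/(4*pi*d) * (sin(theta2) - sin(theta1)): with
    # d = 1 m and sin(theta2) = -sin(theta1) = 1/sqrt(2), this is
    # sqrt(2) * 1e-7 T along +y.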
def test_repr(self):
"Test __repr__ function"
fw = FiniteStraightWire(self.p1, self.p2, self.current)
assert (
repr(fw)
== r"FiniteStraightWire(p1=[ 0. 0. -1.]m, p2=[0. 0. 1.]m, current=1.0A)"
)
class Test_InfiniteStraightWire:
def setup_method(self):
self.direction = np.array([0, 1, 0])
self.p0 = np.array([0, 0, 0]) * u.m
self.current = 1 * u.A
def test_value1(self):
"Test a known solution"
iw = InfiniteStraightWire(self.direction, self.p0, self.current)
B1 = iw.magnetic_field([1, 0, 0])
B1_expected = np.array([0, 0, -2]) * 1e-7 * u.T
assert np.all(np.isclose(B1.value, B1_expected.value))
assert B1.unit == u.T
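    # For reference, an infinite straight wire gives B = mu0*I/(2*pi*d); with
    # d = 1 m and I = 1 A this is 2e-7 T, directed along -z at (1, 0, 0) by the
    # right-hand rule for a current flowing along +y.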
def test_repr(self):
"Test __repr__ function"
iw = InfiniteStraightWire(self.direction, self.p0, self.current)
assert (
repr(iw)
== r"InfiniteStraightWire(direction=[0. 1. 0.], p0=[0. 0. 0.]m, current=1.0A)"
)
class Test_CircularWire:
def setup_method(self):
self.normalz = np.array([0, 0, 1])
self.normalx = np.array([1, 0, 0])
self.center = np.array([0, 0, 0]) * u.m
self.radius = 1 * u.m
self.current = 1 * u.A
def test_negative_radius(self):
"Test that `FintiteStraightWire` raises `ValueError` if radius < 0"
with pytest.raises(ValueError):
CircularWire(self.normalz, self.center, -1.0 * u.m, self.current)
def test_value1(self):
"Test a known solution"
cw = CircularWire(self.normalz, self.center, self.radius, self.current)
B1 = cw.magnetic_field([0, 0, 1])
B1_expected = np.array([0, 0, 1]) * 2 * np.pi / 2 ** 1.5 * 1e-7 * u.T
assert np.all(np.isclose(B1.value, B1_expected.value))
assert B1.unit == u.T
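    # For reference, the on-axis field of a circular loop is
    # B = mu0*I*R**2 / (2*(R**2 + z**2)**1.5); with R = z = 1 m and I = 1 A this
    # is (4*pi*1e-7) / (2*2**1.5) = 2*pi/2**1.5 * 1e-7 T, as expected above.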
def test_value2(self):
"Test a known solution"
cw = CircularWire(self.normalx, self.center, self.radius, self.current)
B2 = cw.magnetic_field([1, 0, 0])
B2_expected = np.array([1, 0, 0]) * 2 * np.pi / 2 ** 1.5 * 1e-7 * u.T
assert np.all(np.isclose(B2.value, B2_expected.value))
assert B2.unit == u.T
def test_repr(self):
"Test __repr__ function"
cw = CircularWire(self.normalz, self.center, self.radius, self.current)
assert (
repr(cw)
== r"CircularWire(normal=[0. 0. 1.], center=[0. 0. 0.]m, radius=1.0m, current=1.0A)"
)
|
test/paysage/batch/test_shuffle.py | fyumoto/RBMs | 124 | 12624531 | <filename>test/paysage/batch/test_shuffle.py
import tempfile
import numpy as np
import pandas as pd
from paysage import batch
import pytest
def test_shuffle():
# create temporary files
file_original = tempfile.NamedTemporaryFile()
file_shuffle = tempfile.NamedTemporaryFile()
# create data
num_rows = 10000
num_cols_A = 100
num_cols_B = 1
df_A = pd.DataFrame(np.arange(num_rows*num_cols_A).reshape(num_rows, num_cols_A),
columns=['col_{}'.format(i) for i in np.arange(num_cols_A)],
index=['ix_{}'.format(i) for i in np.arange(num_rows)])
df_B = pd.DataFrame(np.arange(num_rows*num_cols_B).reshape(num_rows, num_cols_B),
columns=['col_{}'.format(i) for i in np.arange(num_cols_B)],
index=['ix_{}'.format(i) for i in np.arange(num_rows)])
# save it
store = pd.HDFStore(file_original.name, mode='w', format='table')
store.append("A", df_A)
store.append("B", df_B)
store.close()
# shuffle it, with an artificially low memory limit
shuffler = batch.DataShuffler(file_original.name, file_shuffle.name,
allowed_mem=0.001)
shuffler.shuffle()
# read the shuffled data
df_As = pd.read_hdf(file_shuffle.name, "A")
df_Bs = pd.read_hdf(file_shuffle.name, "B")
# check the two shuffles are consistent
assert (df_As.index == df_Bs.index).all()
assert (df_As['col_0'] // num_cols_A == df_Bs['col_0'] // num_cols_B).all()
# check that the shuffles preserve the index
ix_A_orig = sorted(list(df_A.index))
ix_A_shuffled = sorted(list(df_As.index))
assert ix_A_orig == ix_A_shuffled
# check a couple of statistics
vals_B = df_B['col_0'].values
vals_Bs = df_Bs['col_0'].values
# the number of fixed points tends to a Poisson distribution with e.v. = 1
assert (vals_B == vals_Bs).sum() < 5
# the difference between values (using the natural numbers as values)
# is a triangular distribution centered at 0. Can check the variance.
diff_dist_std = (vals_B - vals_Bs).std()
assert np.abs(diff_dist_std / (num_rows / np.sqrt(6)) - 1) < 0.05
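    # For reference: treating the shuffled values as roughly independent uniform
    # draws on 0..num_rows-1, Var(X - Y) = 2 * (num_rows**2 - 1) / 12, so the
    # standard deviation of the difference is about num_rows / sqrt(6) -- the
    # scale used in the tolerance check above.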
if __name__ == "__main__":
pytest.main([__file__])
|
corehq/apps/enterprise/migrations/0002_enterprisepermissions_account_unique.py | akashkj/commcare-hq | 471 | 12624575 | # Generated by Django 2.2.24 on 2021-07-23 20:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('enterprise', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='enterprisepermissions',
name='account',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounting.BillingAccount',
unique=True),
),
]
|
insights/combiners/krb5.py | lhuett/insights-core | 121 | 12624627 | <gh_stars>100-1000
"""
krb5 configuration
==================
The krb5 files are normally available to rules as a list of
Krb5Configuration objects.
"""
from .. import LegacyItemAccess
from insights.core.plugins import combiner
from insights.parsers.krb5 import Krb5Configuration
from insights.parsers.httpd_conf import dict_deep_merge
@combiner(Krb5Configuration)
class AllKrb5Conf(LegacyItemAccess):
"""
    Combiner for accessing all the krb5 configuration files; the result is a dict.
    There may be multiple krb5 configuration files, and the main config file is
    krb5.conf. When the same section appears both in krb5.conf and in other
    configuration files, the section in krb5.conf provides the effective setting.
    Data from the krb5 parser is a list of dicts; this combiner parses that list
    and returns a dict containing all valid data.
Sample files::
/etc/krb5.conf:
includedir /etc/krb5.conf.d/
include /etc/krb5test.conf
module /etc/krb5test.conf:residual
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
/etc/krb5.d/krb5_more.conf:
[logging]
default = FILE:/var/log/krb5.log
kdc = FILE:/var/log/krb5.log
admin_server = FILE:/var/log/kadmind.log
[realms]
dns_lookup_realm = false
default_ccache_name = KEYRING:persistent:%{uid}
Examples:
>>> all_krb5 = shared[AllKrb5Conf]
>>> all_krb5.include
['/etc/krb5test.conf']
>>> all_krb5.sections()
['logging', 'realms']
>>> all_krb5.options('logging')
['default', 'kdc', 'admin_server']
>>> all_krb5['logging']['kdc']
'FILE:/var/log/krb5kdc.log'
>>> all_krb5.has_option('logging', 'admin_server')
True
>>> all_krb5['realms']['dns_lookup_realm']
'false'
>>> all_krb5.files
['krb5.conf', 'test.conf', 'test2.conf']
Attributes:
includedir (list): The directory list that `krb5.conf` includes via
`includedir` directive
include (list): The configuration file list that `krb5.conf` includes
via `include` directive
        module (list): The module list that `krb5.conf` specified via 'module'
directive
files (list): The list of configuration file names.
"""
def __init__(self, krb5configs):
self.data = {}
main_data = {}
self.includedir = []
self.include = []
self.module = []
self.files = []
for krb5_parser in krb5configs:
self.files.append(krb5_parser.file_name)
if krb5_parser.file_path == "/etc/krb5.conf":
main_data = krb5_parser.data
self.includedir = krb5_parser.includedir
self.include = krb5_parser.include
self.module = krb5_parser.module
else:
dict_deep_merge(self.data, krb5_parser.data)
        # If an option appears in the same section of both the main configuration and
        # another configuration file, the value from the main configuration wins;
        # options that only appear in the other files are kept.
for key, value in main_data.items():
if key in self.data.keys():
self.data[key].update(value)
else:
self.data[key] = value
super(AllKrb5Conf, self).__init__()
def sections(self):
"""
Return a list of section names.
"""
return self.data.keys()
def has_section(self, section):
"""
Indicate whether the named section is present in the configuration.
Return True if the given section is present, and False if not present.
"""
return section in self.data
def options(self, section):
"""
Return a list of option names for the given section name.
"""
return self.data[section].keys() if self.has_section(section) else []
def has_option(self, section, option):
"""
Check for the existence of a given option in a given section.
Return True if the given option is present, and False if not present.
"""
if section not in self.data:
return False
return option in self.data[section]
|