max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars) |
---|---|---|---|---|
code/from-arguments.py | DarkSuniuM/skillshare-downloader | 292 | 12691936 |
import sys
import re
from downloader import Downloader
if len(sys.argv) != 3:
    raise Exception('Invalid arguments. Usage: {program} <cookie> <url_or_class_id>'.format(program=sys.argv[0]))
cookie = sys.argv[1]
dl = Downloader(cookie=cookie)
if re.match(r'^[0-9]+$', sys.argv[2]):
dl.download_course_by_class_id(sys.argv[2])
else:
dl.download_course_by_url(sys.argv[2])
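# Example invocation (illustrative; the cookie string and URL below are placeholders):
#   python from-arguments.py "<session cookie>" "https://www.skillshare.com/classes/<slug>/<class_id>"
#   python from-arguments.py "<session cookie>" <class_id>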
|
python/tests/integration/register_test.py | justinforbes/depthcharge | 133 | 12691943 | #!/usr/bin/env python3
#
# SPDX-License-Identifier: BSD-3-Clause
# Depthcharge: <https://github.com/nccgroup/depthcharge>
#
# pylint: disable=redefined-outer-name,missing-function-docstring,invalid-name
# pylint: disable=global-statement # (Like salt and sugar; fine if used sparingly)
"""
Exercise all available RegisterReader operations for a platform.
"""
import os
import sys
from depthcharge.cmdline import ArgumentParser, create_depthcharge_ctx
_DEFAULT_ARCH = os.getenv('DEPTHCHARGE_TEST_ARCH', 'arm')
def perform_reads(ctx) -> list:
reg = ctx.arch.gd_register
results = []
# Get ground truth with default reader.
expected_value = ctx.read_register(reg)
for impl in ctx.register_readers:
value = impl.read(reg)
success = value == expected_value
results.append((impl.name, value, success))
return results
def print_results(results):
total = len(results)
n_pass = 0
print()
print(' RegisterReader Value Pass/Fail')
print('---------------------------------------------------------')
for result in results:
if result[2]:
state = 'Pass'
n_pass += 1
else:
state = 'Fail'
line = ' {:32s} 0x{:08x} {:s}'
print(line.format(result[0], result[1], state))
summary = os.linesep + '{:d} Tested, {:d} passed.' + os.linesep
print(summary.format(total, n_pass))
return n_pass == total
if __name__ == '__main__':
success = False
cmdline = ArgumentParser(
allow_deploy_default=True, allow_reboot_default=True,
arch_default=_DEFAULT_ARCH)
args = cmdline.parse_args()
ctx = create_depthcharge_ctx(args)
if args.config:
ctx.save(args.config)
try:
results = perform_reads(ctx)
success = print_results(results)
finally:
if args.config:
ctx.save(args.config)
if not success:
sys.exit(2)
|
python_src/exploration/coverage_path_planning/grid_map_lib.py | tkortz/motion_planning_rt | 111 | 12691946 |
"""
Grid map library in python
author: <NAME>
"""
import matplotlib.pyplot as plt
import numpy as np
class GridMap:
"""
GridMap class
"""
def __init__(self, width, height, resolution,
center_x, center_y, init_val=0.0):
"""__init__
        :param width: number of grid cells along the width
        :param height: number of grid cells along the height
:param resolution: grid resolution [m]
:param center_x: center x position [m]
:param center_y: center y position [m]
        :param init_val: initial value for all grid cells
"""
self.width = width
self.height = height
self.resolution = resolution
self.center_x = center_x
self.center_y = center_y
self.left_lower_x = self.center_x - \
(self.width / 2.0) * self.resolution
self.left_lower_y = self.center_y - \
(self.height / 2.0) * self.resolution
self.ndata = self.width * self.height
self.data = [init_val] * int(self.ndata)
def get_value_from_xy_index(self, x_ind, y_ind):
"""get_value_from_xy_index
when the index is out of grid map area, return None
:param x_ind: x index
:param y_ind: y index
"""
grid_ind = self.calc_grid_index_from_xy_index(x_ind, y_ind)
        if 0 <= grid_ind < self.ndata:
return self.data[grid_ind]
else:
return None
def get_xy_index_from_xy_pos(self, x_pos, y_pos):
"""get_xy_index_from_xy_pos
:param x_pos: x position [m]
:param y_pos: y position [m]
"""
x_ind = self.calc_xy_index_from_position(
x_pos, self.left_lower_x, self.width)
y_ind = self.calc_xy_index_from_position(
y_pos, self.left_lower_y, self.height)
return x_ind, y_ind
def set_value_from_xy_pos(self, x_pos, y_pos, val):
"""set_value_from_xy_pos
return bool flag, which means setting value is succeeded or not
:param x_pos: x position [m]
:param y_pos: y position [m]
:param val: grid value
"""
x_ind, y_ind = self.get_xy_index_from_xy_pos(x_pos, y_pos)
        if (x_ind is None) or (y_ind is None):
return False # NG
flag = self.set_value_from_xy_index(x_ind, y_ind, val)
return flag
def set_value_from_xy_index(self, x_ind, y_ind, val):
"""set_value_from_xy_index
return bool flag, which means setting value is succeeded or not
:param x_ind: x index
:param y_ind: y index
:param val: grid value
"""
if (x_ind is None) or (y_ind is None):
print(x_ind, y_ind)
            return False  # NG
grid_ind = int(y_ind * self.width + x_ind)
if 0 <= grid_ind < self.ndata:
self.data[grid_ind] = val
return True # OK
else:
return False # NG
def set_value_from_polygon(self, pol_x, pol_y, val, inside=True):
"""set_value_from_polygon
Setting value inside or outside polygon
:param pol_x: x position list for a polygon
:param pol_y: y position list for a polygon
:param val: grid value
:param inside: setting data inside or outside
"""
# making ring polygon
if (pol_x[0] != pol_x[-1]) or (pol_y[0] != pol_y[-1]):
pol_x.append(pol_x[0])
pol_y.append(pol_y[0])
# setting value for all grid
for x_ind in range(int(self.width)):
for y_ind in range(int(self.height)):
x_pos, y_pos = self.calc_grid_central_xy_position_from_xy_index(
x_ind, y_ind)
flag = self.check_inside_polygon(x_pos, y_pos, pol_x, pol_y)
if flag is inside:
self.set_value_from_xy_index(x_ind, y_ind, val)
def calc_grid_index_from_xy_index(self, x_ind, y_ind):
grid_ind = int(y_ind * self.width + x_ind)
return grid_ind
def calc_grid_central_xy_position_from_xy_index(self, x_ind, y_ind):
x_pos = self.calc_grid_central_xy_position_from_index(
x_ind, self.left_lower_x)
y_pos = self.calc_grid_central_xy_position_from_index(
y_ind, self.left_lower_y)
return x_pos, y_pos
def calc_grid_central_xy_position_from_index(self, index, lower_pos):
return lower_pos + index * self.resolution + self.resolution / 2.0
def calc_xy_index_from_position(self, pos, lower_pos, max_index):
ind = int(np.floor((pos - lower_pos) / self.resolution))
        if 0 <= ind < max_index:
return ind
else:
return None
def check_occupied_from_xy_index(self, xind, yind, occupied_val=1.0):
        val = self.get_value_from_xy_index(xind, yind)
        if val is None or val >= occupied_val:
            return True
        else:
            return False
def expand_grid(self):
xinds, yinds = [], []
for ix in range(int(self.width)):
for iy in range(int(self.height)):
if self.check_occupied_from_xy_index(ix, iy):
xinds.append(ix)
yinds.append(iy)
for (ix, iy) in zip(xinds, yinds):
self.set_value_from_xy_index(ix + 1, iy, val=1.0)
self.set_value_from_xy_index(ix, iy + 1, val=1.0)
self.set_value_from_xy_index(ix + 1, iy + 1, val=1.0)
self.set_value_from_xy_index(ix - 1, iy, val=1.0)
self.set_value_from_xy_index(ix, iy - 1, val=1.0)
self.set_value_from_xy_index(ix - 1, iy - 1, val=1.0)
@staticmethod
def check_inside_polygon(iox, ioy, x, y):
npoint = len(x) - 1
inside = False
for i1 in range(npoint):
i2 = (i1 + 1) % (npoint + 1)
if x[i1] >= x[i2]:
min_x, max_x = x[i2], x[i1]
else:
min_x, max_x = x[i1], x[i2]
if not min_x < iox < max_x:
continue
if (y[i1] + (y[i2] - y[i1]) / (x[i2] - x[i1])
* (iox - x[i1]) - ioy) > 0.0:
inside = not inside
return inside
def plot_grid_map(self, ax=None):
grid_data = np.reshape(np.array(self.data), (int(self.height), int(self.width)))
if not ax:
fig, ax = plt.subplots()
heat_map = ax.pcolor(grid_data, cmap="Blues", vmin=0.0, vmax=1.0)
plt.axis("equal")
return heat_map
def test_polygon_set():
ox = [0.0, 20.0, 50.0, 100.0, 130.0, 40.0]
oy = [0.0, -20.0, 0.0, 30.0, 60.0, 80.0]
grid_map = GridMap(600, 290, 0.7, 60.0, 30.5)
grid_map.set_value_from_polygon(ox, oy, 1.0, inside=False)
grid_map.plot_grid_map()
plt.axis("equal")
plt.grid(True)
def test_position_set():
grid_map = GridMap(100, 120, 0.5, 10.0, -0.5)
grid_map.set_value_from_xy_pos(10.1, -1.1, 1.0)
grid_map.set_value_from_xy_pos(10.1, -0.1, 1.0)
grid_map.set_value_from_xy_pos(10.1, 1.1, 1.0)
grid_map.set_value_from_xy_pos(11.1, 0.1, 1.0)
grid_map.set_value_from_xy_pos(10.1, 0.1, 1.0)
grid_map.set_value_from_xy_pos(9.1, 0.1, 1.0)
grid_map.plot_grid_map()
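# A minimal demo entry point (an illustrative sketch, not part of the original
# module): runs the two test helpers defined above and displays the plots.
def main():
    test_polygon_set()
    test_position_set()
    plt.show()
if __name__ == '__main__':
    main()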
|
examples/NeurIPS2020-Learning-to-Run-a-Power-Network-Challenge/track2/utils.py | lp2333/PARL | 3,172 | 12691972 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
import numpy as np
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
selected_feature = [
'loads_q', 'loads_v', 'prods_q', 'prods_v', 'rho', 'line_status',
'hour_of_day', 'month'
]
inference_info = np.load('./saved_files/inference_info.npz')
col = inference_info['col']
mean = inference_info['mean']
std = inference_info['std']
def process(raw_obs):
obs = raw_obs.to_dict()
x = dict()
x['loads_p'] = obs['loads']['p']
x['loads_q'] = obs['loads']['q']
x['loads_v'] = obs['loads']['v']
x['prods_p'] = obs['prods']['p']
x['prods_q'] = obs['prods']['q']
x['prods_v'] = obs['prods']['v']
x['lines_or_p'] = obs['lines_or']['p']
x['lines_or_q'] = obs['lines_or']['q']
x['lines_or_v'] = obs['lines_or']['v']
x['lines_or_a'] = obs['lines_or']['a']
x['lines_ex_p'] = obs['lines_ex']['p']
x['lines_ex_q'] = obs['lines_ex']['q']
x['lines_ex_v'] = obs['lines_ex']['v']
x['lines_ex_a'] = obs['lines_ex']['a']
x['day_of_week'] = raw_obs.day_of_week
x['month'] = raw_obs.month
x['hour_of_day'] = raw_obs.hour_of_day
to_maintain_lines = np.where((raw_obs.time_next_maintenance>0) \
& (raw_obs.time_next_maintenance<2))[0]
x['rho'] = np.copy(obs['rho'])
x['line_status'] = np.copy(obs['line_status'].astype(float))
line_num = x['line_status'].shape[0]
if len(to_maintain_lines):
x['rho'][to_maintain_lines] = 0.0
x['line_status'][to_maintain_lines] = 0.0
x['line_status'] += np.array([x * 2 for x in range(line_num)])
x['rho'] = x['rho'] - 1.0
data = []
for feature in selected_feature:
col_data = x[feature]
if isinstance(col_data, np.int32):
col_data = np.array([col_data])
data.append(col_data)
data = np.concatenate(data)
data = data[col]
assert data.shape[0] == mean.shape[0]
assert data.shape[0] == std.shape[0]
data = (data - mean) / std
return data
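# Illustrative usage sketch (hypothetical names; `env` and `model` are not
# defined in this module):
#   raw_obs = env.reset()
#   features = process(raw_obs)      # 1-D normalized feature vector
#   action = model.predict(features)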
|
tf2onnx/tflite_utils.py | LoicDagnas/tensorflow-onnx | 1,473 | 12691985 |
# SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.tflite_utils - utilities for parsing tflite files into onnx graph
"""
import collections
import importlib
import logging
import struct
from onnx import helper, onnx_pb, numpy_helper
from tensorflow.core.framework import types_pb2, tensor_pb2, node_def_pb2
from tensorflow.python.framework import tensor_util
import tensorflow as tf
import numpy as np
from tf2onnx.tflite.TensorType import TensorType as TFLiteTensorType
from tf2onnx.tflite.Model import Model
from tf2onnx.flexbuffers import read_flexbuffer
from tf2onnx.tf_utils import read_tf_node_def_attrs
from tf2onnx.graph import Graph
from tf2onnx import utils
logger = logging.getLogger(__name__)
TFLITE_TO_ONNX_DTYPE = {
TFLiteTensorType.FLOAT32: onnx_pb.TensorProto.FLOAT,
TFLiteTensorType.FLOAT16: onnx_pb.TensorProto.FLOAT16,
TFLiteTensorType.INT32: onnx_pb.TensorProto.INT32,
TFLiteTensorType.UINT8: onnx_pb.TensorProto.UINT8,
TFLiteTensorType.INT64: onnx_pb.TensorProto.INT64,
TFLiteTensorType.STRING: onnx_pb.TensorProto.STRING,
TFLiteTensorType.BOOL: onnx_pb.TensorProto.BOOL,
TFLiteTensorType.INT16: onnx_pb.TensorProto.INT16,
TFLiteTensorType.COMPLEX64: onnx_pb.TensorProto.COMPLEX64,
TFLiteTensorType.INT8: onnx_pb.TensorProto.INT8,
TFLiteTensorType.FLOAT64: onnx_pb.TensorProto.DOUBLE,
TFLiteTensorType.COMPLEX128: onnx_pb.TensorProto.COMPLEX128,
TFLiteTensorType.UINT64: onnx_pb.TensorProto.UINT64,
TFLiteTensorType.UINT32: onnx_pb.TensorProto.UINT32,
TFLiteTensorType.RESOURCE: onnx_pb.TensorProto.UNDEFINED,
TFLiteTensorType.VARIANT: onnx_pb.TensorProto.UNDEFINED,
}
TFLITE_TO_TF_DTYPE = {
TFLiteTensorType.FLOAT32: types_pb2.DT_FLOAT,
TFLiteTensorType.FLOAT16: types_pb2.DT_HALF,
TFLiteTensorType.INT32: types_pb2.DT_INT32,
TFLiteTensorType.UINT8: types_pb2.DT_UINT8,
TFLiteTensorType.INT64: types_pb2.DT_INT64,
TFLiteTensorType.STRING: types_pb2.DT_STRING,
TFLiteTensorType.BOOL: types_pb2.DT_BOOL,
TFLiteTensorType.INT16: types_pb2.DT_INT16,
TFLiteTensorType.COMPLEX64: types_pb2.DT_COMPLEX64,
TFLiteTensorType.INT8: types_pb2.DT_INT8,
TFLiteTensorType.FLOAT64: types_pb2.DT_DOUBLE,
TFLiteTensorType.COMPLEX128: types_pb2.DT_COMPLEX128,
TFLiteTensorType.UINT64: types_pb2.DT_UINT64,
TFLiteTensorType.UINT32: types_pb2.DT_UINT32,
TFLiteTensorType.RESOURCE: types_pb2.DT_RESOURCE,
TFLiteTensorType.VARIANT: types_pb2.DT_VARIANT,
}
def map_tflite_dtype_to_onnx(dtype):
return TFLITE_TO_ONNX_DTYPE[dtype]
def map_tflite_dtype_to_tf(dtype):
return TFLITE_TO_TF_DTYPE[dtype]
# The tflite schema uses snake case, but the python bindings use proper case
def snake_to_proper_case(name):
return ''.join(n.capitalize() for n in name.split('_'))
def proper_to_snake_case(name):
res = ''
for c in name:
if c.isupper() and res:
res += '_'
res += c.lower()
return res
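# For example (illustrative):
#   snake_to_proper_case('fused_activation_function')  -> 'FusedActivationFunction'
#   proper_to_snake_case('FusedActivationFunction')    -> 'fused_activation_function'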
# Pulled from the tflite schema.fbs file. Needed to decode enum numbers into strings.
NODE_ATTR_NAME_TO_ENUM_TYPE = {
'fused_activation_function': 'ActivationFunctionType',
'padding': 'Padding',
'type': 'LSHProjectionType',
'weights_format': 'FullyConnectedOptionsWeightsFormat',
'kernel_type': 'LSTMKernelType',
'combiner': 'CombinerType',
'in_data_type': 'TensorType',
'out_data_type': 'TensorType',
'output_type': 'TensorType',
'out_type': 'TensorType',
'mode': 'MirrorPadMode',
'idx_out_type': 'TensorType',
}
NODE_ATTR_NAME_TO_ENUM_TYPE = {snake_to_proper_case(key): value for key, value in NODE_ATTR_NAME_TO_ENUM_TYPE.items()}
# Pulled from the tflite schema.fbs file.
FUNCTION_ATTRS = ['then_subgraph_index', 'else_subgraph_index', 'cond_subgraph_index',
'body_subgraph_index', 'subgraph']
FUNCTION_ATTRS = [snake_to_proper_case(attr) for attr in FUNCTION_ATTRS]
enum_cache = {}
def lookup_enum(idx, enum_name):
"""Given the name of a tflite enum class and an index, return a string with the name of the enum value"""
if enum_name == 'TensorType':
return map_tflite_dtype_to_onnx(idx)
if enum_name in enum_cache:
idx_to_name = enum_cache[enum_name]
else:
module = importlib.import_module('tf2onnx.tflite.' + enum_name)
enum_class = getattr(module, enum_name)
idx_to_name = {value: key for key, value in enum_class.__dict__.items() if not key.startswith('_')}
enum_cache[enum_name] = idx_to_name
utils.make_sure(idx in idx_to_name, "Can't lookup value %s for tflite enum %s. Please update tf2onnx or "
"submit an issue on GitHub.", idx, enum_name)
return idx_to_name[idx]
def get_options_class(name):
"""Each tflite optype has a flatbuffer Options class (ex: AddOptions). Returns the options class given its name."""
if name == "NONE":
return None
module = importlib.import_module('tf2onnx.tflite.' + name)
return getattr(module, name)
def graphs_from_tflite(tflite_path, input_names=None, output_names=None):
"""
Given the path to a tflite model, returns a tuple (main_graph, subgraphs) of graph.py Graph objects
inputs/outputs will be taken from main graph in model if not overridden
"""
tflite_graphs, opcodes, model, tensor_shapes = read_tflite_model(tflite_path)
main_g = None
subgraphs = []
for i, tfl_graph in enumerate(tflite_graphs):
is_main_g = i == len(tflite_graphs) - 1
prefix = '' if is_main_g else tfl_graph.Name().decode() + '_'
tensor_shapes_from_interpreter = None
if is_main_g:
tensor_shapes_from_interpreter = tensor_shapes
onnx_nodes, _, _, output_shapes, dtypes, f_inputs, f_outputs, graph_name = \
parse_tflite_graph(tfl_graph, opcodes, model, prefix, tensor_shapes_from_interpreter)
g_inputs = f_inputs
g_outputs = f_outputs
if is_main_g:
# Override IO in main graph
utils.check_io(input_names, output_names, output_shapes.keys())
if input_names is not None:
g_inputs = input_names
if output_names is not None:
g_outputs = output_names
g = Graph(onnx_nodes, output_shapes, dtypes, input_names=g_inputs, output_names=g_outputs,
is_subgraph=not is_main_g, graph_name=graph_name)
if is_main_g:
main_g = g
else:
subgraphs.append(g)
return main_g, subgraphs
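# Illustrative usage sketch (the file name is a placeholder):
#   main_graph, subgraphs = graphs_from_tflite("model.tflite")
#   # main_graph and each element of subgraphs are tf2onnx graph.Graph objects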
def read_tflite_model(tflite_path):
"""
Given the path to a tflite model, returns tuple (tflite_graphs, opcodes_map, model)
Graphs are topologically sorted and the main graph is last
Pass these to parse_tflite_graph
"""
with open(tflite_path, 'rb') as f:
buf = f.read()
buf = bytearray(buf)
model = Model.GetRootAsModel(buf, 0)
# To save space, each op in the model indicates its opcode as an index into the model's opcode map.
opcodes_map = {}
for i in range(model.OperatorCodesLength()):
op_code = model.OperatorCodes(i)
# TFlite ran out of opcodes since they only used a byte. Old models store opcodes in DeprecatedBuiltinCode.
# New models put PLACEHOLDER_FOR_GREATER_OP_CODES in this field to signify that BuiltinCode should be used.
code = lookup_enum(op_code.DeprecatedBuiltinCode(), 'BuiltinOperator')
if code == 'PLACEHOLDER_FOR_GREATER_OP_CODES':
code = lookup_enum(op_code.BuiltinCode(), 'BuiltinOperator')
if code == 'CUSTOM':
code = op_code.CustomCode().decode()
opcodes_map[i] = code
# Shapes stored in tflite models are not always reliable so we get them from the interpreter if possible.
tensor_shapes = {}
try:
interpreter = tf.lite.Interpreter(tflite_path)
interpreter.allocate_tensors()
tensor_cnt = model.Subgraphs(0).TensorsLength()
for i in range(tensor_cnt):
name = model.Subgraphs(0).Tensors(i).Name().decode()
details = interpreter._get_tensor_details(i) # pylint: disable=protected-access
if "shape_signature" in details:
tensor_shapes[name] = details["shape_signature"].tolist()
elif "shape" in details:
tensor_shapes[name] = details["shape"].tolist()
except Exception as e: # pylint: disable=broad-except
logger.warning("Error loading model into tflite interpreter: %s", e)
tflite_graphs = get_model_subgraphs(model)
return tflite_graphs, opcodes_map, model, tensor_shapes
def get_subgraph_dependencies(model, graph_idx):
"""Returns a list of subgraph indices referenced by the indicated graph"""
dependencies = []
g = model.Subgraphs(graph_idx)
for i in range(g.OperatorsLength()):
op = g.Operators(i)
options_type_name = lookup_enum(op.BuiltinOptionsType(), 'BuiltinOptions')
option_class = get_options_class(options_type_name)
if option_class is not None:
options = option_class()
options.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)
for attr in FUNCTION_ATTRS:
if hasattr(options, attr):
value = getattr(options, attr)()
dependencies.append(value)
return dependencies
def get_model_subgraphs(model):
"""Returns topologically sorted subgraphs of a model. Guarantees main graph is placed at the end."""
main_g = 0
dependencies = {}
idx_to_graph = {}
for i in range(model.SubgraphsLength()):
idx_to_graph[i] = model.Subgraphs(i)
ds = get_subgraph_dependencies(model, i)
utils.make_sure(main_g not in ds, "Main graph %s is a dependency of subgraph %s", main_g, i)
dependencies[i] = ds
ordered = utils.topological_sort(dependencies)
return [idx_to_graph[i] for i in ordered]
def get_quantization_attr(quant_params):
attr = {}
attr['scale'] = quant_params.ScaleAsNumpy().tolist()
attr['zero_point'] = quant_params.ZeroPointAsNumpy().tolist()
attr['quantized_dimension'] = quant_params.QuantizedDimension()
if not quant_params.MaxIsNone():
attr['max'] = quant_params.MaxAsNumpy().tolist()
if not quant_params.MinIsNone():
attr['min'] = quant_params.MinAsNumpy().tolist()
return attr
def parse_tflite_string_tensor(buffer_bytes, shape):
"""Returns an onnx tensor with the string data encoded in the tflite tensor data buffer"""
def read_int(offset):
return struct.unpack('<i', buffer_bytes[offset:offset+4])[0]
offset = 0
count = read_int(offset)
offset += 4
offset_list = []
for i in range(count):
offset_list.append(read_int(offset))
offset += 4
offset_list.append(len(buffer_bytes))
string_list = []
for i in range(count):
string_list.append(buffer_bytes[offset_list[i]:offset_list[i+1]].decode("utf-8"))
    return numpy_helper.from_array(np.array(string_list, dtype=object).reshape(shape))
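# Worked example of the buffer layout decoded above (illustrative): for the
# strings ["ab", "c"], a buffer equal to struct.pack('<3i', 2, 12, 14) + b'abc'
# gives count=2 and offsets [12, 14] (with the implicit end offset 15), and
# decodes to the strings 'ab' and 'c'.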
def op_has_scalar_output(input_shapes, optype, attr):
"""
TFLite uses [] to denote both scalars and unknown output shapes. Return True if an op can have scalar outputs
despite having non-scalar inputs. Otherwise, we will replace [] with None
"""
if optype in ["TFL_STRIDED_SLICE", "StridedSlice"]:
inp_rank = len(input_shapes[0])
return attr['shrink_axis_mask'] == 2 ** inp_rank - 1
if (optype.startswith("TFL_REDUCE") or optype in ['All']) and len(input_shapes) == 2:
inp_rank = len(input_shapes[0])
keep_dims = attr.get('keep_dims', True)
# axes input can be a scalar for a single axis
num_axes = 1 if input_shapes[1] == [] else input_shapes[1][0]
return not keep_dims and inp_rank == num_axes
if optype == "TFL_RESHAPE":
return input_shapes[1] == [0]
if optype == "Size":
# Op from TF
return True
return False
def parse_tflite_graph(tflite_g, opcodes_map, model, input_prefix='', tensor_shapes_override=None):
"""
Returns a Graph object along with some op count stats. All tflite op types are prefixed with "TFL_".
Names of graph inputs are optionally prefixed with a string to prevent name conflicts in subgraphs.
    Quantized tensors are surrounded with quantize/dequantize ops
"""
op_cnt = collections.Counter()
attr_cnt = collections.Counter()
onnx_nodes = []
output_shapes = {}
dtypes = {}
tensor_names = {}
if tensor_shapes_override is None:
tensor_shapes_override = {}
# Map tensor name to tflite Tensor object so we can fetch quantization info as needed
name_to_tensor = {}
# If a node takes a quantized tensor as input, we must add a dequantize op after it.
# Store a mapping so we only need to make at most one dequantize op per tensor.
tensor_name_to_dequant_output = {}
# tflite uses generic names (arg0, arg1, etc.) for inputs but full names for other tensors, so
# prefixing just the inputs should be fine. Other tensors are prefixed when we do inlining.
input_indices = {tflite_g.Inputs(i) for i in range(tflite_g.InputsLength())}
for i in range(tflite_g.TensorsLength()):
tensor = tflite_g.Tensors(i)
name = tensor.Name().decode()
if i in input_indices:
name = input_prefix + name
tensor_names[i] = name
name_to_tensor[name] = tensor
if name in tensor_shapes_override:
output_shapes[name] = tensor_shapes_override[name]
elif tensor.ShapeIsNone():
output_shapes[name] = None
elif tensor.ShapeSignatureIsNone():
# The shape signature uses -1 to signify unknown dims. Old models don't have this and use Shape instead.
output_shapes[name] = tensor.ShapeAsNumpy().tolist()
else:
output_shapes[name] = tensor.ShapeSignatureAsNumpy().tolist()
buf = model.Buffers(tensor.Buffer())
dtypes[name] = map_tflite_dtype_to_onnx(tensor.Type())
if not buf.DataIsNone() and tensor.Buffer() > 0:
# For const values we use TF to decode the binary data from the buffer
t = tensor_pb2.TensorProto()
t.tensor_content = buf.DataAsNumpy().tobytes()
if output_shapes[name] is None:
output_shapes[name] = []
for d in output_shapes[name]:
t.tensor_shape.dim.add().size = d
t.dtype = map_tflite_dtype_to_tf(tensor.Type())
if t.dtype == tf.string:
onnx_tensor = parse_tflite_string_tensor(t.tensor_content, output_shapes[name])
else:
np_data = tensor_util.MakeNdarray(t)
onnx_tensor = numpy_helper.from_array(np_data, name=name)
onnx_node = helper.make_node("Const", [], outputs=[name], name=name, value=onnx_tensor)
onnx_nodes.append(onnx_node)
op_cnt["Const"] += 1
def get_dequant(tensor_name):
"""Creates a dequantize op for the provided tensor if needed and returns the output of the op, or
the original tensor name if no dequantization is needed"""
quant = name_to_tensor[tensor_name].Quantization()
if quant is None or quant.ScaleIsNone() or quant.ZeroPointIsNone():
return tensor_name
if tensor_name in tensor_name_to_dequant_output:
return tensor_name_to_dequant_output[tensor_name]
dequant_name = tensor_name + "_dequant"
attr = get_quantization_attr(quant)
onnx_node = helper.make_node("TFL_DEQUANTIZE", [tensor_name], [dequant_name], name=dequant_name, **attr)
onnx_nodes.append(onnx_node)
tensor_name_to_dequant_output[tensor_name] = dequant_name
output_shapes[dequant_name] = output_shapes[tensor_name].copy()
dtypes[dequant_name] = onnx_pb.TensorProto.FLOAT
return dequant_name
def get_prequant(tensor_name):
"""Called by nodes with the name of the tensor they must output.
If the output is supposed to be quantized, creates a Quantize op outputting the tensor.
Returns the name that should be used for the "prequantized" tensor, or the original tensor if no quantization
is needed"""
quant = name_to_tensor[tensor_name].Quantization()
if quant is None or quant.ScaleIsNone() or quant.ZeroPointIsNone():
return tensor_name
prequant_name = tensor_name + "_prequant"
quantize_name = tensor_name + "_quantize"
attr = get_quantization_attr(quant)
onnx_node = helper.make_node("TFL_QUANTIZE", [prequant_name], [tensor_name], name=quantize_name, **attr)
onnx_nodes.append(onnx_node)
output_shapes[prequant_name] = output_shapes[tensor_name].copy()
dtypes[prequant_name] = onnx_pb.TensorProto.FLOAT
return prequant_name
for i in range(tflite_g.OperatorsLength()):
op = tflite_g.Operators(i)
optype = 'TFL_' + opcodes_map[op.OpcodeIndex()]
op_cnt[optype] += 1
attr = {}
options_type_name = lookup_enum(op.BuiltinOptionsType(), 'BuiltinOptions')
option_class = get_options_class(options_type_name)
wants_dequantized_input = True
has_prequantized_output = True
if optype == 'TFL_QUANTIZE':
out_tensor = tflite_g.Tensors(op.Outputs(0))
quant = out_tensor.Quantization()
has_prequantized_output = False
if quant is not None and not quant.ScaleIsNone() and not quant.ZeroPointIsNone():
attr.update(get_quantization_attr(quant))
elif optype == 'TFL_DEQUANTIZE':
in_tensor = tflite_g.Tensors(op.Inputs(0))
quant = in_tensor.Quantization()
wants_dequantized_input = False
if quant is not None and not quant.ScaleIsNone() and not quant.ZeroPointIsNone():
attr.update(get_quantization_attr(quant))
input_names = [tensor_names[op.Inputs(i)] for i in range(op.InputsLength()) if op.Inputs(i) != -1]
output_names = [tensor_names[op.Outputs(i)] for i in range(op.OutputsLength()) if op.Outputs(i) != -1]
if optype.startswith("TFL_Flex"):
data = read_flexbuffer(op.CustomOptionsAsNumpy().tobytes(), decode_strings=False)
utils.make_sure(isinstance(data, list), "Flex ops are expected to store data as a flexbuffer list")
tf_op = data[0].decode("utf-8")
tf_node_def = node_def_pb2.NodeDef()
tf_node_def.ParseFromString(data[1])
input_tf_dtypes = [map_tflite_dtype_to_tf(name_to_tensor[inp].Type()) for inp in input_names]
def shape_to_tf_shape(dims):
return [None if d < 0 else d for d in dims] if dims is not None else None
input_shapes = [shape_to_tf_shape(output_shapes[inp]) for inp in input_names]
tf_attrs, _ = read_tf_node_def_attrs(tf_node_def, input_tf_dtypes, input_shapes)
attr.update(tf_attrs)
optype = tf_op
elif not op.CustomOptionsIsNone():
custom_ops_format = lookup_enum(op.CustomOptionsFormat(), 'CustomOptionsFormat')
if custom_ops_format == 'FLEXBUFFERS':
data = None
try:
data = read_flexbuffer(op.CustomOptionsAsNumpy().tobytes())
except Exception as e: # pylint: disable=broad-except
logger.warning("Could not parse attributes for custom op '%s': %s", optype, e)
if isinstance(data, dict):
attr.update(data)
if option_class is not None:
options = option_class()
options.Init(op.BuiltinOptions().Bytes, op.BuiltinOptions().Pos)
# All flatbuffer objects have these properties.
block_list = [options_type_name + 'BufferHasIdentifier', 'Init',
'GetRootAs' + options_type_name, 'GetRootAs']
# The rest of the properties of the options class provide its attribute names
attr_names = {opt for opt in dir(options) if not opt.startswith('_') and opt not in block_list}
for a in list(attr_names):
                # Flatbuffer list properties have 3 functions: *Length, *IsNone, and *AsNumpy
if a + 'Length' in attr_names:
attr_names.remove(a + 'Length')
attr_names.remove(a + 'IsNone')
attr_names.remove(a)
for a in attr_names:
if a.endswith('AsNumpy'):
value = getattr(options, a)().tolist()
a = a[:-len('AsNumpy')]
else:
# For enums we use a string with the value name, not enum index
value = getattr(options, a)()
if a in NODE_ATTR_NAME_TO_ENUM_TYPE:
value = lookup_enum(value, NODE_ATTR_NAME_TO_ENUM_TYPE[a])
elif a in FUNCTION_ATTRS:
value = model.Subgraphs(value).Name().decode()
attr_cnt[a] += 1
attr[proper_to_snake_case(a)] = value
if wants_dequantized_input:
input_names = [get_dequant(inp) for inp in input_names]
if optype == "TFL_TFLite_Detection_PostProcess":
# There's a bug in tflite for the output shapes of this op
for out, shape in zip(output_names, [[-1, -1, 4], [-1, -1], [-1, -1], [-1]]):
if len(output_shapes[out]) != len(shape):
output_shapes[out] = shape
if all(output_shapes[out] == [] for out in output_names):
# tflite uses [] to represent both scalars and completely unknown shapes
# If an op has non-scalar inputs and all scalar outputs, it is very likely the shapes are actually unknown.
inp_shapes = [output_shapes[inp] for inp in input_names]
if not all(s == [] for s in inp_shapes):
if any(s is None for s in inp_shapes) or not op_has_scalar_output(inp_shapes, optype, attr):
for out in output_names:
logger.warning("Replacing scalar output shape of %s with unknown shape", out)
output_shapes[out] = None
if has_prequantized_output:
output_names = [get_prequant(out) for out in output_names]
onnx_node = helper.make_node(optype, input_names, output_names, name=output_names[0], **attr)
onnx_nodes.append(onnx_node)
inputs = [tensor_names[tflite_g.Inputs(i)] for i in range(tflite_g.InputsLength())]
outputs = [tensor_names[tflite_g.Outputs(i)] for i in range(tflite_g.OutputsLength())]
# TODO: Allow input/outputs to be overridden
for inp in inputs:
onnx_node = helper.make_node("Placeholder", [], outputs=[inp], name=inp)
onnx_nodes.append(onnx_node)
graph_name = (tflite_g.Name() or b'tflite graph').decode()
return onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes, inputs, outputs, graph_name
|
scripts/cspan.py | DevPoetsSociety/congress-legislators | 1,629 | 12691987 | #!/usr/bin/env python
# Update current cspan IDs using the ProPublica Congress API.
import json, urllib.request, urllib.parse, urllib.error
from utils import load_data, save_data
def run():
# load in current members
y = load_data("legislators-current.yaml")
for m in y:
# retrieve C-SPAN id, if available, from ProPublica API
# TODO: use utils.download here
response = urllib.request.urlopen("https://projects.propublica.org/represent/api/v1/members/%s.json" % m['id']['bioguide']).read()
j = json.loads(response.decode("utf8"))
cspan = j['results'][0]['cspan_id']
        if cspan:
            m['id']['cspan'] = int(cspan)
save_data(y, "legislators-current.yaml")
if __name__ == '__main__':
run()
|
sdk/regionmove/azure-mgmt-regionmove/azure/mgmt/regionmove/models/_region_move_service_api_enums.py | rsdoherty/azure-sdk-for-python | 2,728 | 12692030 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class DependencyType(str, Enum):
"""Defines the dependency type.
"""
required_for_prepare = "RequiredForPrepare"
required_for_move = "RequiredForMove"
class MoveResourceInputType(str, Enum):
"""Defines the move resource input type.
"""
move_resource_id = "MoveResourceId"
move_resource_source_id = "MoveResourceSourceId"
class MoveState(str, Enum):
"""Defines the MoveResource states.
"""
assignment_pending = "AssignmentPending"
prepare_pending = "PreparePending"
prepare_in_progress = "PrepareInProgress"
prepare_failed = "PrepareFailed"
move_pending = "MovePending"
move_in_progress = "MoveInProgress"
move_failed = "MoveFailed"
discard_in_progress = "DiscardInProgress"
discard_failed = "DiscardFailed"
commit_pending = "CommitPending"
commit_in_progress = "CommitInProgress"
commit_failed = "CommitFailed"
committed = "Committed"
class ProvisioningState(str, Enum):
"""Defines the provisioning states.
"""
succeeded = "Succeeded"
updating = "Updating"
creating = "Creating"
failed = "Failed"
class ResolutionType(str, Enum):
"""Defines the resolution type.
"""
manual = "Manual"
automatic = "Automatic"
class ResourceIdentityType(str, Enum):
"""The type of identity used for the region move service.
"""
none = "None"
system_assigned = "SystemAssigned"
user_assigned = "UserAssigned"
class TargetAvailabilityZone(str, Enum):
"""Gets or sets the target availability zone.
"""
one = "1"
two = "2"
three = "3"
na = "NA"
class ZoneRedundant(str, Enum):
"""Defines the zone redundant resource setting.
"""
enable = "Enable"
disable = "Disable"
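# Illustrative usage sketch (not part of the generated enum definitions): since
# these enums subclass str, members compare equal to their wire values.
#   assert MoveState.prepare_pending == "PreparePending"
#   assert MoveState("Committed") is MoveState.committed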
|
annotate/annotate_current_lane.py | zdx3578/self-driving-truck | 373 | 12692034 |
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from lib import replay_memory
from common import GridAnnotationWindow
import Tkinter
def main():
print("Loading replay memory...")
memory = replay_memory.ReplayMemory.create_instance_supervised()
win = GridAnnotationWindow.create(
memory,
current_anno_attribute_name="current_lane_grid",
save_to_fp="annotations_current_lane.pickle",
every_nth_example=20
)
win.brush_size = 2
win.autosave_every_nth = 100
win.master.wm_title("Annotate current lane")
Tkinter.mainloop()
if __name__ == "__main__":
main()
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/i/import_itself.py | ciskoinch8/vimrc | 463 | 12692048 | """test module importing itself"""
# pylint: disable=no-absolute-import,using-constant-test
from __future__ import print_function
from . import import_itself # [import-self]
__revision__ = 0
if __revision__:
print(import_itself)
|
puma/registration/run_icp.py | okryush/puma | 239 | 12692055 |
import open3d as o3d
from .method_selector import get_te_method
def run_icp(src, tgt, trans_init, config):
te = get_te_method(config.method)
if config.method == "gicp":
return o3d.pipelines.registration.registration_generalized_icp(
src, tgt, config.threshold, trans_init, te
).transformation
return o3d.pipelines.registration.registration_icp(
src, tgt, config.threshold, trans_init, te
).transformation
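# Illustrative usage sketch (file names are placeholders; `config` is assumed to
# expose the `method` and `threshold` attributes used above):
#   import numpy as np
#   from types import SimpleNamespace
#   src = o3d.io.read_point_cloud("scan_000.ply")
#   tgt = o3d.io.read_point_cloud("scan_001.ply")
#   T = run_icp(src, tgt, np.eye(4), SimpleNamespace(method="gicp", threshold=0.5))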
|
python/paddle/fluid/tests/unittests/test_fleet_private_function.py | zmxdream/Paddle | 17,085 | 12692076 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import paddle
import socket
import threading
class TestFleetPrivateFunction(unittest.TestCase):
def test_wait_port(self):
def init_server(port):
import time
time.sleep(5)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("127.0.0.1", port))
sock.listen(10)
while True:
c, addr = sock.accept()
                c.send(b"0")
c.close()
break
thr = threading.Thread(target=init_server, args=(9292, ))
thr.start()
import paddle.distributed.fleet as fleet
ep = ["127.0.0.1:9292"]
fleet.base.private_helper_function.wait_server_ready(ep)
thr.join()
if __name__ == "__main__":
unittest.main()
|
design_patterns__examples/Bridge/example_1/main.py | DazEB2/SimplePyScripts | 117 | 12692085 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: Design Patterns: Bridge — Мост
# SOURCE: https://ru.wikipedia.org/wiki/Мост_(шаблон_проектирования)
# SOURCE: https://refactoring.guru/ru/design-patterns/bridge
# SOURCE: https://refactoring.guru/ru/design-patterns/bridge/java/example
from devices.radio import Radio
from devices.tv import Tv
from remotes.basic_remote import BasicRemote
from remotes.advanced_remote import AdvancedRemote
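# The devices/remotes packages are not shown in this file; a minimal sketch of
# the interface this script relies on (hypothetical, for illustration only):
#   class Tv:                      # devices/tv.py
#       def __init__(self): self.on = False
#       def power(self): self.on = not self.on
#       def mute(self): pass
#       def print_status(self): print("TV is", "on" if self.on else "off")
#   class BasicRemote:             # remotes/basic_remote.py
#       def __init__(self, device): self.device = device   # bridge to the device
#       def power(self): self.device.power()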
def test_device(device):
print("Tests with basic remote.")
basic_remote = BasicRemote(device)
basic_remote.power()
device.print_status()
print("Tests with advanced remote.")
advanced_remote = AdvancedRemote(device)
advanced_remote.power()
advanced_remote.mute()
device.print_status()
if __name__ == '__main__':
test_device(Tv())
test_device(Radio())
|
Sankey/sankey_with_level_setting.py | pyecharts/pyecharts_gallery | 759 | 12692092 |
import json
from pyecharts import options as opts
from pyecharts.charts import Sankey
with open("product.json", "r", encoding="utf-8") as f:
j = json.load(f)
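# Expected structure of product.json (an assumption inferred from the fields
# used below; standard ECharts sankey data):
#   {"nodes": [{"name": "category1"}, ...],
#    "links": [{"source": "category1", "target": "product1", "value": 10}, ...]}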
c = (
Sankey()
.add(
"sankey",
nodes=j["nodes"],
links=j["links"],
pos_top="10%",
focus_node_adjacency=True,
levels=[
opts.SankeyLevelsOpts(
depth=0,
itemstyle_opts=opts.ItemStyleOpts(color="#fbb4ae"),
linestyle_opts=opts.LineStyleOpts(color="source", opacity=0.6),
),
opts.SankeyLevelsOpts(
depth=1,
itemstyle_opts=opts.ItemStyleOpts(color="#b3cde3"),
linestyle_opts=opts.LineStyleOpts(color="source", opacity=0.6),
),
opts.SankeyLevelsOpts(
depth=2,
itemstyle_opts=opts.ItemStyleOpts(color="#ccebc5"),
linestyle_opts=opts.LineStyleOpts(color="source", opacity=0.6),
),
opts.SankeyLevelsOpts(
depth=3,
itemstyle_opts=opts.ItemStyleOpts(color="#decbe4"),
linestyle_opts=opts.LineStyleOpts(color="source", opacity=0.6),
),
],
linestyle_opt=opts.LineStyleOpts(curve=0.5),
)
.set_global_opts(
title_opts=opts.TitleOpts(title="Sankey-Level Settings"),
tooltip_opts=opts.TooltipOpts(trigger="item", trigger_on="mousemove"),
)
.render("sankey_with_level_setting.html")
)
|
src/cltk/phonology/lat/transcription.py | yelircaasi/cltk | 757 | 12692139 |
"""Convert a word from Latin orthography into its hypothesized
pronunciation in the International Phonetic Alphabet (IPA).
https://raw.githubusercontent.com/j-duff/cltk/ipa/
cltk/phonology/lat/transcription.py
"""
import re
import unicodedata
from typing import List
from nltk.tokenize import wordpunct_tokenize
from cltk.core.cltk_logger import logger
from cltk.prosody.lat import macronizer as m
try:
# James Tauber's greek_accentuation package
from greek_accentuation import characters as chars
except ImportError as import_error:
message = (
'Missing "greek_accentuation" package. Install with '
"`pip install greek-accentuation`."
)
logger.error(message)
logger.error(import_error)
raise
__author__ = ["<NAME> <<EMAIL>>"]
__license__ = "MIT License. See LICENSE."
# Dictionaries of phonological reconstructions for use in transcribing.
# <NAME>. 1965. Vox Latina.
LATIN = {
"Classical": {
"Allen": {
"correspondence": {
"p": "p",
"t": "t̪",
"c": "k",
"k": "k",
"qu": "kʷ",
"b": "b",
"d": "d̪",
"g": "g",
"gu": "gʷ",
"ph": "pʰ",
"th": "t̪ʰ",
"ch": "kʰ",
"n": "n̪",
"m": "m",
"r": "r",
"rh": "r", # Voiceless r was spelled but not pronounced.
"l": "l",
"f": "f",
"s": "s",
"h": "h",
"j": "j",
"v": "w",
"x": "ks",
"z": "z",
"ī": "iː",
"ū": "uː",
"i": "ɪ",
"u": "ʊ",
"e": "ɛ",
"o": "ɔ",
"ē": "eː",
"ō": "oː",
"a": "a",
"ā": "aː",
"y": "y",
"ȳ": "y:",
"ae": "aj",
"au": "aw",
"oe": "oj",
"eu": "ew",
"ei": "ej",
},
"diphthongs": [ # and digraphs
"qu",
"gu",
"ph",
"th",
"ch",
"rh",
"ae",
"au",
"oe",
"eu",
"ei",
],
"punctuation": [
".",
",",
";",
":",
"-",
"–",
"?",
"!",
"(",
")",
"'",
'"',
"[",
"]",
],
"alternations": [
"j_maker", # word initial and intervocalic i is assumed j
"w_maker", # word initial and intervocalic u is assumed w
"wj_block", # prevents accidental sequence wj
"uj_diph_maker", # after w and j have been created, recognizes
# <ui> = [uj]
"b_devoice", # b devoices before /t/, /s/
"g_n_nasality_assimilation", # only before n
"n_place_assimilation", # should also do labial, and
# labio-dental before f.
"final_m_drop", # m drops and lengthens + nasalizes preceding
# vowel word-finally
"ns_nf_lengthening", # vowels lengthen before ns or nf
"l_darken", # l darkens to ɫ in coda
"j_z_doubling", # intervocalic j and z > jj and zz
"long_vowel_catcher", # corrects accidental instances of ɪː
# and similar.
"e_i_closer_before_vowel", # ɛ to ɛ̣, ɪ to ɪ̣ before another vowel
"intervocalic_j", # j glide between vowels
],
}
}
}
# Unhandled exceptions: preposition "ad" becomes [at̪] not [ad̪] before s and t
# subf > suff, subm > summ, subg > sugg, subc > succ, subr > rr
# j exceptions like ad*j*ectivum and con*j*unx
# All IPA characters used sorted by natural classes.
# WILL NEED ADDITIONS AS MORE RECONSTRUCTIONS USED
IPA = {
"voiced": [ # [+voice]
"b",
"d̪",
"g",
"gʷ",
"m",
"n̪",
"ŋ",
"ɱ" "l",
"ɫ",
"r",
"z",
],
"labial": ["b", "p", "pʰ", "m"], # [+labial, -labiodental]
"labiodental": ["f", "ɱ"], # [+labial, +labiodental]
"coronal": ["d̪", "t̪", "t̪ʰ", "n̪", "s", "z", "r", "l", "ɫ"], # [+coronal]
"velar": ["g", "k", "kʰ", "kʷ", "gʷ", "ŋ"], # [+velar]
"nasal": ["m", "ɱ", "n", "ŋ"], # [+consonantal, +nasal]
"approximant": ["l", "ɫ", "r", "j", "w"], # [+approximant]
"continuant": ["h", "f", "s", "z", "l", "ɫ", "r"], # [+continuant, +consonantal]
"vowel": [ # [-consonantal -approximant]
"a",
"aː",
"ɛ",
"ɛ̣",
"eː",
"ɪ",
"ɪ̣",
"iː",
"ɔ",
"oː",
"ʊ",
"u",
"uː",
"y",
"yː",
"ãː",
"ẽː",
"ĩː",
"õː",
"ũː",
],
"high": [ # [-consonantal, +high]
"ɪ",
"ɪ̣",
"iː",
"ʊ",
"u",
"uː",
"y",
"yː",
"ɪ̃",
"ɪ̣̃",
"ĩː",
"ʊ̃",
"ũ",
"ũː",
"ỹ",
"ỹː",
],
"mid": [ # [-consonantal, -high, -low]
"ɛ",
"ɛ̣",
"eː",
"ɔ",
"oː",
"ɛ̃",
"ɛ̣̃",
"ẽː",
"ɔ̃",
"õː",
],
"low": ["a", "aː", "ã", "ãː"], # [-consonantal, +low]
"front": [ # [-consonantal, +front]
"ɪ",
"ɪ̣",
"iː",
"y",
"yː",
"ɛ",
"ɛ̣",
"eː",
"ɪ̃",
"ɪ̣̃",
"ĩː",
"ỹ",
"ỹː",
"ɛ̃",
"ɛ̣̃",
"ẽː",
],
"central": ["a", "aː", "ã", "ãː"], # [-consonantal, -front, -back]
"back": [ # [-consonantal, +back]
"ʊ",
"u",
"uː",
"ɔ",
"oː",
"ʊ̃",
"ũ",
"ũː",
"ɔ̃",
"õː",
],
"boundary": ["#"],
}
class Phone:
"""A phonological unit to be manipulated and represented as an IPA string."""
# Has a bundle of feature values that help classify it so that it can
# trigger contextual pronunciation changes.
def __init__(self, ipa_ch: str):
"""
Analyzes features of phonetic signs
:param ipa_ch: phonetic sign from IPA
"""
# eventually exported to output string
self.ipa = unicodedata.normalize("NFC", ipa_ch)
# will be assigned once in Word, as the pre-context of this phone
self.left = ""
# .... as the post-context of this phone
self.right = ""
# bundle of features, stored as booleans:
self.vce = self.ipa in IPA["voiced"]
self.lab = self.ipa in IPA["labial"]
self.lbd = self.ipa in IPA["labiodental"]
self.cor = self.ipa in IPA["coronal"]
self.vel = self.ipa in IPA["velar"]
self.nas = self.ipa in IPA["nasal"]
self.app = self.ipa in IPA["approximant"]
self.cont = self.ipa in IPA["continuant"]
self.vow = self.ipa in IPA["vowel"]
self.hi = self.ipa in IPA["high"]
self.mid = self.ipa in IPA["mid"]
self.lo = self.ipa in IPA["low"]
self.fr = self.ipa in IPA["front"]
self.ctr = self.ipa in IPA["central"]
self.bk = self.ipa in IPA["back"]
self.bound = self.ipa in IPA["boundary"]
def __repr__(self):
return self.ipa
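# For example (illustrative): Phone("d̪") has cor=True and vce=True but vow=False,
# while Phone("aː") has vow=True, lo=True and ctr=True.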
class Word:
"""Max. phonological unit, contains phones and triggers alternations."""
# An ordered collection of Phones, which are bundles of
# features/IPA strings.
def __init__(self, ipa_str: str, root: dict):
"""
:param ipa_str:
:param root:
"""
self.string = unicodedata.normalize("NFC", ipa_str)
# Appropriate directory in the reconstruction dictionary
self.root = root
# list of contextual pronunciation alternations
self.alts = self.root["alternations"]
# Turns string of IPA characters into list of Phones
self.phones = [Phone(c) for c in re.findall(r".[̪̣̃ʷʰ]*ː?", self.string)]
self.syllables = []
def _refresh(self):
"""
Assigns left and right contexts for every phone
"""
for n in range(len(self.phones)):
p = self.phones[n]
if n != 0:
p.left = self.phones[n - 1]
else:
p.left = Phone("#")
if n != len(self.phones) - 1:
p.right = self.phones[n + 1]
else:
p.right = Phone("#")
def _j_maker(self):
"""
Assume word-initial or intervocalic i to be j
"""
out_phones = self.phones
target = Phone("j")
for n in range(len(self.phones)):
p = self.phones[n]
if p.ipa == "ɪ" and (
(p.left.bound and p.right.vow) or (p.left.vow and p.right.vow)
):
out_phones[n] = target
self.phones = out_phones
self._refresh()
def _w_maker(self):
"""
Assume word-initial or intervocalic u to be w
"""
out_phones = self.phones
target = Phone("w")
for n in range(len(self.phones)):
p = self.phones[n]
if ((p.ipa == "ʊ") or (p.ipa == "u")) and (
(p.left.bound and (p.right.vow or p.right.ipa == "j"))
or (p.left.vow and p.right.vow)
):
out_phones[n] = target
self.phones = out_phones
self._refresh()
def _wj_block(self):
"""
Addendum to correct possible 'wj' sequences
"""
out_phones = self.phones
target = Phone("ɪ")
for n in range(len(self.phones)):
p = self.phones[n]
if p.left.ipa == "w" and p.ipa == "j":
out_phones[n] = target
self.phones = out_phones
self._refresh()
def _uj_diph_maker(self):
"""
Find accidental "ʊɪ" instances and treat as diphthong [uj].
"""
out_phones = self.phones
for n in range(len(self.phones)):
p = self.phones[n]
if p.left.ipa == "ʊ" and p.ipa == "ɪ":
out_phones[n - 1] = Phone("u")
out_phones[n] = Phone("j")
self.phones = out_phones
self._refresh()
def _b_devoice(self):
"""
Pronounce b as p when followed by s or t.
"""
out_phones = self.phones
target = Phone("p")
for n in range(len(self.phones)):
p = self.phones[n]
if p.ipa == "b" and (p.right.ipa == "s" or p.right.ipa == "t̪"):
out_phones[n] = target
self.phones = out_phones
self._refresh()
def _final_m_drop(self):
"""
Final m nasalizes and lengthens nucleus and drops.
"""
out_phones = self.phones
for n in range(len(self.phones)):
p = self.phones[n]
if p.left.vow and p.ipa == "m" and p.right.bound:
out_phones[n - 1] = Phone(p.left.ipa + "̃ː")
del out_phones[n]
self.phones = out_phones
self._refresh()
def _n_place_assimilation(self):
"""
Pronounce n as ŋ when followed by velar.
"""
out_phones = self.phones
target = Phone("ŋ")
for n in range(len(self.phones)):
p = self.phones[n]
if p.ipa == "n̪" and p.right.vel:
out_phones[n] = target
self.phones = out_phones
self._refresh()
def _g_n_nasality_assimilation(self):
"""
Pronounce g as ŋ when followed by n.
"""
out_phones = self.phones
target = Phone("ŋ")
for n in range(len(self.phones)):
p = self.phones[n]
if p.ipa == "g" and p.right.ipa == "n̪":
out_phones[n] = target
self.phones = out_phones
self._refresh()
def _ns_nf_lengthening(self):
"""
Lengthen vowel before ns or nf.
"""
out_phones = self.phones
for n in range(len(self.phones)):
p = self.phones[n]
if (
p.left.vow
and "ː" not in p.left.ipa
and p.ipa == "n̪"
and (p.right.ipa == "s" or p.right.ipa == "f")
):
out_phones[n - 1] = Phone(p.left.ipa + "ː")
self.phones = out_phones
self._refresh()
def _l_darken(self):
"""
Pronounce l as ɫ in coda.
"""
out_phones = self.phones
target = Phone("ɫ")
for n in range(len(self.phones)):
p = self.phones[n]
if p.ipa == "l" and ((not p.right.vow) or p.right.bound):
out_phones[n] = target
self.phones = out_phones
self._refresh()
def _j_z_doubling(self):
"""
Double j and z between vowels.
"""
out_phones = self.phones
dupl = []
for n in range(len(self.phones)):
p = self.phones[n]
if p.right.vow and (p.ipa == "j" or p.ipa == "z") and p.left.vow:
dupl.append((True, n - len(self.phones), p.ipa))
else:
dupl.append((False, n - len(self.phones), None))
for t in sorted(dupl, key=lambda tup: tup[1]):
if t[0]:
out_phones.insert(t[1], Phone(t[2]))
self.phones = out_phones
self._refresh()
def _long_vowel_catcher(self):
"""
Replace ɪː with iː, ʊː with uː, and ɛː with eː.
"""
out_phones = self.phones
target_dict = {
"ɪː": "iː",
"ʊː": "uː",
"ɛː": "eː",
"ɪ̃ː": "ĩː",
"ʊ̃ː": "ũː",
"ɛ̃ː": "ẽː",
}
for n in range(len(self.phones)):
p = self.phones[n]
if p.ipa in target_dict.keys():
out_phones[n] = Phone(target_dict[p.ipa])
self.phones = out_phones
self._refresh()
def _e_i_closer_before_vowel(self):
"""
e and i become closer (̣) when followed by a vowel.
"""
out_phones = self.phones
for n in range(len(self.phones)):
p = self.phones[n]
if (p.ipa == "ɛ" or p.ipa == "ɪ") and p.right.vow:
out_phones[n] = Phone(p.ipa + "̣")
self.phones = out_phones
self._refresh()
def _intervocalic_j(self):
"""
epenthesize j between vowels
"""
out_phones = self.phones
target = Phone("j")
j = []
for n in range(len(self.phones)):
p = self.phones[n]
if p.left.vow and p.vow:
j.append((True, n - len(self.phones)))
else:
j.append((False, n - len(self.phones)))
for t in sorted(j, key=lambda tup: tup[1]):
if t[0]:
out_phones.insert(t[1], target)
self.phones = out_phones
self._refresh()
# list of all possible alternations
ALTERNATIONS = [
("j_maker", _j_maker),
("w_maker", _w_maker),
("wj_block", _wj_block),
("uj_diph_maker", _uj_diph_maker),
("b_devoice", _b_devoice),
("final_m_drop", _final_m_drop),
("n_place_assimilation", _n_place_assimilation),
("g_n_nasality_assimilation", _g_n_nasality_assimilation),
("ns_nf_lengthening", _ns_nf_lengthening),
("l_darken", _l_darken),
("j_z_doubling", _j_z_doubling),
("long_vowel_catcher", _long_vowel_catcher),
("e_i_closer_before_vowel", _e_i_closer_before_vowel),
("intervocalic_j", _intervocalic_j),
]
def _alternate(self):
"""
After setting left and right contexts for every phone...
"""
self._refresh()
# runs all alternations
for a in Word.ALTERNATIONS:
if a[0] in self.alts:
a[1](self)
def syllabify(self) -> List[List[Phone]]:
"""
Takes Word input and returns a list of syllables
as (onset, nucleus, coda) tuples
where onset, nucleus, and coda are all lists of Phones.
:return: list of syllables
"""
nuclei = []
for n in range(len(self.phones)):
p = self.phones[n]
if p.vow:
nuclei.append(n)
# initialize syllables with a tuple for the first syllable
# where onset is everything before the first nucleus
# and coda remains unknown.
syllables = [[self.phones[0 : nuclei[0]], [self.phones[nuclei[0]]], []]]
# continue for every nucleus, assuming that everything between
# the previous nucleus and it is the onset.
for x in range(len(nuclei) - 1):
i = nuclei[x + 1]
onset = self.phones[nuclei[x] + 1 : i]
nucleus = [self.phones[i]]
syllables.append([onset, nucleus, []])
# assume that everything after the final nucleus is final coda.
syllables[-1][2] = self.phones[nuclei[-1] + 1 :]
# now go through and check onset viability
for x in range(len(syllables) - 1):
onset = syllables[x + 1][0]
nucleus = syllables[x + 1][1]
coda = syllables[x + 1][2]
# trim all onsets greater than the maximum 2 phones
# removing extra phones from the left
# and appending them to the previous coda
if len(onset) > 2:
trim = onset[:-2]
del onset[:-2]
syllables[x][2] = trim
# once onset is 2 phones...
if len(onset) == 2:
# stop + liquid is the only viable sequence and passes
if (
(not onset[0].cont)
and (not onset[0].app)
and (onset[1].nas or onset[1].app)
):
break
# otherwise, onset must be right Phone only
# the left phone is appended to the previous coda
else:
trim = onset[0]
del onset[0]
syllables[x][2] += [trim]
self.syllables = syllables
return syllables
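    # Worked example (illustrative): the single-syllable word [kʷoː] syllabifies
    # to [[[Phone('kʷ')], [Phone('oː')], []]], i.e. onset, nucleus, empty coda.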
def _print_ipa(self, syllabify, accentuate):
"""
Depending on the syllabify and accentuate parameters
Prints an appropriately marked up version of the transcription
:param syllabify:
:param accentuate:
:return:
"""
out = ""
if syllabify:
syllables = self.syllabify()
# the ultima is the final syllable
ultima = syllables[-1]
# identify which syllable has stress and store index as accent
if accentuate:
# one syllable words have ultimate stress
if len(syllables) == 1:
accent = -1
# two syllable words have penultimate stress
elif len(syllables) == 2:
accent = -2
else:
# penult is second to last syllable
penult = syllables[-2]
# if penult is diphthong (long), penultimate stress
if len(penult[1]) > 1:
accent = -2
# if penult is long vowel, penultimate stress
elif "ː" in penult[1][0].ipa:
accent = -2
# if penult has coda (closed/long by position),
# penultimate stress
elif len(penult[2]) > 0:
accent = -2
# otherwise (penult is short) antepenultimate stress
else:
accent = -3
# loop over syllables by index
for x in range(len(syllables)):
s = syllables[x]
# if index matches accent index set above
if x - len(syllables) == accent:
# precede that syllable with
# IPA stress punctuation: '
out += "'"
# then, print IPA by syllable segment as usual
for n in s:
for p in n:
out += p.ipa
                    # separate all syllables with IPA syllable punctuation: .
if s != ultima:
out += "."
# if no accentuation flag, proceed with syllabified printing
else:
for s in syllables:
for n in s:
for p in n:
out += p.ipa
                    # separate all syllables with IPA syllable punctuation: .
if s != ultima:
out += "."
# if no syllabification flag, proceed with
# unsyllabified IPA printing
else:
for p in self.phones:
out += p.ipa
return out
class Transcriber:
"""Uses a reconstruction to transcribe a orthographic string into IPA."""
def __init__(self, dialect: str, reconstruction: str):
"""
:param dialect: Latin dialect
:param reconstruction: reconstruction method
"""
self.lect = dialect
self.recon = reconstruction
self.root = LATIN[self.lect][self.recon]
self.table = self.root["correspondence"]
self.diphs = self.root["diphthongs"]
self.punc = self.root["punctuation"]
self.macronizer = m.Macronizer("tag_ngram_123_backoff")
def _parse_diacritics(self, ch: str) -> str:
"""
EG: input with base a -> a/LENGTH/DIAERESIS/
:param ch: character
:return: a string with separated and organized diacritics for easier access later.
"""
out = chars.base(ch).lower() # Initialize out as base of character.
length = chars.length(ch)
dia = chars.diaeresis(ch)
out += "/" # Create 1st boundary
# If any length, place between 1st and 2nd boundary
if length:
out += length
out += "/" # Create 2nd boundary
if dia: # If any diaeresis,
out += dia # place between second and final boundary
out += "/" # Create final boundary
return out
def _prep_text(self, text: str):
"""
Performs preparatory tasks grouping and reordering characters
in order to make transcription formulaic.
:param text:
:return:
"""
string_in = "".join([self._parse_diacritics(ch) for ch in text])
# searches for diphthongs and treats them as one phone
for d in self.diphs:
d1 = d[0]
d2 = d[1]
pattern = r"(" + d1 + r")\/\/\/(" + d2 + r")(\/\/\/)"
string_in = re.sub(pattern, r"\1\2\3", string_in)
tup_out = re.findall(r"(..?)\/([̄̆]*)\/(¨?)\/", string_in)
return tup_out
def transcribe(
self,
text,
macronize=True,
syllabify=True,
accentuate=True,
with_squared_brackets=True,
):
"""
>>> allen_transcriber = Transcriber("Classical", "Allen")
>>> example = allen_transcriber.transcribe("Quo usque tandem, O Catilina, " + "abutere nostra patientia?")
>>> example
"['kʷoː 'ʊs.kʷɛ 't̪an̪.d̪ẽː 'oː ka.t̪ɪ.'liː.n̪aː a.buː.'t̪eː.rɛ 'n̪ɔs.t̪raː pa.t̪ɪ̣.'jɛn̪.t̪ɪ̣.ja]"
:param text: text to transcribe
:param macronize: if True, macronize result
:param syllabify: if True, syllabify result
:param accentuate: if True, accentuate result
:param with_squared_brackets: if True, put squared brackets around transcription
:return: transcribed text
"""
# if macronize, will first use the tagger to macronize input
# otherwise, input will be the raw input string
if macronize:
text = self.macronizer.macronize_text(text)
# input is word-tokenized, stripped of non-diacritic punctuation,
# and diphthongs and diacritics are handled
inp = [
self._prep_text(w) for w in wordpunct_tokenize(text) if w not in self.punc
]
words = []
for w in inp:
out = ""
for c in w:
if "̄" in c[1]:
macron_added = c[0] + "̄"
ipa = self.table.get(macron_added, macron_added)
else:
ipa = self.table.get(c[0], c[0])
out += ipa
transcription = Word(out, self.root)
transcription._alternate()
words.append(transcription)
# Encloses output in brackets, proper notation for surface form.
result = " ".join([w._print_ipa(syllabify, accentuate) for w in words])
if with_squared_brackets:
result = "[" + result + "]"
return result
|
chainer-1.4/seg_ffnn.py | ysadamori/chainer_LSTM_seq2seq_example | 137 | 12692142 | #!/usr/bin/python3
import my_settings
import sys
import math
import numpy as np
from argparse import ArgumentParser
from chainer import functions, optimizers
import util.generators as gens
from util.functions import trace, fill_batch
from util.model_file import ModelFile
from util.vocabulary import Vocabulary
from util.chainer_cpu_wrapper import wrapper
#from util.chainer_gpu_wrapper import wrapper
class SegmentationModel:
def __init__(self):
pass
def __make_model(self):
self.__model = wrapper.make_model(
w_xh = functions.EmbedID(2 * self.__n_context * len(self.__vocab), self.__n_hidden),
w_hy = functions.Linear(self.__n_hidden, 1),
)
@staticmethod
def new(vocab, n_context, n_hidden):
self = SegmentationModel()
self.__vocab = vocab
self.__n_context = n_context
self.__n_hidden = n_hidden
self.__make_model()
return self
def save(self, filename):
with ModelFile(filename, 'w') as fp:
self.__vocab.save(fp.get_file_pointer())
fp.write(self.__n_context)
fp.write(self.__n_hidden)
wrapper.begin_model_access(self.__model)
fp.write_embed(self.__model.w_xh)
fp.write_linear(self.__model.w_hy)
wrapper.end_model_access(self.__model)
@staticmethod
def load(filename):
self = SegmentationModel()
with ModelFile(filename) as fp:
self.__vocab = Vocabulary.load(fp.get_file_pointer())
self.__n_context = int(fp.read())
self.__n_hidden = int(fp.read())
self.__make_model()
wrapper.begin_model_access(self.__model)
fp.read_embed(self.__model.w_xh)
fp.read_linear(self.__model.w_hy)
wrapper.end_model_access(self.__model)
return self
def init_optimizer(self):
self.__opt = optimizers.AdaGrad(lr=0.01)
self.__opt.setup(self.__model)
def __make_input(self, is_training, text):
c = self.__vocab.stoi
k = self.__n_context - 1
word_list = text.split()
letters = [c('<s>')] * k + [c(x) for x in ''.join(word_list)] + [c('</s>')] * k
if is_training:
labels = []
for x in word_list:
labels += [-1] * (len(x) - 1) + [1]
return letters, labels[:-1]
else:
return letters, None
def __forward(self, is_training, text):
m = self.__model
tanh = functions.tanh
letters, labels = self.__make_input(is_training, text)
scores = []
accum_loss = wrapper.zeros(()) if is_training else None
for n in range(len(letters) - 2 * self.__n_context + 1):
s_hu = wrapper.zeros((1, self.__n_hidden))
for k in range(2 * self.__n_context):
wid = k * len(self.__vocab) + letters[n + k]
s_x = wrapper.make_var([wid], dtype=np.int32)
s_hu += m.w_xh(s_x)
s_hv = tanh(s_hu)
s_y = tanh(m.w_hy(s_hv))
scores.append(float(wrapper.get_data(s_y)))
if is_training:
s_t = wrapper.make_var([[labels[n]]])
accum_loss += functions.mean_squared_error(s_y, s_t)
return scores, accum_loss
def train(self, text):
self.__opt.zero_grads()
scores, accum_loss = self.__forward(True, text)
accum_loss.backward()
self.__opt.clip_grads(5)
self.__opt.update()
return scores
def predict(self, text):
return self.__forward(False, text)[0]
def parse_args():
def_vocab = 2500
def_hidden = 100
def_epoch = 100
def_context = 3
p = ArgumentParser(description='Word segmentation using feedforward neural network')
p.add_argument('mode', help='\'train\' or \'test\'')
p.add_argument('corpus', help='[in] source corpus')
p.add_argument('model', help='[in/out] model file')
p.add_argument('--vocab', default=def_vocab, metavar='INT', type=int,
help='vocabulary size (default: %d)' % def_vocab)
p.add_argument('--hidden', default=def_hidden, metavar='INT', type=int,
help='hidden layer size (default: %d)' % def_hidden)
p.add_argument('--epoch', default=def_epoch, metavar='INT', type=int,
help='number of training epoch (default: %d)' % def_epoch)
p.add_argument('--context', default=def_context, metavar='INT', type=int,
help='width of context window (default: %d)' % def_context)
args = p.parse_args()
# check args
try:
if args.mode not in ['train', 'test']: raise ValueError('you must set mode = \'train\' or \'test\'')
if args.vocab < 1: raise ValueError('you must set --vocab >= 1')
if args.hidden < 1: raise ValueError('you must set --hidden >= 1')
if args.epoch < 1: raise ValueError('you must set --epoch >= 1')
if args.context < 1: raise ValueError('you must set --context >= 1')
except Exception as ex:
p.print_usage(file=sys.stderr)
print(ex, file=sys.stderr)
sys.exit()
return args
def make_hyp(letters, scores):
hyp = letters[0]
for w, s in zip(letters[1:], scores):
if s >= 0:
hyp += ' '
hyp += w
return hyp
def train_model(args):
trace('making vocabularies ...')
vocab = Vocabulary.new(gens.letter_list(args.corpus), args.vocab)
trace('start training ...')
model = SegmentationModel.new(vocab, args.context, args.hidden)
for epoch in range(args.epoch):
trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
trained = 0
model.init_optimizer()
with open(args.corpus) as fp:
for text in fp:
word_list = text.split()
if not word_list:
continue
text = ' '.join(word_list)
letters = ''.join(word_list)
scores = model.train(text)
trained += 1
hyp = make_hyp(letters, scores)
trace(trained)
trace(text)
trace(hyp)
trace(' '.join('%+.1f' % x for x in scores))
if trained % 100 == 0:
trace(' %8d' % trained)
        trace('saving model ...')
model.save(args.model + '.%03d' % (epoch + 1))
trace('finished.')
def test_model(args):
trace('loading model ...')
model = SegmentationModel.load(args.model)
trace('generating output ...')
with open(args.corpus) as fp:
for text in fp:
letters = ''.join(text.split())
if not letters:
print()
continue
scores = model.predict(text)
hyp = make_hyp(letters, scores)
print(hyp)
trace('finished.')
def main():
args = parse_args()
trace('initializing CUDA ...')
wrapper.init()
if args.mode == 'train': train_model(args)
elif args.mode == 'test': test_model(args)
if __name__ == '__main__':
main()
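# --- Illustrative invocations (not part of the original script; file names are hypothetical) ---
# Train a segmentation model for 20 epochs on a whitespace-segmented corpus:
#   python3 seg_ffnn.py train corpus.txt segmodel --vocab 2500 --hidden 100 --epoch 20 --context 3
# Segment raw text with the snapshot saved after epoch 20:
#   python3 seg_ffnn.py test raw.txt segmodel.020 > segmented.txt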
|
BloodHoundLoader.py | 0xvm/BloodHoundQueries | 198 | 12692168 | <filename>BloodHoundLoader.py
#!/usr/bin/env python3
import argparse
import logging
import socket
from importlib import util
if util.find_spec("neo4j") is None:
print('[-] Neo4j library is not installed, please execute the following before: pip3 install neo4j')
exit()
from neo4j import GraphDatabase
parser = argparse.ArgumentParser(description = 'BloodHoundLoader, tool to set attributes in BloodHound for all the items contained in files')
parser.add_argument('--dburi', dest = 'databaseUri', help = 'Database URI', default = 'bolt://localhost:7687')
parser.add_argument('--dbuser', dest = 'databaseUser', help = 'Database user', default = 'neo4j')
parser.add_argument('--dbpassword', dest = 'databasePassword', help = 'Database password', default = '<PASSWORD>')
group = parser.add_mutually_exclusive_group(required = True)
group.add_argument('-m', '--mode', dest = 'mode', help = 'Mode, h = set to high value, o = set to owned, s = set to no SMB signing', choices = ['h', 'o', 's'])
group.add_argument('-o', '--operation', dest = 'operation', help = 'Operation to perform if the mode is not set, for instance "highvalue = true"')
parser.add_argument('-c', '--comment', dest = 'comment', help = 'Comment for the log', default = '')
parser.add_argument('-v', '--verbose', dest = 'verbose', help = 'Verbose mode', action = 'store_true')
parser.add_argument('filePaths', nargs = '+', help = 'Paths of files the to import')
arguments = parser.parse_args()
loggingLevel = (logging.DEBUG if arguments.verbose else logging.INFO)
logger = logging.getLogger('BloodHoundLoader')
logger.setLevel(loggingLevel)
consoleLogger = logging.StreamHandler()
consoleLogger.setLevel(loggingLevel)
logger.addHandler(consoleLogger)
logger.debug('[*] Arguments: ' + str(arguments))
if arguments.mode == 'h':
operation = 'highvalue = true'
elif arguments.mode == 'o':
operation = 'owned = true'
elif arguments.mode == 's':
operation = 'hassigning = false'
else:
operation = arguments.operation
logger.debug('[*] Operation: ' + operation)
try:
driver = GraphDatabase.driver(arguments.databaseUri, auth = (arguments.databaseUser, arguments.databasePassword))
logger.info('[*] Connected to BloodHound Neo4j database')
except:
logger.error('[-] Connection to BloodHound Neo4j database failed')
exit()
with driver.session() as session:
for filePath in arguments.filePaths:
with open(filePath) as file:
logger.info('[*] Opened file: ' + filePath)
for line in file:
item = line.strip()
logger.debug('[*] Current item: ' + item)
if item:
name = item.upper()
log = '(file: ' + filePath + ', comment: ' + arguments.comment + ')'
query = 'MATCH (a {name: $name}) SET a.' + operation + ', a.BloodHoundLoaderLog = $log RETURN COUNT(*) AS count'
results = session.run(query, name = name, log = log)
count = results.single()['count']
if count > 0:
logger.info('[+] Modified: ' + item)
logger.debug('[*] Number of modified entries: ' + str(count))
logger.debug('[*] Stored message: ' + log)
else:
logger.error('[-] Could not modify: ' + item)
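# --- Illustrative invocations (not part of the original script; hosts, credentials and files are hypothetical) ---
# Mark every principal listed in owned.txt as owned:
#   python3 BloodHoundLoader.py --dbpassword neo4jpass -m o -c "phishing wave 1" owned.txt
# Set an arbitrary attribute instead of using one of the predefined modes:
#   python3 BloodHoundLoader.py --dburi bolt://10.0.0.5:7687 --dbpassword neo4jpass -o "highvalue = true" targets.txt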
|
tests/core/test_dispatch.py | ashu96902/Pincer | 118 | 12692175 | <filename>tests/core/test_dispatch.py<gh_stars>100-1000
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from pincer.core.dispatch import GatewayDispatch
class TestDispatch:
op = 123
data = {
"foo": "bar",
"bar": "foo"
}
seq = 456
event_name = "test_event"
dispatch_string = (
'{"op": 123, "d": {"foo": "bar", "bar": "foo"}, '
'"s": 456, "t": "test_event"}'
)
dispatch = GatewayDispatch(op, data, seq, event_name)
def test_string_fmt(self):
"""
        Tests whether the dispatch class's string conversion is correct.
"""
assert str(self.dispatch) == self.dispatch_string
def test_from_string(self):
"""
        Tests whether the from_string function properly parses the
        string and creates a GatewayDispatch instance.
"""
assert (
str(GatewayDispatch.from_string(self.dispatch_string))
== self.dispatch_string
)
|
tracardi/process_engine/action/v1/strings/regex_match/model/model.py | bytepl/tracardi | 153 | 12692208 | <filename>tracardi/process_engine/action/v1/strings/regex_match/model/model.py
from pydantic import BaseModel
class Configuration(BaseModel):
pattern: str
text: str
group_prefix: str = "Group"
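# --- Illustrative usage sketch (not part of the original module; the values below are hypothetical) ---
if __name__ == "__main__":
    config = Configuration(pattern=r"(?P<year>\d{4})-(?P<month>\d{2})", text="2021-07", group_prefix="Date")
    print(config.dict())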
|
projects/oak/rustc.py | darkma773r/oss-fuzz | 7,629 | 12692213 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import subprocess
# Disable coverage instrumentation for crates that cause problems with it.
sys.argv[0] = "rustc"
if "tokio_util" in sys.argv or "hyper" in sys.argv:
try:
sys.argv.remove("-Zinstrument-coverage")
except:
pass
print(sys.argv)
subprocess.call(sys.argv)
|
gitlab_runner/tests/test_integration.py | mchelen-gov/integrations-core | 663 | 12692214 | <reponame>mchelen-gov/integrations-core
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
import pytest
from requests.exceptions import ConnectionError
from datadog_checks.gitlab_runner import GitlabRunnerCheck
from .common import BAD_CONFIG, CONFIG, CUSTOM_TAGS, GITLAB_RUNNER_VERSION, HOST, assert_check
pytestmark = [pytest.mark.usefixtures('dd_environment'), pytest.mark.integration]
def test_check(aggregator, dd_run_check):
instance = CONFIG['instances'][0]
init_config = copy.deepcopy(CONFIG['init_config'])
gitlab_runner = GitlabRunnerCheck('gitlab_runner', init_config, instances=[instance])
dd_run_check(gitlab_runner)
dd_run_check(gitlab_runner)
assert_check(aggregator)
aggregator.assert_all_metrics_covered()
def test_connection_failure(aggregator):
"""
Make sure we're failing when the URL isn't right
"""
gitlab_runner = GitlabRunnerCheck('gitlab', BAD_CONFIG['init_config'], instances=BAD_CONFIG['instances'])
with pytest.raises(ConnectionError):
gitlab_runner.check(BAD_CONFIG['instances'][0])
# We should get two failed service checks
aggregator.assert_service_check(
GitlabRunnerCheck.MASTER_SERVICE_CHECK_NAME,
status=GitlabRunnerCheck.CRITICAL,
tags=['gitlab_host:{}'.format(HOST), 'gitlab_port:1234'] + CUSTOM_TAGS,
count=1,
)
aggregator.assert_service_check(
GitlabRunnerCheck.PROMETHEUS_SERVICE_CHECK_NAME, status=GitlabRunnerCheck.CRITICAL, tags=CUSTOM_TAGS, count=1
)
def test_version_metadata(aggregator, datadog_agent, dd_run_check):
check_instance = GitlabRunnerCheck('gitlab_runner', CONFIG['init_config'], instances=[CONFIG['instances'][0]])
check_instance.check_id = 'test:123'
dd_run_check(check_instance)
raw_version = GITLAB_RUNNER_VERSION
major, minor, patch = raw_version.split('.')
version_metadata = {
'version.scheme': 'semver',
'version.major': major,
'version.minor': minor,
'version.patch': patch,
'version.raw': raw_version,
}
datadog_agent.assert_metadata('test:123', version_metadata)
|
desktop/core/ext-py/ndg_httpsclient-0.4.0/ndg/httpsclient/urllib2_build_opener.py | kokosing/hue | 5,079 | 12692227 | """urllib2 style build opener integrates with HTTPSConnection class from this
package.
"""
__author__ = "<NAME>"
__date__ = "21/12/10"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "<EMAIL>"
__revision__ = '$Id$'
import logging
import sys
# Py 2 <=> 3 compatibility for class type checking
if sys.version_info[0] > 2:
class_type_ = type
from urllib.request import (ProxyHandler, UnknownHandler,
HTTPDefaultErrorHandler, FTPHandler,
FileHandler, HTTPErrorProcessor,
HTTPHandler, OpenerDirector,
HTTPRedirectHandler)
else:
import types
class_type_ = types.ClassType
from urllib2 import (ProxyHandler, UnknownHandler, HTTPDefaultErrorHandler,
FTPHandler, FileHandler, HTTPErrorProcessor,
HTTPHandler, OpenerDirector, HTTPRedirectHandler)
from ndg.httpsclient.https import HTTPSContextHandler
log = logging.getLogger(__name__)
# Copied from urllib2 with modifications for ssl
def build_opener(*handlers, **kw):
"""Create an opener object from a list of handlers.
The opener will use several default handlers, including support
for HTTP and FTP.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
def isclass(obj):
return isinstance(obj, class_type_) or hasattr(obj, "__bases__")
opener = OpenerDirector()
default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
FTPHandler, FileHandler, HTTPErrorProcessor]
check_classes = list(default_classes)
check_classes.append(HTTPSContextHandler)
skip = []
for klass in check_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.append(klass)
elif isinstance(check, klass):
skip.append(klass)
for klass in default_classes:
if klass not in skip:
opener.add_handler(klass())
# Pick up SSL context from keyword settings
ssl_context = kw.get('ssl_context')
# Add the HTTPS handler with ssl_context
if HTTPSContextHandler not in skip:
opener.add_handler(HTTPSContextHandler(ssl_context))
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
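# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a PyOpenSSL context, which is what HTTPSContextHandler expects;
# the URL is only an example.
if __name__ == '__main__':
    from OpenSSL import SSL
    ssl_ctx = SSL.Context(SSL.TLSv1_2_METHOD)
    opener = build_opener(ssl_context=ssl_ctx)
    response = opener.open('https://www.example.org/')
    print(response.code)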
|
testing/MLDB-927-null-row-output.py | kstepanmpmg/mldb | 665 | 12692237 | #
# MLDB-927-null-row-output.py
# mldb.ai inc, 2015
# This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
#
import json
import datetime
import difflib
from mldb import mldb
dataset_index = 1
def run_transform(when, format):
global dataset_index
dataset_index += 1
result = mldb.put("/v1/procedures/when_procedure", {
"type": "transform",
"params": {
"inputData": "select * from dataset1 when " + when,
"outputDataset": {
"id": "dataset_out_" + str(dataset_index),
"type":"sparse.mutable"
}
}
})
mldb.post("/v1/procedures/when_procedure/runs")
result = mldb.get('/v1/query',
q="SELECT * FROM dataset_out_" + str(dataset_index) + " ORDER BY rowHash()",
format=format)
rows = result.json()
return rows
def load_test_dataset():
ds1 = mldb.create_dataset({
'type': 'sparse.mutable',
'id': 'dataset1'})
ds1.record_row('user1', [['x', 1, same_time_tomorrow],
['y', 2, same_time_tomorrow]])
ds1.record_row('user2', [['x', 3, now], ['y', 4, now]])
ds1.commit()
def compare_json(json1, json2, format):
if json1 != json2:
mldb.log("output format differ:\n")
for line in difflib.ndiff(json1.splitlines(), json2.splitlines()):
mldb.log(line)
assert json1 == json2, \
"difference in the way null values are outputted in format %s" \
% format
now = datetime.datetime.now()
later = now + datetime.timedelta(seconds=1)
same_time_tomorrow = now + datetime.timedelta(days=1)
load_test_dataset()
formats = ['full', 'sparse', 'soa', 'aos', 'table']
for format in formats:
result = mldb.get('/v1/query',
q="SELECT * FROM dataset1 WHEN value_timestamp() > '%s' ORDER BY rowHash()" % later ,
format=format)
rows1 = json.dumps(result.json(), indent=4, sort_keys=True)
result = mldb.get('/v1/query',
q = "SELECT * from dataset1 WHEN value_timestamp() > '%s' ORDER BY rowHash()" % later, format=format)
rows2 = json.dumps(result.json(), indent=4, sort_keys=True)
response = run_transform("value_timestamp() > '%s'" % later, format)
rows3 = json.dumps(response, indent=4, sort_keys=True, default=str)
compare_json(rows1, rows2, format)
compare_json(rows2, rows3, format)
request.set_return('success')
|
xls/synthesis/yosys/yosys_server_test.py | felixzhuologist/xls | 687 | 12692280 | <filename>xls/synthesis/yosys/yosys_server_test.py
# Lint as: python3
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of the synthesis service: client and dummy server."""
import subprocess
import portpicker
from google.protobuf import text_format
from absl.testing import absltest
from xls.common import runfiles
from xls.synthesis import synthesis_pb2
CLIENT_PATH = runfiles.get_path('xls/synthesis/synthesis_client_main')
SERVER_PATH = runfiles.get_path('xls/synthesis/yosys/yosys_server_main')
YOSYS_PATH = runfiles.get_path('xls/synthesis/yosys/bogusys')
NEXTPNR_PATH = runfiles.get_path('xls/synthesis/yosys/nextpbr')
VERILOG = """
module main(
input wire [31:0] x,
input wire [31:0] y,
output wire [31:0] out
);
assign out = x + y;
endmodule
"""
class SynthesisServerTest(absltest.TestCase):
def _start_server(self):
port = portpicker.pick_unused_port()
proc = subprocess.Popen([
runfiles.get_path(SERVER_PATH),
f'--port={port}',
f'--yosys_path={YOSYS_PATH}',
f'--nextpnr_path={NEXTPNR_PATH}',
'--synthesis_target=ecp5',
])
return port, proc
def test_slack(self):
port, proc = self._start_server()
verilog_file = self.create_tempfile(content=VERILOG)
response_text = subprocess.check_output(
[CLIENT_PATH, verilog_file.full_path, f'--port={port}',
'--ghz=1.0']).decode('utf-8')
response = text_format.Parse(response_text, synthesis_pb2.CompileResponse())
# The response is generated by parsing testdata/nextpnr.out.
self.assertEqual(response.max_frequency_hz, 180280000)
proc.terminate()
proc.wait()
def test_cell_histogram(self):
port, proc = self._start_server()
verilog_file = self.create_tempfile(content=VERILOG)
response_text = subprocess.check_output(
[CLIENT_PATH, verilog_file.full_path, f'--port={port}',
'--ghz=1.0']).decode('utf-8')
response = text_format.Parse(response_text, synthesis_pb2.CompileResponse())
# The response is generated by parsing bogusys stdout.
self.assertLen(response.instance_count.cell_histogram, 2)
self.assertIn('CCU2C', response.instance_count.cell_histogram)
self.assertEqual(response.instance_count.cell_histogram['CCU2C'], 32)
self.assertIn('TRELLIS_FF', response.instance_count.cell_histogram)
self.assertEqual(response.instance_count.cell_histogram['TRELLIS_FF'], 192)
proc.terminate()
proc.wait()
if __name__ == '__main__':
absltest.main()
|
oslo/torch/nn/modules/activation.py | lipovsek/oslo | 249 | 12692289 | from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.modules.linear import NonDynamicallyQuantizableLinear
from torch.nn.parameter import Parameter
from oslo.torch.nn.modules.functional import multi_head_attention_forward
class MultiheadAttention(nn.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces as described in the paper:
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
Multi-Head Attention is defined as:
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
bias: If specified, adds bias to input / output projection layers. Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
Default: ``False``.
kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
        use_sequence_parallel: If ``True``, the self-attention module is replaced
            with a ring self-attention module. Default: ``False``
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ["batch_first"]
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
use_sequence_parallel=False,
parallel_context=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
# Support sequence parallel
self.sequence_parallel_support = True
self.use_sequence_parallel = use_sequence_parallel
self.parallel_context = parallel_context
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs)
)
self.k_proj_weight = Parameter(
torch.empty((embed_dim, self.kdim), **factory_kwargs)
)
self.v_proj_weight = Parameter(
torch.empty((embed_dim, self.vdim), **factory_kwargs)
)
self.register_parameter("in_proj_weight", None)
else:
self.in_proj_weight = Parameter(
torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
)
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = NonDynamicallyQuantizableLinear(
embed_dim, embed_dim, bias=bias, **factory_kwargs
)
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if "_qkv_same_embed_dim" not in state:
state["_qkv_same_embed_dim"] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
:math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
:math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
See "Attention Is All You Need" for more details.
key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
Binary and byte masks are supported.
For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
the purpose of attention. For a byte mask, a non-zero value indicates that the corresponding ``key``
value will be ignored.
need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
Default: ``True``.
attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
:math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
corresponding position is not allowed to attend. For a float mask, the mask values will be added to
the attention weight.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
Outputs:
- **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
:math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
embedding dimension ``embed_dim``.
- **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
              :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
.. note::
`batch_first` argument is ignored for unbatched inputs.
"""
is_batched = query.dim() == 3
if self.batch_first and is_batched:
# make sure that the transpose op does not affect the "is" property
if key is value:
if query is key:
query = key = value = query.transpose(1, 0)
else:
query, key = [x.transpose(1, 0) for x in (query, key)]
value = key
else:
query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
# TODO: When batch first is True, RSA
if not self._qkv_same_embed_dim:
# TODO: When dimension of qkv is not same, RSA
attn_output, attn_output_weights = multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight,
average_attn_weights=average_attn_weights,
use_sequence_parallel=self.use_sequence_parallel,
parallel_context=self.parallel_context,
)
else:
attn_output, attn_output_weights = multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
average_attn_weights=average_attn_weights,
use_sequence_parallel=self.use_sequence_parallel,
parallel_context=self.parallel_context,
)
if self.batch_first and is_batched:
# TODO: When batch first is True, RSA attention output
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights
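# --- Illustrative usage sketch (not part of the original module) ---
# Assumes the default (seq, batch, feature) layout and the non-parallel code
# path (use_sequence_parallel=False), which is assumed to mirror
# torch.nn.MultiheadAttention.
if __name__ == "__main__":
    embed_dim, num_heads = 64, 8
    attn = MultiheadAttention(embed_dim, num_heads)
    query = torch.randn(10, 2, embed_dim)        # (target_len, batch, embed_dim)
    key = value = torch.randn(12, 2, embed_dim)  # (source_len, batch, embed_dim)
    output, weights = attn(query, key, value)
    print(output.shape, weights.shape)           # expected: (10, 2, 64) and (2, 10, 12)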
|
lib/oembed/constants.py | goztrk/django-htk | 206 | 12692311 | <reponame>goztrk/django-htk<filename>lib/oembed/constants.py<gh_stars>100-1000
OEMBED_URL_SCHEME_REGEXPS = {
'slideshare' : r'https?://(?:www\.)?slideshare\.(?:com|net)/.*',
'soundcloud' : r'https?://soundcloud.com/.*',
'vimeo' : r'https?://(?:www\.)?vimeo\.com/.*',
'youtube' : r'https?://(?:(www\.)?youtube\.com|youtu\.be)/.*',
}
OEMBED_BASE_URLS = {
'slideshare' : 'https://www.slideshare.net/api/oembed/2?url=%(url)s',
'soundcloud' : 'https://soundcloud.com/oembed?url=%(url)s&format=json',
'vimeo' : 'https://vimeo.com/api/oembed.json?url=%(url)s&maxwidth=400&maxheight=350',
'youtube' : 'https://www.youtube.com/oembed?url=%(url)s&format=json',
}
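# --- Illustrative usage sketch (not part of the original module) ---
# Typical pattern: match a media URL against the scheme regexps, then fill in
# the corresponding oEmbed endpoint template; the media URL is only an example.
if __name__ == '__main__':
    import re
    try:
        from urllib.parse import quote
    except ImportError:
        from urllib import quote
    media_url = 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'
    for provider, pattern in OEMBED_URL_SCHEME_REGEXPS.items():
        if re.match(pattern, media_url):
            print(provider, OEMBED_BASE_URLS[provider] % {'url': quote(media_url, safe='')})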
|
scripts/python/fetch_chromosomes/settings.py | mikiec84/ideogram | 229 | 12692348 | <filename>scripts/python/fetch_chromosomes/settings.py
import logging
def get_logger(output_dir, log_name):
"""Creates a log file and returns an object to interface with it.
"""
logger = logging.getLogger(log_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(output_dir + log_name + '.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger
def init(_fresh_run, _fill_cache, _output_dir, _cache_dir, log_name):
"""Initializes global variables that are readable from importing modules.
"""
global fresh_run, fill_cache, output_dir, cache_dir
fresh_run = _fresh_run
fill_cache = _fill_cache
output_dir = _output_dir
cache_dir = _cache_dir
return get_logger(output_dir, log_name) |
examples/FirstReconstruction/FirstReconstruction.py | agravgaard/RTK | 167 | 12692370 | #!/usr/bin/env python
import sys
import itk
from itk import RTK as rtk
if len ( sys.argv ) < 3:
print( "Usage: FirstReconstruction <outputimage> <outputgeometry>" )
sys.exit ( 1 )
# Defines the image type
ImageType = itk.Image[itk.F,3]
# Defines the RTK geometry object
geometry = rtk.ThreeDCircularProjectionGeometry.New()
numberOfProjections = 360
firstAngle = 0.
angularArc = 360.
sid = 600 # source to isocenter distance
sdd = 1200 # source to detector distance
for x in range(0,numberOfProjections):
angle = firstAngle + x * angularArc / numberOfProjections
geometry.AddProjection(sid,sdd,angle)
# Writing the geometry to disk
xmlWriter = rtk.ThreeDCircularProjectionGeometryXMLFileWriter.New()
xmlWriter.SetFilename ( sys.argv[2] )
xmlWriter.SetObject ( geometry )
xmlWriter.WriteFile()
# Create a stack of empty projection images
ConstantImageSourceType = rtk.ConstantImageSource[ImageType]
constantImageSource = ConstantImageSourceType.New()
origin = [ -127, -127, 0. ]
sizeOutput = [ 128, 128, numberOfProjections ]
spacing = [ 2.0, 2.0, 2.0 ]
constantImageSource.SetOrigin( origin )
constantImageSource.SetSpacing( spacing )
constantImageSource.SetSize( sizeOutput )
constantImageSource.SetConstant(0.)
REIType = rtk.RayEllipsoidIntersectionImageFilter[ImageType, ImageType]
rei = REIType.New()
semiprincipalaxis = [ 50, 50, 50]
center = [ 0, 0, 10]
# Set GrayScale value, axes, center...
rei.SetDensity(2)
rei.SetAngle(0)
rei.SetCenter(center)
rei.SetAxis(semiprincipalaxis)
rei.SetGeometry( geometry )
rei.SetInput(constantImageSource.GetOutput())
# Create reconstructed image
constantImageSource2 = ConstantImageSourceType.New()
sizeOutput = [ 128, 128, 128 ]
origin = [ -63.5, -63.5, -63.5 ]
spacing = [ 1.0, 1.0, 1.0 ]
constantImageSource2.SetOrigin( origin )
constantImageSource2.SetSpacing( spacing )
constantImageSource2.SetSize( sizeOutput )
constantImageSource2.SetConstant(0.)
# FDK reconstruction
print("Reconstructing...")
FDKCPUType = rtk.FDKConeBeamReconstructionFilter[ImageType]
feldkamp = FDKCPUType.New()
feldkamp.SetInput(0, constantImageSource2.GetOutput())
feldkamp.SetInput(1, rei.GetOutput())
feldkamp.SetGeometry(geometry)
feldkamp.GetRampFilter().SetTruncationCorrection(0.0)
feldkamp.GetRampFilter().SetHannCutFrequency(0.0)
# Field-of-view masking
FOVFilterType = rtk.FieldOfViewImageFilter[ImageType, ImageType]
fieldofview = FOVFilterType.New()
fieldofview.SetInput(0, feldkamp.GetOutput())
fieldofview.SetProjectionsStack(rei.GetOutput())
fieldofview.SetGeometry(geometry)
# Writer
print("Writing output image...")
WriterType = rtk.ImageFileWriter[ImageType]
writer = WriterType.New()
writer.SetFileName(sys.argv[1])
writer.SetInput(fieldofview.GetOutput())
writer.Update()
print("Done!")
|
cctbx/array_family/boost_python/tst_flex.py | dperl-sol/cctbx_project | 155 | 12692371 | <reponame>dperl-sol/cctbx_project
from __future__ import absolute_import, division, print_function
from libtbx.test_utils import approx_equal
from cctbx import uctbx
from cctbx.array_family import flex
from six.moves import range
try:
from six.moves import cPickle as pickle
except ImportError:
import pickle
def exercise_flex_miller_index():
from scitbx.array_family.flex import exercise_triple
exercise_triple(flex_triple=flex.miller_index, flex_order=flex.order)
a = flex.miller_index([(1,2,3), (2,3,4)])
assert approx_equal(a.as_vec3_double(), [(1,2,3), (2,3,4)])
h, k, l = [flex.int((0,1,2,3)),
flex.int((1,2,3,4)),
flex.int((2,3,4,5))]
b = flex.miller_index(h, k, l)
assert approx_equal(b, ((0,1,2),(1,2,3),(2,3,4),(3,4,5)))
#
i = flex.miller_index([(1,-2,3), (-2,3,4)])
c = flex.complex_double([3-4j, -7+8j])
x = (0.9284, -1.2845, -0.2293)
assert approx_equal(
i.fourier_transform_real_part_at_x(fourier_coeffs=c, x=x),
15.4357188164)
def exercise_flex_hendrickson_lattman():
a = flex.hendrickson_lattman()
assert a.size() == 0
a = flex.hendrickson_lattman(132)
for x in a:
assert x == (0,0,0,0)
a = flex.hendrickson_lattman(((1,2,3,4), (2,3,4,5), (3,4,5,6)))
assert a.size() == 3
assert a.count((1,2,3,4)) == 1
assert a.count((0,0,0,0)) == 0
assert tuple(a) == ((1,2,3,4), (2,3,4,5), (3,4,5,6))
assert tuple(a+a) == ((2,4,6,8), (4,6,8,10), (6,8,10,12))
a += a
assert tuple(a) == ((2,4,6,8), (4,6,8,10), (6,8,10,12))
p = pickle.dumps(a)
b = pickle.loads(p)
assert tuple(a) == tuple(b)
centric_flags = flex.bool([False, True])
phase_integrals = flex.complex_double([complex(0.5,-0.7), complex(-0.3,0.4)])
a = flex.hendrickson_lattman(
centric_flags=centric_flags,
phase_integrals=phase_integrals,
max_figure_of_merit=1-1.e-6)
assert approx_equal(a, [(2.2684820912654264, -3.1758749277715967, 0, 0),
(-0.3295836866004328, 0.43944491546724396, 0, 0)])
assert approx_equal(
[a.slice(i) for i in range(4)],
[[2.2684820912654264, -0.3295836866004328],
[-3.1758749277715967, 0.43944491546724396],
[0.0, 0.0],
[0.0, 0.0]])
a = flex.hendrickson_lattman(3, (1,2,3,4))
assert a.all_eq((1,2,3,4))
assert not a.all_eq((1,2,0,4))
assert approx_equal(a.conj(), [(1,-2,3,-4), (1,-2,3,-4), (1,-2,3,-4)])
assert approx_equal(a.conj().conj(), a)
#
a = flex.double()
h = flex.hendrickson_lattman(a=a, b=a)
assert h.size() == 0
a = flex.double([1,2,3])
b = flex.double([-3,4,5])
h = flex.hendrickson_lattman(a=a, b=b)
assert approx_equal(h, [(1,-3,0,0), (2,4,0,0), (3,5,0,0)])
assert approx_equal(h == (1,-3,0,0), (True,False,False))
assert approx_equal(h != (1,-3,0,0), (False,True,True))
assert approx_equal(h != (0,0,0,0), (True,True,True))
assert approx_equal(h == h.deep_copy(), (True, True, True))
assert approx_equal(
h == flex.hendrickson_lattman(a=b, b=a), (False, False, False))
assert approx_equal(
h != flex.hendrickson_lattman(a=b, b=a), (True, True, True))
assert approx_equal(
h != flex.hendrickson_lattman(a=b, b=a), (True, True, True))
assert approx_equal(
h != h.deep_copy(), (False, False, False))
c = flex.double([4,5,6])
d = flex.double([-4,7,8])
h = flex.hendrickson_lattman(a=a, b=b, c=c, d=d)
assert approx_equal(h, [(1,-3,4,-4), (2,4,5,7), (3,5,6,8)])
assert approx_equal(h.as_abcd(), [a, b, c, d])
h = h * 2
assert approx_equal(h, [(2, -6, 8, -8), (4, 8, 10, 14), (6, 10, 12, 16)])
def exercise_flex_xray_scatterer():
from cctbx import uctbx, sgtbx, xray
uc = uctbx.unit_cell((10,11,12))
sg = sgtbx.space_group_info("P 2")
a = flex.xray_scatterer()
assert a.size() == 0
a = flex.xray_scatterer((
xray.scatterer("Si1", (0.1,0.2,0.3)),
xray.scatterer("O1", (0.2,0.3,0.4), (1,2,3,-0.1,0.2,-0.3), 0.9),
xray.scatterer("K1", (0.3,0.4,0.5), (3,1,2,-0.2,0.3,-0.1), 0.8,
fp=5, fdp=7)))
assert a.size() == 3
assert a[1].multiplicity() == 0
a[1].apply_symmetry(uc, sg.group())
assert a[1].multiplicity() == 2
assert approx_equal(a[1].weight(), 0.9)
a.front().occupancy = 0.8
assert approx_equal(a[0].occupancy, 0.8)
a.back().occupancy = 0.7
assert approx_equal(a[-1].occupancy, 0.7)
a[0].flags.set_grad_site(state=True)
a[1].flags.set_grad_fp(state=True)
a[2].flags.param = -234
p = pickle.dumps(a)
b = pickle.loads(p)
a_ = a.deep_copy()
assert a_.n_grad_u_iso() == a_.n_grad_u_aniso() == 0
a_[0].flags.set_grad_u_iso(state=True)
a_[1].flags.set_grad_u_aniso(state=True)
a_[2].flags.set_grad_u_aniso(state=True)
assert a_.n_grad_u_iso() == 1
assert a_.n_grad_u_aniso() == 2
for i,ai in enumerate(a):
bi = b[i]
assert ai.label == bi.label
assert ai.scattering_type == bi.scattering_type
assert approx_equal(ai.fp, bi.fp)
assert approx_equal(ai.fdp, bi.fdp)
assert approx_equal(ai.site, bi.site)
assert ai.flags.use_u_aniso() == bi.flags.use_u_aniso()
assert ai.u_iso == bi.u_iso
assert ai.u_star == bi.u_star
assert ai.multiplicity() == bi.multiplicity()
assert approx_equal(ai.weight(), bi.weight())
assert ai.flags.bits == bi.flags.bits
assert ai.flags.param == bi.flags.param
assert b[0].flags.grad_site()
assert not b[0].flags.grad_fp()
assert not b[1].flags.grad_site()
assert b[1].flags.grad_fp()
assert b[2].flags.param == -234
assert list(a.extract_labels()) == ["Si1", "O1", "K1"]
assert list(a.extract_scattering_types()) == ["Si", "O", "K"]
assert approx_equal(a.extract_sites(),
((0.1,0.2,0.3),(0.2,0.3,0.4),(0.3,0.4,0.5)))
a.set_sites(sites=flex.vec3_double(
((-0.1,-0.2,-0.3),(-0.2,-0.3,-0.4),(-0.3,-0.4,-0.5))))
assert approx_equal(a.extract_sites(),
((-0.1,-0.2,-0.3),(-0.2,-0.3,-0.4),(-0.3,-0.4,-0.5)))
assert approx_equal(a[1].site, (-0.2,-0.3,-0.4))
assert approx_equal(a.extract_occupancies(), (0.8,0.9,0.7))
assert approx_equal(a.extract_fps(), (0.0, 0.0, 5.0))
assert approx_equal(a.extract_fdps(), (0.0, 0.0, 7.0))
a.set_occupancies(occupancies=flex.double((0.1,0.2,0.3)))
a.set_fps(fps=flex.double((0.0, 0.0, 1.0)))
a.set_fdps(fdps=flex.double((0.0, 0.0, 2.0)))
assert approx_equal(a.extract_occupancies(), (0.1,0.2,0.3))
assert approx_equal(a.extract_fps(), (0.0, 0.0, 1.0))
assert approx_equal(a.extract_fdps(), (0.0, 0.0, 2.0))
assert approx_equal(a[1].occupancy, 0.2)
assert approx_equal(a[2].fp, 1.0)
assert approx_equal(a[2].fdp, 2.0)
assert approx_equal(a.extract_u_iso(), (0.0, -1.0, -1.0))
assert approx_equal(a.extract_u_iso_or_u_equiv(unit_cell=uc),
(0.0, 258, 236+1/3.))
a.set_u_iso(u_iso=flex.double((3,4,5)), selection=flex.bool(a.size(), True),
unit_cell = uc)
assert approx_equal(a.extract_u_iso(), (3,-1,-1))
assert approx_equal(a.extract_u_iso_or_u_equiv(unit_cell=uc),
(3, 4, 5))
assert approx_equal(a[1].u_iso, -1)
u_cart_answer = [(-1.0, -1.0, -1.0, -1.0, -1.0, -1.0),
(4, 4, 4, 0, 0, 0), (5, 5, 5, 0, 0, 0)]
assert approx_equal(a.extract_u_cart(uc), u_cart_answer)
a.set_u_star(u_star=flex.sym_mat3_double(
[(-1,-2,-1,-1,-1,-1),
(1,2,3,-0.6,0.2,-0.3),
(3,1,2,-0.2,0.5,-0.1)]))
assert approx_equal(a.extract_u_star(),
[(-1,-1,-1,-1,-1,-1),
(1,2,3,-0.6,0.2,-0.3),
(3,1,2,-0.2,0.5,-0.1)])
assert approx_equal(a[1].u_star, (1,2,3,-0.6,0.2,-0.3))
unit_cell = uctbx.unit_cell((1,1,1,90,90,90))
a.set_u_cart(
unit_cell=unit_cell,
u_cart=flex.sym_mat3_double(
[(-1,-2,-1,-1,-1,-1),
(1,2,3,-0.6,0.2,-0.3),
(3,1,2,-0.2,0.5,-0.1)]))
assert approx_equal(a.extract_u_cart(unit_cell=unit_cell),
[(-1,-1,-1,-1,-1,-1),
(1,2,3,-0.6,0.2,-0.3),
(3,1,2,-0.2,0.5,-0.1)])
#
a.set_u_cart(unit_cell = unit_cell,
u_cart = flex.sym_mat3_double([(1,2,3,4,5,6),
(0,0,0,1,2,3),
(1,2,3,0,0,0)]),
selection = flex.size_t([1,2]))
assert approx_equal(a.extract_u_cart(unit_cell=unit_cell),
[(-1,-1,-1,-1,-1,-1),
(0,0,0,1,2,3),
(1,2,3,0,0,0)])
#
unit_cell = uctbx.unit_cell((10,10,10,90,90,90))
a.set_u_cart(
unit_cell=unit_cell,
u_cart=flex.sym_mat3_double(
[(-1,-2,-1,-1,-1,-1),
(1,2,3,-0.6,0.2,-0.3),
(3,1,2,-0.2,0.5,-0.1)]))
assert approx_equal(a.extract_u_star(),
[(-1,-1,-1,-1,-1,-1),
(0.01, 0.02, 0.03, -0.006, 0.002, -0.003),
(0.03, 0.01, 0.02, -0.002, 0.005, -0.001)])
assert approx_equal(a.extract_u_iso(), [3,-1,-1])
a.scale_adps(2.0)
assert approx_equal(a.extract_u_star(),
[(-1,-1,-1,-1,-1,-1),
(0.02, 0.04, 0.06, -0.012, 0.004, -0.006),
(0.06, 0.02, 0.04, -0.004, 0.01, -0.002)])
assert approx_equal(a.extract_u_iso(), [6,-1,-1])
assert a.count_anisotropic() == 2
assert a.count_anomalous() == 1
a.convert_to_isotropic(unit_cell=unit_cell)
assert a.count_anisotropic() == 0
a.convert_to_anisotropic(unit_cell=unit_cell)
assert a.count_anisotropic() == 3
m = a.sites_mod_positive()
assert approx_equal(m.extract_sites(), [
(0.9,0.8,0.7),
(0.8,0.7,0.6),
(0.7,0.6,0.5)])
m[2].site = (0.7,0.6,1.4) # to avoid +-0.5 ambiguity
m = m.sites_mod_short()
assert approx_equal(m.extract_sites(), [
(-0.1,-0.2,-0.3),
(-0.2,-0.3,-0.4),
(-0.3,-0.4,0.4)])
#
assert a.extract_grad_u_iso().all_eq(False)
a[1].flags.set_grad_u_iso(state=True)
assert list(a.extract_grad_u_iso()) == [False, True, False]
def exercise_extract_u_cart_plus_u_iso():
from cctbx import uctbx, sgtbx, xray
uc = uctbx.unit_cell((1,1,1))
sg = sgtbx.space_group_info("P 1")
a = flex.xray_scatterer()
assert a.size() == 0
s1 = xray.scatterer(label = "C", u = 0.1)
s2 = xray.scatterer(label = "C", u = 0.1)
s2.flags.set_use_u_iso(False)
s3 = xray.scatterer(label = "C", u = (1,1,1,1,1,1))
s4 = xray.scatterer(label = "C", u = (1,1,1,1,1,1))
s4.flags.set_use_u_aniso(False)
s5 = xray.scatterer(label = "C", u = 0.1)
s5.u_star=(1,1,1,1,1,1)
s5.flags.set_use_u_aniso(True)
s6 = xray.scatterer(label = "C", u = 0.1)
s6.u_star=(1,1,1,1,1,1)
s7 = xray.scatterer(label = "C", u = (1,1,1,1,1,1))
s7.u_iso=0.1
s8 = xray.scatterer(label = "C", u = (1,1,1,1,1,1))
s8.u_iso=0.1
s8.flags.set_use_u_iso(True)
s9 = xray.scatterer(label = "C")
s10 = xray.scatterer(label = "C")
s10.flags.set_use_u_iso(False)
a = flex.xray_scatterer((s1,s2,s3,s4,s5,s6,s7,s8,s9,s10))
u_cart_total = a.extract_u_cart_plus_u_iso(uc)
assert approx_equal(u_cart_total,
[(0.1,0.1,0.1,0,0,0),
(0,0,0,0,0,0),
(1,1,1,1,1,1),
(0,0,0,0,0,0),
(1.1,1.1,1.1,1,1,1),
(0.1,0.1,0.1,0,0,0),
(1,1,1,1,1,1),
(1.1,1.1,1.1,1,1,1),
(0,0,0,0,0,0),
(0,0,0,0,0,0)])
def run():
exercise_flex_miller_index()
exercise_flex_hendrickson_lattman()
exercise_flex_xray_scatterer()
exercise_extract_u_cart_plus_u_iso()
print("OK")
if (__name__ == "__main__"):
run()
|
desktop/core/ext-py/docutils-0.14/test/test_transforms/test___init__.py | kokosing/hue | 5,079 | 12692377 | #! /usr/bin/env python
# $Id: test___init__.py 5174 2007-05-31 00:01:52Z wiemann $
# Author: <NAME> <<EMAIL>>
# Copyright: This module has been placed in the public domain.
"""
Test module for transforms/__init__.py.
"""
from __init__ import DocutilsTestSupport # must be imported before docutils
from docutils import transforms, utils
import unittest
class TestTransform(transforms.Transform):
default_priority = 100
applied = 0
def apply(self, **kwargs):
self.applied += 1
assert kwargs == {'foo': 42}
class KwargsTestCase(unittest.TestCase):
def test_kwargs(self):
transformer = transforms.Transformer(utils.new_document('test data'))
transformer.add_transform(TestTransform, foo=42)
transformer.apply_transforms()
self.assertEqual(len(transformer.applied), 1)
self.assertEqual(len(transformer.applied[0]), 4)
transform_record = transformer.applied[0]
self.assertEqual(transform_record[1], TestTransform)
self.assertEqual(transform_record[3], {'foo': 42})
if __name__ == '__main__':
unittest.main()
|
tests/test_pybadges.py | wakatime/pybadges | 368 | 12692381 | # Copyright 2018 The pybadge Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pybadges."""
import base64
import doctest
import json
import os.path
import unittest
import pathlib
import sys
import tempfile
import xmldiff.main
import pybadges
TEST_DIR = os.path.dirname(__file__)
PNG_IMAGE_B64 = (
'iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91JpzAAAAD0lEQVQI12P4zw'
'AD/xkYAA/+Af8iHnLUAAAAAElFTkSuQmCC')
PNG_IMAGE = base64.b64decode(PNG_IMAGE_B64)
class TestPybadgesBadge(unittest.TestCase):
"""Tests for pybadges.badge."""
def test_docs(self):
doctest.testmod(pybadges, optionflags=doctest.ELLIPSIS)
def test_whole_link_and_left_link(self):
with self.assertRaises(ValueError):
pybadges.badge(left_text='foo',
right_text='bar',
left_link='http://example.com/',
whole_link='http://example.com/')
def test_changes(self):
with open(os.path.join(TEST_DIR, 'test-badges.json'), 'r') as f:
examples = json.load(f)
for example in examples:
file_name = example.pop('file_name')
with self.subTest(example=file_name):
filepath = os.path.join(TEST_DIR, 'golden-images', file_name)
with open(filepath, mode="r", encoding="utf-8") as f:
golden_image = f.read()
pybadge_image = pybadges.badge(**example)
diff = xmldiff.main.diff_texts(golden_image, pybadge_image)
self.assertFalse(diff)
class TestEmbedImage(unittest.TestCase):
"""Tests for pybadges._embed_image."""
def test_data_url(self):
url = 'data:image/png;base64,' + PNG_IMAGE_B64
self.assertEqual(url, pybadges._embed_image(url))
def test_http_url(self):
url = 'https://dev.w3.org/SVG/tools/svgweb/samples/svg-files/python.svg'
self.assertRegex(pybadges._embed_image(url),
r'^data:image/svg(\+xml)?;base64,')
def test_not_image_url(self):
with self.assertRaisesRegex(ValueError,
'expected an image, got "text"'):
pybadges._embed_image('http://www.google.com/')
@unittest.skipIf(sys.platform.startswith("win"), "requires Unix filesystem")
def test_svg_file_path(self):
image_path = os.path.abspath(
os.path.join(TEST_DIR, 'golden-images', 'build-failure.svg'))
self.assertRegex(pybadges._embed_image(image_path),
r'^data:image/svg(\+xml)?;base64,')
@unittest.skipIf(sys.platform.startswith("win"), "requires Unix filesystem")
def test_png_file_path(self):
with tempfile.NamedTemporaryFile() as png:
png.write(PNG_IMAGE)
png.flush()
self.assertEqual(pybadges._embed_image(png.name),
'data:image/png;base64,' + PNG_IMAGE_B64)
@unittest.skipIf(sys.platform.startswith("win"), "requires Unix filesystem")
def test_unknown_type_file_path(self):
with tempfile.NamedTemporaryFile() as non_image:
non_image.write(b'Hello')
non_image.flush()
with self.assertRaisesRegex(ValueError,
'not able to determine file type'):
pybadges._embed_image(non_image.name)
@unittest.skipIf(sys.platform.startswith("win"), "requires Unix filesystem")
def test_text_file_path(self):
with tempfile.NamedTemporaryFile(suffix='.txt') as non_image:
non_image.write(b'Hello')
non_image.flush()
with self.assertRaisesRegex(ValueError,
'expected an image, got "text"'):
pybadges._embed_image(non_image.name)
def test_file_url(self):
image_path = os.path.abspath(
os.path.join(TEST_DIR, 'golden-images', 'build-failure.svg'))
with self.assertRaisesRegex(ValueError, 'unsupported scheme "file"'):
pybadges._embed_image(pathlib.Path(image_path).as_uri())
if __name__ == '__main__':
unittest.main()
|
backend/src/utils/alru_cache.py | rutvikpadhiyar000/github-trends | 157 | 12692424 | <reponame>rutvikpadhiyar000/github-trends
from datetime import datetime, timedelta
from functools import wraps
from typing import Any, Callable, Dict, List, Tuple
# NOTE: the wrapped coroutine must return a (flag, value) tuple;
# return flag = False to avoid caching the value.
# The wrapper also accepts one optional keyword parameter, no_cache:
# if True, bypass the cache system entirely, otherwise use it normally.
def alru_cache(max_size: int = 128, ttl: timedelta = timedelta(minutes=1)):
def decorator(func: Callable[..., Any]) -> Any:
cache: Dict[Any, Tuple[datetime, Any]] = {}
keys: List[Any] = []
def in_cache(key: Any) -> bool:
# key not in cache
if key not in cache:
return False
# key in cache but expired
if datetime.now() - cache[key][0] > ttl:
return False
# key in cache and not expired
return True
def update_cache_and_return(key: Any, flag: bool, value: Any) -> Any:
# if flag = False, do not update cache and return value
if not flag:
return value
# if flag = True, update cache
now = datetime.now()
cache[key] = (now, value)
keys.append(key)
# remove oldest key if cache is full
if len(keys) > max_size:
del cache[keys.pop(0)]
# return value from cache
return cache[key][1]
@wraps(func)
async def wrapper(*args: List[Any], **kwargs: Dict[str, Any]) -> Any:
key = tuple(args), frozenset(
                {k: v for k, v in kwargs.items() if k != "no_cache"}.items()
)
if "no_cache" in kwargs and kwargs["no_cache"]:
(flag, value) = await func(*args, **kwargs)
return update_cache_and_return(key, flag, value)
if in_cache(key):
return cache[key][1]
(flag, value) = await func(*args, **kwargs)
return update_cache_and_return(key, flag, value)
return wrapper
return decorator
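# --- Illustrative usage sketch (not part of the original module) ---
# The wrapped coroutine returns (flag, value); the function below is hypothetical.
if __name__ == "__main__":
    import asyncio
    @alru_cache(max_size=4, ttl=timedelta(seconds=30))
    async def fetch_user(user_id: str, no_cache: bool = False) -> Any:
        # pretend this is an expensive lookup; cache only successful results
        return (True, {"id": user_id})
    async def demo():
        print(await fetch_user("alice"))                 # computed, then cached
        print(await fetch_user("alice"))                 # served from the cache
        print(await fetch_user("alice", no_cache=True))  # bypasses the cache
    asyncio.run(demo())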
|
PhysicsTools/PatAlgos/python/slimming/genParticleAssociation_cff.py | malbouis/cmssw | 852 | 12692426 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
import SimTracker.TrackAssociation.packedCandidatesGenAssociationDefault_cfi as _mod
packedPFCandidateToGenAssociation = _mod.packedCandidatesGenAssociationDefault.clone(
trackToGenAssoc = "prunedTrackMCMatch",
)
lostTracksToGenAssociation = _mod.packedCandidatesGenAssociationDefault.clone(
trackToGenAssoc = "prunedTrackMCMatch",
trackToPackedCandidatesAssoc = "lostTracks"
)
packedCandidateToGenAssociationTask = cms.Task(packedPFCandidateToGenAssociation,lostTracksToGenAssociation)
|
qiskit/algorithms/minimum_eigen_solvers/numpy_minimum_eigen_solver.py | Roshan-Thomas/qiskit-terra | 1,599 | 12692431 | <filename>qiskit/algorithms/minimum_eigen_solvers/numpy_minimum_eigen_solver.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Numpy Minimum Eigensolver algorithm."""
from typing import List, Optional, Union, Callable
import logging
import numpy as np
from qiskit.opflow import OperatorBase
from ..eigen_solvers.numpy_eigen_solver import NumPyEigensolver
from .minimum_eigen_solver import MinimumEigensolver, MinimumEigensolverResult, ListOrDict
logger = logging.getLogger(__name__)
class NumPyMinimumEigensolver(MinimumEigensolver):
"""
The Numpy Minimum Eigensolver algorithm.
"""
def __init__(
self,
filter_criterion: Callable[
[Union[List, np.ndarray], float, Optional[ListOrDict[float]]], bool
] = None,
) -> None:
"""
Args:
filter_criterion: callable that allows to filter eigenvalues/eigenstates. The minimum
eigensolver is only searching over feasible states and returns an eigenstate that
has the smallest eigenvalue among feasible states. The callable has the signature
`filter(eigenstate, eigenvalue, aux_values)` and must return a boolean to indicate
whether to consider this value or not. If there is no
feasible element, the result can even be empty.
"""
self._ces = NumPyEigensolver(filter_criterion=filter_criterion)
self._ret = MinimumEigensolverResult()
@property
def filter_criterion(
self,
) -> Optional[Callable[[Union[List, np.ndarray], float, Optional[ListOrDict[float]]], bool]]:
"""returns the filter criterion if set"""
return self._ces.filter_criterion
@filter_criterion.setter
def filter_criterion(
self,
filter_criterion: Optional[
Callable[[Union[List, np.ndarray], float, Optional[ListOrDict[float]]], bool]
],
) -> None:
"""set the filter criterion"""
self._ces.filter_criterion = filter_criterion
@classmethod
def supports_aux_operators(cls) -> bool:
return NumPyEigensolver.supports_aux_operators()
def compute_minimum_eigenvalue(
self, operator: OperatorBase, aux_operators: Optional[ListOrDict[OperatorBase]] = None
) -> MinimumEigensolverResult:
super().compute_minimum_eigenvalue(operator, aux_operators)
result_ces = self._ces.compute_eigenvalues(operator, aux_operators)
self._ret = MinimumEigensolverResult()
if result_ces.eigenvalues is not None and len(result_ces.eigenvalues) > 0:
self._ret.eigenvalue = result_ces.eigenvalues[0]
self._ret.eigenstate = result_ces.eigenstates[0]
if result_ces.aux_operator_eigenvalues:
self._ret.aux_operator_eigenvalues = result_ces.aux_operator_eigenvalues[0]
logger.debug("MinimumEigensolver:\n%s", self._ret)
return self._ret
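# --- Illustrative usage sketch (not part of Qiskit itself) ---
# The two-qubit operator below is only an example; any OperatorBase works.
if __name__ == "__main__":
    from qiskit.opflow import I, Z
    hamiltonian = (Z ^ Z) + 0.5 * (I ^ Z)
    solver = NumPyMinimumEigensolver()
    result = solver.compute_minimum_eigenvalue(operator=hamiltonian)
    print("Minimum eigenvalue:", result.eigenvalue)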
|
src/postings/admin.py | zhuguangjun2002/REST-API-Basics-JWT | 206 | 12692441 | from django.contrib import admin
from .models import BlogPost
admin.site.register(BlogPost) |
data/tss_dataset.py | Arka161/cnngeometric_pytorch | 262 | 12692456 | from __future__ import print_function, division
import os
import torch
from torch.autograd import Variable
from skimage import io
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from geotnf.transformation import GeometricTnf
from geotnf.flow import read_flo_file
class TSSDataset(Dataset):
"""
TSS image pair dataset
http://taniai.space/projects/cvpr16_dccs/
Args:
csv_file (string): Path to the csv file with image names and annotation files.
dataset_path (string): Directory with the images.
output_size (2-tuple): Desired output size
transform (callable): Transformation for post-processing the training pair (eg. image normalization)
"""
def __init__(self, csv_file, dataset_path,output_size=(240,240),transform=None):
self.out_h, self.out_w = output_size
self.pairs = pd.read_csv(csv_file)
self.img_A_names = self.pairs.iloc[:,0]
self.img_B_names = self.pairs.iloc[:,1]
self.flow_direction = self.pairs.iloc[:, 2].values.astype('int')
self.flip_img_A = self.pairs.iloc[:, 3].values.astype('int')
self.pair_category = self.pairs.iloc[:, 4].values.astype('int')
self.dataset_path = dataset_path
self.transform = transform
        # no cuda, as the dataset is called from CPU threads in the dataloader and would produce a conflict
self.affineTnf = GeometricTnf(out_h=self.out_h, out_w=self.out_w, use_cuda = False)
def __len__(self):
return len(self.pairs)
def __getitem__(self, idx):
# get pre-processed images
flip_img_A = self.flip_img_A[idx]
image_A,im_size_A = self.get_image(self.img_A_names,idx,flip_img_A)
image_B,im_size_B = self.get_image(self.img_B_names,idx)
# get flow output path
flow_path = self.get_GT_flow_relative_path(idx)
sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'flow_path': flow_path}
# # get ground-truth flow
# flow = self.get_GT_flow(idx)
# sample = {'source_image': image_A, 'target_image': image_B, 'source_im_size': im_size_A, 'target_im_size': im_size_B, 'flow_GT': flow}
if self.transform:
sample = self.transform(sample)
return sample
def get_image(self,img_name_list,idx,flip=False):
img_name = os.path.join(self.dataset_path, img_name_list[idx])
image = io.imread(img_name)
# if grayscale convert to 3-channel image
if image.ndim==2:
image=np.repeat(np.expand_dims(image,2),axis=2,repeats=3)
# flip horizontally if needed
if flip:
image=np.flip(image,1)
# get image size
im_size = np.asarray(image.shape)
# convert to torch Variable
image = np.expand_dims(image.transpose((2,0,1)),0)
image = torch.Tensor(image.astype(np.float32))
image_var = Variable(image,requires_grad=False)
# Resize image using bilinear sampling with identity affine tnf
image = self.affineTnf(image_var).data.squeeze(0)
im_size = torch.Tensor(im_size.astype(np.float32))
return (image, im_size)
def get_GT_flow(self,idx):
img_folder = os.path.dirname(self.img_A_names[idx])
flow_dir = self.flow_direction[idx]
flow_file = 'flow'+str(flow_dir)+'.flo'
flow_file_path = os.path.join(self.dataset_path, img_folder , flow_file)
flow = torch.FloatTensor(read_flo_file(flow_file_path))
return flow
def get_GT_flow_relative_path(self,idx):
img_folder = os.path.dirname(self.img_A_names[idx])
flow_dir = self.flow_direction[idx]
flow_file = 'flow'+str(flow_dir)+'.flo'
flow_file_path = os.path.join(img_folder , flow_file)
return flow_file_path
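# Hedged usage sketch (not part of the original module): the csv and dataset
# paths are placeholders; the constructor arguments follow the Args described
# in the class docstring above.
def _example_tss_loader(csv_file='datasets/tss/test_pairs.csv',
                        dataset_path='datasets/tss'):
    from torch.utils.data import DataLoader
    dataset = TSSDataset(csv_file=csv_file,
                         dataset_path=dataset_path,
                         output_size=(240, 240))
    # batch_size=1 keeps the per-pair metadata (image sizes, flow paths) simple
    return DataLoader(dataset, batch_size=1, shuffle=False)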
|
formiko/__main__.py | benburrill/formiko | 116 | 12692557 | from formiko.main import main
exit(main())
|
rest-service/manager_rest/deployment_update/validator.py | TS-at-WS/cloudify-manager | 124 | 12692567 | <reponame>TS-at-WS/cloudify-manager
#########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from manager_rest.deployment_update import utils
from manager_rest.storage import models, get_node
from manager_rest.manager_exceptions import UnknownModificationStageError
from manager_rest.deployment_update.constants import ENTITY_TYPES, ACTION_TYPES
OUTPUT_ENTITY_LEN = 2
WORKFLOW_ENTITY_LEN = 2
OPERATION_ENTITY_LEN = 2
PROPERTY_ENTITY_LEN = 2
RELATIONSHIP_ENTITY_LEN = 4
NODE_ENTITY_LEN = 2
class EntityValidatorBase(object):
def __init__(self, sm):
self.sm = sm
self._validation_mapper = {
ACTION_TYPES.ADD: self._validate_add,
ACTION_TYPES.MODIFY: self._validate_modify,
ACTION_TYPES.REMOVE: self._validate_remove
}
def validate(self, dep_update, step):
try:
self._validate_entity(dep_update, step)
except UnknownModificationStageError as e:
entity_identifier_msg = \
"Entity type {0} with entity id {1}".format(step.entity_type,
step.entity_id)
err_msg = "{0}: {1}".format(entity_identifier_msg, e.message)
raise UnknownModificationStageError(err_msg)
def _validate_entity(self, dep_update, step):
raise NotImplementedError
def _in_old(self, *args, **kwargs):
raise NotImplementedError
def _in_new(self, *args, **kwargs):
raise NotImplementedError
def _validate_add(self, entity_id, entity_type, **kwargs):
if not (self._in_new(**kwargs) and not self._in_old(**kwargs)):
raise UnknownModificationStageError(
"The entity either doesn't exist in the deployment update "
"blueprint or exists in the original deployment blueprint")
def _validate_modify(self, entity_id, entity_type, **kwargs):
if not (self._in_new(**kwargs) and self._in_old(**kwargs)):
raise UnknownModificationStageError(
"The entity either doesn't exist in the deployment update "
"blueprint or it doesn't exists in the original deployment "
"blueprint")
def _validate_remove(self, entity_id, entity_type, **kwargs):
if not (not self._in_new(**kwargs) and self._in_old(**kwargs)):
raise UnknownModificationStageError(
"The entity either exists in the deployment update blueprint "
"or doesn't exists in the original deployment blueprint")
def _get_storage_node(self, deployment_id, node_id):
node = get_node(deployment_id, node_id)
return node.to_dict() if node else {}
class NodeValidator(EntityValidatorBase):
def _validate_entity(self, dep_update, step):
entity_keys = utils.get_entity_keys(step.entity_id)
if len(entity_keys) != NODE_ENTITY_LEN:
return
_, node_id = entity_keys
validate = self._validation_mapper[step.action]
return validate(step.entity_id,
step.entity_type,
dep_update=dep_update,
node_id=node_id)
def _in_old(self, dep_update, node_id):
storage_node = self._get_storage_node(dep_update.deployment_id,
node_id)
return bool(storage_node)
def _in_new(self, dep_update, node_id):
raw_node = utils.get_raw_node(dep_update.deployment_plan, node_id)
return bool(raw_node)
class RelationshipValidator(EntityValidatorBase):
def _validate_entity(self, dep_update, step):
entity_keys = utils.get_entity_keys(step.entity_id)
if len(entity_keys) < RELATIONSHIP_ENTITY_LEN:
return
_, source_node_id, relationships, source_relationship_index = \
entity_keys[:RELATIONSHIP_ENTITY_LEN]
target_relationship_index = entity_keys[RELATIONSHIP_ENTITY_LEN] \
if len(entity_keys) > RELATIONSHIP_ENTITY_LEN else None
# assert the index is indeed readable
source_relationship_index = utils.parse_index(
source_relationship_index)
target_relationship_index = utils.parse_index(
target_relationship_index)
if not (source_relationship_index or target_relationship_index):
return
validate = self._validation_mapper[step.action]
return validate(step.entity_id,
step.entity_type,
dep_update=dep_update,
source_node_id=source_node_id,
relationships=relationships,
source_relationship_index=source_relationship_index,
target_relationship_index=target_relationship_index)
def _in_new(self,
dep_update,
source_node_id,
relationships,
source_relationship_index,
target_relationship_index):
source_node = utils.get_raw_node(dep_update.deployment_plan,
source_node_id)
if not (source_node and
len(source_node[relationships]) > source_relationship_index):
return
target_node_id = \
source_node[relationships][source_relationship_index]['target_id']
raw_target_node = utils.get_raw_node(dep_update.deployment_plan,
target_node_id)
return raw_target_node
def _in_old(self,
dep_update,
source_node_id,
relationships,
source_relationship_index,
target_relationship_index):
source_node = self._get_storage_node(dep_update.deployment_id,
source_node_id)
if not (source_node and
len(source_node[relationships]) > target_relationship_index):
return
target_node_id = \
source_node[relationships][target_relationship_index]['target_id']
storage_target_node = self._get_storage_node(dep_update.deployment_id,
target_node_id)
return storage_target_node
class PropertyValidator(EntityValidatorBase):
def _validate_entity(self, dep_update, step):
property_keys = utils.get_entity_keys(step.entity_id)
if len(property_keys) < PROPERTY_ENTITY_LEN:
return
_, node_id = property_keys[:PROPERTY_ENTITY_LEN]
property_id = property_keys[PROPERTY_ENTITY_LEN:]
validate = self._validation_mapper[step.action]
return validate(step.entity_id,
step.entity_type,
dep_update=dep_update,
node_id=node_id,
property_id=property_id)
@staticmethod
def _in_new(dep_update, node_id, property_id):
raw_node = utils.get_raw_node(dep_update.deployment_plan, node_id)
return utils.traverse_object(raw_node, property_id) is not None
def _in_old(self, dep_update, node_id, property_id):
storage_node = self._get_storage_node(dep_update.deployment_id,
node_id)
return utils.traverse_object(storage_node, property_id) is not None
class OperationValidator(EntityValidatorBase):
def _validate_entity(self, dep_update, step):
operation_keys = utils.get_entity_keys(step.entity_id)
if len(operation_keys) < OPERATION_ENTITY_LEN:
return
_, node_id = operation_keys[:OPERATION_ENTITY_LEN]
operation_id = operation_keys[OPERATION_ENTITY_LEN:]
validate = self._validation_mapper[step.action]
return validate(step.entity_id,
step.entity_type,
dep_update=dep_update,
node_id=node_id,
operation_id=operation_id)
def _in_new(self, dep_update, node_id, operation_id):
raw_node = utils.get_raw_node(dep_update.deployment_plan, node_id)
return utils.traverse_object(raw_node, operation_id) is not None
def _in_old(self, dep_update, node_id, operation_id):
storage_node = self._get_storage_node(dep_update.deployment_id,
node_id)
return utils.traverse_object(storage_node, operation_id) is not None
class WorkflowValidator(EntityValidatorBase):
def _validate_entity(self, dep_update, step):
workflow_keys = utils.get_entity_keys(step.entity_id)
if len(workflow_keys) < WORKFLOW_ENTITY_LEN:
return
workflows = workflow_keys[0]
workflow_id = workflow_keys[1:]
validate = self._validation_mapper[step.action]
return validate(step.entity_id,
step.entity_type,
dep_update=dep_update,
workflow_id=workflow_id,
workflows=workflows)
@staticmethod
def _in_new(dep_update, workflow_id, workflows):
raw_workflows = dep_update.deployment_plan[workflows]
return utils.traverse_object(raw_workflows, workflow_id) is not None
def _in_old(self, dep_update, workflow_id, workflows):
deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
storage_workflows = deployment.workflows or {}
return utils.traverse_object(storage_workflows,
workflow_id) is not None
class OutputValidator(EntityValidatorBase):
def _validate_entity(self, dep_update, step):
output_keys = utils.get_entity_keys(step.entity_id)
if len(output_keys) < OUTPUT_ENTITY_LEN:
return
outputs = output_keys[0]
output_id = output_keys[1:]
validate = self._validation_mapper[step.action]
return validate(step.entity_id,
step.entity_type,
dep_update=dep_update,
output_id=output_id,
outputs=outputs)
@staticmethod
def _in_new(dep_update, output_id, outputs):
raw_outputs = dep_update.deployment_plan[outputs]
return utils.traverse_object(raw_outputs, output_id) is not None
def _in_old(self, dep_update, output_id, outputs):
deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
storage_outputs = deployment.outputs or {}
return utils.traverse_object(storage_outputs, output_id) is not None
class DescriptionValidator(EntityValidatorBase):
def _validate_entity(self, dep_update, step):
description_key = step.entity_id
validate = self._validation_mapper[step.action]
return validate(step.entity_id,
step.entity_type,
dep_update=dep_update,
description_key=description_key)
def _in_new(self, dep_update, description_key):
raw_description = dep_update.deployment_plan[description_key]
return bool(raw_description)
def _in_old(self, dep_update, description_key):
deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
storage_description = deployment.description or {}
return bool(storage_description)
class StepValidator(object):
def __init__(self, sm):
self._validation_mapper = {
ENTITY_TYPES.NODE: NodeValidator(sm),
ENTITY_TYPES.RELATIONSHIP: RelationshipValidator(sm),
ENTITY_TYPES.PROPERTY: PropertyValidator(sm),
ENTITY_TYPES.OPERATION: OperationValidator(sm),
ENTITY_TYPES.WORKFLOW: WorkflowValidator(sm),
ENTITY_TYPES.OUTPUT: OutputValidator(sm),
ENTITY_TYPES.DESCRIPTION: DescriptionValidator(sm)
}
def validate(self, dep_update, step):
"""
        Validate that an entity id of the provided type exists in the provided blueprint.
        Raises an error if the id doesn't exist.
:param dep_update: the deployment update object.
:param step: the deployment update step object
:return: None
"""
if step.entity_type in ENTITY_TYPES:
self._validation_mapper[step.entity_type].validate(dep_update,
step)
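# Hedged usage sketch (illustrative only): the storage manager, deployment
# update and step objects are stand-ins that the REST handlers supply in the
# real service; only StepValidator's public interface above is exercised.
def _example_validate_steps(sm, dep_update, steps):
    validator = StepValidator(sm)
    for step in steps:
        # Raises UnknownModificationStageError when a step references an
        # entity that is inconsistent with the old/new blueprints.
        validator.validate(dep_update, step)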
|
examples/werkzeug_server.py | bcb/jsonrpcserver | 144 | 12692571 | <reponame>bcb/jsonrpcserver
from jsonrpcserver import method, Result, Success, dispatch
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
@method
def ping() -> Result:
return Success("pong")
@Request.application
def application(request):
return Response(dispatch(request.data.decode()), 200, mimetype="application/json")
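# Hedged usage note (assumed client invocation): with the server below running,
# a JSON-RPC request such as
#   curl -X POST http://localhost:5000 -d '{"jsonrpc": "2.0", "method": "ping", "id": 1}'
# should yield a response whose "result" field is "pong".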
if __name__ == "__main__":
run_simple("localhost", 5000, application)
|
panoramix/utils/supplement.py | git-github-com-warren1990-Github-git/panoramix | 259 | 12692610 | import json
import logging
import lzma
import os
import sqlite3
import sys
import time
import urllib.request
from pathlib import Path
from zipfile import ZipFile
from panoramix.utils.helpers import (
COLOR_BLUE,
COLOR_BOLD,
COLOR_GRAY,
COLOR_GREEN,
COLOR_HEADER,
COLOR_OKGREEN,
COLOR_UNDERLINE,
COLOR_WARNING,
ENDC,
FAIL,
cache_dir,
cached,
opcode,
)
"""
a module for management of bytes4 signatures from the database
db schema:
hash - 0x12345678
name - transferFrom
folded_name - transferFrom(address,address,uint256)
    cooccurs - comma-delimited list of hashes: `0x12312312,0xabababab...`
params - json: `[
{
"type": "address",
"name": "_from"
},
{
"type": "address",
"name": "_to"
},
{
"type": "uint256",
"name": "_value"
}
]`
"""
logger = logging.getLogger(__name__)
conn = None
def supplements_path():
return cache_dir() / "supplement.db"
def check_supplements():
panoramix_supplements = supplements_path()
if not panoramix_supplements.is_file():
compressed_supplements = (
Path(__file__).parent.parent / "data" / "supplement.db.xz"
)
logger.info(
"Decompressing %s into %s...", compressed_supplements, panoramix_supplements
)
with lzma.open(compressed_supplements) as inf, panoramix_supplements.open(
"wb"
) as outf:
            while buf := inf.read(1024 * 1024):
outf.write(buf)
assert panoramix_supplements.is_file()
def _cursor():
global conn
check_supplements()
if conn is None:
conn = sqlite3.connect(supplements_path())
# try:
c = conn.cursor()
# except Exception:
# # fails in multi-threading, this should help
# conn = sqlite3.connect("supplement.db")
# return conn.cursor()
return c
@cached
def fetch_sigs(hash):
c = _cursor()
c.execute("SELECT * from functions where hash=?", (hash,))
results = c.fetchall()
res = []
for row in results:
res.append(
{
"hash": row[0],
"name": row[1],
"folded_name": row[2],
"params": json.loads(row[3]),
"cooccurs": row[4].split(","),
}
)
return res
@cached
def fetch_sig(hash):
if type(hash) == str:
hash = int(hash, 16)
hash = "{:#010x}".format(hash)
c = _cursor()
c.execute(
"SELECT hash, name, folded_name, params, cooccurs from functions where hash=?",
(hash,),
)
results = c.fetchall()
if len(results) == 0:
return None
# Take the one that cooccurs with the most things, it's probably the most relevant.
row = max(results, key=lambda row: len(row[4]))
return {
"hash": hash,
"name": row[1],
"folded_name": row[2],
"params": json.loads(row[3]),
}
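# Hedged usage sketch (not part of the original module): 0xa9059cbb is the
# selector of the ERC-20 transfer(address,uint256) function; the returned dict
# shape follows fetch_sig() above.
def _example_lookup():
    sig = fetch_sig(0xa9059cbb)
    if sig is None:
        return None
    return sig["folded_name"], [p["type"] for p in sig["params"]]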
"""
ABI crawler and parser, used to refill supplement.py with new ABI/function definitions.
It's used by scripts that are not part of the panoramix repo.
The function is here so that people wanting to parse ABIs on their own can use the
parse_insert_abi implementation as a reference. It handles some non-obvious edge cases, like arrays of tuples.
"""
def crawl_abis_from_cache():
# imports here, because this is not used as a part of a regular panoramix run,
# and we don't want to import stuff unnecessarily.
import json
import os
import re
import sqlite3
import sys
import time
import urllib
import urllib.request
try:
from web3 import Web3
except Exception:
print(
"install web3:\n\t`pip install web3`"
) # the only dependency in the project :D
conn = sqlite3.connect("supplement.db")
cursor = conn.cursor()
conn2 = sqlite3.connect("supp2.db")
cursor2 = conn2.cursor()
def parse_insert_abi(abi):
def parse_inputs(func_inputs):
inputs = []
params = []
param_counter = 0
for r in func_inputs:
param_counter += 1
type_ = r["type"]
name_ = r["name"]
if len(name_) == 0:
name_ = "param" + str(param_counter)
if name_[0] != "_":
name_ = "_" + name_
params.append({"type": r["type"], "name": name_})
if "tuple" not in type_:
inputs.append(type_)
else:
type_ = f"({parse_inputs(r['components'])[0]})" + type_[5:]
inputs.append(type_)
return ",".join(inputs), params
output = {}
for func in abi:
if func["type"] in ["constructor", "fallback"]:
continue
inputs, params = parse_inputs(func["inputs"])
fname = f"{func['name']}({inputs})"
sha3 = Web3.sha3(text=fname).hex()[:10]
if sha3 in output:
print("double declaration for the same hash! {}".format(fname))
continue
output[sha3] = {
"name": func["name"],
"folded_name": fname,
"params": params,
}
for sha3, row in output.items():
row["cooccurs"] = list(output.keys())
insert_row = (
sha3,
row["name"],
row["folded_name"],
json.dumps(row["params"]),
",".join(row["cooccurs"]),
)
insert_row2 = (
int(sha3, 16),
row["name"],
row["folded_name"],
json.dumps(row["params"]),
)
test_hash, test_cooccurs = insert_row[0], insert_row[4]
cursor.execute(
"SELECT * from functions where hash=? and cooccurs=?",
(test_hash, test_cooccurs),
)
results = cursor.fetchall()
if len(results) == 0:
print("inserting", sha3, row["folded_name"])
cursor.execute(
"INSERT INTO functions VALUES (?, ?, ?, ?, ?)", insert_row
)
conn.commit()
cursor2.execute("SELECT * from functions where hash=?", (insert_row2[0],))
results = cursor2.fetchall()
if len(results) == 0:
print("inserting2", sha3, row["folded_name"])
cursor2.execute(
"INSERT INTO functions VALUES (?, ?, ?, ?)", insert_row2
)
conn2.commit()
def crawl_cache():
idx = 0
path = "./cache_abis/"
if not os.path.isdir(path):
print(
"dir cache_abis doesn't exist. it should be there and it should contain abi files"
)
return
for fname in os.listdir(path):
address = fname[:-4]
fname = path + fname
idx += 1
print(idx, address)
with open(fname) as f:
abi = json.loads(f.read())
parse_insert_abi(abi)
crawl_cache()
|
flexneuart/io/utils.py | gitter-badger/FlexNeuART | 101 | 12692651 | #
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gzip, bz2
import json
import re
import os
import tempfile
from flexneuart.config import DOCID_FIELD
def create_temp_file():
""""Create a temporary file
:return temporary file name
"""
f, file_name = tempfile.mkstemp()
os.close(f)
return file_name
class FileWrapper:
def __enter__(self):
return self
def __init__(self, file_name, flags='r'):
"""Constructor, which opens a regular or gzipped-file
:param file_name a name of the file, it has a '.gz' or '.bz2' extension, we open a compressed stream.
:param flags open flags such as 'r' or 'w'
"""
dir_name = os.path.dirname(file_name)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
if file_name.endswith('.gz'):
self._file = gzip.open(file_name, flags)
self._is_compr = True
elif file_name.endswith('.bz2'):
self._file = bz2.open(file_name, flags)
self._is_compr = True
else:
self._file = open(file_name, flags)
self._is_compr = False
def write(self, s):
if self._is_compr:
self._file.write(s.encode())
else:
self._file.write(s)
def read(self, qty):
if self._is_compr:
return self._file.read(qty).decode()
else:
return self._file.read(qty)
def close(self):
self._file.close()
def __exit__(self, type, value, tb):
self._file.close()
def __iter__(self):
for line in self._file:
yield line.decode() if self._is_compr else line
def jsonl_gen(file_name):
"""A generator that produces parsed doc/query entries one by one.
:param file_name: an input file name
"""
with FileWrapper(file_name) as f:
for i, line in enumerate(f):
ln = i + 1
line = line.strip()
if not line:
continue
try:
data = json.loads(line)
except:
raise Exception('Error parsing JSON in line: %d' % ln)
if not DOCID_FIELD in data:
raise Exception('Missing %s field in JSON in line: %d' % (DOCID_FIELD, ln))
yield data
def multi_file_linegen(dir_name, pattern):
"""A generator that reads all files from a given directory matching the pattern
and yields their contents line by line.
:param dir_name: a source directory name
:param pattern: a pattern should match fully (we use fullmatch)
"""
for fn in os.listdir(dir_name):
if re.fullmatch(pattern, fn):
full_path = os.path.join(dir_name, fn)
print('Processing: ' + full_path)
with FileWrapper(full_path) as inp:
for line in inp:
yield line
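# Hedged usage sketch (assumed file name): jsonl_gen() yields parsed entries,
# each guaranteed by the checks above to carry the DOCID_FIELD key.
def _example_collect_doc_ids(file_name='queries.jsonl.gz'):
    return [entry[DOCID_FIELD] for entry in jsonl_gen(file_name)]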
|
data/fbms_data_utils.py | FilippoAleotti/unsupervised_detection | 194 | 12692655 | <reponame>FilippoAleotti/unsupervised_detection
"""
This file implements the data reader for the FBMS59
Dataset. See the file davis2016_data_utils.py for
a more detailed documentation of the functions.
The main difference with respect to DAVIS2016 is
the fact that the data reader returns the number
of images per category (used to account for the
large class imbalance of this dataset in score computation).
After the first use, you can speed up the code by commenting
pre-processing away (See line 109).
"""
import numpy as np
import os
import cv2
import re
import tensorflow as tf
from data.aug_flips import random_flip_images
class DirectoryIterator(object):
"""
    Class for managing the loading of images and labels.
    We assume the FBMS59 folder structure, with Trainingset and Testset directories.
"""
def __init__(self, directory, part='train', for_testing=False, test_temporal_t=1):
self.directory = directory
self.num_experiments = 0
self.samples_per_cat = {}
parsing_dir ={'train': ['Trainingset'],
'val' : ['Testset'],
'trainval': ['Trainingset', 'Testset']}
data_dirs = [os.path.join(directory, d) for d in parsing_dir.get(part)]
for d in data_dirs:
if not os.path.isdir(d):
raise IOError("Directory {} file not found".format(d))
# First count how many experiments are out there
self.samples = 0
# Each filename is a tuple image / components
self.image_filenames = []
self.annotation_filenames = []
for d in data_dirs:
if for_testing:
self._parse_testtime_dir(d, test_temporal_t)
else:
self._parse_data_dir(d)
if self.samples == 0:
raise IOError("Did not find any file in the dataset folder")
if not for_testing:
self.num_experiments = len(self.image_filenames)
print('Found {} images belonging to {} experiments.'.format(
self.samples, self.num_experiments))
def _parse_data_dir(self, data_dir):
"""
This function will read all the files in data_dir and return a list of
lists containing the different fnames for each category.
"""
categories = os.listdir(data_dir)
for folder_name in categories:
all_fnames_list_fname = os.path.join(data_dir, folder_name,
folder_name + ".bmf")
if not os.path.isfile(all_fnames_list_fname):
raise IOError("Not found file {}".format(all_fnames_list_fname))
all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,
skiprows=1)
# Correct from pgm to jpg
all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]
all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \
in all_fnames_list]
self.samples += len(all_fnames_list)
# Append the last
self.image_filenames.append(all_fnames_list)
def _parse_testtime_dir(self, data_dir, test_temporal_t=1):
"""
This function will read all the files in data_dir and return a list of
lists containing the different fnames for each category.
"""
self.test_tuples = []
categories = os.listdir(data_dir)
for folder_name in categories:
all_fnames_list_fname = os.path.join(data_dir, folder_name,
folder_name + ".bmf")
if not os.path.isfile(all_fnames_list_fname):
raise IOError("Not found file {}".format(all_fnames_list_fname))
all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,
skiprows=1)
# Correct from pgm to jpg
all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]
all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \
in all_fnames_list]
# Get ground_truth
annotation_fnames, numbers, type_weird = self.find_gt(os.path.join(data_dir,
folder_name,
'GroundTruth'))
goal_annotation_fnames = [f.split('.')[0] + '.jpg' for f in annotation_fnames]
goal_annotation_fnames = [os.path.join(data_dir, folder_name, 'GroundTruth', f) for f \
in goal_annotation_fnames]
# NOTE: Run the commented part only once to preprocess GT
annotation_fnames = [os.path.join(data_dir, folder_name, 'GroundTruth', f) for f \
in annotation_fnames]
for i in range(len(goal_annotation_fnames)):
mask = cv2.imread(annotation_fnames[i])
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
mask = mask / 255.0
if type_weird:
mask[mask>0.99] = 0.0
if 'marple7' == folder_name:
mask = mask>0.05
elif 'marple2' == folder_name:
mask = mask>0.4
else:
mask = mask>0.1
mask = np.asarray(mask*255, dtype=np.uint8)
cv2.imwrite(goal_annotation_fnames[i], mask)
# Create offsets
numbers = np.array(numbers) - np.min(numbers)
seq_len = np.max(numbers)
offsets = numbers + test_temporal_t
if offsets[0] < numbers[0]:
# test was negative, needs to increase:
offsets[0] += 2*abs(test_temporal_t)
if offsets[-1] > numbers[-1]:
# test was positive, needs to decrease:
offsets[-1] -= 2*abs(test_temporal_t)
for i in range(len(offsets)):
offsets[i] = np.maximum(offsets[i], 0)
offsets[i] = np.minimum(offsets[i], seq_len)
for i, k in enumerate(numbers):
self.test_tuples.append((all_fnames_list[k], all_fnames_list[offsets[i]],
goal_annotation_fnames[i], "{}".format(len(annotation_fnames))))
self.samples += len(annotation_fnames)
self.samples_per_cat[folder_name] = len(annotation_fnames)
self.num_experiments+=1
def find_gt(self, directory):
all_files = os.listdir(directory)
# Check in which kind of folder you are
type_weird=False
for file in all_files:
if file.endswith('ppm'):
type_weird=True
break
if not type_weird:
all_files = [file for file in all_files if file.endswith('pgm')]
# Sort them
try:
all_files = sorted(all_files, key=lambda x: int(x.split('.')[0].split('_')[-1]))
numbers = [int(file.split('.')[0].split('_')[-1]) for file in all_files]
except:
all_files = sorted(all_files, key=lambda x: int(re.search(r'\d+', x).group()))
numbers = [int(re.search(r'\d+', file).group()) for file in all_files]
return all_files, numbers, type_weird
# Solve weird type
all_files = [file for file in all_files if file.endswith('ppm') and not 'PROB' in file]
all_files = sorted(all_files, key=lambda x: int(x.split('_')[1]))
numbers = [int(file.split('_')[1]) for file in all_files]
return all_files, numbers, type_weird
class FBMS59Reader(object):
def __init__(self, root_dir, max_temporal_len=3, min_temporal_len=2,
num_threads=6):
self.root_dir = root_dir
self.max_temporal_len = max_temporal_len
self.min_temporal_len = min_temporal_len
        assert min_temporal_len < max_temporal_len, "Temporal lengths are not consistent"
assert min_temporal_len > 0, "Min temporal len should be positive"
self.num_threads = num_threads
def get_filenames_list(self, partition):
iterator = DirectoryIterator(self.root_dir, partition)
filenames, annotation_filenames = iterator.image_filenames, \
iterator.annotation_filenames
#Training calls it before, so it will be overwritten
self.val_samples = iterator.samples
return filenames, annotation_filenames
def get_test_tuples(self, partition, test_temporal_t=1):
iterator = DirectoryIterator(self.root_dir, partition, for_testing=True,
test_temporal_t=test_temporal_t)
test_tuples = iterator.test_tuples
#Training calls it before, so it will be overwritten
self.val_samples = iterator.samples
self.samples_per_cat = iterator.samples_per_cat
self.num_categories = len(iterator.samples_per_cat.keys())
return test_tuples
def preprocess_image(self, img):
orig_width = 640
orig_height = 384
img = ( tf.cast(img,tf.float32) / tf.constant(255.0) ) - 0.5
img = tf.image.resize_images(img, [orig_height, orig_width])
return img
def preprocess_mask(self, mask):
orig_width = 640
orig_height = 384
mask = (tf.cast(mask,tf.float32) / tf.constant(255.0))
mask = tf.image.resize_images(mask, [orig_height, orig_width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
return mask
def random_crop_image_pair(self, image_1, image_2, max_cropping_percent=0.9):
'''
        Produces an identical random crop for image_1 and image_2 that covers
        at least max_cropping_percent of the original image in each dimension.
        The resulting patch is then resized back to the original size.
'''
rand = tf.random_uniform(shape=[], minval=0, maxval=1, dtype=tf.float32)
cropping_percent = max_cropping_percent + rand*(1-max_cropping_percent)
image_width = image_1.get_shape().as_list()[1]
image_height = image_1.get_shape().as_list()[0]
num_channels = image_1.get_shape().as_list()[2]
crop_width = tf.cast(image_width*cropping_percent, tf.int32)
crop_height = tf.cast(image_height*cropping_percent, tf.int32)
image_c = tf.concat((image_1, image_2), axis=-1)
image_c = tf.random_crop(image_c, size=[crop_height,
crop_width,
num_channels*2])
image_c.set_shape([None, None, num_channels*2])
# Resize
image_c = tf.image.resize_images(image_c,
[image_height, image_width])
image_1 = image_c[:,:,:3]
image_2 = image_c[:,:,3:6]
return image_1, image_2
def central_cropping(self, img, cropping_percent):
orig_height, orig_width = img.get_shape().as_list()[0:2]
img = tf.image.central_crop(img, cropping_percent)
img = tf.image.resize_images(img, [orig_height, orig_width])
return img
def augment_pair(self, image_1, image_2):
# Random flips
image_1, image_2 = random_flip_images(image_1, image_2)
image_1, image_2 = self.random_crop_image_pair(image_1, image_2,
self.train_crop)
return image_1, image_2
def dataset_map(self, input_queue):
fname_number, direction = input_queue[0], input_queue[1]
# Take care with the casting when sampling!!
t_shift = tf.random_uniform(shape=[], minval=self.min_temporal_len,
maxval=self.max_temporal_len+1,
dtype=tf.int32)
t_shift = tf.cast(t_shift, dtype=tf.float32)
img2_fname_number = t_shift * direction + fname_number
# Conversions
fname_number = tf.cast(fname_number, dtype=tf.int32)
img2_fname_number = tf.cast(img2_fname_number, dtype=tf.int32)
# Reading
fname_1 = tf.gather(self.filenames, fname_number)
fname_2 = tf.gather(self.filenames, img2_fname_number)
file_content = tf.read_file(fname_1)
image_1 = tf.image.decode_jpeg(file_content, channels=3)
image_1 = self.preprocess_image(image_1)
file_content = tf.read_file(fname_2)
image_2 = tf.image.decode_jpeg(file_content, channels=3)
image_2 = self.preprocess_image(image_2)
# Data augmentation
image_1, image_2 = self.augment_pair(image_1, image_2)
return image_1, image_2
def image_inputs(self, batch_size=32, partition='train',
train_crop=1.0):
# Generates input batches for FBMS dataset.
t_len = self.max_temporal_len
file_list, _ = self.get_filenames_list(partition)
self.train_crop = train_crop
# Accumulates subsequent filenames, and makes a dataset with
# end-points.
N = 0
last_fname_numbers = [] # Will be used to calculate flow backward
first_fname_numbers = [] # Will be used to calculate flow forward
for fnames in file_list:
last_fname_numbers.append(np.arange(N + t_len, N + len(fnames),
dtype=np.int32))
first_fname_numbers.append(np.arange(N, N + len(fnames) - t_len,
dtype=np.int32))
N += len(fnames)
self.filenames = np.concatenate(file_list)
last_fname_numbers = np.concatenate(last_fname_numbers)
last_fname_numbers = np.vstack((last_fname_numbers, -1.0*np.ones_like(last_fname_numbers))).T
first_fname_numbers = np.concatenate(first_fname_numbers)
first_fname_numbers = np.vstack((first_fname_numbers, 1.0*np.ones_like(first_fname_numbers))).T
all_fname_numbers = np.vstack((first_fname_numbers, last_fname_numbers))
all_fname_numbers = np.asarray(all_fname_numbers, dtype=np.float32)
np.random.shuffle(all_fname_numbers)
# Form training batches
dataset = tf.data.Dataset.from_tensor_slices(all_fname_numbers)
dataset = dataset.shuffle(buffer_size=all_fname_numbers.shape[0],
reshuffle_each_iteration=True)
dataset = dataset.repeat(None)
dataset = dataset.map(self.dataset_map,
num_parallel_calls=self.num_threads)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size=3*batch_size)
iterator = dataset.make_initializable_iterator()
img1s, img2s = iterator.get_next()
# Extra arguments returned for compatibility with test functions.
return (img1s, img2s, tf.constant(1.0),'f', 1.0), iterator
def test_inputs(self, batch_size=32, partition='val',
t_len=2, with_fname=False, test_crop=1.0):
# Reads test inputs data
# The main difference with Davis2016 consists in retuning
# the number of elements per category.
test_tuples = self.get_test_tuples(partition, t_len)
self.test_crop = test_crop
self.num_threads = 1
# Form training batches
dataset = tf.data.Dataset.from_tensor_slices(test_tuples)
dataset = dataset.repeat(None)
dataset = dataset.map(self.test_dataset_map,
num_parallel_calls=1)
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = dataset.prefetch(buffer_size=3*batch_size)
iterator = dataset.make_initializable_iterator()
img1s, img2s, seg1s, fnames, samples_per_cat = iterator.get_next()
if with_fname:
return (img1s, img2s, seg1s, fnames, samples_per_cat), iterator
return (img1s, img2s, seg1s, samples_per_cat), iterator
def test_dataset_map(self, input_queue):
fname_1, fname_2, annotation_fname, samples_per_cat = input_queue[0],\
input_queue[1], input_queue[2], input_queue[3]
samples_per_cat = tf.string_to_number(samples_per_cat)
file_content = tf.read_file(fname_1)
image_1 = tf.image.decode_jpeg(file_content, channels=3)
image_1 = self.preprocess_image(image_1)
file_content = tf.read_file(fname_2)
image_2 = tf.image.decode_jpeg(file_content, channels=3)
image_2 = self.preprocess_image(image_2)
file_content = tf.read_file(annotation_fname)
seg_1 = tf.image.decode_jpeg(file_content, channels=1)
seg_1 = self.preprocess_mask(seg_1)
# Cropping preprocess
image_1 = self.central_cropping(image_1, self.test_crop)
image_2 = self.central_cropping(image_2, self.test_crop)
seg_1 = self.central_cropping(seg_1, self.test_crop)
return image_1, image_2, seg_1, fname_1, samples_per_cat
def augmented_inputs(self, partition='val', t_len=2,
test_crops=[1.0]):
(img_1, img_2, seg_1, fname, _), itr = self.test_inputs(batch_size=1,
t_len=t_len,
partition=partition,
with_fname=True,
test_crop=1.0)
img_1 = tf.squeeze(img_1, axis=0)
img_2 = tf.squeeze(img_2, axis=0)
seg_1 = tf.squeeze(seg_1, axis=0)
batch_dict = {'img_1s': {}, 'img_2s': {}, 'seg_1s': {}}
for crop in test_crops:
cropped_img_1 = self.central_cropping(img_1, cropping_percent=crop)
cropped_img_2 = self.central_cropping(img_2, cropping_percent=crop)
cropped_seg_1 = self.central_cropping(seg_1, cropping_percent=crop)
batch_dict['seg_1s'][crop] = cropped_seg_1
batch_dict['img_1s'][crop] = cropped_img_1
batch_dict['img_2s'][crop] = cropped_img_2
return batch_dict, fname, itr
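# Hedged usage sketch (assumed dataset root): builds the test input pipeline
# described in the module docstring; samples_per_cat is what downstream score
# computation uses to compensate for the per-category imbalance.
def _example_test_pipeline(root_dir='datasets/FBMS59'):
    reader = FBMS59Reader(root_dir)
    (img1s, img2s, seg1s, samples_per_cat), iterator = reader.test_inputs(
        batch_size=1, partition='val', t_len=2)
    with tf.Session() as sess:
        sess.run(iterator.initializer)
        return sess.run([img1s, seg1s, samples_per_cat])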
|
docs/source/conf.py | skyhoshi/galaxy-integrations-python-api | 1,165 | 12692665 | # Configuration file for the Sphinx documentation builder.
# Documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import sys
import subprocess
# -- Path setup --------------------------------------------------------------
_ROOT = os.path.join('..', '..')
sys.path.append(os.path.abspath(os.path.join(_ROOT, 'src')))
# -- Project information -----------------------------------------------------
project = 'GOG Galaxy Integrations API'
copyright = '2019, GOG.com'
_author, _version = subprocess.check_output(
['python', os.path.join(_ROOT, 'setup.py'), '--author', '--version'],
universal_newlines=True).strip().split('\n')
author = _author
version = _version
release = _version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.asyncio',
'sphinx_autodoc_typehints',
    'm2r2'  # mdinclude directive for markdown files
]
autodoc_member_order = 'bysource'
autodoc_inherit_docstrings = False
autodoc_mock_imports = ["aiohttp"]
set_type_checking_flag = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [] # type: ignore
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {
    # 'canonical_url': '',  # main page to be searched in Google, with trailing slash
'display_version': True,
'style_external_links': True,
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 4,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
master_doc = 'index'
|
docs/tutorial_data-13.py | ankitshah009/dcase_util | 122 | 12692685 | import dcase_util
# Metadata
meta = dcase_util.containers.MetaDataContainer([
{
'filename': 'test1.wav',
'event_label': 'cat',
'onset': 1.0,
'offset': 3.0
},
{
'filename': 'test1.wav',
'event_label': 'dog',
'onset': 2.0,
'offset': 6.0
},
{
'filename': 'test1.wav',
'event_label': 'speech',
'onset': 5.0,
'offset': 8.0
},
])
# Initilize encoder
event_roll_encoder = dcase_util.data.EventRollEncoder(
label_list=meta.unique_event_labels,
time_resolution=0.02
)
# Encode
event_roll = event_roll_encoder.encode(
metadata_container=meta,
length_seconds=10.0
)
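# The encoded result is a binary time/label activity roll spanning the
# 10-second segment at the 0.02 s resolution configured above, with one axis
# for time frames and one for the labels in label_list.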
# Visualize
event_roll.plot() |
flexx/app/_session.py | rajjamdar05/flexx | 1,662 | 12692686 | """
Definition of the Session class.
"""
import re
import sys
import time
import json
import base64
import random
import hashlib
import asyncio
import weakref
import datetime
from http.cookies import SimpleCookie
from ..event._component import new_type
from ._component2 import PyComponent, JsComponent, AppComponentMeta
from ._asset import Asset, Bundle, solve_dependencies
from ._assetstore import AssetStore, INDEX
from ._assetstore import assets as assetstore
from ._clientcore import serializer
from . import logger
from .. import config
reprs = json.dumps
# Use the system PRNG for session id generation (if possible)
# NOTE: secure random string generation implementation is adapted
# from the Django project.
def get_random_string(length=24, allowed_chars=None):
""" Produce a securely generated random string.
    With a length of 12 and the a-z, A-Z, 0-9 character set, this returns
    a 71-bit value: log_2((26+26+10)^12) =~ 71 bits.
"""
allowed_chars = allowed_chars or ('abcdefghijklmnopqrstuvwxyz' +
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
try:
srandom = random.SystemRandom()
except NotImplementedError: # pragma: no cover
srandom = random
logger.warning('Falling back to less secure Mersenne Twister random string.')
bogus = "%s%s%s" % (random.getstate(), time.time(), 'sdkhfbsdkfbsdbhf')
random.seed(hashlib.sha256(bogus.encode()).digest())
return ''.join(srandom.choice(allowed_chars) for i in range(length))
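# Hedged sketch (not part of the original module): the session ids assigned in
# Session.__init__ below are simply get_random_string() with its 24-character
# default.
def _example_session_id():
    sid = get_random_string()
    assert len(sid) == 24
    return sid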
class Session:
""" A connection between Python and the client runtime (JavaScript).
The session is what holds together the app widget, the web runtime,
and the websocket instance that connects to it.
Responsibilities:
* Send messages to the client and process messages received by the client.
* Keep track of PyComponent instances used by the session.
* Keep track of JsComponent instances associated with the session.
* Ensure that the client has all the module definitions it needs.
"""
STATUS = new_type('Enum', (), {'PENDING': 1, 'CONNECTED': 2, 'CLOSED': 0})
def __init__(self, app_name, store=None,
request=None): # Allow custom store for testing
self._store = store if (store is not None) else assetstore
assert isinstance(self._store, AssetStore)
self._creation_time = time.time() # used by app manager
# Id and name of the app
self._id = get_random_string()
self._app_name = app_name
# To keep track of what modules are defined at the client
self._present_classes = set() # Component classes known by the client
        self._present_modules = set()  # module names that are present at the client, plus their deps
self._present_assets = set() # names of used associated assets
self._assets_to_ignore = set() # user settable
# Data for this session (in addition to the data provided by the store)
self._data = {}
# More vars
self._runtime = None # init web runtime, will be set when used
self._ws = None # init websocket, will be set when a connection is made
self._closing = False # Flag to help with shutdown
# PyComponent or JsComponent instance, can be None if app_name is __default__
self._component = None
# The session assigns component id's and keeps track of component objects
self._component_counter = 0
self._component_instances = weakref.WeakValueDictionary()
self._dead_component_ids = set()
# Keep track of roundtrips. The _ping_calls elements are:
# [ping_count, {objects}, *(callback, args)]
self._ping_calls = []
self._ping_counter = 0
self._eval_result = {}
self._eval_count = 0
# While the client is not connected, we keep a queue of
# commands, which are send to the client as soon as it connects
self._pending_commands = []
# request related information
self._request = request
if request and request.cookies:
cookies = request.cookies
else:
cookies = {}
self._set_cookies(cookies)
def __repr__(self):
t = '<%s for %r (%i) at 0x%x>'
return t % (self.__class__.__name__, self.app_name, self.status, id(self))
@property
def request(self):
"""The tornado request that was at the origin of this session.
"""
return self._request
@property
def id(self):
""" The unique identifier of this session.
"""
return self._id
@property
def app_name(self):
""" The name of the application that this session represents.
"""
return self._app_name
@property
def app(self):
""" The root PyComponent or JsComponent instance that represents the app.
"""
return self._component
@property
def runtime(self):
""" The runtime that is rendering this app instance. Can be
None if the client is a browser.
"""
return self._runtime
@property
def status(self):
""" The status of this session.
The lifecycle for each session is:
* status 1: pending
* status 2: connected
* status 0: closed
"""
if self._ws is None:
return self.STATUS.PENDING # not connected yet
elif self._ws.close_code is None:
return self.STATUS.CONNECTED # alive and kicking
else:
return self.STATUS.CLOSED # connection closed
@property
def present_modules(self):
""" The set of module names that is (currently) available at the client.
"""
return set(self._present_modules)
@property
def assets_to_ignore(self):
""" The set of names of assets that should *not* be pushed to
the client, e.g. because they are already present on the page.
Add names to this set to prevent them from being loaded.
"""
return self._assets_to_ignore
def close(self):
""" Close the session: close websocket, close runtime, dispose app.
"""
# Stop guarding objects to break down any circular refs
self._ping_calls = []
self._closing = True # suppress warnings for session being closed.
try:
# Close the websocket
if self._ws:
self._ws.close_this()
# Close the runtime
if self._runtime:
self._runtime.close()
# Dispose the component and break the circular reference
if self._component is not None:
self._component.dispose()
self._component = None
# Discard data
self._data = {}
finally:
self._closing = False
## Hooking up with app, websocket, runtime
def _set_ws(self, ws):
""" A session is always first created, so we know what page to
serve. The client will connect the websocket, and communicate
the session_id so it can be connected to the correct Session
via this method
"""
if self._ws is not None:
raise RuntimeError('Session is already connected.')
# Set websocket object - this is what changes the status to CONNECTED
self._ws = ws
self._ws.write_command(("PRINT", "Flexx session says hi"))
# Send pending commands
for command in self._pending_commands:
self._ws.write_command(command)
self._ws.write_command(('INIT_DONE', ))
def _set_cookies(self, cookies=None):
""" To set cookies, must be an http.cookie.SimpleCookie object.
When the app is loaded as a web app, the cookies are set *before* the
main component is instantiated. Otherwise they are set when the websocket
is connected.
"""
self._cookies = cookies if cookies else SimpleCookie()
def _set_runtime(self, runtime):
if self._runtime is not None:
raise RuntimeError('Session already has a runtime.')
self._runtime = runtime
## Cookies, mmm
def get_cookie(self, name, default=None, max_age_days=31, min_version=None):
""" Gets the value of the cookie with the given name, else default.
Note that cookies only really work for web apps.
"""
from tornado.web import decode_signed_value
if name in self._cookies:
value = self._cookies[name].value
value = decode_signed_value(config.cookie_secret,
name, value, max_age_days=max_age_days,
min_version=min_version)
return value.decode()
else:
return default
def set_cookie(self, name, value, expires_days=30, version=None,
domain=None, expires=None, path="/", **kwargs):
""" Sets the given cookie name/value with the given options. Set value
to None to clear. The cookie value is secured using
`flexx.config.cookie_secret`; don't forget to set that config
value in your server. Additional keyword arguments are set on
the Cookie.Morsel directly.
"""
# This code is taken (in modified form) from the Tornado project
# Copyright 2009 Facebook
# Licensed under the Apache License, Version 2.0
# Assume tornado is available ...
from tornado.escape import native_str
from tornado.httputil import format_timestamp
from tornado.web import create_signed_value
# Clear cookie?
if value is None:
value = ""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
else:
secret = config.cookie_secret
value = create_signed_value(secret, name, value, version=version,
key_version=None)
# The cookie library only accepts type str, in both python 2 and 3
name = native_str(name)
value = native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if name in self._cookies:
del self._cookies[name]
self._cookies[name] = value
morsel = self._cookies[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
morsel["expires"] = format_timestamp(expires)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
# skip falsy values for httponly and secure flags because
# SimpleCookie sets them regardless
if k in ['httponly', 'secure'] and not v:
continue
morsel[k] = v
self.send_command('EXEC', 'document.cookie = "%s";' %
morsel.OutputString().replace('"', '\\"'))
## Data
def add_data(self, name, data):
""" Add data to serve to the client (e.g. images), specific to this
session. Returns the link at which the data can be retrieved.
Note that actions can be used to send (binary) data directly
to the client (over the websocket).
Parameters:
name (str): the name of the data, e.g. 'icon.png'. If data has
already been set on this name, it is overwritten.
data (bytes): the data blob.
Returns:
str: the (relative) url at which the data can be retrieved.
"""
if not isinstance(name, str):
raise TypeError('Session.add_data() name must be a str.')
if name in self._data:
raise ValueError('Session.add_data() got existing name %r.' % name)
if not isinstance(data, bytes):
raise TypeError('Session.add_data() data must be bytes.')
self._data[name] = data
return 'flexx/data/%s/%s' % (self.id, name) # relative path for export
def remove_data(self, name):
""" Remove the data associated with the given name. If you need this,
consider using actions instead. Note that data is automatically
released when the session is closed.
"""
self._data.pop(name, None)
def get_data_names(self):
""" Get a list of names of the data provided by this session.
"""
return list(self._data.keys())
def get_data(self, name):
""" Get the data corresponding to the given name. This can be
data local to the session, or global data. Returns None if data
by that name is unknown.
"""
if True:
data = self._data.get(name, None)
if data is None:
data = self._store.get_data(name)
return data
def _dump_data(self):
""" Get a dictionary that contains all data specific to this session.
The keys represent relative paths, the values are all bytes.
Private method, used by App.dump().
"""
d = {}
for fname in self.get_data_names():
d['flexx/data/{}/{}'.format(self.id, fname)] = self.get_data(fname)
return d
## Keeping track of component objects
def _register_component(self, component, id=None):
""" Called by PyComponent and JsComponent to give them an id
and register with the session.
"""
assert isinstance(component, (PyComponent, JsComponent))
assert component.session is self
cls = component.__class__
if self._component is None:
self._component = component # register root component (i.e. the app)
# Set id
if id is None:
self._component_counter += 1
id = cls.__name__ + '_' + str(self._component_counter)
component._id = id
component._uid = self.id + '_' + id
# Register the instance using a weakref
self._component_instances[component._id] = component
# Register the class to that the client has the needed definitions
self._register_component_class(cls)
self.keep_alive(component)
def _unregister_component(self, component):
self._dead_component_ids.add(component.id)
# self.keep_alive(component) # does not work on pypy; deletion in final
# Because we use weak refs, and we want to be able to keep (the id of)
# the object so that INVOKE on it can be silently ignored (because it
# is disposed). The object id gets removed by the DISPOSE_ACK command.
def get_component_instance(self, id):
""" Get PyComponent or JsComponent instance that is associated with
this session and has the corresponding id. The returned value can be
None if it does not exist, and a returned component can be disposed.
"""
return self._component_instances.get(id, None)
## JIT asset definitions
def _register_component_class(self, cls):
""" Mark the given PyComponent or JsComponent class as used; ensure
that the client knows about the module that it is defined in,
dependencies of this module, and associated assets of any of these
modules.
"""
if not (isinstance(cls, type) and issubclass(cls, (PyComponent, JsComponent))):
raise TypeError('_register_component_class() needs a PyComponent '
'or JsComponent class')
# Early exit if we know the class already
if cls in self._present_classes:
return
# Make sure that no two Component classes have the same name, or we get problems
# that are difficult to debug. Unless classes are defined interactively.
# The modules of classes that are re-registered are re-defined. The base
# class of such a component is assumed to be either unchanged or defined
# in the same module. It can also happen that a class is registered for
# which the module was defined earlier (e.g. ui.html). Such modules
# are redefined as well.
same_name = [c for c in self._present_classes if c.__name__ == cls.__name__]
if same_name:
is_interactive = self._app_name == '__default__'
same_name.append(cls)
is_dynamic_cls = all([c.__module__ == '__main__' for c in same_name])
if not (is_interactive and is_dynamic_cls):
raise RuntimeError('Cannot have multiple Component classes with '
'the same name unless using interactive session '
'and the classes are dynamically defined: %r'
% same_name)
# Mark the class and the module as used
logger.debug('Registering Component class %r' % cls.__name__)
self._register_module(cls.__jsmodule__)
def _register_module(self, mod_name):
""" Register a module with the client, as well as its
dependencies, and associated assests of the module and its
dependencies. If the module was already defined, it is
re-defined.
"""
if (mod_name.startswith(('flexx.app', 'flexx.event')) and
'.examples' not in mod_name):
return # these are part of flexx core assets
modules = set()
assets = []
def collect_module_and_deps(mod):
if mod.name.startswith(('flexx.app', 'flexx.event')):
return # these are part of flexx core assets
if mod.name not in self._present_modules:
self._present_modules.add(mod.name)
for dep in mod.deps:
if dep.startswith(('flexx.app', 'flexx.event')):
continue
submod = self._store.modules[dep]
collect_module_and_deps(submod)
modules.add(mod)
# Collect module and dependent modules that are not yet defined
self._store.update_modules() # Ensure up-to-date module definition
mod = self._store.modules[mod_name]
collect_module_and_deps(mod)
f = lambda m: (m.name.startswith('__main__'), m.name)
modules = solve_dependencies(sorted(modules, key=f))
# Collect associated assets
for mod in modules:
for asset_name in self._store.get_associated_assets(mod.name):
if asset_name not in self._present_assets:
self._present_assets.add(asset_name)
assets.append(self._store.get_asset(asset_name))
# If the module was already defined and thus needs to be re-defined,
# we only redefine *this* module, no deps and no assoctated assets.
if not modules:
modules.append(mod)
# Collect CSS and JS assets
for mod in modules:
if mod.get_css().strip():
assets.append(self._store.get_asset(mod.name + '.css'))
for mod in modules:
assets.append(self._store.get_asset(mod.name + '.js'))
# Mark classes as used
for mod in modules:
for cls in mod.component_classes:
self._present_classes.add(cls)
# Push assets over the websocket. Note how this works fine with the
# notebook because we turn ws commands into display(HTML()).
# JS can be defined via eval() or by adding a <script> to the DOM.
# The latter allows assets that do not use strict mode, but sourceURL
# does not work on FF. So we only want to eval our own assets.
for asset in assets:
if asset.name in self._assets_to_ignore:
continue
logger.debug('Loading asset %s' % asset.name)
# Determine command suffix. All our sources come in bundles,
# for which we use eval because it makes sourceURL work on FF.
# (It does not work in Chrome in either way.)
suffix = asset.name.split('.')[-1].upper()
if suffix == 'JS' and isinstance(asset, Bundle):
suffix = 'JS-EVAL'
self.send_command('DEFINE', suffix, asset.name, asset.to_string())
## Communication with the client
def send_command(self, *command):
""" Send a command to the other side. Commands consists of at least one
argument (a string representing the type of command).
"""
assert len(command) >= 1
if self._closing:
pass
elif self.status == self.STATUS.CONNECTED:
self._ws.write_command(command)
elif self.status == self.STATUS.PENDING:
self._pending_commands.append(command)
else:
#raise RuntimeError('Cannot send commands; app is closed')
logger.warning('Cannot send commands; app is closed')
def _receive_command(self, command):
""" Received a command from JS.
"""
cmd = command[0]
if cmd == 'EVALRESULT':
self._eval_result[command[2]] = command[1]
elif cmd == 'PRINT':
print('JS:', command[1])
elif cmd == 'INFO':
logger.info('JS: ' + command[1])
elif cmd == 'WARN':
logger.warning('JS: ' + command[1])
elif cmd == 'ERROR':
logger.error('JS: ' + command[1] +
' - stack trace in browser console (hit F12).')
elif cmd == 'INVOKE':
id, name, args = command[1:]
ob = self.get_component_instance(id)
if ob is None:
if id not in self._dead_component_ids:
t = 'Cannot invoke %s.%s; session does not know it (anymore).'
logger.warning(t % (id, name))
elif ob._disposed:
pass # JS probably send something before knowing the object was dead
else:
func = getattr(ob, name, None)
if func:
func(*args)
elif cmd == 'PONG':
self._receive_pong(command[1])
elif cmd == 'INSTANTIATE':
modulename, cname, id, args, kwargs = command[1:]
# Maybe we still have the instance?
c = self.get_component_instance(id)
if c and not c._disposed:
self.keep_alive(c)
return
# Try to find the class
m, cls, e = None, None, 0
if modulename in assetstore.modules:
m = sys.modules[modulename]
cls = getattr(m, cname, None)
if cls is None:
e = 1
elif not (isinstance(cls, type) and issubclass(cls, JsComponent)):
cls, e = None, 2
elif cls not in AppComponentMeta.CLASSES:
cls, e = None, 3
if cls is None:
raise RuntimeError('Cannot INSTANTIATE %s.%s (%i)' %
(modulename, cname, e))
# Instantiate
kwargs['flx_session'] = self
kwargs['flx_id'] = id
assert len(args) == 0
c = cls(**kwargs) # calls keep_alive via _register_component()
elif cmd == 'DISPOSE': # Gets sent from local to proxy
id = command[1]
c = self.get_component_instance(id)
if c and not c._disposed: # no need to warn if component does not exist
c._dispose()
self.send_command('DISPOSE_ACK', command[1])
self._component_instances.pop(id, None) # Drop local ref now
elif cmd == 'DISPOSE_ACK': # Gets sent from proxy to local
self._component_instances.pop(command[1], None)
self._dead_component_ids.discard(command[1])
else:
logger.error('Unknown command received from JS:\n%s' % command)
def keep_alive(self, ob, iters=1):
""" Keep an object alive for a certain amount of time, expressed
in Python-JS ping roundtrips. This is intended for making JsComponent
(i.e. proxy components) survive the time between instantiation
triggered from JS and their attachment to a property, though any type
of object can be given.
"""
ping_to_schedule_at = self._ping_counter + iters
el = self._get_ping_call_list(ping_to_schedule_at)
el[1][id(ob)] = ob # add to dict of objects to keep alive
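# Illustrative sketch (not from the original source): a freshly created proxy
# component could be kept around for two ping roundtrips like
#
#     session.keep_alive(proxy_component, iters=2)
#
# where ``proxy_component`` stands for a hypothetical JsComponent instance.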
def call_after_roundtrip(self, callback, *args):
""" A variant of ``call_soon()`` that calls a callback after
a py-js roundtrip. This can be convenient to delay an action until
after other things have settled down.
"""
# The ping_counter represents the ping count that is underway.
# Since we want at least a full ping, we want one count further.
ping_to_schedule_at = self._ping_counter + 1
el = self._get_ping_call_list(ping_to_schedule_at)
el.append((callback, args))
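# Usage sketch (assumed, for illustration only): run a callback only after the
# client has processed everything sent so far, e.g.
#
#     session.call_after_roundtrip(logger.info, 'client caught up')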
async def co_roundtrip(self):
""" Coroutine to wait for one Py-JS-Py roundtrip.
"""
count = 0
def up():
nonlocal count
count += 1
self.call_after_roundtrip(up)
while count < 1:
await asyncio.sleep(0.02)
async def co_eval(self, js):
""" Coroutine to evaluate JS in the client, wait for the result,
and then return it. It is recommended to use this method only
for testing purposes.
"""
id = self._eval_count
self._eval_count += 1
self.send_command('EVALANDRETURN', js, id)
while id not in self._eval_result:
await asyncio.sleep(0.2)
return self._eval_result.pop(id)
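# Intended use, sketched as an assumption: evaluate a JS expression and await
# its value, e.g.
#
#     result = await session.co_eval('2 + 3')   # expected to yield 5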
def _get_ping_call_list(self, ping_count):
""" Get an element from _ping_call for the specified ping_count.
The element is a list [ping_count, {objects}, *(callback, args)]
"""
# No pending ping_calls?
if len(self._ping_calls) == 0:
# Start pinging
send_ping_later(self)
# Append element
el = [ping_count, {}]
self._ping_calls.append(el)
return el
# Try to find existing element, or insert it
for i in reversed(range(len(self._ping_calls))):
el = self._ping_calls[i]
if el[0] == ping_count:
return el
elif el[0] < ping_count:
el = [ping_count, {}]
self._ping_calls.insert(i + 1, el)
return el
else:
el = [ping_count, {}]
self._ping_calls.insert(0, el)
return el
def _receive_pong(self, count):
# Process ping calls
while len(self._ping_calls) > 0 and self._ping_calls[0][0] <= count:
_, objects, *callbacks = self._ping_calls.pop(0)
objects.clear()
del objects
for callback, args in callbacks:
asyncio.get_event_loop().call_soon(callback, *args)
# Continue pinging?
if len(self._ping_calls) > 0:
send_ping_later(self)
def send_ping_later(session):
# Use a weak reference so that a ref lingering in the asyncio loop does not
# prevent the session from being discarded.
def x(weaksession):
s = weaksession()
if s is not None and s.status > 0:
s._ping_counter += 1
s.send_command('PING', s._ping_counter)
# asyncio.get_event_loop().call_soon(x, weakref.ref(session))
asyncio.get_event_loop().call_later(0.01, x, weakref.ref(session))
## Functions to get page
# These could be methods, but are only for internal use
def get_page(session):
""" Get the string for the HTML page to render this session's app.
Not a lot; all other JS and CSS assets are pushed over the websocket.
"""
css_assets = [assetstore.get_asset('reset.css')]
js_assets = [assetstore.get_asset('flexx-core.js')]
return _get_page(session, js_assets, css_assets, 3, False)
def get_page_for_export(session, commands, link=0):
""" Get the string for an exported HTML page (to run without a server).
In this case, there is no websocket to push JS/CSS assets over; these
need to be included inside or alongside the main html page.
"""
# This function basically collects all assets that the session needs,
# creates a special -export.js asset that executes the given commands,
# and puts it all together using _get_page().
# We start as a normal page ...
css_assets = [assetstore.get_asset('reset.css')]
js_assets = [assetstore.get_asset('flexx-core.js')]
# Get all the used modules
modules = [assetstore.modules[name] for name in session.present_modules]
f = lambda m: (m.name.startswith('__main__'), m.name)
modules = solve_dependencies(sorted(modules, key=f))
# First the associated assets
asset_names = set()
for mod in modules:
for asset_name in assetstore.get_associated_assets(mod.name):
if asset_name not in asset_names:
asset_names.add(asset_name)
asset = assetstore.get_asset(asset_name)
if asset.name.lower().endswith('.js'):
js_assets.append(asset)
else:
css_assets.append(asset)
# Then the modules themselves
for mod in modules:
if mod.get_css().strip():
css_assets.append(assetstore.get_asset(mod.name + '.css'))
for mod in modules:
js_assets.append(assetstore.get_asset(mod.name + '.js'))
# Create asset for launching the app (commands that normally get send
# over the websocket)
lines = []
lines.append('flexx.is_exported = true;\n')
lines.append('flexx.run_exported_app = function () {')
lines.append(' var commands_b64 = [')
for command in commands:
if command[0] != 'DEFINE':
command_str = base64.encodebytes(serializer.encode(command)).decode()
lines.append(' "' + command_str.replace('\n', '') + '",')
lines.append(' ];')
lines.append(' bb64 = flexx.require("bb64");')
lines.append(' for (var i=0; i<commands_b64.length; i++) {')
lines.append(' var command = flexx.serializer.decode('
'bb64.decode(commands_b64[i]));')
lines.append(' flexx.s1._receive_command(command);')
lines.append(' }\n};\n')
# Create a session asset for it, "-export.js" is always embedded
export_asset = Asset('flexx-export.js', '\n'.join(lines))
js_assets.append(export_asset)
# Combine it all
return _get_page(session, js_assets, css_assets, link, True)
def _get_page(session, js_assets, css_assets, link, export):
""" Compose index page. Depending on the value of link and the types
of assets, the assets are either embedded or linked.
"""
pre_path = 'flexx/assets' if export else '/flexx/assets' # relative / abs
codes = []
for assets in [css_assets, js_assets]:
for asset in assets:
if link in (0, 1):
html = asset.to_html('{}', link)
else:
if asset.name.endswith(('-info.js', '-export.js')):
# Special case, is always embedded, see get_page_for_export()
html = asset.to_html('', 0)
else:
html = asset.to_html(pre_path + '/shared/{}', link)
codes.append(html)
if export and assets is js_assets:
codes.append('<script>window.flexx.spin();</script>')
codes.append('') # whitespace between css and js assets
codes.append('<script>flexx.create_session("%s", "%s");</script>\n' %
(session.app_name, session.id))
src = INDEX
if link in (0, 1):
asset_names = [a.name for a in css_assets + js_assets]
toc = '<!-- Contents:\n\n- ' + '\n- '.join(asset_names) + '\n\n-->'
codes.insert(0, toc)
src = src.replace('ASSET-HOOK', '\n\n\n'.join(codes))
else:
src = src.replace('ASSET-HOOK', '\n'.join(codes))
return src
|
pydocx/openxml/wordprocessing/deleted_run.py | botzill/pydocx | 127 | 12692699 | <reponame>botzill/pydocx
# coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.models import XmlModel, XmlCollection
from pydocx.openxml.wordprocessing.run import Run
from pydocx.openxml.wordprocessing.smart_tag_run import SmartTagRun
class DeletedRun(XmlModel):
XML_TAG = 'del'
children = XmlCollection(
Run,
SmartTagRun,
'wordprocessing.DeletedRun',
# TODO Needs InsertedRun
)
|
scale/messaging/__init__.py | kaydoh/scale | 121 | 12692745 | <reponame>kaydoh/scale
default_app_config = 'messaging.apps.MessagingConfig'
|
tools/android/loading/content_classification_lens_unittest.py | google-ar/chromium | 2,151 | 12692748 | <filename>tools/android/loading/content_classification_lens_unittest.py<gh_stars>1000+
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import unittest
from content_classification_lens import (ContentClassificationLens,
_RulesMatcher)
from request_track import Request
import test_utils
class ContentClassificationLensTestCase(unittest.TestCase):
_DOCUMENT_URL = 'http://bla.com'
_MAIN_FRAME_ID = '123.1'
_REQUEST = Request.FromJsonDict({'url': _DOCUMENT_URL,
'document_url': _DOCUMENT_URL,
'request_id': '1234.1',
'frame_id': _MAIN_FRAME_ID,
'initiator': {'type': 'other'},
'timestamp': 2,
'status': 200,
'timing': {},
'resource_type': 'Document'})
_PAGE_EVENTS = [{'method': 'Page.frameStartedLoading',
'frame_id': _MAIN_FRAME_ID},
{'method': 'Page.frameAttached',
'frame_id': '123.13', 'parent_frame_id': _MAIN_FRAME_ID}]
_RULES = ['bla.com']
def testGetDocumentUrl(self):
trace = test_utils.LoadingTraceFromEvents(
[self._REQUEST], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, [], [])
self.assertEquals(self._DOCUMENT_URL, lens._GetDocumentUrl())
# Don't be fooled by redirects.
request = copy.deepcopy(self._REQUEST)
request.status = 302
request.document_url = 'http://www.bla.com'
trace = test_utils.LoadingTraceFromEvents(
[request, self._REQUEST], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, [], [])
self.assertEquals(self._DOCUMENT_URL, lens._GetDocumentUrl())
def testGetDocumentUrlSeveralChanges(self):
request = copy.deepcopy(self._REQUEST)
request.status = 200
request.document_url = 'http://www.blabla.com'
request2 = copy.deepcopy(request)
request2.document_url = 'http://www.blablabla.com'
trace = test_utils.LoadingTraceFromEvents(
[self._REQUEST, request, request2], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, [], [])
self.assertEquals(request2.document_url, lens._GetDocumentUrl())
def testNoRules(self):
trace = test_utils.LoadingTraceFromEvents(
[self._REQUEST], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, [], [])
self.assertFalse(lens.IsAdRequest(self._REQUEST))
self.assertFalse(lens.IsTrackingRequest(self._REQUEST))
def testAdRequest(self):
trace = test_utils.LoadingTraceFromEvents(
[self._REQUEST], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, self._RULES, [])
self.assertTrue(lens.IsAdRequest(self._REQUEST))
self.assertFalse(lens.IsTrackingRequest(self._REQUEST))
def testTrackingRequest(self):
trace = test_utils.LoadingTraceFromEvents(
[self._REQUEST], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, [], self._RULES)
self.assertFalse(lens.IsAdRequest(self._REQUEST))
self.assertTrue(lens.IsTrackingRequest(self._REQUEST))
def testMainFrameIsNotAnAdFrame(self):
trace = test_utils.LoadingTraceFromEvents(
[self._REQUEST], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, self._RULES, [])
self.assertFalse(lens.IsAdOrTrackingFrame(self._MAIN_FRAME_ID))
def testAdFrame(self):
request = copy.deepcopy(self._REQUEST)
request.request_id = '1234.2'
request.frame_id = '123.123'
trace = test_utils.LoadingTraceFromEvents(
[self._REQUEST, request], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, self._RULES, [])
self.assertTrue(lens.IsAdOrTrackingFrame(request.frame_id))
def testAdAndTrackingRequests(self):
ad_request = copy.deepcopy(self._REQUEST)
ad_request.request_id = '1234.2'
ad_request.frame_id = '123.123'
non_ad_request_non_ad_frame = copy.deepcopy(self._REQUEST)
non_ad_request_non_ad_frame.request_id = '1234.3'
non_ad_request_non_ad_frame.url = 'http://www.example.com'
non_ad_request_non_ad_frame.frame_id = '123.456'
non_ad_request_ad_frame = copy.deepcopy(self._REQUEST)
non_ad_request_ad_frame.request_id = '1234.4'
non_ad_request_ad_frame.url = 'http://www.example.com'
non_ad_request_ad_frame.frame_id = ad_request.frame_id
trace = test_utils.LoadingTraceFromEvents(
[self._REQUEST, ad_request, non_ad_request_non_ad_frame,
non_ad_request_ad_frame], self._PAGE_EVENTS)
lens = ContentClassificationLens(trace, self._RULES, [])
self.assertSetEqual(
set([self._REQUEST, ad_request, non_ad_request_ad_frame]),
set(lens.AdAndTrackingRequests()))
class _MatcherTestCase(unittest.TestCase):
_RULES_WITH_WHITELIST = ['/thisisanad.', '@@myadvertisingdomain.com/*',
'@@||www.mydomain.com/ads/$elemhide']
_SCRIPT_RULE = 'domainwithscripts.com/*$script'
_THIRD_PARTY_RULE = 'domainwithscripts.com/*$third-party'
_SCRIPT_REQUEST = Request.FromJsonDict(
{'url': 'http://domainwithscripts.com/bla.js',
'resource_type': 'Script',
'request_id': '1234.1',
'frame_id': '123.1',
'initiator': {'type': 'other'},
'timestamp': 2,
'timing': {}})
def testRemovesWhitelistRules(self):
matcher = _RulesMatcher(self._RULES_WITH_WHITELIST, False)
self.assertEquals(3, len(matcher._rules))
matcher = _RulesMatcher(self._RULES_WITH_WHITELIST, True)
self.assertEquals(1, len(matcher._rules))
def testScriptRule(self):
matcher = _RulesMatcher([self._SCRIPT_RULE], False)
request = copy.deepcopy(self._SCRIPT_REQUEST)
request.resource_type = 'Stylesheet'
self.assertFalse(matcher.Matches(
request, ContentClassificationLensTestCase._DOCUMENT_URL))
self.assertTrue(matcher.Matches(
self._SCRIPT_REQUEST, ContentClassificationLensTestCase._DOCUMENT_URL))
def testGetTldPlusOne(self):
self.assertEquals(
'easy.com',
_RulesMatcher._GetTldPlusOne('http://www.easy.com/hello/you'))
self.assertEquals(
'not-so-easy.co.uk',
_RulesMatcher._GetTldPlusOne('http://www.not-so-easy.co.uk/hello/you'))
self.assertEquals(
'hard.co.uk',
_RulesMatcher._GetTldPlusOne('http://hard.co.uk/'))
def testThirdPartyRule(self):
matcher = _RulesMatcher([self._THIRD_PARTY_RULE], False)
request = copy.deepcopy(self._SCRIPT_REQUEST)
document_url = 'http://www.domainwithscripts.com/good-morning'
self.assertFalse(matcher.Matches(request, document_url))
document_url = 'http://anotherdomain.com/good-morning'
self.assertTrue(matcher.Matches(request, document_url))
if __name__ == '__main__':
unittest.main()
|
saleor/order/migrations/0048_auto_20180629_1055.py | elwoodxblues/saleor | 15,337 | 12692762 | <gh_stars>1000+
# Generated by Django 2.0.3 on 2018-06-29 15:55
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("order", "0047_order_line_name_length")]
operations = [
migrations.AlterField(
model_name="order",
name="token",
field=models.CharField(blank=True, max_length=36, unique=True),
),
migrations.AlterField(
model_name="order",
name="voucher",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="discount.Voucher",
),
),
]
|
lib/cmdlib/instance_query.py | modulus-sa/ganeti | 396 | 12692781 | <filename>lib/cmdlib/instance_query.py
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Logical units for querying instances."""
import itertools
from ganeti import constants
from ganeti import locking
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_utils import NICListToTuple
from ganeti.hypervisor import hv_base
class LUInstanceQueryData(NoHooksLU):
"""Query runtime instance data.
"""
REQ_BGL = False
def ExpandNames(self):
self.needed_locks = {}
# Use locking if requested or when non-static information is wanted
if not (self.op.static or self.op.use_locking):
self.LogWarning("Non-static data requested, locks need to be acquired")
self.op.use_locking = True
if self.op.instances or not self.op.use_locking:
# Expand instance names right here
(_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
else:
# Will use acquired locks
self.wanted_names = None
if self.op.use_locking:
self.share_locks = ShareAll()
if self.wanted_names is None:
self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
else:
self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
self.needed_locks[locking.LEVEL_NODEGROUP] = []
self.needed_locks[locking.LEVEL_NODE] = []
self.needed_locks[locking.LEVEL_NETWORK] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
self.dont_collate_locks[locking.LEVEL_NODE] = True
self.dont_collate_locks[locking.LEVEL_NETWORK] = True
def DeclareLocks(self, level):
if self.op.use_locking:
owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
self.owned_locks(locking.LEVEL_INSTANCE)))
if level == locking.LEVEL_NODEGROUP:
# Lock all groups used by instances optimistically; this requires going
# via the node before it's locked, requiring verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
frozenset(group_uuid
for instance_uuid in owned_instances
for group_uuid in
self.cfg.GetInstanceNodeGroups(instance_uuid))
elif level == locking.LEVEL_NODE:
self._LockInstancesNodes()
elif level == locking.LEVEL_NETWORK:
self.needed_locks[locking.LEVEL_NETWORK] = \
frozenset(net_uuid
for instance_uuid in owned_instances.keys()
for net_uuid in
self.cfg.GetInstanceNetworks(instance_uuid))
def CheckPrereq(self):
"""Check prerequisites.
This only checks the optional instance list against the existing names.
"""
owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
if self.wanted_names is None:
assert self.op.use_locking, "Locking was not used"
self.wanted_names = owned_instances
instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))
if self.op.use_locking:
CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
owned_node_uuids, None)
else:
assert not (owned_instances or owned_groups or
owned_node_uuids or owned_networks)
self.wanted_instances = list(instances.values())
def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
"""Returns the status of a block device
"""
if self.op.static or not node_uuid:
return None
result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
if result.offline:
return None
result.Raise("Can't compute disk status for %s" % instance.name)
status = result.payload
if status is None:
return None
return (status.dev_path, status.major, status.minor,
status.sync_percent, status.estimated_time,
status.is_degraded, status.ldisk_status)
def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
"""Compute block device status.
"""
(anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)
return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
anno_dev)
def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
dev):
"""Compute block device status.
@attention: The device has to be annotated already.
"""
drbd_info = None
output_logical_id = dev.logical_id
if dev.dev_type in constants.DTS_DRBD:
# we change the snode then (otherwise we use the one passed in)
if dev.logical_id[0] == instance.primary_node:
snode_uuid = dev.logical_id[1]
snode_minor = dev.logical_id[4]
pnode_minor = dev.logical_id[3]
else:
snode_uuid = dev.logical_id[0]
snode_minor = dev.logical_id[3]
pnode_minor = dev.logical_id[4]
drbd_info = {
"primary_node": node_uuid2name_fn(instance.primary_node),
"primary_minor": pnode_minor,
"secondary_node": node_uuid2name_fn(snode_uuid),
"secondary_minor": snode_minor,
"port": dev.logical_id[2],
}
# replace the secret present at the end of the ids with None
output_logical_id = dev.logical_id[:-1] + (None,)
dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
instance, dev)
dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)
if dev.children:
dev_children = [
self._ComputeDiskStatusInner(instance, snode_uuid, node_uuid2name_fn, d)
for d in dev.children
]
else:
dev_children = []
return {
"iv_name": dev.iv_name,
"dev_type": dev.dev_type,
"logical_id": output_logical_id,
"drbd_info": drbd_info,
"pstatus": dev_pstatus,
"sstatus": dev_sstatus,
"children": dev_children,
"mode": dev.mode,
"size": dev.size,
"spindles": dev.spindles,
"name": dev.name,
"uuid": dev.uuid,
}
def Exec(self, feedback_fn):
"""Gather and return data"""
result = {}
cluster = self.cfg.GetClusterInfo()
node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i.uuid)
for i in self.wanted_instances))
nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))
groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
for node in nodes.values()))
for instance in self.wanted_instances:
pnode = nodes[instance.primary_node]
hvparams = cluster.FillHV(instance, skip_globals=True)
if self.op.static or pnode.offline:
remote_state = None
if pnode.offline:
self.LogWarning("Primary node %s is marked offline, returning static"
" information only for instance %s" %
(pnode.name, instance.name))
else:
remote_info = self.rpc.call_instance_info(
instance.primary_node, instance.name, instance.hypervisor,
cluster.hvparams[instance.hypervisor])
remote_info.Raise("Error checking node %s" % pnode.name)
remote_info = remote_info.payload
allow_userdown = \
cluster.enabled_user_shutdown and \
(instance.hypervisor != constants.HT_KVM or
hvparams[constants.HV_KVM_USER_SHUTDOWN])
if remote_info and "state" in remote_info:
if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
if allow_userdown:
remote_state = "user down"
else:
remote_state = "down"
else:
remote_state = "up"
else:
if instance.admin_state == constants.ADMINST_UP:
remote_state = "down"
elif instance.admin_state == constants.ADMINST_DOWN:
if instance.admin_state_source == constants.USER_SOURCE:
remote_state = "user down"
else:
remote_state = "down"
else:
remote_state = "offline"
group2name_fn = lambda uuid: groups[uuid].name
node_uuid2name_fn = lambda uuid: nodes[uuid].name
disk_objects = self.cfg.GetInstanceDisks(instance.uuid)
output_disks = [self._ComputeDiskStatus(instance, node_uuid2name_fn, d)
for d in disk_objects]
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
snodes_group_uuids = [nodes[snode_uuid].group
for snode_uuid in secondary_nodes]
result[instance.name] = {
"name": instance.name,
"config_state": instance.admin_state,
"run_state": remote_state,
"pnode": pnode.name,
"pnode_group_uuid": pnode.group,
"pnode_group_name": group2name_fn(pnode.group),
"snodes": [node_uuid2name_fn(n) for n in secondary_nodes],
"snodes_group_uuids": snodes_group_uuids,
"snodes_group_names": [group2name_fn(u) for u in snodes_group_uuids],
"os": instance.os,
# this happens to be the same format used for hooks
"nics": NICListToTuple(self, instance.nics),
"disk_template": utils.GetDiskTemplate(disk_objects),
"disks": output_disks,
"hypervisor": instance.hypervisor,
"network_port": instance.network_port,
"hv_instance": instance.hvparams,
"hv_actual": hvparams,
"be_instance": instance.beparams,
"be_actual": cluster.FillBE(instance),
"os_instance": instance.osparams,
"os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
"serial_no": instance.serial_no,
"mtime": instance.mtime,
"ctime": instance.ctime,
"uuid": instance.uuid,
}
return result
|
moto/datasync/exceptions.py | gtourkas/moto | 5,460 | 12692818 | from moto.core.exceptions import JsonRESTError
class DataSyncClientError(JsonRESTError):
code = 400
class InvalidRequestException(DataSyncClientError):
def __init__(self, msg=None):
self.code = 400
super().__init__("InvalidRequestException", msg or "The request is not valid.")
|
examples/mxnet/scenegraph/utils/metric.py | ketyi/dgl | 9,516 | 12692841 | import dgl
import mxnet as mx
import numpy as np
import logging, time
from operator import attrgetter, itemgetter
from mxnet import nd, gluon
from mxnet.gluon import nn
from dgl.utils import toindex
from dgl.nn.mxnet import GraphConv
from gluoncv.model_zoo import get_model
from gluoncv.data.batchify import Pad
def iou(boxA, boxB):
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA) * max(0, yB - yA)
if interArea < 1e-7:
return 0
boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
if boxAArea + boxBArea - interArea < 1e-7:
return 0
iou_val = interArea / float(boxAArea + boxBArea - interArea)
return iou_val
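# Worked example (comment added for illustration): boxes [0, 0, 2, 2] and
# [1, 1, 3, 3] overlap in a 1x1 square, so
#   iou([0, 0, 2, 2], [1, 1, 3, 3]) == 1 / (4 + 4 - 1) ~= 0.1429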
def object_iou_thresh(gt_object, pred_object, iou_thresh=0.5):
obj_iou = iou(gt_object[1:5], pred_object[1:5])
if obj_iou >= iou_thresh:
return True
return False
def triplet_iou_thresh(pred_triplet, gt_triplet, iou_thresh=0.5):
sub_iou = iou(gt_triplet[5:9], pred_triplet[5:9])
if sub_iou >= iou_thresh:
ob_iou = iou(gt_triplet[9:13], pred_triplet[9:13])
if ob_iou >= iou_thresh:
return True
return False
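# Layout note (derived from extract_gt/extract_pred below): a triplet row is
# [score, score, rel_class, sub_class, ob_class, sub_bbox(4), ob_bbox(4)], so
# columns 5:9 and 9:13 hold the subject and object boxes compared above.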
@mx.metric.register
@mx.metric.alias('auc')
class AUCMetric(mx.metric.EvalMetric):
def __init__(self, name='auc', eps=1e-12):
super(AUCMetric, self).__init__(name)
self.eps = eps
def update(self, labels, preds):
mx.metric.check_label_shapes(labels, preds)
label_weight = labels[0].asnumpy()
preds = preds[0].asnumpy()
tmp = []
for i in range(preds.shape[0]):
tmp.append((label_weight[i], preds[i][1]))
tmp = sorted(tmp, key=itemgetter(1), reverse=True)
label_sum = label_weight.sum()
if label_sum == 0 or label_sum == label_weight.size:
return
label_one_num = np.count_nonzero(label_weight)
label_zero_num = len(label_weight) - label_one_num
total_area = label_zero_num * label_one_num
height = 0
width = 0
area = 0
for a, _ in tmp:
if a == 1.0:
height += 1.0
else:
width += 1.0
area += height
self.sum_metric += area / total_area
self.num_inst += 1
@mx.metric.register
@mx.metric.alias('predcls')
class PredCls(mx.metric.EvalMetric):
'''Metric with ground truth object location and label'''
def __init__(self, topk=20, iou_thresh=0.99):
super(PredCls, self).__init__('predcls@%d'%(topk))
self.topk = topk
self.iou_thresh = iou_thresh
def update(self, labels, preds):
if labels is None or preds is None:
self.num_inst += 1
return
preds = preds[preds[:,0].argsort()[::-1]]
m = min(self.topk, preds.shape[0])
count = 0
gt_edge_num = labels.shape[0]
label_matched = [False for label in labels]
for i in range(m):
pred = preds[i]
for j in range(gt_edge_num):
if label_matched[j]:
continue
label = labels[j]
if int(label[2]) == int(pred[2]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += 1
label_matched[j] = True
total = labels.shape[0]
self.sum_metric += count / total
self.num_inst += 1
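# Possible usage, sketched as an assumption: the register/alias decorators
# above should make these metrics constructible by name, e.g.
#
#     metric = mx.metric.create('predcls', topk=20)
#     metric.update(labels, preds)
#     print(metric.get())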
@mx.metric.register
@mx.metric.alias('phrcls')
class PhrCls(mx.metric.EvalMetric):
'''Metric with ground truth object location and predicted object label from detector'''
def __init__(self, topk=20, iou_thresh=0.99):
super(PhrCls, self).__init__('phrcls@%d'%(topk))
self.topk = topk
self.iou_thresh = iou_thresh
def update(self, labels, preds):
if labels is None or preds is None:
self.num_inst += 1
return
preds = preds[preds[:,1].argsort()[::-1]]
m = min(self.topk, preds.shape[0])
count = 0
gt_edge_num = labels.shape[0]
label_matched = [False for label in labels]
for i in range(m):
pred = preds[i]
for j in range(gt_edge_num):
if label_matched[j]:
continue
label = labels[j]
if int(label[2]) == int(pred[2]) and \
int(label[3]) == int(pred[3]) and \
int(label[4]) == int(pred[4]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += 1
label_matched[j] = True
total = labels.shape[0]
self.sum_metric += count / total
self.num_inst += 1
@mx.metric.register
@mx.metric.alias('sgdet')
class SGDet(mx.metric.EvalMetric):
'''Metric with predicted object information by the detector'''
def __init__(self, topk=20, iou_thresh=0.5):
super(SGDet, self).__init__('sgdet@%d'%(topk))
self.topk = topk
self.iou_thresh = iou_thresh
def update(self, labels, preds):
if labels is None or preds is None:
self.num_inst += 1
return
preds = preds[preds[:,1].argsort()[::-1]]
m = min(self.topk, len(preds))
count = 0
gt_edge_num = labels.shape[0]
label_matched = [False for label in labels]
for i in range(m):
pred = preds[i]
for j in range(gt_edge_num):
if label_matched[j]:
continue
label = labels[j]
if int(label[2]) == int(pred[2]) and \
int(label[3]) == int(pred[3]) and \
int(label[4]) == int(pred[4]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += 1
label_matched[j] = True
total = labels.shape[0]
self.sum_metric += count / total
self.num_inst += 1
@mx.metric.register
@mx.metric.alias('sgdet+')
class SGDetPlus(mx.metric.EvalMetric):
'''Metric proposed by `Graph R-CNN for Scene Graph Generation`'''
def __init__(self, topk=20, iou_thresh=0.5):
super(SGDetPlus, self).__init__('sgdet+@%d'%(topk))
self.topk = topk
self.iou_thresh = iou_thresh
def update(self, labels, preds):
label_objects, label_triplets = labels
pred_objects, pred_triplets = preds
if label_objects is None or pred_objects is None:
self.num_inst += 1
return
count = 0
# count objects
object_matched = [False for obj in label_objects]
m = len(pred_objects)
gt_obj_num = label_objects.shape[0]
for i in range(m):
pred = pred_objects[i]
for j in range(gt_obj_num):
if object_matched[j]:
continue
label = label_objects[j]
if int(label[0]) == int(pred[0]) and \
object_iou_thresh(pred, label, self.iou_thresh):
count += 1
object_matched[j] = True
# count predicate and triplet
pred_triplets = pred_triplets[pred_triplets[:,1].argsort()[::-1]]
m = min(self.topk, len(pred_triplets))
gt_triplet_num = label_triplets.shape[0]
triplet_matched = [False for label in label_triplets]
predicate_matched = [False for label in label_triplets]
for i in range(m):
pred = pred_triplets[i]
for j in range(gt_triplet_num):
label = label_triplets[j]
if not predicate_matched[j]:
if int(label[2]) == int(pred[2]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += label[3]
predicate_matched[j] = True
if not triplet_matched[j]:
if int(label[2]) == int(pred[2]) and \
int(label[3]) == int(pred[3]) and \
int(label[4]) == int(pred[4]) and \
triplet_iou_thresh(pred, label, self.iou_thresh):
count += 1
triplet_matched[j] = True
# compute sum
total = labels.shape[0]
N = gt_obj_num + 2 * total
self.sum_metric += count / N
self.num_inst += 1
def extract_gt(g, img_size):
'''extract ground-truth objects and triplets from the graph'''
if g is None or g.number_of_nodes() == 0:
return None, None
gt_eids = np.where(g.edata['rel_class'].asnumpy() > 0)[0]
if len(gt_eids) == 0:
return None, None
gt_class = g.ndata['node_class'][:,0].asnumpy()
gt_bbox = g.ndata['bbox'].asnumpy()
gt_bbox[:, 0] /= img_size[1]
gt_bbox[:, 1] /= img_size[0]
gt_bbox[:, 2] /= img_size[1]
gt_bbox[:, 3] /= img_size[0]
gt_objects = np.vstack([gt_class, gt_bbox.transpose(1, 0)]).transpose(1, 0)
gt_node_ids = g.find_edges(gt_eids)
gt_node_sub = gt_node_ids[0].asnumpy()
gt_node_ob = gt_node_ids[1].asnumpy()
gt_rel_class = g.edata['rel_class'][gt_eids,0].asnumpy() - 1
gt_sub_class = gt_class[gt_node_sub]
gt_ob_class = gt_class[gt_node_ob]
gt_sub_bbox = gt_bbox[gt_node_sub]
gt_ob_bbox = gt_bbox[gt_node_ob]
n = len(gt_eids)
gt_triplets = np.vstack([np.ones(n), np.ones(n),
gt_rel_class, gt_sub_class, gt_ob_class,
gt_sub_bbox.transpose(1, 0),
gt_ob_bbox.transpose(1, 0)]).transpose(1, 0)
return gt_objects, gt_triplets
def extract_pred(g, topk=100, joint_preds=False):
'''extract prediction from prediction graph for validation and visualization'''
if g is None or g.number_of_nodes() == 0:
return None, None
pred_class = g.ndata['node_class_pred'].asnumpy()
pred_class_prob = g.ndata['node_class_logit'].asnumpy()
pred_bbox = g.ndata['pred_bbox'][:,0:4].asnumpy()
pred_objects = np.vstack([pred_class, pred_bbox.transpose(1, 0)]).transpose(1, 0)
score_pred = g.edata['score_pred'].asnumpy()
score_phr = g.edata['score_phr'].asnumpy()
score_pred_topk_eids = (-score_pred).argsort()[0:topk].tolist()
score_phr_topk_eids = (-score_phr).argsort()[0:topk].tolist()
topk_eids = sorted(list(set(score_pred_topk_eids + score_phr_topk_eids)))
pred_rel_prob = g.edata['preds'][topk_eids].asnumpy()
if joint_preds:
pred_rel_class = pred_rel_prob[:,1:].argmax(axis=1)
else:
pred_rel_class = pred_rel_prob.argmax(axis=1)
pred_node_ids = g.find_edges(topk_eids)
pred_node_sub = pred_node_ids[0].asnumpy()
pred_node_ob = pred_node_ids[1].asnumpy()
pred_sub_class = pred_class[pred_node_sub]
pred_sub_class_prob = pred_class_prob[pred_node_sub]
pred_sub_bbox = pred_bbox[pred_node_sub]
pred_ob_class = pred_class[pred_node_ob]
pred_ob_class_prob = pred_class_prob[pred_node_ob]
pred_ob_bbox = pred_bbox[pred_node_ob]
pred_triplets = np.vstack([score_pred[topk_eids], score_phr[topk_eids],
pred_rel_class, pred_sub_class, pred_ob_class,
pred_sub_bbox.transpose(1, 0),
pred_ob_bbox.transpose(1, 0)]).transpose(1, 0)
return pred_objects, pred_triplets
|
bin/basenji_bench_phylopvcf.py | egilbertson-ucsf/basenji | 232 | 12692844 | <filename>bin/basenji_bench_phylopvcf.py
#!/usr/bin/env python
from optparse import OptionParser
import joblib
import os
import pdb
import sys
import h5py
import numpy as np
import pysam
import pyBigWig
from scipy.stats import pearsonr
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import explained_variance_score
from sklearn.model_selection import KFold
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from basenji import dna_io
'''
basenji_bench_phylop.py
'''
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <sad_file> <vcf_file>'
parser = OptionParser(usage)
parser.add_option('-d', dest='n_components',
default=None, type='int',
help='PCA n_components [Default: %default]')
parser.add_option('-e', dest='num_estimators',
default=100, type='int',
help='Number of random forest estimators [Default: %default]')
parser.add_option('-i', dest='iterations',
default=1, type='int',
help='Cross-validation iterations [Default: %default]')
parser.add_option('--msl', dest='msl',
default=1, type='int',
help='Random forest min_samples_leaf [Default: %default]')
parser.add_option('-o', dest='out_dir',
default='regr_out')
parser.add_option('-p', dest='parallel_threads',
default=1, type='int',
help='Parallel threads passed to scikit-learn n_jobs [Default: %default]')
parser.add_option('-r', dest='random_seed',
default=44, type='int')
(options,args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide ISM scores and PhyloP VCF file.')
else:
sad_file = args[0]
phylop_vcf_file = args[1]
np.random.seed(options.random_seed)
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
################################################################
# read mutation scores
with h5py.File(sad_file, 'r') as h5o:
mut_sad = h5o['SAD'][:].astype('float32')
num_muts, num_targets = mut_sad.shape
################################################################
# read mutation phylop
mut_phylop = []
for line in open(phylop_vcf_file):
if not line.startswith('#'):
a = line.split()
phylop = float(a[-1].replace('PP=',''))
mut_phylop.append(phylop)
# transform PhyloP
mut_phylop = np.array(mut_phylop, dtype='float32')
mut_phylop = np.nan_to_num(mut_phylop)
mut_phylop = np.clip(mut_phylop, -1.5, 5)
# verify?
################################################################
# regression
# regressor
r2s, pcors = randfor_cv(mut_sad, mut_phylop,
iterations=options.iterations,
n_estimators=options.num_estimators,
msl=options.msl,
random_state=options.random_seed,
n_jobs=options.parallel_threads)
# save
np.save('%s/r2.npy' % options.out_dir, r2s)
np.save('%s/pcor.npy' % options.out_dir, pcors)
# print stats
iterations = len(r2s)
stats_out = open('%s/stats.txt' % options.out_dir, 'w')
print('R2 %.4f (%.4f)' % (r2s.mean(), r2s.std()/np.sqrt(iterations)), file=stats_out)
print('pR %.4f (%.4f)' % (pcors.mean(), pcors.std()/np.sqrt(iterations)), file=stats_out)
stats_out.close()
def randfor_cv(Xs, ys, folds=8, iterations=1, n_estimators=50, msl=1,
max_features='log2', random_state=44, n_jobs=8):
"""Compute random forest regression accuracy statistics, shuffling at the sequence level."""
r2s = []
pcors = []
for i in range(iterations):
rs_iter = random_state + i
kf = KFold(n_splits=folds, shuffle=True, random_state=rs_iter)
for train_index, test_index in kf.split(Xs):
X_train = Xs[train_index]
y_train = ys[train_index]
X_test = Xs[test_index]
y_test = ys[test_index]
# fit model
if random_state is None:
rs_rf = None
else:
rs_rf = rs_iter+test_index[0]
model = RandomForestRegressor(n_estimators=n_estimators, max_features=max_features,
max_depth=64, min_samples_leaf=msl, min_samples_split=2,
random_state=rs_rf, n_jobs=n_jobs)
model.fit(X_train, y_train)
# predict test set
preds = model.predict(X_test)
# compute R2
r2s.append(explained_variance_score(y_test, preds))
# compute pearsonr
pcors.append(pearsonr(y_test, preds)[0])
r2s = np.array(r2s)
pcors = np.array(pcors)
return r2s, pcors
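# Illustrative call (shapes assumed): cross-validate the mutation scores
# against the clipped PhyloP values, as main() does above, e.g.
#   r2s, pcors = randfor_cv(mut_sad, mut_phylop, folds=8, iterations=1)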
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
benches/microbenchmarks/loop_string.py | mainsail-org/RustPython | 11,058 | 12692860 | string = "a" * ITERATIONS
# ---
for char in string:
pass
|
text/symbol.py | zachbarrow/talon_community | 125 | 12692882 | from talon.voice import Context, Key
ctx = Context("symbol")
keymap = {
# simple
"(question [mark] | questo)": "?",
"plus": "+",
"tilde": "~",
"(bang | exclamation point | clamor)": "!",
"(dollar [sign] | dolly)": "$",
"(downscore | crunder)": "_",
"colon": ":",
"(lparen | [left] paren | precorp )": "(",
"(rparen | are paren | right paren | precose)": ")",
"(brace | left brace | kirksorp)": "{",
"(rbrace | are brace | right brace | kirkos)": "}",
"(angle | left angle | less than)": "<",
"(rangle | are angle | right angle | greater than)": ">",
"(star | asterisk)": "*",
"(pound | hash [sign] | octo | number sign)": "#",
"percent [sign]": "%",
"caret": "^",
"at sign": "@",
"(and sign | ampersand | amper)": "&",
"(pipe | spike)": "|",
"(dubquote | double quote | quatches)": '"',
# compound
"mintwice": "--",
"plustwice": "++",
"minquall": "-=",
"pluqual": "+=",
"starqual": "*=",
"triple quote": "'''",
"triple tick": "```",
"[forward] dubslash": "//",
"coal twice": "::",
"(dot dot | dotdot)": "..",
"(ellipsis | dot dot dot | dotdotdot)": "...",
# unnecessary: use repetition commands?
}
ctx.keymap(keymap)
|
functests/test_continuous_queries.py | adulau/Akumuli | 1,094 | 12692884 | <reponame>adulau/Akumuli
from __future__ import print_function
import akumulid_test_tools as att
import datetime
import itertools
import json
import math
import multiprocessing
import os
import sys
import time
import traceback
try:
from urllib2 import urlopen
except ImportError:
from urllib import urlopen
HOST = '127.0.0.1'
TCPPORT = 8282
HTTPPORT = 8181
"""
Test plan:
Process 1 (reader).
- Start process 2 (writer).
- Read all data in fwd direction in range [begin, end-window].
Process 2 (writer).
- Write data in range [begin, mid] in a loop.
- Long pause.
- Write data in range (mid, end] in a loop.
- Exit.
"""
def writer(dt, delta, N):
try:
chan = att.TCPChan(HOST, TCPPORT)
# fill data in
print("Sending {0} messages through TCP...".format(N))
tags = {
"tag": ['Foo'],
}
print("Generating first {0} messages...".format(N/2))
messages = att.generate_messages(dt, delta, N, 'test', **tags)
for it in itertools.islice(messages, N/2):
chan.send(it)
time.sleep(10)
print("Generating last {0} messages...".format(N/2))
for it in messages:
chan.send(it)
print("{0} messages sent".format(N))
time.sleep(10)
except:
print("Exception in writer")
traceback.print_exc()
sys.exit(1)
def reader(dtstart, delta, N):
# Start writer process
wproc = multiprocessing.Process(name='Writer', target=writer, args=[dtstart, delta, N])
wproc.start()
try:
window = att.get_window_width()
end = dtstart + delta*(N-1) - 2*window
begin = dtstart
timedelta = end - begin
points_required = int(math.ceil((timedelta.seconds*1000000.0 + timedelta.microseconds) /
(delta.seconds*1000000.0 + delta.microseconds))) + 1
query_params = {"output": { "format": "csv" }}
query = att.makequery("test", begin, end, **query_params)
queryurl = "http://{0}:{1}/api/query".format(HOST, HTTPPORT)
response = urlopen(queryurl, json.dumps(query))
exp_ts = begin
exp_value = 0
iterations = 0
print("Test #1 - continuous queries")
for line in response:
try:
columns = line.split(',')
tagline = columns[0].strip()
timestamp = att.parse_timestamp(columns[1].strip())
value = float(columns[2].strip())
exp_tags = 'test tag=Foo'
att.check_values(exp_tags, tagline, 'ENDS', exp_ts, timestamp, exp_value*1.0, value, iterations)
exp_ts += delta
exp_value += 1
iterations += 1
except:
print("Error at line: {0}".format(line))
raise
print("Query completed")
# Check that we received all values
if iterations != points_required:
raise ValueError("Expect {0} data points, get {1} data points".format(points_required, iterations))
print("Test #1 passed")
finally:
wproc.join()
def main(path, debug=False):
if not os.path.exists(path):
print("Path {0} doesn't exists".format(path))
sys.exit(1)
akumulid = att.Akumulid(path)
if not debug:
# Reset database
akumulid.delete_database()
akumulid.create_database()
# start ./akumulid server
print("Starting server...")
akumulid.serve()
time.sleep(5)
else:
print("Akumulid should be started first")
try:
dt = datetime.datetime.utcnow()
delta = datetime.timedelta(milliseconds=1)
nmsgs = 100000
rproc = multiprocessing.Process(name='Reader', target=reader, args=[dt, delta, nmsgs])
rproc.start()
rproc.join()
except:
traceback.print_exc()
sys.exit(1)
finally:
if not debug:
print("Stopping server...")
akumulid.stop()
time.sleep(5)
if __name__ == '__main__':
print(' '.join(sys.argv))
if len(sys.argv) < 2:
print("Not enough arguments")
sys.exit(1)
main(sys.argv[1], sys.argv[2] == 'debug' if len(sys.argv) == 3 else False)
else:
raise ImportError("This module shouldn't be imported")
|
poet/console/commands/init.py | sdispater/poet | 367 | 12692889 | <filename>poet/console/commands/init.py
# -*- coding: utf-8 -*-
import os
import re
from collections import OrderedDict
from pygments import highlight
from pygments.formatters.terminal import TerminalFormatter
from .index_command import IndexCommand
from ...version_parser import VersionParser
from ...version_selector import VersionSelector
from ...utils.lexers import TOMLLexer
from ...utils.helpers import call, template
from ...build import Builder
class InitCommand(IndexCommand):
"""
Creates a basic <comment>poetry.toml</> file in current directory.
init
{ template? : Template to use }
{--name= : Name of the package}
{--description= : Description of the package}
{--author= : Author name of the package}
{--dependency=* : Package to require with a version constraint,
e.g. requests:^2.10.0 or requests==2.11.1}
{--dev-dependency=* : Package to require for development with a version constraint,
e.g. requests:^2.10.0 or requests==2.11.1}
{--l|license= : License of the package}
"""
help = """
The <info>init</info> command creates a basic <comment>poetry.toml</> file
in the current directory.
<info>poet init</info>
"""
def __init__(self):
self._git_config = None
super(InitCommand, self).__init__()
def handle(self):
formatter = self.get_helper('formatter')
self.line([
'',
formatter.format_block('Welcome to the Poet config generator', 'bg=blue;fg=white', True),
''
])
template_name = self.argument('template')
if template_name:
self.line([
'',
'Using <comment>{}</> template to create '
'your <info>poetry.toml</> config.'.format(template_name),
''
])
if template_name == 'default':
output = template('poetry.toml').render()
with open(self.poet_file, 'w') as fd:
fd.write(output)
return
self.line([
'',
'This command will guide you through creating your <info>poetry.toml</> config.',
''
])
poet_file = self.poet_file
git_config = self.git_config()
name = self.option('name')
if not name:
name = os.path.basename(os.path.dirname(poet_file))
name = name.lower()
question = self.create_question(
'Package name [<comment>{}</comment>]: '
.format(name),
default=name
)
name = self.ask(question)
version = '0.1.0'
question = self.create_question(
'Version [<comment>{}</comment>]: '.format(version),
default=version
)
version = self.ask(question)
description = self.option('description') or ''
question = self.create_question(
'Description [<comment>{}</comment>]: '
.format(description),
default=description
)
description = self.ask(question)
author = self.option('author')
if not author and git_config.get('user.name') and git_config.get('user.email'):
author = '{} <{}>'.format(git_config['user.name'], git_config['user.email'])
question = self.create_question(
'Author [<comment>{}</comment>, n to skip]: '
.format(author),
default=author
)
question.validator = lambda v: self._validate_author(v, author)
author = self.ask(question)
if not author:
authors = []
else:
authors = [author]
license = self.option('license') or ''
question = self.create_question(
'License [<comment>{}</comment>]: '
.format(license),
default=license
)
license = self.ask(question)
self.line('')
requirements = []
question = 'Would you like to define your dependencies' \
' (require) interactively?'
if self.confirm(question, True):
requirements = self._format_requirements(
self._determine_requirements(self.option('dependency'))
)
dev_requirements = []
question = 'Would you like to define your dev dependencies' \
' (require-dev) interactively?'
if self.confirm(question, True):
dev_requirements = self._format_requirements(
self._determine_requirements(self.option('dev-dependency'))
)
output = template('poetry.toml.jinja2').render(
name=name,
version=version,
description=description,
authors=authors,
license=license,
dependencies=requirements,
dev_dependencies=dev_requirements
)
if self.input.is_interactive():
self.line('<info>Generated file</>')
if self.output.is_decorated():
self.line([
'',
highlight(
output,
TOMLLexer(),
TerminalFormatter()
),
''
])
else:
self.line(['', output, ''])
if not self.confirm(
'Do you confirm generation?', True
):
self.line('<error>Command aborted</error>')
return 1
with open(self.poet_file, 'w') as fd:
fd.write(output)
def _determine_requirements(self, requires):
if requires:
requires = self._normalize_requirements(requires)
result = []
for requirement in requires:
if 'version' not in requirement:
# determine the best version automatically
version = self._find_best_version_for_package(requirement['name'])
requirement['version'] = version
self.line(
'Using version <info>{}</info> for <info>{}</info>'
.format(requirement['version'], requirement['name'])
)
result.append(requirement['name'] + ' ' + requirement['version'])
version_parser = VersionParser()
question = self.create_question('Search for a package:')
package = self.ask(question)
while package is not None:
matches = self._find_packages(package)
if not matches:
self.line('<error>Unable to find package</>')
package = False
else:
exact_match = None
choices = []
for found_package in matches:
choices.append(found_package['name'])
# Removing exact match feature for now
# if found_package['name'] == package:
# exact_match = True
# break
if not exact_match:
self.line(
'Found <info>{}</info> packages matching <info>{}</info>'
.format(
len(matches),
package
)
)
package = self.choice(
'\nEnter package # to add, or the complete package name if it is not listed',
choices,
attempts=3
)
# no constraint yet, determine the best version automatically
if package is not False and ' ' not in package:
question = self.create_question(
'Enter the version constraint to require '
'(or leave blank to use the latest version):'
)
question.attempts = 3
question.validator = lambda x: (x or '').strip() or False
constraint = self.ask(question)
if constraint is False:
constraint = self._find_best_version_for_package(package)
self.line(
'Using version <info>{}</info> for <info>{}</info>'
.format(constraint, package)
)
package += ' {}'.format(constraint)
if package is not False:
requires.append(package)
package = self.ask('\nSearch for a package:')
return requires
def _validate_author(self, author, default):
author = author or default
if author in ['n', 'no']:
return
m = Builder.AUTHOR_REGEX.match(author)
if not m:
raise ValueError(
'Invalid author string. Must be in the format: '
'<NAME> <<EMAIL>>'
)
return author
def _find_packages(self, package):
return self._repository.search(package, 1)
def _find_best_version_for_package(self, package):
selector = VersionSelector(self._repository)
package = selector.find_best_candidate(package)
return selector.find_recommended_require_version(package)
def _format_requirements(self, requirements):
requires = OrderedDict()
requirements = self._normalize_requirements(requirements)
for requirement in requirements:
requires[requirement['name']] = requirement['version']
return requires
def _normalize_requirements(self, requirements):
parser = VersionParser()
return parser.parse_name_version_pairs(requirements)
def git_config(self):
config_list = call(['git', 'config', '-l'])
git_config = {}
m = re.findall('(?ms)^([^=]+)=(.*?)$', config_list)
if m:
for group in m:
git_config[group[0]] = group[1]
return git_config
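# The dict above is keyed by the flat names printed by ``git config -l``,
# e.g. (illustrative values only):
#   {'user.name': '<NAME>', 'user.email': '<EMAIL>'}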
|
services/ui_backend_service/data/cache/search_artifacts_action.py | Netflix/metaflow-service | 103 | 12692914 | import hashlib
import json
from .client import CacheAction
from services.utils import get_traceback_str
from .utils import (error_event_msg, progress_event_msg,
artifact_cache_id, unpack_pathspec_with_attempt_id,
MAX_S3_SIZE)
from ..refiner.refinery import unpack_processed_value
from services.ui_backend_service.api.utils import operators_to_filters
from metaflow import DataArtifact
class SearchArtifacts(CacheAction):
"""
Fetches artifacts by pathspecs and performs a search against the object contents.
Caches artifacts based on pathspec, and search results based on a combination of the query and the artifacts searched.
Parameters
----------
pathspecs : List[str]
A list of artifact pathspecs (with attempt id as last component)
to fetch and match the search term against: ["FlowId/RunNumber/StepName/TaskId/ArtifactName/0"]
searchterm : str
A search term to match against the contents of the fetched S3 artifacts.
Returns
-------
Dict or None
example:
{
"pathspec": {
"included": boolean,
"matches": boolean
}
}
matches: determines whether object content matched search term
included: denotes if the object content was able to be included in the search (accessible or not)
"""
@classmethod
def format_request(cls, pathspecs, searchterm, operator="eq", invalidate_cache=False):
msg = {
'pathspecs': list(frozenset(sorted(pathspecs))),
'searchterm': searchterm,
'operator': operator
}
artifact_keys = []
for pathspec in pathspecs:
artifact_keys.append(artifact_cache_id(pathspec))
request_id = lookup_id(pathspecs, searchterm, operator)
stream_key = 'search:stream:%s' % request_id
result_key = 'search:result:%s' % request_id
return msg,\
[result_key, *artifact_keys],\
stream_key,\
[stream_key, result_key],\
invalidate_cache
@classmethod
def response(cls, keys_objs):
"""
Action should respond with a dictionary of
{
"pathspec": {
"matches": boolean,
"included": boolean
}
}
that tells the client whether the search term matches in the given pathspec, or if performing search was impossible
"""
return [json.loads(val) for key, val in keys_objs.items() if key.startswith('search:result')][0]
@classmethod
def stream_response(cls, it):
for msg in it:
if msg is None:
yield msg
else:
yield {'event': msg}
@classmethod
def execute(cls,
message=None,
keys=None,
existing_keys={},
stream_output=None,
invalidate_cache=False,
**kwargs):
pathspecs = message['pathspecs']
if invalidate_cache:
results = {}
pathspecs_to_fetch = [loc for loc in pathspecs]
else:
# make a copy of already existing results, as the cache action has to produce all keys it promised
# in the format_request response.
results = {**existing_keys}
# Make a list of artifact pathspecs that require fetching (not cached previously)
pathspecs_to_fetch = [loc for loc in pathspecs if not artifact_cache_id(loc) in existing_keys]
artifact_keys = [key for key in keys if key.startswith('search:artifactdata')]
result_key = [key for key in keys if key.startswith('search:result')][0]
# Helper functions for streaming status updates.
def stream_progress(num):
return stream_output(progress_event_msg(num))
def stream_error(err, id, traceback=None):
return stream_output(error_event_msg(err, id, traceback))
# Fetch artifacts that are not cached already
for idx, pathspec in enumerate(pathspecs_to_fetch):
stream_progress((idx + 1) / len(pathspecs_to_fetch))
try:
pathspec_without_attempt, attempt_id = unpack_pathspec_with_attempt_id(pathspec)
artifact_key = "search:artifactdata:{}".format(pathspec)
artifact = DataArtifact(pathspec_without_attempt, attempt=attempt_id)
if artifact.size < MAX_S3_SIZE:
results[artifact_key] = json.dumps([True, artifact.data])
else:
results[artifact_key] = json.dumps(
[False, 'artifact-too-large', "{}: {} bytes".format(artifact.pathspec, artifact.size)])
except Exception as ex:
stream_error(str(ex), ex.__class__.__name__, get_traceback_str())
results[artifact_key] = json.dumps([False, ex.__class__.__name__, get_traceback_str()])
# Perform search on loaded artifacts.
search_results = {}
searchterm = message['searchterm']
operator = message['operator']
filter_fn = operators_to_filters[operator] if operator in operators_to_filters else operators_to_filters["eq"]
def format_loc(x):
"extract pathspec from the artifact cache key"
return x[len("search:artifactdata:"):]
for key in artifact_keys:
if key in results:
load_success, value, detail = unpack_processed_value(json.loads(results[key]))
else:
load_success, value, _ = False, None, None
# keep the matching case-insensitive
matches = filter_fn(str(value).lower(), searchterm.lower())
search_results[format_loc(key)] = {
"included": load_success,
"matches": matches,
"error": None if load_success else {
"id": value or "artifact-handle-failed",
"detail": detail or "Unknown error during artifact processing"
}
}
results[result_key] = json.dumps(search_results)
return results
def lookup_id(locations, searchterm, operator):
"construct a unique id to be used with stream_key and result_key"
_string = "-".join(list(frozenset(sorted(locations)))) + searchterm + operator
return hashlib.sha1(_string.encode('utf-8')).hexdigest()
|
scripts/feature_matching.py | Soldie/Human-detection-and-Tracking | 906 | 12692924 | <filename>scripts/feature_matching.py
import numpy as np
import cv2
import sys
import matplotlib.pyplot as plt
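# Usage sketch (assumed from the sys.argv handling below):
#   python feature_matching.py <image1> <image2>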
img1 = cv2.imread(sys.argv[1], 0)
img2 = cv2.imread(sys.argv[2], 0)
# ORB keypoints/descriptors and a brute-force Hamming matcher with cross-check
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
# sort by descriptor distance and draw the ten closest matches
matches = sorted(matches, key=lambda x: x.distance)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)
cv2.imshow('window', img3)
cv2.waitKey(0)
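# Editor's sketch (not part of the original script): an alternative match
# selection using k-NN matching with Lowe's ratio test, which often rejects
# ambiguous ORB matches more reliably than plain cross-check matching.
def ratio_test_matches(des_a, des_b, ratio=0.75):
    # crossCheck must be disabled so knnMatch can return two neighbours per query
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    pairs = matcher.knnMatch(des_a, des_b, k=2)
    good = [p[0] for p in pairs if len(p) == 2 and p[0].distance < ratio * p[1].distance]
    return sorted(good, key=lambda m: m.distance)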
|
docassemble_webapp/docassemble/webapp/cloud.py | knod/docassemble | 568 | 12692944 | <filename>docassemble_webapp/docassemble/webapp/cloud.py
from docassemble.base.config import s3_config, S3_ENABLED, azure_config, AZURE_ENABLED
def get_cloud():
if S3_ENABLED:
import docassemble.webapp.amazon
cloud = docassemble.webapp.amazon.s3object(s3_config)
elif AZURE_ENABLED:
import docassemble.webapp.microsoft
cloud = docassemble.webapp.microsoft.azureobject(azure_config)
else:
cloud = None
return cloud
def get_custom_cloud(provider, config):
if provider is None or config is None:
return None
if provider == 's3':
import docassemble.webapp.amazon
cloud = docassemble.webapp.amazon.s3object(config)
elif provider == 'azure':
import docassemble.webapp.microsoft
cloud = docassemble.webapp.microsoft.azureobject(config)
else:
cloud = None
return cloud
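# Editor's sketch (not part of the original module): a caller that prefers an
# explicitly configured provider and otherwise falls back to the site-wide
# backend chosen by get_cloud(); 'provider' and 'config' are hypothetical inputs.
def pick_cloud(provider=None, config=None):
    return get_custom_cloud(provider, config) or get_cloud()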
|
pyftdi/tests/backend/mpsse.py | marcjordan2112/pyftdi | 345 | 12692984 | <gh_stars>100-1000
"""PyUSB virtual FTDI device."""
# Copyright (c) 2020, <NAME> <<EMAIL>>
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from collections import deque
from logging import getLogger
from struct import unpack as sunpack
from typing import Union
from pyftdi.tracer import FtdiMpsseEngine, FtdiMpsseTracer
class VirtMpsseTracer(FtdiMpsseTracer):
"""Reuse MPSSE tracer as a MPSSE command decoder engine.
"""
def __init__(self, port: 'VirtFtdiPort', version: int):
super().__init__(version)
        self.log = getLogger(f'pyftdi.virt.mpsse.{port.iface}')
self._port = port
def _get_engine(self, iface: int):
iface -= 1
try:
self._engines[iface]
except IndexError as exc:
raise ValueError('No MPSSE engine available on interface %d' %
iface) from exc
if not self._engines[iface]:
self._engines[iface] = VirtMpsseEngine(self, self._port)
return self._engines[iface]
class VirtMpsseEngine(FtdiMpsseEngine):
"""Virtual implementation of a MPSSE.
Far from being complete for now :-)
"""
def __init__(self, tracer: VirtMpsseTracer, port: 'VirtFtdiPort'):
super().__init__(port.iface)
self.log = getLogger(f'pyftdi.virt.mpsse.{port.iface}')
self._tracer = tracer
self._port = port
self._width = port.width
self._mask = (1 << self._width) - 1
self._reply_q = deque()
def send(self, buf: Union[bytes, bytearray]) -> None:
super().send(buf)
# cannot post the response before the send() method has completed
# see FtdiMpsseEngine.send() for execution steps: expected reply size
# is only known (stored) once the command execution has completed
self.reply()
def reply(self) -> None:
"""Post the reply to a command back into the virtual FTDI FIFO."""
while self._reply_q:
self._port.write_from_mpsse(self, self._reply_q.popleft())
def _cmd_get_bits_low(self):
super()._cmd_get_bits_low()
byte = self._port.gpio & 0xff
buf = bytes([byte])
self._reply_q.append(buf)
return True
def _cmd_get_bits_high(self):
super()._cmd_get_bits_high()
byte = (self._port.gpio >> 8) & 0xff
buf = bytes([byte])
self._reply_q.append(buf)
return True
def _cmd_set_bits_low(self):
buf = self._trace_tx[1:3]
if not super()._cmd_set_bits_low():
return False
port = self._port
byte, direction = sunpack('BB', buf)
gpi = port.gpio & ~direction & self._mask
gpo = byte & direction & self._mask
msb = port.gpio & ~0xFF
gpio = gpi | gpo | msb
port.update_gpio(self, False, direction, gpio)
self.log.debug('. bbwl %04x: %s', port.gpio, f'{port.gpio:016b}')
return True
def _cmd_set_bits_high(self):
buf = self._trace_tx[1:3]
if not super()._cmd_set_bits_high():
return False
port = self._port
byte, direction = sunpack('BB', buf)
byte <<= 8
direction <<= 8
gpi = port.gpio & ~direction & self._mask
gpo = byte & direction & self._mask
lsb = port.gpio & 0xFF
gpio = gpi | gpo | lsb
port.update_gpio(self, False, direction, gpio)
self.log.debug('. bbwh %04x: %s', port.gpio, f'{port.gpio:016b}')
return True
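# Editor's sketch (not part of pyftdi): the low-byte update above combines three
# sources -- pins kept as inputs, pins newly driven as outputs, and the untouched
# high byte. The same arithmetic in isolation, assuming a 16-bit wide port:
def merge_low_byte(current_gpio, byte, direction, mask=0xFFFF):
    gpi = current_gpio & ~direction & mask  # preserve pins configured as inputs
    gpo = byte & direction & mask           # apply new values on output pins
    msb = current_gpio & ~0xFF              # leave the high byte unchanged
    return gpi | gpo | msb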
|
ltc/urls.py | r1990v/JMeter-Control-Center | 166 | 12692994 | from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from . import settings
from django.urls import path
admin.autodiscover()
urlpatterns = [
path('admin', admin.site.urls),
path('', include('ltc.web.urls'), name='index'),
path('analyzer', include('ltc.analyzer.urls'), name='analyzer'),
path('online', include('ltc.online.urls'), name='online'),
path('controller', include('ltc.controller.urls'), name='controller'),
path('administrator', include('ltc.administrator.urls'), name='administrator')
] + static(
settings.STATIC_URL, document_root=settings.STATIC_URL
)
|
afl_utils/afl_cron.py | viniul/afl-utils | 438 | 12693011 | """
Copyright 2015-2016 @_rc0r <<EMAIL>>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import datetime
try:
import simplejson as json
except ImportError:
import json
import sys
import time
import afl_utils
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
class AflCronDaemon(object):
def __init__(self, config_file, quiet=False):
self.config = self.load_config(config_file)
self.quiet = quiet
def load_config(self, config_file):
with open(config_file, 'r') as raw_config:
config = json.load(raw_config)
return config
def get_module(self, module_path):
module_name = module_path.rsplit('.', 1)[1]
try:
module = __import__(module_path, fromlist=[module_name])
except ImportError:
raise ValueError('Module \'{}\' could not be imported' .format(module_path,))
return module
def get_member(self, module, member_name):
try:
cls = getattr(module, member_name)
except AttributeError:
raise ValueError('Module \'{}\' has no member \'{}\''.format(module, member_name, ))
return cls
def run_job(self, job):
job_module = self.get_module(job['module'])
job_func = self.get_member(job_module, job['function'])
job_args = [job['module'].rsplit('.', 1)[1]] + job['params'].split()
if not self.quiet:
print_ok('Executing \'{}\' ({}.{})'.format(job['name'], job['module'], job['function']))
job_func(job_args)
def run(self):
doExit = False
while not doExit:
try:
time_start = datetime.datetime.now()
for job in self.config['jobs']:
self.run_job(job)
print_ok('All jobs done [{}]'.format(datetime.datetime.now()-time_start))
if float(self.config['interval']) < 0:
doExit = True
else:
time.sleep(float(self.config['interval']) * 60)
except KeyboardInterrupt:
print('\b\b')
print_ok('Aborted by user. Good bye!')
doExit = True
def show_info():
print(clr.CYA + 'afl-cron ' + clr.BRI + '%s' % afl_utils.__version__ + clr.RST + ' by %s' % afl_utils.__author__)
print('Periodically run tools from the afl-utils collection.')
print('')
def main(argv):
    parser = argparse.ArgumentParser(description='Periodically run tools from the afl-utils collection.',
                                     usage='afl-cron [-c config] [-d] [-h] [-q]\n')
    parser.add_argument('-c', '--config', dest='config_file',
                        help='afl-cron config file (Default: afl-cron.conf)!', default='afl-cron.conf')
parser.add_argument('-d', '--daemon', dest='daemon', action='store_const', const=True,
help='Daemon mode: run in background', default=False)
parser.add_argument('-q', '--quiet', dest='quiet', action='store_const', const=True,
help='Suppress any output', default=False)
args = parser.parse_args(argv[1:])
if not args.quiet and not args.daemon:
show_info()
cron = AflCronDaemon(args.config_file, quiet=args.quiet)
cron.run()
if __name__ == "__main__":
main(sys.argv)
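# Editor's note (illustrative config only; the field names come from run() and
# run_job() above, while the module and params values are hypothetical):
# {
#     "interval": 30,
#     "jobs": [
#         {"name": "collect crashes", "module": "afl_utils.afl_collect",
#          "function": "main", "params": "-r ./sync_dir ./collected"}
#     ]
# }
# "interval" is in minutes (it is multiplied by 60 before sleeping); a negative
# interval makes run() execute the job list exactly once.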
|
src/backend/common/manipulators/match_manipulator.py | ofekashery/the-blue-alliance | 266 | 12693025 | <reponame>ofekashery/the-blue-alliance
from typing import List
from backend.common.cache_clearing import get_affected_queries
from backend.common.manipulators.manipulator_base import ManipulatorBase
from backend.common.models.cached_model import TAffectedReferences
from backend.common.models.match import Match
class MatchManipulator(ManipulatorBase[Match]):
"""
Handle Match database writes.
"""
@classmethod
def getCacheKeysAndQueries(
cls, affected_refs: TAffectedReferences
) -> List[get_affected_queries.TCacheKeyAndQuery]:
return get_affected_queries.match_updated(affected_refs)
"""
@classmethod
def postDeleteHook(cls, matches):
'''
To run after the match has been deleted.
'''
for match in matches:
try:
FirebasePusher.delete_match(match)
except Exception:
logging.warning("Firebase delete_match failed!")
"""
"""
@classmethod
def postUpdateHook(cls, matches, updated_attr_list, is_new_list):
'''
To run after the match has been updated.
Send push notifications to subscribed users
Only if the match is part of an active event
'''
unplayed_match_events = []
for (match, updated_attrs, is_new) in zip(matches, updated_attr_list, is_new_list):
event = match.event.get()
# Only continue if the event is currently happening
if event.now:
if match.has_been_played:
if is_new or 'alliances_json' in updated_attrs:
# There is a score update for this match, push a notification
logging.info("Sending push notifications for {}".format(match.key_name))
try:
NotificationHelper.send_match_score_update(match)
except Exception, exception:
logging.error("Error sending match updates: {}".format(exception))
logging.error(traceback.format_exc())
try:
TBANSHelper.match_score(match)
except Exception, exception:
logging.error("Error sending match {} updates: {}".format(match.key_name, exception))
logging.error(traceback.format_exc())
else:
if is_new or (set(['alliances_json', 'time', 'time_string']).intersection(set(updated_attrs)) != set()):
# The match has not been played and we're changing a property that affects the event's schedule
# So send a schedule update notification for the parent event
if event not in unplayed_match_events:
unplayed_match_events.append(event)
# Try to send video notifications
if '_video_added' in updated_attrs:
try:
NotificationHelper.send_match_video(match)
except Exception, exception:
logging.error("Error sending match video updates: {}".format(exception))
logging.error(traceback.format_exc())
try:
TBANSHelper.match_video(match)
except Exception, exception:
logging.error("Error sending match video updates: {}".format(exception))
logging.error(traceback.format_exc())
'''
If we have an unplayed match during an event within a day, send out a schedule update notification
'''
for event in unplayed_match_events:
try:
logging.info("Sending schedule updates for: {}".format(event.key_name))
NotificationHelper.send_schedule_update(event)
except Exception, exception:
logging.error("Eror sending schedule updates for: {}".format(event.key_name))
try:
TBANSHelper.event_schedule(event)
except Exception, exception:
logging.error("Eror sending schedule updates for: {}".format(event.key_name))
logging.error(traceback.format_exc())
try:
# When an event gets a new schedule, we should schedule `match_upcoming` notifications for the first matches for the event
TBANSHelper.schedule_upcoming_matches(event)
except Exception, exception:
logging.error("Eror scheduling match_upcoming for: {}".format(event.key_name))
logging.error(traceback.format_exc())
'''
Enqueue firebase push
'''
affected_stats_event_keys = set()
for (match, updated_attrs, is_new) in zip(matches, updated_attr_list, is_new_list):
# Only attrs that affect stats
if is_new or set(['alliances_json', 'score_breakdown_json']).intersection(set(updated_attrs)) != set():
affected_stats_event_keys.add(match.event.id())
try:
FirebasePusher.update_match(match, updated_attrs)
except Exception:
logging.warning("Firebase update_match failed!")
logging.warning(traceback.format_exc())
# Enqueue statistics
for event_key in affected_stats_event_keys:
# Enqueue task to calculate matchstats
try:
taskqueue.add(
url='/tasks/math/do/event_matchstats/' + event_key,
method='GET')
except Exception:
logging.error("Error enqueuing event_matchstats for {}".format(event_key))
logging.error(traceback.format_exc())
# Enqueue task to calculate district points
try:
taskqueue.add(
url='/tasks/math/do/district_points_calc/{}'.format(event_key),
method='GET')
except Exception:
logging.error("Error enqueuing district_points_calc for {}".format(event_key))
logging.error(traceback.format_exc())
# Enqueue task to calculate event team status
try:
taskqueue.add(
url='/tasks/math/do/event_team_status/{}'.format(event_key),
method='GET')
except Exception:
logging.error("Error enqueuing event_team_status for {}".format(event_key))
logging.error(traceback.format_exc())
# Enqueue updating playoff advancement
try:
taskqueue.add(
url='/tasks/math/do/playoff_advancement_update/{}'.format(event_key),
method='GET')
except Exception:
logging.error("Error enqueuing advancement update for {}".format(event_key))
logging.error(traceback.format_exc())
"""
@classmethod
def updateMerge(
cls, new_model: Match, old_model: Match, auto_union: bool = True
) -> Match:
# Lets postUpdateHook know if videos went from 0 to >0
added_video = not old_model.has_video and new_model.has_video
cls._update_attrs(new_model, old_model, auto_union)
if added_video:
old_model._updated_attrs.add("_video_added")
return old_model
|
stable_nalu/dataset/simple_function_static_test.py | wlm2019/Neural-Arithmetic-Units | 147 | 12693069 |
from nose.tools import *
import scipy.stats
import torch
import numpy as np
from stable_nalu.dataset import SimpleFunctionStaticDataset
def test_solveable_by_linear_algebra():
dataset = SimpleFunctionStaticDataset(
operation='add', seed=0
)
dataset_test = iter(dataset.fork(input_range=1).dataloader(batch_size=100))
x_batch, t_batch = next(dataset_test)
x_batch_np = np.stack(x_batch)
t_batch_np = np.stack(t_batch)
w_merged_np = np.linalg.solve(x_batch_np, t_batch_np.ravel())
w_merged_np_int = np.round(w_merged_np, 0).astype('int8')
# W is whole numbers
np.testing.assert_almost_equal(
w_merged_np - w_merged_np_int,
np.zeros(100),
decimal=4
)
# W is either 0, 1, 2
# NOTE: a different seed might not result in an overlap, thus {2} might
# not be present.
assert_equal(
set(w_merged_np_int.tolist()),
{0, 1, 2}
)
# Compute a, b range parameters
# For seed=0, the b subset, is a subset of the a subset, which is assumed
# by the following algorithm.
a_start = None
a_end = None
b_start = None
b_end = None
previuse_w_value = 0
for w_index, w_value in enumerate(w_merged_np_int.tolist()):
if w_value == 1 and previuse_w_value == 0:
a_start = w_index
elif w_value == 0 and previuse_w_value == 1:
a_end = w_index
elif w_value == 2 and previuse_w_value == 1:
b_start = w_index
elif w_value == 1 and previuse_w_value == 2:
b_end = w_index
previuse_w_value = w_value
# Compare a and b range parameters
assert_equal(a_start, dataset.a_start)
assert_equal(a_end, dataset.a_end)
assert_equal(b_start, dataset.b_start)
assert_equal(b_end, dataset.b_end)
def test_input_range():
dataset = SimpleFunctionStaticDataset(
operation='add',
vector_size=10000,
seed=0
)
x, t = dataset.fork(input_range=5)[0]
_, p = scipy.stats.kstest(
x,
scipy.stats.uniform(loc=0, scale=5).cdf
)
assert p > 0.5
def test_output_shape():
dataset = SimpleFunctionStaticDataset(
operation='add',
seed=0
)
x, t = dataset.fork(input_range=5)[0]
assert_equal(x.shape, (100, ))
# Note, t.shape should be a 1-long vector, not a scalar. Otherwise
    # the loss function gets confused about what the observation dimension
# is.
assert_equal(t.shape, (1, ))
|
incomplete/rasterizer/rasterizer/examples/e1.py | choosewhatulike/500lines | 26,185 | 12693080 | <filename>incomplete/rasterizer/rasterizer/examples/e1.py
from .. import *
def run(image):
scene = Scene()
scene.add(Triangle([Vector(0.5, 0.5), Vector(0.8, 0.5), Vector(0.5, 0.8)],
Color(1,0,0,1)))
scene.draw(image)
|
graph4nlp/pytorch/modules/utils/logger.py | cminusQAQ/graph4nlp | 1,269 | 12693081 | import json
import os
from . import constants as Constants
class Logger:
def __init__(self, dirname, config=None, overwrite=False, logging=True):
self.logging = logging
if os.path.exists(dirname):
if not overwrite:
raise Exception("Directory already exists: {}".format(dirname))
else:
os.makedirs(dirname)
if config is not None:
self.log_json(config, os.path.join(dirname, Constants._CONFIG_FILE))
if logging:
self.fout = open(os.path.join(dirname, Constants._SAVED_METRICS_FILE), "a")
def log_json(self, data, filename, mode="w"):
with open(filename, mode) as outfile:
outfile.write(json.dumps(data, indent=4, ensure_ascii=False))
def write(self, text):
if self.logging:
self.fout.writelines(text + "\n")
self.fout.flush()
def close(self):
if self.logging:
self.fout.close()
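# Editor's sketch (not part of graph4nlp; the directory and config are hypothetical):
# logger = Logger("out/run1", config={"lr": 1e-3}, overwrite=True)
# logger.write("epoch 1: loss=0.42")
# logger.close()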
|
api/resources.py | annevandalfsen/screenbird | 121 | 12693106 | from django.core.urlresolvers import reverse
from djangorestframework.compat import View
from djangorestframework.mixins import ResponseMixin
from djangorestframework.renderers import JSONRenderer
from django.contrib.auth.models import User
from djangorestframework.resources import ModelResource
from djangorestframework.response import Response
from accounts.models import UserProfile
from videos.models import Video, Channel
def is_allowed(user):
"""
"""
return (user.userprofile.is_paid) or (user.userprofile.is_using_trial) or (user.userprofile.api_key)
class VideoResource(ResponseMixin, View):
"""
Returns all videos under the account of the api_key provided.
The format of the response is in JSON.
"""
renderers = [JSONRenderer,]
csrf_exempt = True
def get(self, request):
key = request.GET.get('api_key', None)
account_id = -1
if key:
userprofile = None
try:
userprofile = UserProfile.objects.get(api_key=key)
account_id = userprofile.user_id
except:
pass
if userprofile:
user = None
try:
user = User.objects.get(pk=account_id)
except:
pass
if user:
if is_allowed(user):
videos = Video.objects.filter(uploader__id=account_id)
json_videos = []
for video in videos:
channel_name = None
if video.channel:
channel_name = video.channel.name
json_videos.append(
{
'id':video.id,
'channel':channel_name,
'url':video.get_absolute_url(),
'title':video.title,
'embed_code':video.get_embed_code()
}
)
response = Response(200, {'success':True,'videos':json_videos} )
else:
response = Response(401)
else:
response = Response(401)
else:
response = Response(401)
else:
response = Response(400)
return self.render(response)
class ChannelVideoResource(ResponseMixin, View):
"""
Returns all videos under the channel of an account of the api_key provided.
The format of the response is in JSON.
"""
renderers = [JSONRenderer,]
csrf_exempt = True
def get(self, request):
channel_link = request.GET.get('channel_link', None)
key = request.GET.get('api_key', None)
account_id = -1
if key and channel_link:
userprofile = None
try:
userprofile = UserProfile.objects.get(api_key=key)
account_id = userprofile.user_id
except:
pass
channel = None
try:
channel = Channel.objects.get(api_link=channel_link)
except:
pass
if channel:
if (channel.owner.id == account_id) and is_allowed(channel.owner):
videos = Video.objects.filter(channel=channel)
json_videos = []
for video in videos:
channel_name = None
if video.channel:
channel_name = video.channel.name
json_videos.append(
{
'id':video.id,
'channel':channel_name,
'url':video.get_absolute_url(),
'title':video.title,
'embed_code':video.get_embed_code()
}
)
response = Response(200, {'success':True,'videos':json_videos} )
else:
response = Response(401)
else:
response = Response(401)
else:
response = Response(401)
return self.render(response)
class LatestVideoResource(ResponseMixin, View):
"""
Returns the latest video under the account of the api_key provided.
The format of the response is in JSON.
"""
renderers = [JSONRenderer,]
csrf_exempt = True
def get(self, request):
key = request.GET.get('api_key', None)
account_id = -1
if key:
userprofile = None
try:
userprofile = UserProfile.objects.get(api_key=key)
account_id = userprofile.user_id
except:
pass
if userprofile:
user = None
try:
user = User.objects.get(pk=account_id)
except:
pass
if user:
if is_allowed(user):
videos = Video.objects.filter(uploader__id=account_id).order_by('-created')[:1:]
json_videos = []
for video in videos:
channel_name = None
if video.channel:
channel_name = video.channel.name
json_videos.append(
{
'id':video.id,
'channel':channel_name,
'url':video.get_absolute_url(),
'title':video.title,
'embed_code':video.get_embed_code()
}
)
response = Response(200, {'success':True,'videos':json_videos} )
else:
response = Response(401)
else:
response = Response(401)
else:
response = Response(401)
else:
response = Response(400)
return self.render(response)
class LatestChannelVideoResource(ResponseMixin, View):
renderers = [JSONRenderer,]
csrf_exempt = True
def get(self, request):
key = request.GET.get('api_key', None)
channel_link = request.GET.get('channel_link', None)
account_id = -1
if key and channel_link:
userprofile = None
try:
userprofile = UserProfile.objects.get(api_key=key)
account_id = userprofile.user_id
except:
pass
channel = None
try:
channel = Channel.objects.get(api_link=channel_link)
except:
pass
if channel:
if (channel.owner.id == account_id) and is_allowed(channel.owner):
videos = Video.objects.filter(channel=channel).order_by('-created')[:1:]
json_videos = []
for video in videos:
channel_name = None
if video.channel:
channel_name = video.channel.name
json_videos.append(
{
'id':video.id,
'channel':channel_name,
'url':video.get_absolute_url(),
'title':video.title,
'embed_code':video.get_embed_code()
}
)
response = Response(200, {'success':True,'videos':json_videos} )
else:
response = Response(401)
else:
response = Response(401)
else:
response = Response(401)
return self.render(response)
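# Editor's note (illustrative values): every endpoint above that succeeds returns
# a body built from json_videos, i.e.
# {"success": true,
#  "videos": [{"id": 1, "channel": "My channel", "url": "/videos/abc123",
#              "title": "Demo clip", "embed_code": "<iframe ...></iframe>"}]}
# while a missing or invalid api_key / channel_link yields a bare HTTP 400/401 response.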
|
google/colab/syntax.py | figufema/TesteClone | 1,521 | 12693120 | <reponame>figufema/TesteClone
"""Utility to add editor syntax highlighting to literal code strings.
Example:
from google.colab import syntax
query = syntax.sql('''
SELECT * from tablename
''')
"""
def html(s):
"""Noop function to enable HTML highlighting for its argument."""
return s
def javascript(s):
"""Noop function to enable JavaScript highlighting for its argument."""
return s
def sql(s):
"""Noop function to enable SQL highlighting for its argument."""
return s
def css(s):
"""Noop function to enable CSS highlighting for its argument."""
return s
|
pythran/tests/cases/calculate_u.py | davidbrochart/pythran | 1,647 | 12693121 | # from the paper `using cython to speedup numerical python programs'
#pythran export timeloop(float, float, float, float, float, float list list, float list list, float list list)
#pythran export timeloop(float, float, float, float, float, int list list, int list list, int list list)
#bench A=[list(range(70)) for i in range(100)] ; B=[list(range(70)) for i in range(100)] ; C=[list(range(70)) for i in range(100)] ; timeloop(1.,2.,.01,.1,.18, A,B,C )
#runas A=[list(range(10)) for i in range(5)] ; B=[list(range(10)) for i in range(5)] ; C=[list(range(10)) for i in range(5)] ; timeloop(1.,2.,.1,.1,.2, A,B,C )
def timeloop(t, t_stop, dt, dx, dy, u, um, k):
while t <= t_stop:
t += dt
new_u = calculate_u(dt, dx, dy, u, um, k)
um = u
u = new_u
return u
def calculate_u(dt, dx, dy, u, um, k):
up = [ [0.]*len(u[0]) for i in range(len(u)) ]
"omp parallel for"
for i in range(1, len(u)-1):
for j in range(1, len(u[0])-1):
up[i][j] = 2*u[i][j] - um[i][j] + \
(dt/dx)**2*(
(0.5*(k[i+1][j] + k[i][j])*(u[i+1][j] - u[i][j]) -
0.5*(k[i][j] + k[i-1][j])*(u[i][j] - u[i-1][j]))) + \
(dt/dy)**2*(
(0.5*(k[i][j+1] + k[i][j])*(u[i][j+1] - u[i][j]) -
0.5*(k[i][j] + k[i][j-1])*(u[i][j] - u[i][j-1])))
return up
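# Editor's note: the update above is the usual second-order finite-difference
# scheme for the variable-coefficient 2-D wave equation u_tt = (k u_x)_x + (k u_y)_y:
#   u[i][j]^(n+1) = 2*u[i][j]^n - u[i][j]^(n-1)
#                   + (dt/dx)**2 * (k_{i+1/2,j}*(u[i+1][j]-u[i][j]) - k_{i-1/2,j}*(u[i][j]-u[i-1][j]))
#                   + (dt/dy)**2 * (the analogous differences in j)
# where the half-index coefficients are approximated by averages such as
# k_{i+1/2,j} ~ 0.5*(k[i+1][j] + k[i][j]).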
|
datasets/amttl/amttl.py | WojciechKusa/datasets | 10,608 | 12693142 | <reponame>WojciechKusa/datasets<filename>datasets/amttl/amttl.py
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Introduction to AMTTL CWS Dataset"""
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{xing2018adaptive,
title={Adaptive multi-task transfer learning for Chinese word segmentation in medical text},
author={<NAME> and <NAME> and <NAME>},
booktitle={Proceedings of the 27th International Conference on Computational Linguistics},
pages={3619--3630},
year={2018}
}
"""
_DESCRIPTION = """\
Chinese word segmentation (CWS) trained from open source corpus faces dramatic performance drop
when dealing with domain text, especially for a domain with lots of special terms and diverse
writing styles, such as the biomedical domain. However, building domain-specific CWS requires
extremely high annotation cost. In this paper, we propose an approach by exploiting domain-invariant
knowledge from high resource to low resource domains. Extensive experiments show that our mode
achieves consistently higher accuracy than the single-task CWS and other transfer learning
baselines, especially when there is a large disparity between source and target domains.
This dataset is the accompanied medical Chinese word segmentation (CWS) dataset.
The tags are in BIES scheme.
For more details see https://www.aclweb.org/anthology/C18-1307/
"""
_URL = "https://raw.githubusercontent.com/adapt-sjtu/AMTTL/master/medical_data/"
_TRAINING_FILE = "forum_train.txt"
_DEV_FILE = "forum_dev.txt"
_TEST_FILE = "forum_test.txt"
class AmttlConfig(datasets.BuilderConfig):
"""BuilderConfig for AMTTL"""
def __init__(self, **kwargs):
"""BuilderConfig for AMTTL.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(AmttlConfig, self).__init__(**kwargs)
class Amttl(datasets.GeneratorBasedBuilder):
"""AMTTL Chinese Word Segmentation dataset."""
BUILDER_CONFIGS = [
AmttlConfig(
name="amttl",
version=datasets.Version("1.0.0"),
description="AMTTL medical Chinese word segmentation dataset",
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"B",
"I",
"E",
"S",
]
)
),
}
),
supervised_keys=None,
homepage="https://www.aclweb.org/anthology/C18-1307/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAINING_FILE}",
"dev": f"{_URL}{_DEV_FILE}",
"test": f"{_URL}{_TEST_FILE}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
tags = []
for line in f:
line_stripped = line.strip()
if line_stripped == "":
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"tags": tags,
}
guid += 1
tokens = []
tags = []
else:
splits = line_stripped.split("\t")
if len(splits) == 1:
splits.append("O")
tokens.append(splits[0])
tags.append(splits[1])
# last example
yield guid, {
"id": str(guid),
"tokens": tokens,
"tags": tags,
}
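# Editor's note (inferred from the parsing loop above, not from the upstream data
# files themselves): each input line is "token<TAB>tag" with BIES tags, sentences
# are separated by blank lines, and a line without a tab is given the filler tag "O".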
|
examples/simple/stellar_lifetime_vs_mass.py | rknop/amuse | 131 | 12693153 | <reponame>rknop/amuse
# -*- coding: ascii -*-
"""
Calculates the stellar lifetime in a range of masses between
Mmax and Mmin using SSE (or another stellar evolution code)
and an analytic expression.
"""
from __future__ import print_function
import numpy
from optparse import OptionParser
from amuse.units import units
from amuse.datamodel import Particle
from amuse.plot import plot
from matplotlib import pyplot as plt
from amuse.community.sse.interface import SSE
se = None
def stellar_remnant_state(star):
return 10 <= star.stellar_type.value_in(units.stellar_type) < 16
def stellar_lifetime(mZAMS, z=0.02):
global se
if se is None:
se = SSE()
se.parameters.metallicity = z
se.particles.add_particle(Particle(mass=mZAMS))
while not stellar_remnant_state(se.particles[0]):
se.evolve_model()
t_end = se.particles[0].age
# tpe = se.particles[0].stellar_type
se.particles.remove_particle(se.particles[0])
return t_end
def power_law_fit_to_main_sequence_lifetime(mZAMS):
return 2 + 1.0E+4 / pow(mZAMS.value_in(units.MSun), 2.5) | units.Myr
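# Editor's note: quick sanity check of the fit above --
# 1 MSun:  2 + 1e4 / 1**2.5  ~ 1.0e4 Myr (~10 Gyr); 10 MSun: 2 + 1e4 / 10**2.5 ~ 33.6 Myr,
# so the analytic main-sequence lifetime falls steeply with increasing mass.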
def main(n=10, mmin=1.0, mmax=100, z=0.02):
dm = (mmax - mmin) / n
mZAMS = numpy.arange(mmin, mmax, dm) | units.MSun
mmin = mmin | units.MSun
mmax = mmax | units.MSun
print(mZAMS)
t_sse = [] | units.Myr
t_analytic = [] | units.Myr
for mi in mZAMS:
t_sse.append(stellar_lifetime(mi, z))
t_analytic.append(power_law_fit_to_main_sequence_lifetime(mi))
plot(mZAMS, t_sse, label="sse")
plot(mZAMS, t_analytic, label="analytic")
plt.loglog()
plt.legend()
plt.title("comparison between SSE and analytic with z=" + str(z))
plt.show()
def new_option_parser():
result = OptionParser()
result.add_option("-n", dest="n", type="int", default=10,
help="number of stars")
result.add_option("-m", dest="mmin", type="float", default=1.0,
help="Minimal mass [1.0] MSun")
result.add_option("-M", dest="mmax", type="float", default=100.0,
help="Maximal mass [100] MSun")
result.add_option("-z", dest="z", type="float", default=0.02,
help="metalicity [0.02]")
return result
if __name__ == "__main__":
o, arguments = new_option_parser().parse_args()
main(**o.__dict__)
|
common/markdown/markdown.py | ujlbu4/vas3k.club | 496 | 12693161 | <reponame>ujlbu4/vas3k.club
import mistune
from common.markdown.club_renderer import ClubRenderer
from common.markdown.email_renderer import EmailRenderer
from common.markdown.plain_renderer import PlainRenderer
def markdown_text(text, renderer=ClubRenderer):
markdown = mistune.create_markdown(
escape=True, renderer=renderer(), plugins=["strikethrough", "url"]
)
return (markdown(text) or "").strip()
def markdown_plain(text):
return markdown_text(text, renderer=PlainRenderer)
def markdown_email(text):
return markdown_text(text, renderer=EmailRenderer)
|
src/opnsense/scripts/ipsec/vici/test/test_protocol.py | johanneskastl/opnsense-core | 2,109 | 12693183 | import pytest
from ..protocol import Packet, Message, FiniteStream
from ..exception import DeserializationException
class TestPacket(object):
# test data definitions for outgoing packet types
cmd_request = b"\x00\x0c" b"command_type"
cmd_request_msg = b"\x00\x07" b"command" b"payload"
event_register = b"\x03\x0a" b"event_type"
event_unregister = b"\x04\x0a" b"event_type"
# test data definitions for incoming packet types
cmd_response = b"\x01" b"reply"
cmd_unknown = b"\x02"
event_confirm = b"\x05"
event_unknown = b"\x06"
event = b"\x07\x03" b"log" b"message"
def test_request(self):
assert Packet.request("command_type") == self.cmd_request
assert Packet.request("command", b"payload") == self.cmd_request_msg
def test_register_event(self):
assert Packet.register_event("event_type") == self.event_register
def test_unregister_event(self):
assert Packet.unregister_event("event_type") == self.event_unregister
def test_parse(self):
parsed_cmd_response = Packet.parse(self.cmd_response)
assert parsed_cmd_response.response_type == Packet.CMD_RESPONSE
assert parsed_cmd_response.payload.getvalue() == self.cmd_response
parsed_cmd_unknown = Packet.parse(self.cmd_unknown)
assert parsed_cmd_unknown.response_type == Packet.CMD_UNKNOWN
assert parsed_cmd_unknown.payload.getvalue() == self.cmd_unknown
parsed_event_confirm = Packet.parse(self.event_confirm)
assert parsed_event_confirm.response_type == Packet.EVENT_CONFIRM
assert parsed_event_confirm.payload.getvalue() == self.event_confirm
parsed_event_unknown = Packet.parse(self.event_unknown)
assert parsed_event_unknown.response_type == Packet.EVENT_UNKNOWN
assert parsed_event_unknown.payload.getvalue() == self.event_unknown
parsed_event = Packet.parse(self.event)
assert parsed_event.response_type == Packet.EVENT
assert parsed_event.payload.getvalue() == self.event
class TestMessage(object):
"""Message (de)serialization test."""
# data definitions for test of de(serialization)
# serialized messages holding a section
ser_sec_unclosed = b"\x01\x08unclosed"
ser_sec_single = b"\x01\x07section\x02"
ser_sec_nested = b"\x01\x05outer\x01\x0asubsection\x02\x02"
# serialized messages holding a list
ser_list_invalid = b"\x04\x07invalid\x05\x00\x02e1\x02\x03sec\x06"
ser_list_0_item = b"\x04\x05empty\x06"
ser_list_1_item = b"\x04\x01l\x05\x00\x02e1\x06"
ser_list_2_item = b"\x04\x01l\x05\x00\x02e1\x05\x00\x02e2\x06"
# serialized messages with key value pairs
ser_kv_pair = b"\<KEY>"
ser_kv_zero = b"\x03\x0azerolength\x00\x00"
# deserialized messages holding a section
des_sec_single = { "section": {} }
des_sec_nested = { "outer": { "subsection": {} } }
# deserialized messages holding a list
des_list_0_item = { "empty": [] }
des_list_1_item = { "l": [ b"e1" ] }
des_list_2_item = { "l": [ b"e1", b"e2" ] }
# deserialized messages with key value pairs
des_kv_pair = { "key": b"value" }
des_kv_zero = { "zerolength": b"" }
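    # Editor's note: byte-level reading of two of the samples above (type bytes as
    # implied by the variable names in this test data) --
    # ser_sec_single = \x01 (section start) \x07"section" (length-prefixed name)
    #                  \x02 (section end);
    # ser_kv_zero    = \x03 (key/value) \x0a"zerolength" (10-byte key)
    #                  \x00\x00 (16-bit value length of zero).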
def test_section_serialization(self):
assert Message.serialize(self.des_sec_single) == self.ser_sec_single
assert Message.serialize(self.des_sec_nested) == self.ser_sec_nested
def test_list_serialization(self):
assert Message.serialize(self.des_list_0_item) == self.ser_list_0_item
assert Message.serialize(self.des_list_1_item) == self.ser_list_1_item
assert Message.serialize(self.des_list_2_item) == self.ser_list_2_item
def test_key_serialization(self):
assert Message.serialize(self.des_kv_pair) == self.ser_kv_pair
assert Message.serialize(self.des_kv_zero) == self.ser_kv_zero
def test_section_deserialization(self):
single = Message.deserialize(FiniteStream(self.ser_sec_single))
nested = Message.deserialize(FiniteStream(self.ser_sec_nested))
assert single == self.des_sec_single
assert nested == self.des_sec_nested
with pytest.raises(DeserializationException):
Message.deserialize(FiniteStream(self.ser_sec_unclosed))
def test_list_deserialization(self):
l0 = Message.deserialize(FiniteStream(self.ser_list_0_item))
l1 = Message.deserialize(FiniteStream(self.ser_list_1_item))
l2 = Message.deserialize(FiniteStream(self.ser_list_2_item))
assert l0 == self.des_list_0_item
assert l1 == self.des_list_1_item
assert l2 == self.des_list_2_item
with pytest.raises(DeserializationException):
Message.deserialize(FiniteStream(self.ser_list_invalid))
def test_key_deserialization(self):
pair = Message.deserialize(FiniteStream(self.ser_kv_pair))
zerolength = Message.deserialize(FiniteStream(self.ser_kv_zero))
assert pair == self.des_kv_pair
assert zerolength == self.des_kv_zero
def test_roundtrip(self):
message = {
"key1": "value1",
"section1": {
"sub-section": {
"key2": b"value2",
},
"list1": [ "item1", "item2" ],
},
}
serialized_message = FiniteStream(Message.serialize(message))
deserialized_message = Message.deserialize(serialized_message)
# ensure that list items and key values remain as undecoded bytes
deserialized_section = deserialized_message["section1"]
assert deserialized_message["key1"] == b"value1"
assert deserialized_section["sub-section"]["key2"] == b"value2"
assert deserialized_section["list1"] == [ b"item1", b"item2" ]
|
src/ralph/reports/factories.py | DoNnMyTh/ralph | 1,668 | 12693211 | <reponame>DoNnMyTh/ralph<filename>src/ralph/reports/factories.py
# -*- coding: utf-8 -*-
import factory
from factory.django import DjangoModelFactory
from ralph.reports.models import Report, ReportLanguage, ReportTemplate
class ReportFactory(DjangoModelFactory):
name = factory.Sequence(lambda n: 'Report {}'.format(n))
class Meta:
model = Report
class ReportLanguageFactory(DjangoModelFactory):
name = factory.Sequence(lambda n: 'Report-lang {}'.format(n))
default = False
class Meta:
model = ReportLanguage
class ReportTemplateFactory(DjangoModelFactory):
template = factory.django.FileField(filename='the_file.dat')
language = factory.SubFactory(ReportLanguageFactory)
default = False
report = factory.SubFactory(ReportFactory)
class Meta:
model = ReportTemplate
|
tests/test_utils.py | upgradvisor/vyper | 1,347 | 12693220 | <filename>tests/test_utils.py
import pytest
from vyper.utils import annotate_source_code, indent
TEST_TEXT = """
test
lines
to
indent
"""[
1:-1
]
def test_indent_indents_text():
assert (
indent(TEST_TEXT, indent_chars="-", level=1)
== """
-test
-lines
-to
-indent
"""[
1:-1
]
)
assert (
indent(TEST_TEXT, indent_chars=" ", level=4)
== """
test
lines
to
indent
"""[
1:-1
]
)
assert (
indent(TEST_TEXT, indent_chars=[" ", "*", "-", "="], level=4)
== """
test
****lines
----to
====indent
"""[
1:-1
]
)
def test_indent_raises_value_errors():
with pytest.raises(
ValueError,
match="Must provide indentation chars for each line",
):
indent(TEST_TEXT, indent_chars=[" "], level=1)
with pytest.raises(
ValueError,
match="Unrecognized indentation characters value",
):
indent(TEST_TEXT, indent_chars=None, level=1) # type: ignore
TEST_SOURCE_CODE = r"""
# Attempts to display the line and column of violating code.
class ParserException(Exception):
def __init__(self, message='Error Message not found.', item=None):
self.message = message
self.lineno = None
self.col_offset = None
if isinstance(item, tuple): # is a position.
self.lineno, self.col_offset = item
elif item and hasattr(item, 'lineno'):
self.set_err_pos(item.lineno, item.col_offset)
if hasattr(item, 'source_code'):
self.source_code = item.source_code.splitlines()
def set_err_pos(self, lineno, col_offset):
if not self.lineno:
self.lineno = lineno
if not self.col_offset:
self.col_offset = col_offset
def __str__(self):
output = self.message
if self.lineno and hasattr(self, 'source_code'):
output = f'line {self.lineno}: {output}\n{self.source_code[self.lineno -1]}'
if self.col_offset:
col = '-' * self.col_offset + '^'
output += '\n' + col
elif self.lineno is not None and self.col_offset is not None:
output = f'line {self.lineno}:{self.col_offset} {output}'
return output
"""[
1:-1
]
def test_annotate_source_code_marks_positions_in_source_code():
annotation = annotate_source_code(
TEST_SOURCE_CODE,
22,
col_offset=16,
context_lines=0,
line_numbers=False,
)
assert (
annotation
== r"""
def __str__(self):
----------------^
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE,
22,
col_offset=15,
context_lines=1,
line_numbers=False,
)
assert (
annotation
== r"""
def __str__(self):
---------------^
output = self.message
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE,
22,
col_offset=20,
context_lines=2,
line_numbers=False,
)
assert (
annotation
== r"""
self.col_offset = col_offset
def __str__(self):
--------------------^
output = self.message
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE,
1,
col_offset=5,
context_lines=3,
line_numbers=True,
)
assert (
annotation
== r"""
---> 1 # Attempts to display the line and column of violating code.
------------^
2 class ParserException(Exception):
3 def __init__(self, message='Error Message not found.', item=None):
4 self.message = message
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE,
36,
col_offset=8,
context_lines=4,
line_numbers=True,
)
assert (
annotation
== r"""
32
33 elif self.lineno is not None and self.col_offset is not None:
34 output = f'line {self.lineno}:{self.col_offset} {output}'
35
---> 36 return output
----------------^
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE,
15,
col_offset=8,
context_lines=11,
line_numbers=True,
)
assert (
annotation
== r"""
4 self.message = message
5 self.lineno = None
6 self.col_offset = None
7
8 if isinstance(item, tuple): # is a position.
9 self.lineno, self.col_offset = item
10 elif item and hasattr(item, 'lineno'):
11 self.set_err_pos(item.lineno, item.col_offset)
12 if hasattr(item, 'source_code'):
13 self.source_code = item.source_code.splitlines()
14
---> 15 def set_err_pos(self, lineno, col_offset):
----------------^
16 if not self.lineno:
17 self.lineno = lineno
18
19 if not self.col_offset:
20 self.col_offset = col_offset
21
22 def __str__(self):
23 output = self.message
24
25 if self.lineno and hasattr(self, 'source_code'):
26
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE,
15,
col_offset=None,
context_lines=3,
line_numbers=True,
)
assert (
annotation
== r"""
12 if hasattr(item, 'source_code'):
13 self.source_code = item.source_code.splitlines()
14
---> 15 def set_err_pos(self, lineno, col_offset):
16 if not self.lineno:
17 self.lineno = lineno
18
"""[
1:-1
]
)
annotation = annotate_source_code(
TEST_SOURCE_CODE,
15,
col_offset=None,
context_lines=2,
line_numbers=False,
)
assert (
annotation
== r"""
self.source_code = item.source_code.splitlines()
def set_err_pos(self, lineno, col_offset):
if not self.lineno:
self.lineno = lineno
"""[
1:-1
]
)
@pytest.mark.parametrize(
"bad_lineno",
(-100, -1, 0, 45, 1000),
)
def test_annotate_source_code_raises_value_errors(bad_lineno):
with pytest.raises(
ValueError,
match="Line number is out of range",
):
annotate_source_code(TEST_SOURCE_CODE, bad_lineno)
|
utils/check-category.py | Belyenochi/apisix | 5,886 | 12693233 | <filename>utils/check-category.py
#!/usr/bin/env python
# coding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import sys
from os import path
EXT = ".md"
try:
UNICODE_EXISTS = bool(type(unicode))
except:
    # Python 3: the Py2 `unicode` builtin does not exist, fall back to str
unicode = str
def collect_fn(entries, topic):
if "id" in topic:
fn = topic["id"]
entries.append(fn)
elif "items" in topic:
for item in topic["items"]:
if isinstance(item, unicode):
entries.append(item)
else:
collect_fn(entries, item)
def check_category(root):
index = root + "config.json"
with open(index) as f:
entries = []
data = json.load(f)
for topic in data["sidebar"]:
collect_fn(entries, topic)
for e in entries:
fn = root + e + EXT
if not path.exists(fn):
print("Entry %s in the sidebar can't be found. Please remove it from %s."
% (fn, index))
return False
ignore_list = ["examples/plugins-hmac-auth-generate-signature", "config", "README"]
entries.extend(ignore_list)
existed_files = []
for parent, dirs, files in os.walk(root):
for fn in files:
existed_files.append(path.join(parent[len(root):], path.splitext(fn)[0]))
for fn in existed_files:
if fn not in entries:
print("File %s%s%s is not indexed. Please add it to %s." % (root, fn, EXT, index))
return False
return True
roots = ["docs/en/latest/", "docs/zh/latest/"]
for r in roots:
if not check_category(r):
sys.exit(-1)
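# Editor's note (illustrative only; the keys are the ones collect_fn() and
# check_category() read): each docs/<lang>/latest/config.json is expected to look
# roughly like
# {"sidebar": [
#     {"id": "architecture-design"},
#     {"items": ["plugins/limit-req", {"id": "plugins/cors"}]}
# ]}
# where every referenced id must exist as <id>.md and every .md file must be listed.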
|
tests/test_decompressor_stream_reader.py | odidev/python-zstandard | 316 | 12693238 | <reponame>odidev/python-zstandard
import io
import os
import unittest
import zstandard as zstd
from .common import (
CustomBytesIO,
)
class TestDecompressor_stream_reader(unittest.TestCase):
def test_context_manager(self):
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(b"foo") as reader:
with self.assertRaisesRegex(
ValueError, "cannot __enter__ multiple times"
):
with reader as reader2:
pass
def test_not_implemented(self):
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(b"foo") as reader:
with self.assertRaises(io.UnsupportedOperation):
reader.readline()
with self.assertRaises(io.UnsupportedOperation):
reader.readlines()
with self.assertRaises(io.UnsupportedOperation):
iter(reader)
with self.assertRaises(io.UnsupportedOperation):
next(reader)
with self.assertRaises(io.UnsupportedOperation):
reader.write(b"foo")
with self.assertRaises(io.UnsupportedOperation):
reader.writelines([])
def test_constant_methods(self):
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(b"foo") as reader:
self.assertFalse(reader.closed)
self.assertTrue(reader.readable())
self.assertFalse(reader.writable())
self.assertFalse(reader.seekable())
self.assertFalse(reader.isatty())
self.assertFalse(reader.closed)
self.assertIsNone(reader.flush())
self.assertFalse(reader.closed)
self.assertTrue(reader.closed)
def test_read_closed(self):
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(b"foo") as reader:
reader.close()
self.assertTrue(reader.closed)
with self.assertRaisesRegex(ValueError, "stream is closed"):
reader.read(1)
def test_read_sizes(self):
cctx = zstd.ZstdCompressor()
foo = cctx.compress(b"foo")
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(foo) as reader:
with self.assertRaisesRegex(
ValueError, "cannot read negative amounts less than -1"
):
reader.read(-2)
self.assertEqual(reader.read(0), b"")
self.assertEqual(reader.read(), b"foo")
def test_read_buffer(self):
cctx = zstd.ZstdCompressor()
source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(frame) as reader:
self.assertEqual(reader.tell(), 0)
# We should get entire frame in one read.
result = reader.read(8192)
self.assertEqual(result, source)
self.assertEqual(reader.tell(), len(source))
# Read after EOF should return empty bytes.
self.assertEqual(reader.read(1), b"")
self.assertEqual(reader.tell(), len(result))
self.assertTrue(reader.closed)
def test_read_buffer_small_chunks(self):
cctx = zstd.ZstdCompressor()
source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
chunks = []
with dctx.stream_reader(frame, read_size=1) as reader:
while True:
chunk = reader.read(1)
if not chunk:
break
chunks.append(chunk)
self.assertEqual(reader.tell(), sum(map(len, chunks)))
self.assertEqual(b"".join(chunks), source)
def test_read_stream(self):
cctx = zstd.ZstdCompressor()
source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(io.BytesIO(frame)) as reader:
self.assertEqual(reader.tell(), 0)
chunk = reader.read(8192)
self.assertEqual(chunk, source)
self.assertEqual(reader.tell(), len(source))
self.assertEqual(reader.read(1), b"")
self.assertEqual(reader.tell(), len(source))
self.assertFalse(reader.closed)
self.assertTrue(reader.closed)
def test_read_stream_small_chunks(self):
cctx = zstd.ZstdCompressor()
source = b"".join([b"foo" * 60, b"bar" * 60, b"baz" * 60])
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
chunks = []
with dctx.stream_reader(io.BytesIO(frame), read_size=1) as reader:
while True:
chunk = reader.read(1)
if not chunk:
break
chunks.append(chunk)
self.assertEqual(reader.tell(), sum(map(len, chunks)))
self.assertEqual(b"".join(chunks), source)
def test_close(self):
foo = zstd.ZstdCompressor().compress(b"foo" * 1024)
buffer = io.BytesIO(foo)
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(buffer)
reader.read(3)
self.assertFalse(reader.closed)
self.assertFalse(buffer.closed)
reader.close()
self.assertTrue(reader.closed)
self.assertTrue(buffer.closed)
with self.assertRaisesRegex(ValueError, "stream is closed"):
reader.read()
with self.assertRaisesRegex(ValueError, "stream is closed"):
with reader:
pass
# Context manager exit should not close stream.
buffer = io.BytesIO(foo)
reader = dctx.stream_reader(buffer)
with reader:
reader.read(3)
self.assertTrue(reader.closed)
self.assertTrue(buffer.closed)
# Context manager exit should close stream if an exception raised.
buffer = io.BytesIO(foo)
reader = dctx.stream_reader(buffer)
with self.assertRaisesRegex(Exception, "ignore"):
with reader:
reader.read(3)
raise Exception("ignore")
self.assertTrue(reader.closed)
self.assertTrue(buffer.closed)
# Test with non-file source variant.
with dctx.stream_reader(foo) as reader:
reader.read(3)
self.assertFalse(reader.closed)
self.assertTrue(reader.closed)
def test_close_closefd_false(self):
foo = zstd.ZstdCompressor().compress(b"foo" * 1024)
buffer = io.BytesIO(foo)
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(buffer, closefd=False)
reader.read(3)
self.assertFalse(reader.closed)
self.assertFalse(buffer.closed)
reader.close()
self.assertTrue(reader.closed)
self.assertFalse(buffer.closed)
with self.assertRaisesRegex(ValueError, "stream is closed"):
reader.read()
with self.assertRaisesRegex(ValueError, "stream is closed"):
with reader:
pass
# Context manager exit should not close stream.
buffer = io.BytesIO(foo)
reader = dctx.stream_reader(buffer, closefd=False)
with reader:
reader.read(3)
self.assertTrue(reader.closed)
self.assertFalse(buffer.closed)
# Context manager exit should close stream if an exception raised.
buffer = io.BytesIO(foo)
reader = dctx.stream_reader(buffer, closefd=False)
with self.assertRaisesRegex(Exception, "ignore"):
with reader:
reader.read(3)
raise Exception("ignore")
self.assertTrue(reader.closed)
self.assertFalse(buffer.closed)
# Test with non-file source variant.
with dctx.stream_reader(foo, closefd=False) as reader:
reader.read(3)
self.assertFalse(reader.closed)
self.assertTrue(reader.closed)
def test_read_after_exit(self):
cctx = zstd.ZstdCompressor()
frame = cctx.compress(b"foo" * 60)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(frame) as reader:
while reader.read(16):
pass
self.assertTrue(reader.closed)
with self.assertRaisesRegex(ValueError, "stream is closed"):
reader.read(10)
def test_illegal_seeks(self):
cctx = zstd.ZstdCompressor()
frame = cctx.compress(b"foo" * 60)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(frame) as reader:
with self.assertRaisesRegex(
OSError, "cannot seek to negative position"
):
reader.seek(-1, os.SEEK_SET)
reader.read(1)
with self.assertRaisesRegex(
OSError, "cannot seek zstd decompression stream backwards"
):
reader.seek(0, os.SEEK_SET)
with self.assertRaisesRegex(
OSError, "cannot seek zstd decompression stream backwards"
):
reader.seek(-1, os.SEEK_CUR)
with self.assertRaisesRegex(
OSError,
"zstd decompression streams cannot be seeked with SEEK_END",
):
reader.seek(0, os.SEEK_END)
reader.close()
with self.assertRaisesRegex(ValueError, "stream is closed"):
reader.seek(4, os.SEEK_SET)
with self.assertRaisesRegex(ValueError, "stream is closed"):
reader.seek(0)
def test_seek(self):
source = b"foobar" * 60
cctx = zstd.ZstdCompressor()
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(frame) as reader:
reader.seek(3)
self.assertEqual(reader.read(3), b"bar")
reader.seek(4, os.SEEK_CUR)
self.assertEqual(reader.read(2), b"ar")
def test_no_context_manager(self):
source = b"foobar" * 60
cctx = zstd.ZstdCompressor()
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(frame)
self.assertEqual(reader.read(6), b"foobar")
self.assertEqual(reader.read(18), b"foobar" * 3)
self.assertFalse(reader.closed)
# Calling close prevents subsequent use.
reader.close()
self.assertTrue(reader.closed)
with self.assertRaisesRegex(ValueError, "stream is closed"):
reader.read(6)
def test_read_after_error(self):
source = io.BytesIO(b"")
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(source)
with reader:
reader.read(0)
with self.assertRaisesRegex(ValueError, "stream is closed"):
with reader:
pass
def test_partial_read(self):
# Inspired by https://github.com/indygreg/python-zstandard/issues/71.
buffer = io.BytesIO()
cctx = zstd.ZstdCompressor()
writer = cctx.stream_writer(buffer)
writer.write(bytearray(os.urandom(1000000)))
writer.flush(zstd.FLUSH_FRAME)
buffer.seek(0)
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(buffer)
while True:
chunk = reader.read(8192)
if not chunk:
break
def test_read_multiple_frames(self):
cctx = zstd.ZstdCompressor()
source = io.BytesIO()
writer = cctx.stream_writer(source)
writer.write(b"foo")
writer.flush(zstd.FLUSH_FRAME)
writer.write(b"bar")
writer.flush(zstd.FLUSH_FRAME)
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(source.getvalue())
self.assertEqual(reader.read(2), b"fo")
self.assertEqual(reader.read(2), b"o")
self.assertEqual(reader.read(2), b"ba")
self.assertEqual(reader.read(2), b"r")
source.seek(0)
reader = dctx.stream_reader(source)
self.assertEqual(reader.read(2), b"fo")
self.assertEqual(reader.read(2), b"o")
self.assertEqual(reader.read(2), b"ba")
self.assertEqual(reader.read(2), b"r")
reader = dctx.stream_reader(source.getvalue())
self.assertEqual(reader.read(3), b"foo")
self.assertEqual(reader.read(3), b"bar")
source.seek(0)
reader = dctx.stream_reader(source)
self.assertEqual(reader.read(3), b"foo")
self.assertEqual(reader.read(3), b"bar")
reader = dctx.stream_reader(source.getvalue())
self.assertEqual(reader.read(4), b"foo")
self.assertEqual(reader.read(4), b"bar")
source.seek(0)
reader = dctx.stream_reader(source)
self.assertEqual(reader.read(4), b"foo")
self.assertEqual(reader.read(4), b"bar")
reader = dctx.stream_reader(source.getvalue())
self.assertEqual(reader.read(128), b"foo")
self.assertEqual(reader.read(128), b"bar")
source.seek(0)
reader = dctx.stream_reader(source)
self.assertEqual(reader.read(128), b"foo")
self.assertEqual(reader.read(128), b"bar")
# Now tests for reads spanning frames.
reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
self.assertEqual(reader.read(3), b"foo")
self.assertEqual(reader.read(3), b"bar")
source.seek(0)
reader = dctx.stream_reader(source, read_across_frames=True)
self.assertEqual(reader.read(3), b"foo")
self.assertEqual(reader.read(3), b"bar")
reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
self.assertEqual(reader.read(6), b"foobar")
source.seek(0)
reader = dctx.stream_reader(source, read_across_frames=True)
self.assertEqual(reader.read(6), b"foobar")
reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
self.assertEqual(reader.read(7), b"foobar")
source.seek(0)
reader = dctx.stream_reader(source, read_across_frames=True)
self.assertEqual(reader.read(7), b"foobar")
reader = dctx.stream_reader(source.getvalue(), read_across_frames=True)
self.assertEqual(reader.read(128), b"foobar")
source.seek(0)
reader = dctx.stream_reader(source, read_across_frames=True)
self.assertEqual(reader.read(128), b"foobar")
def test_readinto(self):
cctx = zstd.ZstdCompressor()
foo = cctx.compress(b"foo")
dctx = zstd.ZstdDecompressor()
# Attempting to readinto() a non-writable buffer fails.
# The exact exception varies based on the backend.
reader = dctx.stream_reader(foo)
with self.assertRaises(Exception):
reader.readinto(b"foobar")
# readinto() with sufficiently large destination.
b = bytearray(1024)
reader = dctx.stream_reader(foo)
self.assertEqual(reader.readinto(b), 3)
self.assertEqual(b[0:3], b"foo")
self.assertEqual(reader.readinto(b), 0)
self.assertEqual(b[0:3], b"foo")
# readinto() with small reads.
b = bytearray(1024)
reader = dctx.stream_reader(foo, read_size=1)
self.assertEqual(reader.readinto(b), 3)
self.assertEqual(b[0:3], b"foo")
# Too small destination buffer.
b = bytearray(2)
reader = dctx.stream_reader(foo)
self.assertEqual(reader.readinto(b), 2)
self.assertEqual(b[:], b"fo")
def test_readinto1(self):
cctx = zstd.ZstdCompressor()
foo = cctx.compress(b"foo")
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(foo)
with self.assertRaises(Exception):
reader.readinto1(b"foobar")
# Sufficiently large destination.
b = bytearray(1024)
reader = dctx.stream_reader(foo)
self.assertEqual(reader.readinto1(b), 3)
self.assertEqual(b[0:3], b"foo")
self.assertEqual(reader.readinto1(b), 0)
self.assertEqual(b[0:3], b"foo")
# readinto() with small reads.
b = bytearray(1024)
reader = dctx.stream_reader(foo, read_size=1)
self.assertEqual(reader.readinto1(b), 3)
self.assertEqual(b[0:3], b"foo")
# Too small destination buffer.
b = bytearray(2)
reader = dctx.stream_reader(foo)
self.assertEqual(reader.readinto1(b), 2)
self.assertEqual(b[:], b"fo")
def test_readall(self):
cctx = zstd.ZstdCompressor()
foo = cctx.compress(b"foo")
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(foo)
self.assertEqual(reader.readall(), b"foo")
def test_read1(self):
cctx = zstd.ZstdCompressor()
foo = cctx.compress(b"foo")
dctx = zstd.ZstdDecompressor()
b = CustomBytesIO(foo)
reader = dctx.stream_reader(b)
self.assertEqual(reader.read1(), b"foo")
self.assertEqual(b._read_count, 1)
b = CustomBytesIO(foo)
reader = dctx.stream_reader(b)
self.assertEqual(reader.read1(0), b"")
self.assertEqual(reader.read1(2), b"fo")
self.assertEqual(b._read_count, 1)
self.assertEqual(reader.read1(1), b"o")
self.assertEqual(b._read_count, 1)
self.assertEqual(reader.read1(1), b"")
self.assertEqual(b._read_count, 2)
def test_read_lines(self):
cctx = zstd.ZstdCompressor()
source = b"\n".join(
("line %d" % i).encode("ascii") for i in range(1024)
)
frame = cctx.compress(source)
dctx = zstd.ZstdDecompressor()
reader = dctx.stream_reader(frame)
tr = io.TextIOWrapper(reader, encoding="utf-8")
lines = []
for line in tr:
lines.append(line.encode("utf-8"))
self.assertEqual(len(lines), 1024)
self.assertEqual(b"".join(lines), source)
reader = dctx.stream_reader(frame)
tr = io.TextIOWrapper(reader, encoding="utf-8")
lines = tr.readlines()
self.assertEqual(len(lines), 1024)
self.assertEqual("".join(lines).encode("utf-8"), source)
reader = dctx.stream_reader(frame)
tr = io.TextIOWrapper(reader, encoding="utf-8")
lines = []
while True:
line = tr.readline()
if not line:
break
lines.append(line.encode("utf-8"))
self.assertEqual(len(lines), 1024)
self.assertEqual(b"".join(lines), source)
|
tick/base_model/__init__.py | sumau/tick | 411 | 12693279 | <gh_stars>100-1000
# License: BSD 3 clause
from .model import Model
from .model_first_order import ModelFirstOrder
from .model_labels_features import ModelLabelsFeatures
from .model_second_order import ModelSecondOrder
from .model_self_concordant import ModelSelfConcordant
from .model_lipschitz import ModelLipschitz
from .model_generalized_linear import ModelGeneralizedLinear
from .model import LOSS
from .model import GRAD
from .model import LOSS_AND_GRAD
from .model import HESSIAN_NORM
from .model import N_CALLS_LOSS
from .model import N_CALLS_GRAD
from .model import N_CALLS_LOSS_AND_GRAD
from .model import N_CALLS_HESSIAN_NORM
from .model import PASS_OVER_DATA
__all__ = [
"Model",
"ModelFirstOrder",
"ModelSecondOrder",
"ModelLabelsFeatures",
"ModelSelfConcordant",
"ModelGeneralizedLinear",
"ModelLipschitz",
]
|
carla/recourse_methods/catalog/clue/library/clue_ml/AE_models/AE/fc_gauss_cat.py | jayanthyetukuri/CARLA | 140 | 12693305 | <gh_stars>100-1000
from __future__ import division
import numpy as np
import torch.backends.cudnn as cudnn
from torch.distributions import kl_divergence
from torch.distributions.normal import Normal
from carla import log
from carla.recourse_methods.catalog.clue.library.clue_ml.src.gauss_cat import *
from carla.recourse_methods.catalog.clue.library.clue_ml.src.probability import (
normal_parse_params,
)
from carla.recourse_methods.catalog.clue.library.clue_ml.src.radam import RAdam
from carla.recourse_methods.catalog.clue.library.clue_ml.src.utils import (
BaseNet,
to_variable,
)
from .models import MLP_preact_generator_net, MLP_preact_recognition_net
# TODO: implement for std changeable gaussian instead of rms
class VAE_gauss_cat(nn.Module):
def __init__(self, input_dim_vec, width, depth, latent_dim, pred_sig=False):
super(VAE_gauss_cat, self).__init__()
input_dim = 0
self.input_dim_vec = input_dim_vec
for e in input_dim_vec:
input_dim += e
self.encoder = MLP_preact_recognition_net(input_dim, width, depth, latent_dim)
if pred_sig:
raise NotImplementedError()
else:
self.decoder = MLP_preact_generator_net(input_dim, width, depth, latent_dim)
self.rec_loglike = rms_cat_loglike(self.input_dim_vec, reduction="none")
self.pred_sig = pred_sig
def encode(self, x):
"""Works with flattened representATION"""
approx_post_params = self.encoder(x)
approx_post = normal_parse_params(approx_post_params, 1e-3)
return approx_post
def decode(self, z_sample):
"""Works with flattened representATION"""
rec_params = self.decoder(z_sample)
return rec_params
def vlb(self, prior, approx_post, x, rec_params):
"""Works with flattened representATION"""
if self.pred_sig:
pass
else:
rec = self.rec_loglike(rec_params, x).view(x.shape[0], -1).sum(-1)
kl = kl_divergence(approx_post, prior).view(x.shape[0], -1).sum(-1)
return rec - kl
def iwlb(self, prior, approx_post, x, K=50):
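        # Importance-weighted lower bound estimated with K samples from the approximate posterior.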
estimates = []
for i in range(K):
latent = approx_post.rsample()
rec_params = self.decode(latent)
if self.pred_sig:
pass
else:
rec_loglike = (
self.rec_loglike(rec_params, x).view(x.shape[0], -1).sum(-1)
)
prior_log_prob = prior.log_prob(latent)
prior_log_prob = prior_log_prob.view(x.shape[0], -1)
prior_log_prob = prior_log_prob.sum(-1)
proposal_log_prob = approx_post.log_prob(latent)
proposal_log_prob = proposal_log_prob.view(x.shape[0], -1)
proposal_log_prob = proposal_log_prob.sum(-1)
estimate = rec_loglike + prior_log_prob - proposal_log_prob
estimates.append(estimate[:, None])
return torch.logsumexp(torch.cat(estimates, 1), 1) - np.log(K)
class VAE_gauss_cat_net(BaseNet):
def __init__(
self,
input_dim_vec,
width,
depth,
latent_dim,
pred_sig=False,
lr=1e-3,
cuda=True,
flatten=True,
):
super(VAE_gauss_cat_net, self).__init__()
log.info("VAE_gauss_net")
self.cuda = cuda
self.input_dim = 0
self.input_dim_vec = input_dim_vec
for e in self.input_dim_vec:
self.input_dim += e
self.flatten = flatten
if not self.flatten:
pass
self.width = width
self.depth = depth
self.latent_dim = latent_dim
self.lr = lr
self.pred_sig = pred_sig
self.create_net()
self.create_opt()
self.epoch = 0
self.schedule = None
if self.cuda:
            self.prior = Normal(
loc=torch.zeros(latent_dim).cuda(), scale=torch.ones(latent_dim).cuda()
)
else:
self.prior = Normal(
loc=torch.zeros(latent_dim), scale=torch.ones(latent_dim)
)
self.vlb_scale = 1 / len(
self.input_dim_vec
) # scale for dimensions of input so we can use same LR always
def create_net(self):
torch.manual_seed(42)
torch.cuda.manual_seed(42)
self.model = VAE_gauss_cat(
self.input_dim_vec, self.width, self.depth, self.latent_dim, self.pred_sig
)
if self.cuda:
self.model = self.model.cuda()
cudnn.benchmark = True
log.info("Total params: %.2fM" % (self.get_nb_parameters() / 1000000.0))
def create_opt(self):
self.optimizer = RAdam(self.model.parameters(), lr=self.lr)
def fit(self, x):
self.set_mode_train(train=True)
if self.flatten:
x_flat = gauss_cat_to_flat(x, self.input_dim_vec)
else:
x_flat = x
x = flat_to_gauss_cat(x, self.input_dim_vec)
x, x_flat = to_variable(var=(x, x_flat), cuda=self.cuda)
self.optimizer.zero_grad()
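        # One training step: encode, draw a reparameterised latent sample, decode, maximise the VLB.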
approx_post = self.model.encode(x_flat)
z_sample = approx_post.rsample()
rec_params = self.model.decode(z_sample)
vlb = self.model.vlb(self.prior, approx_post, x, rec_params)
loss = (-vlb * self.vlb_scale).mean()
loss.backward()
self.optimizer.step()
return vlb.mean().item(), rec_params
def eval(self, x, sample=False):
self.set_mode_train(train=False)
if self.flatten:
x_flat = gauss_cat_to_flat(x, self.input_dim_vec)
else:
x_flat = x
x = flat_to_gauss_cat(x, self.input_dim_vec)
x, x_flat = to_variable(var=(x, x_flat), cuda=self.cuda)
approx_post = self.model.encode(x_flat)
if sample:
z_sample = approx_post.sample()
else:
z_sample = approx_post.loc
rec_params = self.model.decode(z_sample)
vlb = self.model.vlb(self.prior, approx_post, x, rec_params)
return vlb.mean().item(), rec_params
def eval_iw(self, x, k=50):
self.set_mode_train(train=False)
if self.flatten:
x_flat = gauss_cat_to_flat(x, self.input_dim_vec)
else:
x_flat = x
x = flat_to_gauss_cat(x, self.input_dim_vec)
x, x_flat = to_variable(var=(x, x_flat), cuda=self.cuda)
        approx_post = self.model.encode(x_flat)
iw_lb = self.model.iwlb(self.prior, approx_post, x, k)
return iw_lb.mean().item()
def recongnition(self, x, grad=False, flatten=None):
if flatten is None:
flatten = self.flatten
if flatten and grad:
raise Exception("flatten and grad options are not compatible")
self.set_mode_train(train=False)
if flatten:
x = gauss_cat_to_flat(x, self.input_dim_vec)
if grad:
if not x.requires_grad:
x.requires_grad = True
else:
(x,) = to_variable(var=(x,), volatile=True, cuda=self.cuda)
approx_post = self.model.encode(x)
return approx_post
def regenerate(self, z, grad=False, unflatten=False):
if unflatten and grad:
raise Exception("flatten and grad options are not compatible")
self.set_mode_train(train=False)
if grad:
if not z.requires_grad:
z.requires_grad = True
else:
(z,) = to_variable(var=(z,), volatile=True, cuda=self.cuda)
out = self.model.decode(z)
if unflatten:
out = flat_to_gauss_cat(out, self.input_dim_vec)
else:
out = selective_softmax(out, self.input_dim_vec, grad=grad)
if self.pred_sig:
raise Exception("Not implemented")
else:
return out
|
tests/brevitas/test_brevitas_qlinear.py | AlexMontgomerie/finn | 283 | 12693328 | <reponame>AlexMontgomerie/finn
# Copyright (c) 2021, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import os
import numpy as np
import torch
import brevitas.onnx as bo
from brevitas.nn import QuantLinear
from brevitas.core.quant import QuantType
from finn.core.modelwrapper import ModelWrapper
from finn.core.datatype import DataType
import finn.core.onnx_exec as oxe
from finn.transformation.infer_shapes import InferShapes
from finn.util.basic import gen_finn_dt_tensor
export_onnx_path = "test_brevitas_qlinear.onnx"
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("out_features", [4])
@pytest.mark.parametrize("in_features", [3])
@pytest.mark.parametrize("w_bits", [4])
@pytest.mark.parametrize("i_dtype", [DataType.UINT4])
def test_brevitas_qlinear(bias, out_features, in_features, w_bits, i_dtype):
i_shape = (1, in_features)
w_shape = (out_features, in_features)
b_linear = QuantLinear(
out_features=out_features,
in_features=in_features,
bias=bias,
bias_quant_type=QuantType.FP,
weight_bit_width=w_bits,
weight_quant_type=QuantType.INT,
weight_scaling_per_output_channel=True,
)
weight_tensor_fp = np.random.uniform(low=-1.0, high=1.0, size=w_shape).astype(
np.float32
)
b_linear.weight.data = torch.from_numpy(weight_tensor_fp)
b_linear.eval()
bo.export_finn_onnx(b_linear, i_shape, export_onnx_path)
model = ModelWrapper(export_onnx_path)
model = model.transform(InferShapes())
inp_tensor = gen_finn_dt_tensor(i_dtype, i_shape)
idict = {model.graph.input[0].name: inp_tensor}
odict = oxe.execute_onnx(model, idict, True)
produced = odict[model.graph.output[0].name]
inp_tensor = torch.from_numpy(inp_tensor).float()
expected = b_linear.forward(inp_tensor).detach().numpy()
assert np.isclose(produced, expected, atol=1e-3).all()
os.remove(export_onnx_path)
|
image-generation/wgan/train.py | AaratiAkkapeddi/nnabla-examples | 228 | 12693359 | <reponame>AaratiAkkapeddi/nnabla-examples<filename>image-generation/wgan/train.py
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed, MonitorImageTile
import nnabla.utils.save as save
from nnabla.ext_utils import get_extension_context
from args import get_args, save_args
from helpers import denormalize
from models import generator, discriminator, gan_loss
from cifar10_data import data_iterator_cifar10
def train(args):
# Context
ctx = get_extension_context(
args.context, device_id=args.device_id, type_config=args.type_config)
nn.set_default_context(ctx)
# Args
latent = args.latent
maps = args.maps
batch_size = args.batch_size
image_size = args.image_size
lambda_ = args.lambda_
# Model
# generator loss
z = nn.Variable([batch_size, latent])
x_fake = generator(z, maps=maps, up=args.up).apply(persistent=True)
p_fake = discriminator(x_fake, maps=maps)
loss_gen = gan_loss(p_fake).apply(persistent=True)
# discriminator loss
p_fake = discriminator(x_fake, maps=maps)
x_real = nn.Variable([batch_size, 3, image_size, image_size])
p_real = discriminator(x_real, maps=maps)
loss_dis = gan_loss(p_fake, p_real).apply(persistent=True)
# gradient penalty
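    # WGAN-GP: evaluate the critic on random interpolations of real and fake
    # samples and penalize the squared deviation of its gradient norm from 1.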
eps = F.rand(shape=[batch_size, 1, 1, 1])
x_rmix = eps * x_real + (1.0 - eps) * x_fake
p_rmix = discriminator(x_rmix, maps=maps)
x_rmix.need_grad = True # Enabling gradient computation for double backward
grads = nn.grad([p_rmix], [x_rmix])
l2norms = [F.sum(g ** 2.0, [1, 2, 3]) ** 0.5 for g in grads]
gp = sum([F.mean((l - 1.0) ** 2.0) for l in l2norms])
loss_dis += lambda_ * gp
# generator with fixed value for test
z_test = nn.Variable.from_numpy_array(np.random.randn(batch_size, latent))
x_test = generator(z_test, maps=maps, test=True,
up=args.up).apply(persistent=True)
# Solver
solver_gen = S.Adam(args.lrg, args.beta1, args.beta2)
solver_dis = S.Adam(args.lrd, args.beta1, args.beta2)
with nn.parameter_scope("generator"):
params_gen = nn.get_parameters()
solver_gen.set_parameters(params_gen)
with nn.parameter_scope("discriminator"):
params_dis = nn.get_parameters()
solver_dis.set_parameters(params_dis)
# Monitor
monitor = Monitor(args.monitor_path)
monitor_loss_gen = MonitorSeries(
"Generator Loss", monitor, interval=10)
monitor_loss_cri = MonitorSeries(
"Negative Critic Loss", monitor, interval=10)
monitor_time = MonitorTimeElapsed(
"Training Time", monitor, interval=10)
monitor_image_tile_train = MonitorImageTile("Image Tile Train", monitor,
num_images=batch_size,
interval=1,
normalize_method=denormalize)
monitor_image_tile_test = MonitorImageTile("Image Tile Test", monitor,
num_images=batch_size,
interval=1,
normalize_method=denormalize)
# Data Iterator
di = data_iterator_cifar10(batch_size, True)
# Train loop
for i in range(args.max_iter):
# Train discriminator
x_fake.need_grad = False # no need backward to generator
for _ in range(args.n_critic):
solver_dis.zero_grad()
x_real.d = di.next()[0] / 127.5 - 1.0
z.d = np.random.randn(batch_size, latent)
loss_dis.forward(clear_no_need_grad=True)
loss_dis.backward(clear_buffer=True)
solver_dis.update()
# Train generator
x_fake.need_grad = True # need backward to generator
solver_gen.zero_grad()
z.d = np.random.randn(batch_size, latent)
loss_gen.forward(clear_no_need_grad=True)
loss_gen.backward(clear_buffer=True)
solver_gen.update()
# Monitor
monitor_loss_gen.add(i, loss_gen.d)
monitor_loss_cri.add(i, -loss_dis.d)
monitor_time.add(i)
# Save
if i % args.save_interval == 0:
monitor_image_tile_train.add(i, x_fake)
monitor_image_tile_test.add(i, x_test)
nn.save_parameters(os.path.join(
args.monitor_path, "params_{}.h5".format(i)))
# Last
x_test.forward(clear_buffer=True)
nn.save_parameters(os.path.join(
args.monitor_path, "params_{}.h5".format(i)))
monitor_image_tile_train.add(i, x_fake)
monitor_image_tile_test.add(i, x_test)
def main():
args = get_args()
save_args(args, "train")
train(args)
if __name__ == '__main__':
main()
|
pyEX/options/options.py | adamklaff/pyEX | 335 | 12693368 | # *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas as pd
from deprecation import deprecated
from ..common import _get, _raiseIfNotStr, _toDatetime, _timeseriesWrapper
from ..timeseries import timeSeries
def optionExpirations(symbol, token="", version="stable", filter="", format="json"):
"""Returns end of day options data
https://iexcloud.io/docs/api/#options
9:30am-5pm ET Mon-Fri
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
return _get(
"stock/" + symbol + "/options",
token=token,
version=version,
filter=filter,
format=format,
)
@deprecated(details="Deprecated: Migrate to `options`")
def stockOptions(
symbol,
expiration,
side="",
token="",
version="stable",
filter="",
format="json",
):
"""Returns end of day options data
https://iexcloud.io/docs/api/#options
9:30am-5pm ET Mon-Fri
Args:
symbol (str): Ticker to request
expiration (str): Expiration date
side (str): Side (optional)
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
if side:
return _get(
"stock/{symbol}/options/{expiration}/{side}".format(
symbol=symbol, expiration=expiration, side=side
),
token=token,
version=version,
filter=filter,
format=format,
)
return _get(
"stock/{symbol}/options/{expiration}/".format(
symbol=symbol, expiration=expiration
),
token=token,
version=version,
filter=filter,
format=format,
)
@wraps(stockOptions)
def stockOptionsDF(*args, **kwargs):
return _toDatetime(pd.DataFrame(stockOptions(*args, **kwargs)), tcols=["date"])
def options(
contract, token="", version="stable", filter="", format="json", **timeseries_kwargs
):
"""Options EOD prices
Args:
contract (str): Specific dated option contract, e.g. SPY20210714C00475000
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(contract)
_timeseriesWrapper(timeseries_kwargs)
return timeSeries(
id=contract,
key="chart",
token=token,
version=version,
overrideBase="options",
filter=filter,
format=format,
**timeseries_kwargs
)
@wraps(options)
def optionsDF(*args, **kwargs):
return _toDatetime(
pd.DataFrame(options(*args, **kwargs)),
reformatcols=["datetime", "date", "updated"],
)
|
demos/speech_recognition_deepspeech_demo/python/asr_utils/rnn_seq_pipeline.py | APrigarina/open_model_zoo | 1,031 | 12693391 | #
# Copyright (C) 2019-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# This file is based in part on deepspeech_openvino_0.5.py by <NAME> at
# https://github.com/openvinotoolkit/open_model_zoo/pull/419, commit 529805d011d9b405f142b2b40f4d202bd403a4f1 on Sep 19, 2019.
#
from copy import deepcopy
import numpy as np
from asr_utils.pipelines import BlockedSeqPipelineStage
class RnnSeqPipelineStage(BlockedSeqPipelineStage):
def __init__(self, profile, ie, model, device='CPU'):
"""
Load/compile to the target device the IE IR file with the network and initialize the pipeline stage.
profile (dict), a dict with pre/post-processing parameters, see profiles.py
ie (IECore), IECore object for model loading/compilation/inference
model (str), filename of .xml IR file
device (str), inferemnce device
"""
self.p = deepcopy(profile)
assert self.p['num_context_frames'] % 2 == 1, "num_context_frames must be odd"
padding_len = self.p['num_context_frames'] // 2
super().__init__(
block_len=16, context_len=self.p['num_context_frames'] - 1,
left_padding_len=padding_len, right_padding_len=padding_len,
padding_shape=(self.p['num_mfcc_dct_coefs'],), cut_alignment=True)
net = ie.read_network(model=model)
self.exec_net = ie.load_network(network=net, device_name=device)
def _reset_state(self):
super()._reset_state()
self._rnn_state = None
def process_data(self, data, finish=False):
if data is not None:
assert len(data.shape) == 2
return super().process_data(data, finish=finish)
def _process_blocks(self, buffer, finish=False):
assert buffer.shape[0] >= self._block_len + self._context_len
processed = []
for start_pos in range(self._context_len, buffer.shape[0] - self._block_len + 1, self._block_len):
block = buffer[start_pos - self._context_len:start_pos + self._block_len]
processed.append(self._process_block(block, finish=finish and start_pos + self._block_len >= buffer.shape[0]))
assert not self._cut_alignment or processed[-1].shape[0] == self._block_len, "Networks with stride != 1 are not supported"
# Here start_pos is its value on the last iteration of the loop
buffer_skip_len = start_pos + self._block_len - self._context_len
return processed, buffer_skip_len
def _process_block(self, mfcc_features, finish=False):
assert mfcc_features.shape[0] == self._block_len + self._context_len, "Wrong data length: _process_block() accepts a single block of data"
# Create a view into the array with overlapping strides to simulate convolution with FC.
# NB: Replacing this and the first FC layer with conv1d may improve speed a little.
mfcc_features = np.lib.stride_tricks.as_strided(
mfcc_features,
(self._block_len, self._context_len + 1, self.p['num_mfcc_dct_coefs']),
(mfcc_features.strides[0], mfcc_features.strides[0], mfcc_features.strides[1]),
writeable = False,
)
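        # The strided view has shape [block_len, context+1, num_mfcc_dct_coefs] and shares memory with the input.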
if self._rnn_state is None:
state_h = np.zeros(self.exec_net.input_info[self.p['in_state_h']].input_data.shape)
state_c = np.zeros(self.exec_net.input_info[self.p['in_state_c']].input_data.shape)
else:
state_h, state_c = self._rnn_state
infer_res = self.exec_net.infer(inputs={
self.p['in_state_c']: state_c,
self.p['in_state_h']: state_h,
self.p['in_data']: [mfcc_features],
})
state_c = infer_res[self.p['out_state_c']]
state_h = infer_res[self.p['out_state_h']]
self._rnn_state = (state_h, state_c)
probs = infer_res[self.p['out_data']].squeeze(1)
return probs
|
desktop/core/ext-py/simplejson/simplejson/tests/test_float.py | vinaymundada27/Hue | 550 | 12693401 | <reponame>vinaymundada27/Hue<filename>desktop/core/ext-py/simplejson/simplejson/tests/test_float.py
import math
from unittest import TestCase
import simplejson as json
class TestFloat(TestCase):
def test_floats(self):
for num in [1617161771.7650001, math.pi, math.pi**100, math.pi**-100, 3.1]:
self.assertEquals(float(json.dumps(num)), num)
self.assertEquals(json.loads(json.dumps(num)), num)
def test_ints(self):
for num in [1, 1L, 1<<32, 1<<64]:
self.assertEquals(json.dumps(num), str(num))
self.assertEquals(int(json.dumps(num)), num)
|
pytext/models/test/transformer_sentence_encoder_test.py | baronrustamov/pytext | 6,199 | 12693441 | <filename>pytext/models/test/transformer_sentence_encoder_test.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
from pytext.models.representations.transformer_sentence_encoder import (
TransformerSentenceEncoder,
)
class TransformerSentenceEncoderTest(unittest.TestCase):
def setUp(self):
self.batch_size = 10
self.num_tokens = 20
self.embedding_dim = 1024
self.vocab_size = 1000
self.padding_idx = 0
self.num_encoder_layers = 6
# Generate a tensor of token ids as input tokens
self.tokens = (
torch.randint(5, 1000, (self.batch_size, self.num_tokens))
).long()
self.lengths = torch.tensor([self.num_tokens])
self.pad_mask = (torch.ones(self.batch_size, self.num_tokens)).long()
self.segment_labels = (torch.ones(self.batch_size, self.num_tokens)).long()
self.positions = None
def test_monolingual_transformer_sentence_encoder(self):
input_tuple = (self.tokens, self.pad_mask, self.segment_labels, self.positions)
sentence_encoder = TransformerSentenceEncoder.from_config(
TransformerSentenceEncoder.Config(
embedding_dim=self.embedding_dim,
num_encoder_layers=self.num_encoder_layers,
multilingual=False,
),
output_encoded_layers=True,
padding_idx=self.padding_idx,
vocab_size=self.vocab_size,
)
encoded_layers, pooled_outputs = sentence_encoder(input_tuple)
# Check sizes for pooled output
self.assertEqual(pooled_outputs.size()[0], self.batch_size)
self.assertEqual(pooled_outputs.size()[1], self.embedding_dim)
# Check sizes for encoded_layers
self.assertEqual(encoded_layers.__len__(), self.num_encoder_layers + 1)
self.assertEqual(encoded_layers[-1].size()[0], self.batch_size)
self.assertEqual(encoded_layers[-1].size()[1], self.num_tokens)
self.assertEqual(encoded_layers[-1].size()[2], self.embedding_dim)
def test_multilingual_transformer_sentence_encoder(self):
input_tuple = (self.tokens, self.pad_mask, self.segment_labels, self.positions)
sentence_encoder = TransformerSentenceEncoder.from_config(
TransformerSentenceEncoder.Config(
embedding_dim=self.embedding_dim,
num_encoder_layers=self.num_encoder_layers,
multilingual=True,
),
output_encoded_layers=True,
padding_idx=self.padding_idx,
vocab_size=self.vocab_size,
)
encoded_layers, pooled_outputs = sentence_encoder(input_tuple)
# Check sizes for pooled output
self.assertEqual(pooled_outputs.size()[0], self.batch_size)
self.assertEqual(pooled_outputs.size()[1], self.embedding_dim)
# Check sizes for encoded_layers
self.assertEqual(encoded_layers.__len__(), self.num_encoder_layers + 1)
self.assertEqual(encoded_layers[-1].size()[0], self.batch_size)
self.assertEqual(encoded_layers[-1].size()[1], self.num_tokens)
self.assertEqual(encoded_layers[-1].size()[2], self.embedding_dim)
|
src/gausskernel/dbmind/tools/ai_manager/module/index_advisor/index_main.py | Yanci0/openGauss-server | 360 | 12693449 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################################
# Copyright (c): 2012-2021, Huawei Tech. Co., Ltd.
# FileName : index_main.py
# Version : V1.0.0
# Date : 2021-03-01
# Description : Main entrance of module index advisor
#############################################################################
from module.index_advisor.install import Installer
from module.index_advisor.uninstall import UnInstaller
from config_cabin.config import PROJECT_PATH
TASK_MAPPING = {
'install': Installer,
'uninstall': UnInstaller
}
class IndexAdvisor(object):
def __init__(self, **params_dict):
self.args_dict = params_dict
self.action = self.args_dict.get('action')
self.package_path = self.args_dict.get('package_path')
self.project_path = PROJECT_PATH
self.version = self.args_dict.get('version')
self.install_path = self.args_dict.get('install_path')
self.task = None
def init_globals(self):
self.task = TASK_MAPPING[self.action](**self.args_dict)
def run(self):
self.init_globals()
self.task.run()
|
env/env.py | 0xflotus/DeepMimic | 1,812 | 12693452 | from abc import ABC, abstractmethod
import numpy as np
from enum import Enum
from learning.normalizer import Normalizer
class Env(ABC):
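    """Abstract base class describing the environment interface used by the RL code."""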
class Terminate(Enum):
Null = 0
Fail = 1
Succ = 2
def __init__(self, args, enable_draw):
self.enable_draw = enable_draw
return
@abstractmethod
def update(self, timestep):
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
def get_time(self):
pass
@abstractmethod
def get_name(self):
pass
# rendering and UI interface
def draw(self):
pass
def keyboard(self, key, x, y):
pass
def mouse_click(self, button, state, x, y):
pass
def mouse_move(self, x, y):
pass
def reshape(self, w, h):
pass
def shutdown(self):
pass
def is_done(self):
return False
def set_playback_speed(self, speed):
pass
def set_updates_per_sec(self, updates_per_sec):
pass
@abstractmethod
def get_win_width(self):
pass
@abstractmethod
def get_win_height(self):
pass
def get_num_update_substeps(self):
return 1
# rl interface
@abstractmethod
def is_rl_scene(self):
return False
@abstractmethod
def get_num_agents(self):
return 0
@abstractmethod
def need_new_action(self, agent_id):
return False
@abstractmethod
def record_state(self, agent_id):
pass
@abstractmethod
def record_goal(self, agent_id):
pass
@abstractmethod
def set_action(self, agent_id):
pass
@abstractmethod
def get_action_space(self, agent_id):
pass
@abstractmethod
def get_state_size(self, agent_id):
pass
@abstractmethod
def get_goal_size(self, agent_id):
pass
@abstractmethod
def get_action_size(self, agent_id):
pass
@abstractmethod
def get_num_actions(self, agent_id):
pass
@abstractmethod
def log_val(self, agent_id, val):
pass
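    # Default normalizer parameters: zero offsets and unit scales; subclasses may override.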
def build_state_offset(self, agent_id):
state_size = self.get_state_size(agent_id)
return np.zeros(state_size)
def build_state_scale(self, agent_id):
state_size = self.get_state_size(agent_id)
return np.ones(state_size)
def build_goal_offset(self, agent_id):
goal_size = self.get_goal_size(agent_id)
return np.zeros(goal_size)
def build_goal_scale(self, agent_id):
goal_size = self.get_goal_size(agent_id)
return np.ones(goal_size)
    def build_action_offset(self, agent_id):
        action_size = self.get_action_size(agent_id)
        return np.zeros(action_size)
    def build_action_scale(self, agent_id):
        action_size = self.get_action_size(agent_id)
        return np.ones(action_size)
    def build_action_bound_min(self, agent_id):
        action_size = self.get_action_size(agent_id)
        return -np.inf * np.ones(action_size)
    def build_action_bound_max(self, agent_id):
        action_size = self.get_action_size(agent_id)
        return np.inf * np.ones(action_size)
def build_state_norm_groups(self, agent_id):
state_size = self.get_state_size(agent_id)
return Normalizer.NORM_GROUP_SINGLE * np.ones(state_size, dtype=np.int32)
def build_goal_norm_groups(self, agent_id):
goal_size = self.get_goal_size(agent_id)
return Normalizer.NORM_GROUP_SINGLE * np.ones(goal_size, dtype=np.int32)
@abstractmethod
def calc_reward(self, agent_id):
return 0
@abstractmethod
def get_reward_min(self, agent_id):
return 0
@abstractmethod
def get_reward_max(self, agent_id):
return 1
@abstractmethod
def get_reward_fail(self, agent_id):
return self.get_reward_min(agent_id)
@abstractmethod
def get_reward_succ(self, agent_id):
return self.get_reward_max(agent_id)
@abstractmethod
def is_episode_end(self):
return False
@abstractmethod
def check_terminate(self, agent_id):
        return Env.Terminate.Null
@abstractmethod
def check_valid_episode(self):
return True
@abstractmethod
def set_sample_count(self, count):
pass
@abstractmethod
def set_mode(self, mode):
pass |
docs/sections/section2/solutions/sol7.py | lingcog/2019-CS109A | 442 | 12693453 | # Confidence Interval using Stats Model Summary
thresh = 0.05
intervals = results.conf_int(alpha=thresh)
# Renaming column names
first_col = str(thresh/2*100)+"%"
second_col = str((1-thresh/2)*100)+"%"
intervals = intervals.rename(columns={0:first_col,1:second_col})
display(intervals) |
ptgnn/neuralmodels/reduceops/varsizedsummary.py | mir-am/ptgnn | 319 | 12693474 | from typing_extensions import Literal
import torch
from abc import abstractmethod
from math import sqrt
from torch import nn
from torch_scatter import scatter, scatter_log_softmax, scatter_sum
from typing import NamedTuple, Union
class ElementsToSummaryRepresentationInput(NamedTuple):
"""Input to AbstractVarSizedElementReduce layers."""
element_embeddings: torch.Tensor # float tensor of shape [num_elements, D], the representation of each node in all graphs.
    element_to_sample_map: torch.Tensor  # int tensor of shape [num_elements] with values in range [0, num_samples-1], mapping each node to a sample ID.
num_samples: Union[torch.Tensor, int] # scalar, specifying the number of sets.
class AbstractVarSizedElementReduce(nn.Module):
"""Interface for computing summary representations from multiple variable-sized sets of representations."""
@abstractmethod
def forward(self, inputs: ElementsToSummaryRepresentationInput) -> torch.Tensor:
"""Returns: float tensor of shape [num_samples, D']"""
class SimpleVarSizedElementReduce(AbstractVarSizedElementReduce):
def __init__(self, summarization_type: Literal["sum", "mean", "max", "min"]):
super().__init__()
assert summarization_type in {"sum", "mean", "max", "min"}
self.__summarization_type = summarization_type
def forward(self, inputs: ElementsToSummaryRepresentationInput) -> torch.Tensor:
return scatter(
src=inputs.element_embeddings,
index=inputs.element_to_sample_map,
dim=0,
dim_size=inputs.num_samples,
reduce=self.__summarization_type,
)
class NormalizedWeightsVarSizedElementReduce(AbstractVarSizedElementReduce):
def __init__(self, input_representation_size: int, output_representation_size: int):
super().__init__()
self.__attention_layer = nn.Linear(input_representation_size, 1, bias=False)
self.__output_layer = nn.Linear(
input_representation_size, output_representation_size, bias=False
)
def forward(self, inputs: ElementsToSummaryRepresentationInput) -> torch.Tensor:
attention_scores = self.__attention_layer(inputs.element_embeddings).squeeze(
-1
) # [num_elements]
attention_probs = torch.exp(
scatter_log_softmax(attention_scores, index=inputs.element_to_sample_map, dim=0, eps=0)
) # [num_elements]
return scatter_sum(
self.__output_layer(inputs.element_embeddings) * attention_probs.unsqueeze(-1),
            index=inputs.element_to_sample_map,
dim=0,
dim_size=inputs.num_samples,
) # [num_samples, D']
class WeightedSumVarSizedElementReduce(AbstractVarSizedElementReduce):
def __init__(self, representation_size: int):
super().__init__()
self.__weights_layer = nn.Linear(representation_size, 1, bias=False)
def forward(self, inputs: ElementsToSummaryRepresentationInput) -> torch.Tensor:
weights = torch.sigmoid(
self.__weights_layer(inputs.element_embeddings).squeeze(-1)
) # [num_elements]
return scatter_sum(
inputs.element_embeddings * weights.unsqueeze(-1),
index=inputs.element_to_sample_map,
dim=0,
dim_size=inputs.num_samples,
) # [num_samples, D']
class SelfAttentionVarSizedElementReduce(AbstractVarSizedElementReduce):
def __init__(
self,
input_representation_size: int,
hidden_size: int,
output_representation_size: int,
query_representation_summarizer: AbstractVarSizedElementReduce,
):
super().__init__()
self.__query_layer = query_representation_summarizer
self.__key_layer = nn.Linear(input_representation_size, hidden_size, bias=False)
self.__output_layer = nn.Linear(
input_representation_size, output_representation_size, bias=False
)
def forward(self, inputs: ElementsToSummaryRepresentationInput) -> torch.Tensor:
queries = self.__query_layer(inputs) # [num_samples, H]
queries_all = queries[inputs.element_to_sample_map] # [num_elements, H]
keys = self.__key_layer(inputs.element_embeddings) # [num_elements, H]
attention_scores = torch.einsum("vh,vh->v", queries_all, keys) # [num_elements]
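        # Normalize scores into probabilities separately within each sample (segment-wise softmax).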
attention_probs = torch.exp(
scatter_log_softmax(attention_scores, index=inputs.element_to_sample_map, dim=0, eps=0)
) # [num_elements]
return scatter_sum(
self.__output_layer(inputs.element_embeddings) * attention_probs.unsqueeze(-1),
index=inputs.element_to_sample_map,
dim=0,
dim_size=inputs.num_samples,
) # [num_samples, D']
class MultiheadSelfAttentionVarSizedElementReduce(AbstractVarSizedElementReduce):
def __init__(
self,
input_representation_size: int,
hidden_size: int,
output_representation_size: int,
num_heads: int,
query_representation_summarizer: AbstractVarSizedElementReduce,
use_value_layer: bool = False,
):
super().__init__()
self.__query_layer = query_representation_summarizer
self.__key_layer = nn.Linear(input_representation_size, hidden_size, bias=False)
assert hidden_size % num_heads == 0, "Hidden size must be divisible by the number of heads."
self.__use_value_layer = use_value_layer
if use_value_layer:
self.__value_layer = nn.Linear(input_representation_size, hidden_size, bias=False)
self.__output_layer = nn.Linear(hidden_size, output_representation_size, bias=False)
else:
self.__output_layer = nn.Linear(
input_representation_size * num_heads, output_representation_size, bias=False
)
self.__num_heads = num_heads
def forward(self, inputs: ElementsToSummaryRepresentationInput) -> torch.Tensor:
queries = self.__query_layer(inputs) # [num_samples, H]
queries_per_element = queries[inputs.element_to_sample_map] # [num_elements, H]
queries_per_element = queries_per_element.reshape(
(
queries_per_element.shape[0],
self.__num_heads,
queries_per_element.shape[1] // self.__num_heads,
)
)
keys = self.__key_layer(inputs.element_embeddings) # [num_elements, H]
keys = keys.reshape((keys.shape[0], self.__num_heads, keys.shape[1] // self.__num_heads))
attention_scores = torch.einsum("bhk,bhk->bh", queries_per_element, keys) / sqrt(
keys.shape[-1]
) # [num_elements, num_heads]
attention_probs = torch.exp(
scatter_log_softmax(attention_scores, index=inputs.element_to_sample_map, dim=0, eps=0)
) # [num_elements, num_heads]
if self.__use_value_layer:
values = self.__value_layer(inputs.element_embeddings) # [num_elements, hidden_size]
values = values.reshape(
(values.shape[0], self.__num_heads, values.shape[1] // self.__num_heads)
)
outputs = attention_probs.unsqueeze(-1) * values
else:
outputs = attention_probs.unsqueeze(-1) * inputs.element_embeddings.unsqueeze(
1
) # [num_elements, num_heads, D']
outputs = outputs.reshape((outputs.shape[0], -1)) # [num_elements, num_heads * D']
per_sample_outputs = scatter_sum(
outputs, index=inputs.element_to_sample_map, dim=0, dim_size=inputs.num_samples
        )  # [num_samples, num_heads * D']
return self.__output_layer(per_sample_outputs) # [num_samples, D']
|
webapp/tests/test_readers_rrd.py | romanek-adam/graphite-web | 4,281 | 12693485 | <reponame>romanek-adam/graphite-web<filename>webapp/tests/test_readers_rrd.py
from .base import TestCase
import os
from os.path import join, isdir
import rrdtool
import shutil
import six
import time
from django.conf import settings
from graphite.readers import RRDReader
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
class RRDReaderTests(TestCase):
test_dir = join(settings.RRD_DIR)
start_ts = 0
step = 60
points = 100
# Create/wipe test whisper files
hostcpu = os.path.join(test_dir, 'hosts/worker1/cpu.rrd')
# TODO Fix this!
def create_rrd(self):
if not isdir(self.test_dir):
os.makedirs(self.test_dir)
try:
os.makedirs(self.hostcpu.replace('cpu.rrd', ''))
except OSError:
pass
self.start_ts = int(time.time())
rrdtool.create(self.hostcpu, '--start', str(self.start_ts),
'--step', str(self.step),
'RRA:AVERAGE:0.5:1:{}'.format(self.points),
'DS:cpu:GAUGE:60:U:U')
def wipe_rrd(self):
try:
shutil.rmtree(self.test_dir)
except OSError:
pass
# Confirm the reader object is not none
def test_RRDReader_init(self):
self.create_rrd()
self.addCleanup(self.wipe_rrd)
reader = RRDReader(self.hostcpu, 'cpu')
self.assertIsNotNone(reader)
# must return a 'str' object on both py2 and py3 independent of the type of
# the argument (which is a 'unicode' on py2)
def test_RRDReader_convert_fs_path(self):
path = RRDReader._convert_fs_path(six.u(self.hostcpu))
self.assertIsInstance(path, str)
# Confirm the intervals
def test_RRDReader_get_intervals(self):
self.create_rrd()
self.addCleanup(self.wipe_rrd)
reader = RRDReader(self.hostcpu, 'cpu')
# Intervals are calculated on the actual time so tolerate a 2 second
# deviation for delays caused between file creation and test.
for interval in reader.get_intervals():
self.assertAlmostEqual(interval.start,
self.start_ts - self.points * self.step,
delta=2)
self.assertAlmostEqual(interval.end, self.start_ts, delta=2)
# Confirm fetch works.
def test_RRDReader_fetch(self):
self.create_rrd()
self.addCleanup(self.wipe_rrd)
# insert some data
for ts in range(self.start_ts + 60, self.start_ts + 10 * self.step,
self.step):
rrdtool.update(self.hostcpu, '{}:42'.format(ts))
reader = RRDReader(self.hostcpu, 'cpu')
(time_info, values) = reader.fetch(self.start_ts + self.step,
self.start_ts + self.step * 2)
self.assertEqual(list(values), [42.0])
def test_RRDReader_get_datasources(self):
self.create_rrd()
self.addCleanup(self.wipe_rrd)
datasource = RRDReader.get_datasources(self.hostcpu)
self.assertEqual(datasource, ['cpu'])
def test_RRDReader_get_retention(self):
self.create_rrd()
self.addCleanup(self.wipe_rrd)
retentions = RRDReader.get_retention(self.hostcpu)
self.assertEqual(retentions, self.points * self.step)
|
xt/agent/__init__.py | TianQi-777/xingtian | 240 | 12693497 | #!/usr/bin/env python
"""
DESC: The agent module is used to explore and test in the environment for a specialized task.
The module receives the raw data from the environment as the
input, transforms the raw data into the training state for the RL model, and
then outputs an action chosen by some exploration policy to the environment;
finally, the next training state and the corresponding reward are obtained.
During this process, the tuples needed for RL training have to be returned.
You can also define your specialized reward function, exploration policy,
training state and so on.
"""
import zeus.common.util.common as common
from xt.agent.agent import Agent
__ALL__ = ["Agent", "AsyncAgent", "agent_builder"]
from zeus.common.util.register import Registers
def agent_builder(agent_name, env, alg, agent_config, **kwargs):
"""
Build an agent instance.
    :param agent_name: name of the agent class registered in Registers.agent.
    :param env: environment instance the agent interacts with.
    :param alg: algorithm instance used by the agent.
    :param agent_config: configuration dict for the agent.
    :param kwargs: extra keyword arguments forwarded to the agent constructor.
    :return: the constructed agent instance.
"""
return Registers.agent[agent_name](env, alg, agent_config, **kwargs)
|
examples/two_grids_example.py | RensDimmendaal/streamlit-aggrid | 287 | 12693517 | <reponame>RensDimmendaal/streamlit-aggrid<gh_stars>100-1000
import streamlit as st
import numpy as np
import pandas as pd
from st_aggrid import AgGrid, DataReturnMode, GridUpdateMode, GridOptionsBuilder
@st.cache()
def get_data_ex4():
df = pd.DataFrame(
np.random.randint(0, 100, 50).reshape(-1, 5), columns=list("abcde")
)
return df
df = get_data_ex4()
st.markdown("""
### Two grids
As in other streamlit components, it is possible to render two components for the same data using distinct ```key``` parameters.
""")
st.subheader("Input data")
st.dataframe(df)
st.subheader("Editable Grids")
c1, c2 = st.beta_columns(2)
with c1:
grid_return1 = AgGrid(df, key='grid1', editable=True)
st.text("Grid 1 Return")
st.write(grid_return1['data'])
with c2:
grid_return2 = AgGrid(df, key='grid2', editable=True)
st.text("Grid 2 Return")
st.write(grid_return2['data']) |
utils/examples_tests/xdist_util.py | payoto/graphcore_examples | 260 | 12693529 | <filename>utils/examples_tests/xdist_util.py
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
from filelock import FileLock
from contextlib import contextmanager
@contextmanager
def lock(lock_path):
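    """Serialize a critical section across processes (e.g. pytest-xdist workers) via a file lock."""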
with FileLock(lock_path):
yield
|
Chapter15/train_lm.py | holestine/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 621 | 12693534 | <filename>Chapter15/train_lm.py<gh_stars>100-1000
#!/usr/bin/env python3
import gym
import ptan
import pathlib
import argparse
import itertools
import numpy as np
from typing import List
from textworld.gym import register_games
from textworld import EnvInfos
from lib import preproc, model, common
import torch
import torch.optim as optim
from ignite.engine import Engine
GAMMA = 0.9
BATCH_SIZE = 16
LEARNING_RATE_LM = 1e-5
LEARNING_RATE = 1e-5
# have to be less or equal to env.action_space.max_length
LM_MAX_TOKENS = 4
LM_MAX_COMMANDS = 10
LM_STOP_AVG_REWARD = -1.0
EXTRA_GAME_INFO = {
"inventory": True,
"description": True,
"intermediate_reward": True,
"admissible_commands": True,
"policy_commands": True,
"last_command": True,
}
def unpack_batch(batch: List[ptan.experience.ExperienceFirstLast], prep: preproc.Preprocessor):
states = []
rewards = []
not_done_idx = []
next_states = []
for idx, exp in enumerate(batch):
states.append(exp.state['obs'])
rewards.append(exp.reward)
if exp.last_state is not None:
not_done_idx.append(idx)
next_states.append(exp.last_state['obs'])
return prep.encode_sequences(states)
def batch_generator(exp_source: ptan.experience.ExperienceSourceFirstLast,
batch_size: int):
batch = []
for exp in exp_source:
batch.append(exp)
if len(batch) == batch_size:
yield batch
batch.clear()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--game", default="simple",
help="Game prefix to be used during training, default=simple")
parser.add_argument("--params", choices=list(common.PARAMS.keys()), default='small',
help="Training params, could be one of %s" % (list(common.PARAMS.keys())))
parser.add_argument("-s", "--suffices", type=int, default=1,
help="Count of game indices to use during training, default=1")
parser.add_argument("-v", "--validation", default='-val',
help="Suffix for game used for validation, default=-val")
parser.add_argument("--cuda", default=False, action='store_true',
help="Use cuda for training")
parser.add_argument("-r", "--run", required=True, help="Run name")
parser.add_argument("--load-cmd", help="If specified, command generator will be loaded "
"from given prefix, otherwise it will be trained")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
params = common.PARAMS[args.params]
game_files = ["games/%s%s.ulx" % (args.game, s) for s in range(1, args.suffices+1)]
val_game_file = "games/%s%s.ulx" % (args.game, args.validation)
if not all(map(lambda p: pathlib.Path(p).exists(), game_files)):
raise RuntimeError(f"Some game files from {game_files} not found! Probably you need to run make_games.sh")
action_space, observation_space = common.get_games_spaces(game_files + [val_game_file])
env_id = register_games(game_files, request_infos=EnvInfos(**EXTRA_GAME_INFO), name=args.game,
action_space=action_space, observation_space=observation_space)
print("Registered env %s for game files %s" % (env_id, game_files))
val_env_id = register_games([val_game_file], request_infos=EnvInfos(**EXTRA_GAME_INFO), name=args.game,
action_space=action_space, observation_space=observation_space)
print("Game %s, with file %s will be used for validation" % (val_env_id, val_game_file))
env = gym.make(env_id)
env = preproc.TextWorldPreproc(env, use_admissible_commands=False,
keep_admissible_commands=True,
reward_wrong_last_command=-0.1)
prep = preproc.Preprocessor(
dict_size=env.observation_space.vocab_size,
emb_size=params.embeddings, num_sequences=env.num_fields,
enc_output_size=params.encoder_size).to(device)
cmd = model.CommandModel(prep.obs_enc_size, env.observation_space.vocab_size, prep.emb,
max_tokens=LM_MAX_TOKENS,
max_commands=LM_MAX_COMMANDS,
start_token=env.action_space.BOS_id,
sep_token=env.action_space.EOS_id).to(device)
if args.load_cmd is not None:
load_path = pathlib.Path(args.load_cmd)
prep.load_state_dict(torch.load(load_path/"prep.dat"))
cmd.load_state_dict(torch.load(load_path/"cmd.dat"))
print("Preprocessor and command generator are loaded from %s" % load_path)
else:
agent = model.CmdAgent(env, cmd, prep, device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(
env, agent, gamma=GAMMA, steps_count=1)
buffer = ptan.experience.ExperienceReplayBuffer(
exp_source, params.replay_size)
optimizer = optim.RMSprop(itertools.chain(prep.parameters(), cmd.parameters()),
lr=LEARNING_RATE_LM, eps=1e-5)
def process_batch(engine, batch):
optimizer.zero_grad()
obs_t = unpack_batch(batch, prep)
commands = []
for s in batch:
cmds = []
for c in s.state['admissible_commands']:
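                    # Keep only commands short enough for the generator; -2 discounts the start/end tokens added by tokenize().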
t = env.action_space.tokenize(c)
if len(t)-2 <= LM_MAX_TOKENS:
cmds.append(t)
commands.append(cmds)
loss_t = model.pretrain_loss(cmd, commands, obs_t)
loss_t.backward()
optimizer.step()
if engine.state.metrics.get('avg_reward', LM_STOP_AVG_REWARD) > LM_STOP_AVG_REWARD:
print("Mean reward reached %.2f, stop pretraining" % LM_STOP_AVG_REWARD)
engine.should_terminate = True
return {
"loss": loss_t.item(),
}
engine = Engine(process_batch)
run_name = f"lm-{args.params}_{args.run}"
save_path = pathlib.Path("saves") / run_name
save_path.mkdir(parents=True, exist_ok=True)
common.setup_ignite(engine, exp_source, run_name)
try:
engine.run(common.batch_generator(buffer, BATCH_SIZE, BATCH_SIZE))
except KeyboardInterrupt:
print("Interrupt got, saving the model...")
torch.save(prep.state_dict(), save_path/"prep.dat")
torch.save(cmd.state_dict(), save_path/"cmd.dat")
print("Using preprocessor and command generator")
prep.train(False)
cmd.train(False)
val_env = gym.make(val_env_id)
val_env = preproc.TextWorldPreproc(val_env, use_admissible_commands=False,
keep_admissible_commands=True,
reward_wrong_last_command=-0.1)
net = model.DQNModel(obs_size=prep.obs_enc_size,
cmd_size=prep.obs_enc_size).to(device)
tgt_net = ptan.agent.TargetNet(net)
cmd_encoder = preproc.Encoder(params.embeddings, prep.obs_enc_size).to(device)
tgt_cmd_encoder = ptan.agent.TargetNet(cmd_encoder)
agent = model.CmdDQNAgent(env, net, cmd, cmd_encoder, prep, epsilon=1, device=device)
exp_source = ptan.experience.ExperienceSourceFirstLast(
env, agent, gamma=GAMMA, steps_count=1)
buffer = ptan.experience.ExperienceReplayBuffer(
exp_source, params.replay_size)
optimizer = optim.RMSprop(itertools.chain(net.parameters(), cmd_encoder.parameters()),
lr=LEARNING_RATE, eps=1e-5)
def process_batch(engine, batch):
optimizer.zero_grad()
loss_t = model.calc_loss_dqncmd(
batch, prep, cmd,
cmd_encoder, tgt_cmd_encoder.target_model,
net, tgt_net.target_model, GAMMA, env, device)
loss_t.backward()
optimizer.step()
eps = 1 - engine.state.iteration / params.epsilon_steps
agent.epsilon = max(params.epsilon_final, eps)
if engine.state.iteration % params.sync_nets == 0:
tgt_net.sync()
tgt_cmd_encoder.sync()
return {
"loss": loss_t.item(),
"epsilon": agent.epsilon,
}
engine = Engine(process_batch)
run_name = f"dqn-{args.params}_{args.run}"
save_path = pathlib.Path("saves") / run_name
save_path.mkdir(parents=True, exist_ok=True)
common.setup_ignite(engine, exp_source, run_name,
extra_metrics=('val_reward', 'val_steps'))
@engine.on(ptan.ignite.PeriodEvents.ITERS_100_COMPLETED)
@torch.no_grad()
def validate(engine):
reward = 0.0
steps = 0
obs = val_env.reset()
while True:
obs_t = prep.encode_sequences([obs['obs']]).to(device)
cmds = cmd.commands(obs_t)[0]
cmd_embs_t = prep._apply_encoder(cmds, cmd_encoder)
q_vals = net.q_values_cmd(obs_t[0], cmd_embs_t)
act = np.argmax(q_vals)
best_cmd = cmds[act]
tokens = [
env.action_space.id2w[t]
for t in best_cmd
if t not in {cmd.sep_token, cmd.start_token}
]
action = " ".join(tokens)
obs, r, is_done, _ = val_env.step(action)
steps += 1
reward += r
if is_done:
break
engine.state.metrics['val_reward'] = reward
engine.state.metrics['val_steps'] = steps
print("Validation got %.3f reward in %d steps" % (reward, steps))
best_val_reward = getattr(engine.state, "best_val_reward", None)
if best_val_reward is None:
engine.state.best_val_reward = reward
elif best_val_reward < reward:
print("Best validation reward updated: %s -> %s" % (best_val_reward, reward))
save_net_name = save_path / ("best_val_%.3f.dat" % reward)
torch.save(net.state_dict(), save_net_name)
engine.state.best_val_reward = reward
@engine.on(ptan.ignite.EpisodeEvents.BEST_REWARD_REACHED)
def best_reward_updated(trainer: Engine):
reward = trainer.state.metrics['avg_reward']
if reward > 0:
save_net_name = save_path / ("best_train_%.3f_.dat" % reward)
torch.save(net.state_dict(), save_net_name)
print("%d: best avg training reward: %.3f, saved" % (
trainer.state.iteration, reward))
engine.run(common.batch_generator(buffer, 100 + 0*params.replay_initial, BATCH_SIZE))
|
samtranslator/public/models.py | hawflau/serverless-application-model | 1,279 | 12693544 | # flake8: noqa
from samtranslator.model.resource_policies import ResourcePolicies, PolicyTypes
|
scipy/optimize/tests/test_lbfgsb_setulb.py | Ennosigaeon/scipy | 9,095 | 12693623 | import numpy as np
from scipy.optimize import _lbfgsb
def objfun(x):
"""simplified objective func to test lbfgsb bound violation"""
x0 = [0.8750000000000278,
0.7500000000000153,
0.9499999999999722,
0.8214285714285992,
0.6363636363636085]
x1 = [1.0, 0.0, 1.0, 0.0, 0.0]
x2 = [1.0,
0.0,
0.9889733043149325,
0.0,
0.026353554421041155]
x3 = [1.0,
0.0,
0.9889917442915558,
0.0,
0.020341986743231205]
f0 = 5163.647901211178
f1 = 5149.8181642072905
f2 = 5149.379332309634
f3 = 5149.374490771297
g0 = np.array([-0.5934820547965749,
1.6251549718258351,
-71.99168459202559,
5.346636965797545,
37.10732723092604])
g1 = np.array([-0.43295349282641515,
1.008607936794592,
18.223666726602975,
31.927010036981997,
-19.667512518739386])
g2 = np.array([-0.4699874455100256,
0.9466285353668347,
-0.016874360242016825,
48.44999161133457,
5.819631620590712])
g3 = np.array([-0.46970678696829116,
0.9612719312174818,
0.006129809488833699,
48.43557729419473,
6.005481418498221])
if np.allclose(x, x0):
f = f0
g = g0
elif np.allclose(x, x1):
f = f1
g = g1
elif np.allclose(x, x2):
f = f2
g = g2
elif np.allclose(x, x3):
f = f3
g = g3
else:
raise ValueError(
'Simplified objective function not defined '
'at requested point')
return (np.copy(f), np.copy(g))
def test_setulb_floatround():
"""test if setulb() violates bounds
checks for violation due to floating point rounding error
"""
n = 5
m = 10
factr = 1e7
pgtol = 1e-5
maxls = 20
iprint = -1
nbd = np.full((n,), 2)
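    # nbd == 2 tells setulb that every variable has both a lower and an upper bound.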
low_bnd = np.zeros(n, np.float64)
upper_bnd = np.ones(n, np.float64)
x0 = np.array(
[0.8750000000000278,
0.7500000000000153,
0.9499999999999722,
0.8214285714285992,
0.6363636363636085])
x = np.copy(x0)
f = np.array(0.0, np.float64)
g = np.zeros(n, np.float64)
fortran_int = _lbfgsb.types.intvar.dtype
wa = np.zeros(2*m*n + 5*n + 11*m*m + 8*m, np.float64)
iwa = np.zeros(3*n, fortran_int)
task = np.zeros(1, 'S60')
csave = np.zeros(1, 'S60')
lsave = np.zeros(4, fortran_int)
isave = np.zeros(44, fortran_int)
dsave = np.zeros(29, np.float64)
task[:] = b'START'
for n_iter in range(7): # 7 steps required to reproduce error
f, g = objfun(x)
_lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
pgtol, wa, iwa, task, iprint, csave, lsave,
isave, dsave, maxls)
assert (x <= upper_bnd).all() and (x >= low_bnd).all(), (
"_lbfgsb.setulb() stepped to a point outside of the bounds")
|
contrib/python/examples/ldns-mx1.py | elindsey/ldns | 178 | 12693633 | #!/usr/bin/python
#
# MX is a small program that prints out the mx records for a particular domain
#
import ldns
dname = ldns.ldns_dname("nic.cz")
print dname
resolver = ldns.ldns_resolver.new_frm_file("/etc/resolv.conf")
pkt = resolver.query(dname, ldns.LDNS_RR_TYPE_MX,ldns.LDNS_RR_CLASS_IN)
if (pkt):
mx = pkt.rr_list_by_type(ldns.LDNS_RR_TYPE_MX, ldns.LDNS_SECTION_ANSWER)
if (mx):
mx.sort()
print mx
|
pscript/tests/test_stdlib.py | JesusZerpa/pscript | 190 | 12693641 | """
Most of the stuff from the stdlib will be tested via test_parser3. That
will mostly test whether the implementation is correct. This module does some
meta tests.
"""
import sys
from pscript.testing import run_tests_if_main, raises
from pscript import py2js, evaljs, evalpy, Parser3, stdlib
def test_stdlib_full_and_partial():
code = stdlib.get_full_std_lib()
assert isinstance(code, str)
assert 'var %shasattr =' % stdlib.FUNCTION_PREFIX in code
assert 'var %slist =' % stdlib.FUNCTION_PREFIX in code
assert code.count('var') > 10
code = stdlib.get_partial_std_lib(['hasattr'], [], [])
assert isinstance(code, str)
assert 'var %shasattr =' % stdlib.FUNCTION_PREFIX in code
assert 'var %slist =' % stdlib.FUNCTION_PREFIX not in code
assert code.count('var') == 1
assert '_hasattr = function' in py2js('hasattr(x, "foo")')
assert '_hasattr = function' not in py2js('hasattr(x, "foo")', inline_stdlib=False)
def test_stdlib_has_all_list_methods():
method_names = [m for m in dir(list) if not m.startswith('_')]
for method_name in method_names:
assert method_name in stdlib.METHODS
def test_stdlib_has_all_dict_methods():
method_names = [m for m in dir(dict) if not m.startswith('_')]
if sys.version_info[0] == 2:
ignore = 'fromkeys has_key viewitems viewkeys viewvalues iteritems iterkeys itervalues'
else:
ignore = 'fromkeys'
for name in ignore.split(' '):
method_names.remove(name)
for method_name in method_names:
assert method_name in stdlib.METHODS
def test_stdlib_has_all_str_methods():
method_names = [m for m in dir(str) if not m.startswith('_')]
if sys.version_info[0] == 2:
ignore = 'encode decode'
else:
ignore = 'encode format_map isprintable maketrans isascii removeprefix removesuffix'
for name in ignore.split(' '):
if name in method_names:
method_names.remove(name)
for method_name in method_names:
assert method_name in stdlib.METHODS
run_tests_if_main()
|
Python/Tests/TestData/Ipc.Json/socket_handle_request.py | techkey/PTVS | 404 | 12693642 | <gh_stars>100-1000
import os
import sys
import ptvsd.ipcjson as _ipc
class SocketIpcChannel(_ipc.SocketIO, _ipc.IpcChannel):
def __init__(self, *args, **kwargs):
super(SocketIpcChannel, self).__init__(*args, **kwargs)
def on_testRequest(self, request, args):
self.send_response(
request,
success=True,
message='',
requestText=args['dataText'],
responseText='test response text'
)
def on_disconnect(self, request, args):
self.send_response(request)
self.__exit = True
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-r', '--result-port', type='int')
(opts, _) = parser.parse_args()
channel = SocketIpcChannel(port = opts.result_port)
channel.process_messages()
channel.close()
sys.exit(0)
if __name__ == '__main__':
main()
|
recipes/Python/577482_Easy_property/recipe-577482.py | tdiprima/code | 2,023 | 12693698 | <gh_stars>1000+
#! /usr/bin/env python
######################################################################
# Written by <NAME> on 2008-05-03
# This code may be used pursuant to the MIT License.
######################################################################
"""
Property
========
The Property class provides basic functionality that allows class
level control over how a particular attribute is managed. In its
simplest form a Property attribute works exactly like a regular
attribute on an instance while providing documentation details
about the attribute accessible via the declaring class.
This class modifies how properties are created on a class. The Python
documentation contains the following example:
class C(object):
def __init__(self):
self._x = None
def getx(self):
return self._x
def setx(self, value):
self._x = value
def delx(self):
del self._x
x = property(getx, setx, delx, 'the "x" property.')
The equivalent using Property is as follows:
class C(object):
x = Property('x', None)
>>> x = C()
>>> repr(x.x)
'None'
>>> C.x.__doc__
'the "x" property'
Need a read-only property? Here is the Python example:
class Parrot(object):
def __init__(self):
self._voltage = 100000
@property
def voltage(self):
'Get the current voltage.'
return self._voltage
And here is the equivalent:
class Parrot(object):
voltage = Property('voltage', 100000, Property.Mode.READ_ONLY, 'Get the current voltage')
If your class needs to write to a property that is intended to be
public read-only you can use the set_property() function.
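For example (a minimal sketch building on the Parrot class above):
>>> p = Parrot()
>>> set_property(p, 'voltage', 230)
>>> p.voltage
230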
"""
__all__ = ( 'Enum', 'Property' )
def Enum(*names):
"""See immutable symbolic enumeration types by <NAME>
(see http://code.activestate.com/recipes/413486-first-class-enums-in-python/)
- Enums are immutable; attributes cannot be added, deleted or changed.
- Enums are iterable.
- Enum value access is symbolic and qualified, ex. Days.Monday (like in C#).
- Enum values are true constants.
- Enum values are comparable.
    - Enum values are invertible (useful for 2-valued enums, like Enum('no', 'yes')).
- Enum values are usable as truth values (in a C tradition, but this is debatable).
- Enum values are reasonably introspective (by publishing their enum type and numeric value)
Changed slightly to add '__doc__' tags to the generated
enumeration types. So to the above author's comments we add:
- Enums and Enum values are documented.
- enumeration values are type-checked during comparisons.
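    A minimal usage sketch (the names here are purely illustrative):
        Days = Enum('Monday', 'Tuesday', 'Wednesday')
        Days.Monday < Days.Tuesday   # enum values are comparable
        Days.Monday in Days          # membership works via __contains__
        Days.Monday.value            # the underlying numeric value (0)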
"""
assert names, "Empty enums are not supported" # <- Don't like empty enums? Uncomment!
class EnumClass(object):
__slots__ = names
def __contains__(self, v): return v in constants
def __getitem__(self, i): return constants[i]
def __iter__(self): return iter(constants)
def __len__(self): return len(constants)
def __repr__(self): return 'Enum' + str(names)
def __str__(self): return 'enum ' + str(constants)
class EnumValue(object):
__slots__ = ('__value')
def __init__(self, value): self.__value = value
value = property(lambda self: self.__value)
type = property(lambda self: EnumType)
def __hash__(self): return hash(self.__value)
def __cmp__(self, other):
try:
if self.type is other.type:
return cmp(self.__value, other.__value)
else:
raise TypeError, "requires a '%s' object but received a '%s'" % ( self.type.__class__.__name__, other.type.__class__.__name__ )
except AttributeError:
raise TypeError, "requires a '%s' object but received a '%s'" % ( self.type.__class__.__name__, other.__class__.__name__ )
def __invert__(self): return constants[maximum - self.__value]
def __nonzero__(self): return bool(self.__value)
def __repr__(self): return str(names[self.__value])
maximum = len(names) - 1
constants = [None] * len(names)
for i, each in enumerate(names):
val = type(EnumValue)(
'EnumValue', (EnumValue,), { '__doc__': 'Enumeration value "%s"' % each }
)(i)
setattr(EnumClass, each, val)
constants[i] = val
constants = tuple(constants)
EnumType = type(EnumClass)(
'EnumClass', (EnumClass,), { '__doc__': 'Enumeration of %s' % repr(constants) }
)()
return EnumType
class Property(object):
"""Construct a data descriptor suitable for associating
documentation with an attribute value. Attribute values are
instance specific and are stored within the instance dictionary
(so property values go away when the instance is garbage
collected). Properties have a class-wide default value used if
the property has not been specified on an instance.
The class has the ability to indicate the access mode of the
resulting attribute. The possible access modes may be specified
using exactly one of the following enumeration values:
Mode.READ_ONLY
==================
The attribute may only be read. The instance property effectively
becomes a class constant (as the attribute may not be written). A
READ_ONLY attribute must have the default value specified when the
Property is constructed.
    Unlike an Enum class, the resulting Property is still accessible
    through the declaring class (to provide access to the attribute
    documentation). This has the side effect that the constant value
    is only accessible through instances of the declaring class.
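    For example (a sketch):
        class C(object):
            x = Property('x', 0, Property.Mode.READ_ONLY)
    Here C.x is the Property descriptor (exposing __doc__) while C().x
    yields the constant value 0.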
Mode.READ_WRITE
===================
The READ_WRITE mode is the default mode on Property instances and
is used to provide attributes with all the normal behaviors of
typical class attributes with supporting documentation and
optional default values.
Mode.WRITE_ONCE
===================
The WRITE_ONCE mode builds a data descriptor that allows every
instance of the declaring class to set the resulting attribute one
time. A default value may be specified that will be returned if
the attribute is accessed prior to the write; but the default does
    not prevent the one-time write from occurring.
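    For example (a sketch):
        class Job(object):
            result = Property('result', None, Property.Mode.WRITE_ONCE)
    Each Job instance may assign its result attribute exactly once; any
    further assignment raises AttributeError, while reads before the
    first write return the default of None.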
Additionally you may supply a documentation string so your class
properties may expose usage information.
"""
####
# Special value used to mark an undefined default value.
####
__NONE = object()
Mode = Enum('READ_ONLY', 'READ_WRITE', 'WRITE_ONCE')
def __init__(self, name, default = __NONE, mode = Mode.READ_WRITE, doc = None):
"""Construct a new Property data descriptor.
\var{name} the name of the attribute being created.
\var{default} the (optional) default value to use when
retrieving the attribute if it hasn't already been set.
\var{mode} the mode of the constructed Property.
\var{doc} the documentation string to use. This string is
accessed through the declaring class.
"""
self.__name = name
self.__key = '__property__' + name
if mode.__class__ not in (i.__class__ for i in self.Mode):
raise TypeError, "the mode parameter requires a member of the 'Property.Mode' enumeration but received a '%s'" % mode.__class__.__name__
self.__mode = mode
if default is not self.__NONE:
self.__default = default
elif mode is self.Mode.READ_ONLY:
raise ValueError, 'read only attributes require a default value'
if doc is None:
self.__doc__ = 'the "%s" property' % name
else:
self.__doc__ = doc
def __get__(self, obj, objType = None):
"""Get the attribute value.
"""
try: return obj.__dict__[self.__key]
except AttributeError: return self
except KeyError: pass
try: return objType.__dict__[self.__key]
except KeyError: pass
try: return self.__default
except AttributeError:
raise AttributeError, "'%s' object has no attribute '%s'" % ( obj.__class__.__name__, self.__name )
def __set__(self, obj, value):
"""Set the attribute value.
"""
if self.__mode is self.Mode.READ_ONLY:
raise AttributeError, "can't set attribute \"%s\"" % self.__name
elif self.__mode is self.Mode.WRITE_ONCE:
if self.__key in obj.__dict__:
raise AttributeError, "can't set attribute \"%s\"" % self.__name
obj.__dict__[self.__key] = value
def __delete__(self, obj):
"""Delete the attribute value.
"""
if self.__mode is not self.Mode.READ_WRITE:
raise AttributeError, "can't delete attribute \"%s\"" % self.__name
del(obj.__dict__[self.__key])
def set_property(obj, name, value):
"""Set or reset the property 'name' to 'value' on 'obj'.
This function may be used to modify the value of a WRITE_ONCE or
READ_ONLY property. Therefore use of this function should be
limited to the implementation class.
"""
obj.__dict__['__property__' + name] = value
if __name__ == '__main__':
    import sys
    import traceback
    from types import FloatType, ComplexType
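    def colon( msg ):
        # The assertion helpers below call colon(), which is not defined in
        # this recipe; this minimal sketch assumes it turns an optional
        # message into a ': message' suffix.
        if msg is None:
            return ''
        return ': ' + str( msg )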
def assertEquals( exp, got, msg = None ):
"""assertEquals( exp, got[, message] )
Two objects test as "equal" if:
* they are the same object as tested by the 'is' operator.
* either object is a float or complex number and the absolute
value of the difference between the two is less than 1e-8.
* applying the equals operator ('==') returns True.
"""
if exp is got:
r = True
elif ( type( exp ) in ( FloatType, ComplexType ) or
type( got ) in ( FloatType, ComplexType ) ):
r = abs( exp - got ) < 1e-8
else:
r = ( exp == got )
if not r:
print >>sys.stderr, "Error: expected <%s> but got <%s>%s" % ( repr( exp ), repr( got ), colon( msg ) )
traceback.print_stack()
def assertException( exceptionType, f, msg = None ):
"""Assert that an exception of type \var{exceptionType}
is thrown when the function \var{f} is evaluated.
"""
try:
f()
except exceptionType:
assert True
else:
print >>sys.stderr, "Error: expected <%s> to be thrown by function%s" % ( exceptionType.__name__, colon( msg ) )
traceback.print_stack()
def assertNone( x, msg = None ):
assertSame( None, x, msg )
def assertSame( exp, got, msg = None ):
if got is not exp:
print >>sys.stderr, "Error: expected <%s> to be the same object as <%s>%s" % ( repr( exp ), repr( got ), colon( msg ) )
traceback.print_stack()
def assertTrue( b, msg = None ):
if not b:
print >>sys.stderr, "Error: expected value to be True%s" % colon( msg )
traceback.print_stack()
####
# Test Property
####
class Test( object ):
ro_value = Property( 'ro_value', 'test', mode = Property.Mode.READ_ONLY )
assertException( ValueError, lambda: Property( 'ro_undef', mode = Property.Mode.READ_ONLY ),
'read-only attributes should require default' )
rw_undef = Property( 'rw_undef' )
rw_default = Property( 'rw_default', None )
rw_doc = Property( 'rw_default', doc = 'alternate documentation' )
assertException( TypeError, lambda: Property( 'bad_mode', mode = None ),
'bad Property mode should raise an exception' )
wo_undef = Property( 'wo_undef', mode = Property.Mode.WRITE_ONCE )
wo_default = Property( 'wo_default', 'test', mode = Property.Mode.WRITE_ONCE )
a = Test()
b = Test()
####
# Mode.READ_ONLY
assertEquals( 'test', a.ro_value )
assertEquals( 'test', b.ro_value )
assertException( AttributeError, lambda: setattr( a, 'ro_value', 5 ), 'unexpected write to a read-only attribute' )
# assertException( AttributeError, lambda: del( b.ro_value ), 'unexpected del() on a read-only attribute' )
set_property( a, 'ro_value', 'tset' )
assertEquals( 'tset', a.ro_value )
assertEquals( 'test', b.ro_value )
####
# Mode.READ_WRITE
assertException( AttributeError, lambda: getattr( a, 'rw_undef' ), 'unexpected read of an undefined attribute' )
assertNone( a.rw_default )
a.rw_undef = 5
assertEquals( 5, a.rw_undef )
assertTrue( '__property__rw_undef' in a.__dict__ )
assertEquals( 5, a.__dict__['__property__rw_undef'] )
assertEquals( 'the "rw_undef" property', Test.rw_undef.__doc__ )
assertSame( int, type( a.rw_undef ) )
assertSame( Property, type( Test.rw_undef ) )
assertEquals( 'alternate documentation', Test.rw_doc.__doc__ )
####
# Mode.READ_WRITE: changes to 'a' should not affect 'b'
assertException( AttributeError, lambda: getattr( b, 'rw_undef' ), 'invalid state change via a different instance' )
assertNone( b.rw_default )
####
# Mode.WRITE_ONCE
assertException( AttributeError, lambda: getattr( a, 'wo_undef' ), 'unexpected read of an undefined attribute' )
assertException( AttributeError, lambda: delattr( a, 'wo_undef' ), 'unexpected del() on a write-once attribute' )
a.wo_undef = 'write_once'
assertEquals( 'write_once', a.wo_undef )
assertException( AttributeError, lambda: setattr( a, 'wo_undef', 'write_twice' ), 'unexpected secondary write on a write-once attribute' )
assertEquals( 'write_once', a.wo_undef )
    assertException( AttributeError, lambda: delattr( a, 'wo_default' ), 'unexpected del() on a write-once attribute' )
assertEquals( 'test', a.wo_default )
a.wo_default = 'write_once'
assertEquals( 'write_once', a.wo_default )
assertEquals( 'test', b.wo_default )
|