the-stack_0_22649
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session with file:// URLs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class SessionDebugFileTest(session_debug_testlib.SessionDebugTestBase):
def _no_rewrite_session_config(self):
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
def _debug_urls(self, run_number=None):
return ["file://%s" % self._debug_dump_dir(run_number=run_number)]
def _debug_dump_dir(self, run_number=None):
if run_number is None:
return self._dump_root
else:
return os.path.join(self._dump_root, "run_%d" % run_number)
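  # Illustrative sketch of the URLs produced above (the dump root shown here is
  # hypothetical): with self._dump_root == "/tmp/tfdbg_dumps", a call to
  # self._debug_urls(run_number=0) would return
  # ["file:///tmp/tfdbg_dumps/run_0"], i.e. a file:// URL pointing at the
  # per-run dump directory built by _debug_dump_dir().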
def testAllowsDifferentWatchesOnDifferentRuns(self):
"""Test watching different tensors on different runs of the same graph."""
with session.Session(config=self._no_rewrite_session_config()) as sess:
u_init_val = [[5.0, 3.0], [-1.0, 0.0]]
v_init_val = [[2.0], [-1.0]]
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "diff_Watch/u"
v_name = "diff_Watch/v"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.Variable(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.Variable(v_init, name=v_name)
w = math_ops.matmul(u, v, name="diff_Watch/matmul")
u.initializer.run()
v.initializer.run()
for i in range(2):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_dump_root = self._debug_dump_dir(run_number=i)
debug_urls = self._debug_urls(run_number=i)
if i == 0:
# First debug run: Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
else:
# Second debug run: Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
run_dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertTrue(dump.loaded_partition_graphs())
# Each run should have generated only one dumped tensor, not two.
self.assertEqual(1, dump.size)
if i == 0:
self.assertAllClose([u_init_val],
dump.get_tensors("%s/read" % u_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % u_name, 0,
"DebugIdentity")[0], 0)
else:
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % v_name, 0,
"DebugIdentity")[0], 0)
class SessionDebugConcurrentTest(
session_debug_testlib.DebugConcurrentRunCallsTest):
def setUp(self):
self._num_concurrent_runs = 3
self._dump_roots = []
for _ in range(self._num_concurrent_runs):
self._dump_roots.append(tempfile.mkdtemp())
def tearDown(self):
ops.reset_default_graph()
for dump_root in self._dump_roots:
if os.path.isdir(dump_root):
shutil.rmtree(dump_root)
def _get_concurrent_debug_urls(self):
return [("file://%s" % dump_root) for dump_root in self._dump_roots]
if __name__ == "__main__":
googletest.main()
the-stack_0_22651
import torch
from torch._C import ListType, OptionalType
from torch.nn.modules.utils import _single, _pair, _triple
import torch.onnx
# This import monkey-patches graph manipulation methods on Graph, used for the
# ONNX symbolics
import torch.onnx.utils
from functools import partial
from functools import wraps
import torch.onnx.symbolic_helper as sym_help
from torch.onnx.symbolic_helper import parse_args, _parse_arg, _unimplemented
from typing import Optional
import numpy
import math
import warnings
# EDITING THIS FILE? READ THIS FIRST!
# see Note [Edit Symbolic Files] in symbolic_helper.py
# This file exports ONNX ops for opset 9
# Opset 9 is supported by ONNX release 1.4.1
# release on 01/23/19
# Note [Pointwise by scalar]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# What happens if you add a tensor with a constant (e.g., x + 2)? There are
# some moving parts to implementing the ONNX translation in this case:
#
# - By the time we get the scalar in a symbolic function here, it is no longer
# a Python long/float, but a PyTorch tensor with numel == 1 (eventually, we
# want it to be a zero dim tensor but this change has not happened yet.)
# However, the type of this scalar is *exactly* what the user wrote in
# Python, which may not match the tensor it is being added to. PyTorch
# will do implicit conversions on scalars; however, ONNX will not, so
# we must do the conversion ourselves. This is what _if_scalar_type_as
# does.
#
# - Dispatch to these functions takes advantage of an outrageous coincidence
# between the tensor and scalar name. When we add two tensors together,
# you get the dispatch:
#
# add(*[self, other], **{"alpha": alpha})
#
# When you add a tensor and a scalar, you get the dispatch:
#
# add(*[self], **{"other": other, "alpha": alpha})
#
# By having the argument name line up with the name of the scalar attribute
# if it exists, we can write a single function for both overloads.
#
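# For a concrete instance of the scalar conversion described in the note above,
# see rsqrt below: the Python-side constant torch.ones(1) is passed through
# sym_help._if_scalar_type_as(g, torch.ones(1), self) so that it picks up
# self's scalar type before the Div, mirroring the implicit scalar promotion
# PyTorch performs but ONNX does not.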
# used to represent "missing" optional inputs
def unused(g):
n = g.op("prim::Constant")
n.setType(OptionalType.ofTensor())
return n
def _shape_as_tensor(g, input):
return g.op('Shape', input)
def _reshape_from_tensor(g, input, shape):
return g.op('Reshape', input, shape)
def reshape(g, self, shape):
return view(g, self, shape)
def reshape_as(g, self, other):
shape = g.op('Shape', other)
return reshape(g, self, shape)
def add(g, self, other, alpha=None):
if sym_help._is_value(self) and sym_help._is_tensor_list(self):
return sym_help._onnx_opset_unsupported_detailed('Add', 9, 11, 'Add between list of tensors not supported')
# default alpha arg is to allow no-alpha add (aten add st overload no alpha)
if alpha and sym_help._scalar(sym_help._maybe_get_scalar(alpha)) != 1:
return _unimplemented("add", "alpha != 1")
return g.op("Add", self, other)
def sub(g, self, other, alpha=None):
# default alpha arg is to allow no-alpha sub (aten sub st overload no alpha)
if alpha and sym_help._scalar(sym_help._maybe_get_scalar(alpha)) != 1:
return _unimplemented("sub", "alpha != 1")
return g.op("Sub", self, other)
def rsub(g, self, other, alpha=None):
return sub(g, other, self, alpha=alpha)
def mul(g, self, other):
return g.op("Mul", self, other)
def div(g, self, other, *args):
if len(args) == 0:
return true_divide(g, self, other)
else:
return _div_rounding_mode(g, self, other, *args)
@parse_args('v', 'v', 's')
def _div_rounding_mode(g, self, other, rounding_mode):
if rounding_mode == 'true':
return true_divide(g, self, other)
elif rounding_mode == 'floor':
return _floor_divide(g, self, other)
elif rounding_mode == 'trunc':
return _trunc_divide(g, self, other)
else:
raise RuntimeError(f'Unsupported rounding mode: "{rounding_mode}". Expected "true", "floor" or "trunc"')
def _trunc_divide(g, self, other):
out = g.op('Div', self, other)
# the correct operation is truncate, which is not supported in ONNX,
# we cannot call floor since it will behave differently for negative numbers
    # (e.g. -0.1 should become -0)
    # - if scalar_type information is not available, assume that
    #   we need to call floor (treat as float)
out = g.op("Cast", out, to_i=sym_help.cast_pytorch_to_onnx['Long'])
# Matching PyTorch's behavior:
# - if self is fp the output's type is self's type
# - if self is not fp and other is fp, the output is of type 'Float'
# - self is not fp and other is not fp, the output's type is self's output type
# - the output type defaults to Float
scalar_type = self.type().scalarType()
if scalar_type is not None:
if not sym_help._is_fp(self) and \
other.type().scalarType() is not None and \
sym_help._is_fp(other):
out = g.op("Cast", out, to_i=sym_help.cast_pytorch_to_onnx['Float'])
else:
out = g.op("Cast", out, to_i=sym_help.cast_pytorch_to_onnx[scalar_type])
else:
out = g.op("Cast", out, to_i=sym_help.cast_pytorch_to_onnx['Float'])
return out
def _floor_divide(g, self, other):
if sym_help._is_fp(self) or sym_help._is_fp(other):
out = true_divide(g, self, other)
return g.op('Floor', out)
else:
        # Integer division does truncation rounding
div = g.op('Div', self, other)
# Division is negative if: self < 0 != other < 0
zero = g.op('Constant', value_t=torch.tensor(0, dtype=torch.int64))
negative = g.op('Xor',
g.op('Less', self, zero),
g.op('Less', other, zero))
# For negative numbers with self % other != 0, subtract 1 to round down instead of up
mod = g.op('Sub', self, g.op('Mul', div, other))
fixup_mask = g.op('And', negative,
g.op('Not', g.op('Equal', mod, zero)))
one = g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64))
fixup = g.op('Sub', div, one)
return g.op('Where', fixup_mask, fixup, div)
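# Worked example for the integer branch of _floor_divide above (values are
# illustrative):
#   self = -7, other = 2
#   div        = Div(-7, 2)             -> -3  (integer Div truncates toward zero)
#   negative   = (-7 < 0) xor (2 < 0)   -> True
#   mod        = -7 - (-3 * 2)          -> -1
#   fixup_mask = negative and mod != 0  -> True
# so the Where selects div - 1 = -4, matching Python/PyTorch floor division
# (-7 // 2 == -4).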
def floor_divide(g, self, other):
# Deprecated behavior, floor_divide actually truncates
return _trunc_divide(g, self, other)
def floordiv(g, self, other):
return floor_divide(g, self, other)
# Division where both inputs are cast to floating types
# If both inputs are floating, performs div as usual
# If only one input is a floating type, the other input is cast to its type
# If neither input is a floating type, both inputs are cast to the default scalar type
def true_divide(g, self, other):
# Case 1: both values are floating
# Performs div as usual
if sym_help._is_fp(self) and sym_help._is_fp(other):
return g.op("Div", self, other)
# Case 2: self is floating, other is not
# Casts other to self's dtype
if sym_help._is_fp(self):
other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
return g.op("Div", self, other)
# Case 3: other is floating, self is not
# Casts self to other's dtype
if sym_help._is_fp(other):
self = g.op("Cast", self, to_i=sym_help.cast_pytorch_to_onnx[other.type().scalarType()])
return g.op("Div", self, other)
# Case 4: neither is floating
# Casts both inputs to the default scalar type
scalar_type = torch.get_default_dtype()
onnx_scalar_type = sym_help.cast_pytorch_to_onnx['Float']
assert scalar_type is torch.float or scalar_type is torch.double
if torch.get_default_dtype() is torch.double:
onnx_scalar_type = sym_help.cast_pytorch_to_onnx['Double']
self = g.op("Cast", self, to_i=onnx_scalar_type)
other = g.op("Cast", other, to_i=onnx_scalar_type)
return g.op("Div", self, other)
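# Worked example for case 4 of true_divide above: dividing two int64 tensors is
# exported as Div(Cast(self, Float), Cast(other, Float)) (Double instead of
# Float when torch.get_default_dtype() is torch.double), so e.g. 7 / 2 yields
# 3.5 rather than a truncated integer result.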
def reciprocal(g, self):
return g.op("Div", torch.ones(1), self)
@parse_args('v', 'i')
def cat(g, tensor_list, dim):
tensors = sym_help._unpack_list(tensor_list)
return g.op("Concat", *tensors, axis_i=dim)
@parse_args('v', 'i')
def stack(g, tensor_list, dim):
unsqueezed = [sym_help._unsqueeze_helper(g, t, [dim]) for t in sym_help._unpack_list(tensor_list)]
return g.op("Concat", *unsqueezed, axis_i=dim)
def _list(g, self):
return self
def mm(g, self, other):
    # Create a dummy C tensor. Only needed for API purposes; the value is
    # irrelevant since beta = 0
C = g.op("Constant", value_t=torch.tensor([1]))
return g.op("Gemm", self, other, C, beta_f=0.0, alpha_f=1.0)
def bmm(g, self, other):
return g.op("MatMul", self, other)
def matmul(g, self, other):
return g.op("MatMul", self, other)
@parse_args('v', 'v', 'v', 't', 't')
def addmm(g, self, mat1, mat2, beta, alpha):
dtype = None
self_dtype = sym_help._try_get_scalar_type(self)
mat1_dtype = sym_help._try_get_scalar_type(mat1)
mat2_dtype = sym_help._try_get_scalar_type(mat2)
if self_dtype is not None:
dtype = self_dtype
elif mat1_dtype is not None:
dtype = mat1_dtype
elif mat2_dtype is not None:
dtype = mat2_dtype
mat1_rank = sym_help._get_tensor_rank(mat1)
mat2_rank = sym_help._get_tensor_rank(mat2)
def isNotNoneAnd(v, u):
return v is not None and v != u
if dtype is not None and (isNotNoneAnd(mat1_rank, 2) or isNotNoneAnd(mat2_rank, 2)):
dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
dtype = sym_help.scalar_type_to_pytorch_type[dtype]
res1 = g.op("MatMul", mat1, mat2)
res2 = self
alpha = sym_help._scalar(alpha)
beta = sym_help._scalar(beta)
if alpha != 1:
alpha = g.op("Constant",
value_t=torch.tensor(alpha, dtype=dtype))
res1 = g.op("Mul", res1, alpha)
if beta != 1:
beta = g.op("Constant",
value_t=torch.tensor(sym_help._scalar(beta), dtype=dtype))
res2 = g.op("Mul", res2, beta)
return g.op("Add", res1, res2)
return g.op("Gemm", mat1, mat2, self, beta_f=sym_help._scalar(beta), alpha_f=sym_help._scalar(alpha))
def neg(g, self):
return g.op("Neg", self)
def sqrt(g, self):
return g.op("Sqrt", self)
def rsqrt(g, self):
return g.op("Div", sym_help._if_scalar_type_as(g, torch.ones(1), self), sqrt(g, self))
def tanh(g, self):
return g.op("Tanh", self)
def sin(g, self):
return g.op("Sin", self)
def cos(g, self):
return g.op("Cos", self)
def tan(g, self):
return g.op("Tan", self)
def asin(g, self):
return g.op("Asin", self)
def acos(g, self):
return g.op("Acos", self)
def atan(g, self):
return g.op("Atan", self)
def sigmoid(g, self):
return g.op("Sigmoid", self)
def sign(g, self):
return g.op("Sign", self)
def _slice(g, input, axes, starts, ends):
assert len(starts) == len(ends)
if len(starts) == 1 and starts[0] == 0 and ends[0] == 9223372036854775807:
return input
return g.op("Slice", input, axes_i=axes, starts_i=starts, ends_i=ends)
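# Note on the early return in _slice above: 9223372036854775807 is INT64_MAX,
# which PyTorch uses as the "slice to the end" sentinel, so a single
# [0:INT64_MAX] slice along one axis is a no-op and the input is returned
# unchanged.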
def _maybe_cast_reduce_op_input(g, self):
dtype = self.type().scalarType()
# This check only covers traced modules where dtype is present
if dtype is not None:
# pytorch reduce-ops cast all other integral types to int64
if not sym_help._is_fp(self) and not (dtype == 'Long'):
self = _cast_Long(g, self, False) # type: ignore
return self
def _reduce_op_symbolic(onnx_op_name, allow_multi_dim_support=True):
def symbolic(g, self, dim=None, keepdim=None):
self = _maybe_cast_reduce_op_input(g, self)
if dim is None:
# all-reduce path
return g.op(onnx_op_name, self, keepdims_i=0)
else:
# dim-reduce path
desc = 'is' if allow_multi_dim_support else 'i'
dim, keepdim = sym_help._get_const(dim, desc, 'dim'), sym_help._get_const(keepdim, 'i', 'keepdim')
dim_list = dim if allow_multi_dim_support else [dim]
return g.op(onnx_op_name, self, axes_i=dim_list, keepdims_i=keepdim)
return symbolic
def overload_by_arg_count(fn):
@wraps(fn)
def wrapper(g, *args):
overloads = fn(g, *args)
last_exception = None
for overload in overloads:
arg_descriptors = overload._arg_descriptors
if len(arg_descriptors) == len(args):
return overload(g, *args)
raise NotImplementedError("Unknown aten::{} signature".format(fn.__name__))
return wrapper
def _reduce_with_dtype(onnx_op, name, allow_multi_dim_support=True):
symbolic = _reduce_op_symbolic(onnx_op, allow_multi_dim_support=allow_multi_dim_support)
@overload_by_arg_count
def reduce(g, *args, **kwargs):
@parse_args('v', 'none')
def reduce_nodim(g, self, dtype):
if dtype.node().kind() != 'prim::Constant':
return _unimplemented(name, "dtype")
return symbolic(g, self)
dim_desc = 'is' if allow_multi_dim_support else 'i'
@parse_args('v', dim_desc, 'i', 'none')
def reduce_dim(g, self, dim, keepdim, dtype):
if dtype.node().kind() != 'prim::Constant':
return _unimplemented(name, "dtype")
return symbolic(g, self, dim, keepdim)
return reduce_nodim, reduce_dim
return reduce
sum = _reduce_with_dtype('ReduceSum', 'sum')
mean = _reduce_with_dtype('ReduceMean', 'mean')
prod = _reduce_with_dtype('ReduceProd', 'prod', allow_multi_dim_support=False) # torch.prod does not support multidimensional 'dim'
@parse_args('v', 'i', 'none')
def cumsum(g, input, dim, dtype):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
if dtype.node().kind() != 'prim::Constant':
            return _unimplemented("cumsum", "dtype")
return g.op("ATen", input, operator_s="cumsum", dim_i=dim)
else:
sym_help._onnx_opset_unsupported('cumsum', 9, 11)
def _sample_dirichlet(g, self, generator):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
if not sym_help._is_none(generator):
return _unimplemented('_sample_dirichlet',
'We are not able to export generator')
return g.op("ATen", self, operator_s="_sample_dirichlet")
else:
return sym_help._onnx_unsupported('_sample_dirichlet')
def _standard_gamma(g, self, generator):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
if not sym_help._is_none(generator):
return _unimplemented('_standard_gamma',
'We are not able to export generator')
return g.op("ATen", self, operator_s="_standard_gamma")
else:
return sym_help._onnx_unsupported('_standard_gamma')
def t(g, self):
return g.op("Transpose", self, perm_i=(1, 0))
def expand(g, self, size, implicit):
size = sym_help._maybe_get_const(size, 'is')
if not sym_help._is_value(size):
size = g.op("Constant", value_t=torch.LongTensor(size))
elif sym_help._is_packed_list(size):
# Expand with -1 dim value means dim is unchanged.
# Since onnx::expand supports two-way broadcasting,
# -1 dim value can be exported to onnx as 1
size = view(g, stack(g, size, 0), g.op("Constant", value_t=torch.tensor([-1])))
dtype = 4 # dim type is int64
ones = ones_like(g, size, dtype)
neg_ones = mul(g, ones, g.op("Constant", value_t=torch.tensor(-1)))
size = where(g, g.op("Equal", size, neg_ones), ones, size)
return g.op("Expand", self, size)
def expand_as(g, self, other):
shape = g.op("Shape", other)
return g.op("Expand", self, shape)
@parse_args('v', 'v', 'i', 'b', 'v')
def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):
if scale_grad_by_freq and sym_help._training_mode:
raise RuntimeError('Unsupported: ONNX export of embedding with scale_grad_by_freq=True '
'for training mode. ONNX does not support scaling the gradients.')
if padding_idx >= 0 and sym_help._training_mode:
warnings.warn('Warning: ONNX export of embedding with padding_idx >= 0 '
'for training mode. '
'ONNX does not support not updating the embedding vector at padding_idx during training.')
return g.op("Gather", weight, indices)
@parse_args('v', 'v', 'v', 'i', 'i', 'i', 'v', 'i')
def embedding_bag(g,
embedding_matrix,
indices,
offsets,
scale_grad_by_freq,
mode,
sparse,
per_sample_weights,
include_last_offset):
if not sym_help._is_none(per_sample_weights):
return sym_help._onnx_unsupported('embedding_bag with per_sample_weights')
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen",
embedding_matrix,
indices,
offsets,
operator_s="embedding_bag",
outputs=4,
scale_grad_by_freq_i=scale_grad_by_freq,
mode_i=mode,
sparse_i=sparse,
include_last_offset_i=include_last_offset)
else:
return sym_help._onnx_unsupported('embedding_bag')
def size(g, self, dim=None):
if dim is None:
return g.op("Shape", self)
if sym_help._maybe_get_const(dim, 'i') < 0:
rank = sym_help._get_tensor_rank(self)
if rank is not None:
dim = sym_help._maybe_get_const(dim, 'i') + rank
dim = g.op("Constant", value_t=torch.tensor(dim))
return sym_help._size_helper(g, self, dim)
@parse_args('v', 'i', 'i')
def transpose(g, self, dim0, dim1):
if dim0 == dim1: # micro-optimization
return self
# NB: Transpose in ONNX is actually a Permute
rank = sym_help._get_tensor_rank(self)
if rank is not None:
axes = list(range(rank))
axes[dim0], axes[dim1] = axes[dim1], axes[dim0]
return g.op("Transpose", self, perm_i=axes)
else:
# if we don't have dim information we cannot
# output a permute so use ATen instead
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", self, operator_s="transpose", dim0_i=dim0, dim1_i=dim1)
else:
raise RuntimeError('Unsupported: ONNX export of transpose for tensor '
'of unknown rank.')
@parse_args('v', 'is')
def permute(g, self, dims):
if dims == list(range(0, len(dims))):
return self
return g.op("Transpose", self, perm_i=dims)
def view(g, self, size):
size = sym_help._maybe_get_const(size, 'is')
if sym_help._is_value(size):
shape = size
else:
shape = g.op("Constant", value_t=torch.LongTensor(size))
return g.op("Reshape", self, shape)
def view_as(g, self, other):
shape = g.op("Shape", other)
return g.op("Reshape", self, shape)
def prim_ConstantSplit(g, self, split_size, dim):
size = sym_help._get_tensor_dim_size(self, dim)
if size is None:
return _unimplemented('prim::ConstantSplit', 'unknown dimension size')
splits = [split_size] * (size // split_size)
leftover = size % split_size
if leftover:
splits.append(leftover)
return g.op("Split", self, split_i=splits, axis_i=dim, outputs=len(splits))
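# Worked example for the split-size computation in prim_ConstantSplit above
# (illustrative values):
#   size = 10, split_size = 4
#   splits   = [4] * (10 // 4) -> [4, 4]
#   leftover = 10 % 4          -> 2, appended to give splits = [4, 4, 2]
# so a Split node with three outputs of sizes 4, 4 and 2 is emitted along dim.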
# TODO: It would be better to export this as a chunk directly, as this is
# less sensitive to changes in input size.
# TODO: Once we have proper scoping, stop reimplementing chunk, delete this
# method, and use the desugared version
def prim_ConstantChunk(g, self, chunks, dim):
dim_size = sym_help._get_tensor_dim_size(self, dim)
if dim_size is None:
return _unimplemented('prim::ConstantChunk', 'unknown dimension size')
split_size = (dim_size + chunks - 1) // chunks
return prim_ConstantSplit(g, self, split_size, dim)
@parse_args('v', 'i', 'i', 'i')
def unsafe_chunk(g, self, chunks, dim, _outputs=None):
if _outputs is None:
return sym_help._onnx_opset_unsupported_detailed('unsafe_chunk', 9, 11, 'Dynamic number of outputs not supported')
size = sym_help._get_tensor_dim_size(self, dim)
if size is None:
return _unimplemented('unsafe_chunk', 'unknown dimension size')
split_size = (size + chunks - 1) // chunks
splits = [split_size] * (size // split_size)
leftover = size % split_size
if leftover:
splits.append(leftover)
return g.op("Split", self, split_i=splits, axis_i=dim, outputs=_outputs)
@parse_args('v', 'v', 'v', 'i')
def split(g, self, split_size_or_sizes, dim, _outputs=None):
if not sym_help._is_split_static(split_size_or_sizes, _outputs):
return sym_help._onnx_opset_unsupported_detailed('split', 9, 11, 'Dynamic number of outputs not supported')
split_val = split_size_or_sizes.node()['value']
if split_val.dim() > 0:
return split_with_sizes(g, self, split_size_or_sizes, dim, _outputs)
split_size = sym_help._get_const(split_size_or_sizes, 'i', 'split_size')
dim = sym_help._get_const(dim, 'i', 'dim')
size = sym_help._get_tensor_dim_size(self, dim)
if size is None:
if _outputs is not None:
size = split_size * _outputs
else:
return sym_help._onnx_opset_unsupported_detailed('split', 9, 11, 'Unknown dimension size not supported')
splits = [split_size] * (size // split_size)
leftover = size % split_size
if leftover:
splits.append(leftover)
return g.op("Split", self, split_i=splits, axis_i=dim, outputs=_outputs)
def unsafe_split(g, self, split_size_or_sizes, dim, _outputs=None):
return split(g, self, split_size_or_sizes, dim, _outputs)
@parse_args('v', 'is', 'i', 'i')
def split_with_sizes(g, self, split_sizes, dim, _outputs=None):
if not sym_help._is_split_static(split_sizes, _outputs):
return sym_help._onnx_opset_unsupported_detailed('split_with_sizes', 9, 11, 'Dynamic number of outputs not supported')
return g.op("Split", self, split_i=split_sizes, axis_i=dim, outputs=_outputs)
def unsafe_split_with_sizes(g, self, split_sizes, dim, _outputs=None):
return split_with_sizes(g, self, split_sizes, dim, _outputs)
@parse_args('v', 'i', 'i')
def unbind(g, self, dim=0, _outputs=None):
if _outputs is None:
return sym_help._onnx_opset_unsupported_detailed('unbind', 9, 11, 'Dynamic number of outputs not supported')
outputs = g.op("Split", self, split_i=[1] * _outputs, axis_i=dim, outputs=_outputs)
outputs = [outputs] if _outputs == 1 else outputs
squeezed_outputs = [sym_help._squeeze_helper(g, out, [dim]) for out in outputs]
return squeezed_outputs
@parse_args('v', 'i', 'v')
def select(g, self, dim, index):
index = sym_help._maybe_get_scalar(index)
if (not sym_help._is_value(index)) and (index < 0):
if index == -1:
end_index = 9223372036854775807
else:
end_index = index + 1
slice_node = sym_help._slice_helper(g, self, axes=[dim], starts=[index], ends=[end_index])
return sym_help._squeeze_helper(g, slice_node, [dim])
else:
return g.op("Gather", self, index, axis_i=dim)
def square(g, self):
return g.op("Mul", self, self)
def squeeze(g, self, dim=None):
if dim is None:
return g.op("Squeeze", self)
squeeze_dim = sym_help._get_const(dim, 'i', 'dim')
# Handle negative dims
if squeeze_dim < 0:
rank = sym_help._get_tensor_rank(self)
if rank is not None:
            warnings.warn("ONNX export squeeze with negative axis " + str(squeeze_dim) +
                          " might cause the onnx model to be incorrect. " +
                          "Negative axis is not supported in ONNX. " +
                          "Axis is converted to " + str(squeeze_dim + rank) +
                          " based on input shape at export time. " +
                          "Passing a tensor of different rank during execution will be incorrect.")
squeeze_dim += rank
else:
return _unimplemented('squeeze', 'negative axis with unknown input rank')
dim_size = sym_help._get_tensor_dim_size(self, squeeze_dim)
if dim_size is None:
warnings.warn("This model contains a squeeze operation on dimension " + str(squeeze_dim) + " on an input " +
"with unknown shape. Note that if the size of dimension " + str(squeeze_dim) + " of the input " +
"is not 1, the ONNX model will return an error. Opset version 11 supports squeezing on " +
"non-singleton dimensions, it is recommended to export this model using opset " +
"version 11 or higher.")
return sym_help._squeeze_helper(g, self, axes_i=[squeeze_dim])
if dim_size > 1:
warnings.warn("This model contains a squeeze operation on dimension " + str(squeeze_dim) + ". The size of " +
"this dimension in the given input is " + str(dim_size) + ". The model will " +
"be exported without the squeeze node. If the model is intended to be used with dynamic " +
"input shapes, please use opset version 11 to " +
"export the model.")
return self
warnings.warn("This model contains a squeeze operation on dimension " + str(squeeze_dim) + ". If the model is " +
"intended to be used with dynamic input shapes, please use opset version 11 to export the model.")
return sym_help._squeeze_helper(g, self, axes_i=[squeeze_dim])
def prelu(g, self, weight):
self_rank = sym_help._get_tensor_rank(self)
if self_rank is not None and self_rank > 2:
weight = sym_help._unsqueeze_helper(g, weight, list(range(1, self_rank - 1)))
return g.op("PRelu", self, weight)
def silu(g, input):
return g.op('Mul', input, g.op('Sigmoid', input))
def relu(g, input):
return g.op("Relu", input)
def ceil(g, input):
return g.op("Ceil", input)
def floor(g, input):
return g.op("Floor", input)
def _len(g, self):
sz_0 = size(g, self, g.op("Constant", value_t=torch.LongTensor([0])))
return sym_help._squeeze_helper(g, sz_0, [0])
@parse_args('v', 't', 't')
def threshold(g, self, threshold, value):
# See Note [Export inplace]
if sym_help._scalar(threshold) != 0:
return _unimplemented("threshold", "non-zero threshold")
if sym_help._scalar(value) != 0:
return _unimplemented("threshold", "non-zero value")
return g.op("Relu", self)
def leaky_relu(g, input, negative_slope, inplace=False):
negative_slope = sym_help._get_const(negative_slope, 't', 'negative_slope')
# See Note [Export inplace]
# TODO: Talk to ONNX about unconditional cast of scalar to float
return g.op("LeakyRelu", input, alpha_f=sym_help._scalar(negative_slope))
@parse_args('v', 'i')
def glu(g, input, dim):
dim_size = sym_help._get_tensor_dim_size(input, dim)
if dim_size is not None:
assert dim_size % 2 == 0
first, second = g.op('Split', input, axis_i=dim, outputs=2)
return g.op('Mul', first, g.op('Sigmoid', second))
@parse_args('v', 'i', 'none')
def softmax(g, input, dim, dtype=None):
# Softmax does normalization at vector level.
# PyTorch and ONNX use different strategies to split the input tensor into vectors.
# Thus dim and axis have different meanings.
# PyTorch slices the input tensor into vectors along the `dim`-th dimension.
# ONNX reshapes the input into a 2-D tensor, and `axis` indicates where the input is coerced.
# If input is a 2 x 3 tensor:
# input = [[1.0, 1.0, 1.0],
# [1.0, 1,0, 1,0]]
# with dim = 0, the result is:
# result = [[0.5, 0.5, 0.5],
# [0.5, 0.5, 0.5]]
# with axis = 0, the result is:
# result = [[0.167, 0.167, 0.167],
# [0.167, 0.167, 0.167]]
# So only when dim and axis both equal to ndim - 1 (the last dimension),
# their semantics are equivalent.
# So use softmax when dim and axis both equal to ndim - 1,
# otherwise transpose the input to put the vectors to be normalized to the last dimension.
# When input rank is not known at export time we compute softmax using a subgraph
# with other operators
input_dim = sym_help._get_tensor_rank(input)
if input_dim is not None:
# TODO: remove this as onnx opset 11 spec allows negative axes
if dim < 0:
dim = input_dim + dim
is_transpose_required = (input_dim != dim + 1)
if is_transpose_required:
axes = list(range(input_dim))
axes[dim], axes[-1] = axes[-1], axes[dim]
input = g.op("Transpose", input, perm_i=axes)
dim = input_dim - 1
softmax = g.op('Softmax', input, axis_i=dim)
if dtype and dtype.node().kind() != 'prim::Constant':
parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
softmax = g.op("Cast", softmax, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
if is_transpose_required:
softmax = g.op("Transpose", softmax, perm_i=axes)
return softmax
# Apply max normalization.
input = g.op('Sub', input, g.op('ReduceMax', input, axes_i=[dim], keepdims_i=1))
exp = g.op('Exp', input)
sum = sym_help._reducesum_helper(g, exp, axes_i=[dim])
softmax = g.op('Div', exp, sum)
if dtype and dtype.node().kind() != 'prim::Constant':
parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
softmax = g.op("Cast", softmax, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
return softmax
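# The rank-unknown fallback in softmax above emits the standard numerically
# stable form,
#   softmax(x) = exp(x - max(x)) / sum(exp(x - max(x)))
# along dim; subtracting the per-slice ReduceMax leaves the result unchanged
# while preventing Exp from overflowing for large inputs.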
@parse_args('v', 't', 'v')
def softplus(g, self, beta, threshold):
if beta != 1:
return _unimplemented("beta", "has to be 1")
return g.op('Softplus', self)
def get_pool_ceil_padding(input, kernel_size, stride, padding):
sizes = sym_help._get_tensor_sizes(input)
dim = sizes[-len(padding):] if sizes is not None else None
if dim is None or any([i is None for i in dim]):
        return _unimplemented("get_pool_ceil_padding", "input size not accessible")
ceiled_output_dim = [int(math.ceil((dim[i] + 2 * padding[i] - kernel_size[i]) / float(stride[i]))) + 1
for i in range(0, len(padding))]
# ensure last pooling starts inside
ceiled_output_dim = [ceiled_output_dim[i] - 1
if (((ceiled_output_dim[i] - 1) * stride[i]) >= (dim[i] + padding[i]))
else ceiled_output_dim[i]
for i in range(0, len(ceiled_output_dim))]
padding_ceil = [0
if (stride[i] == 1)
else
(kernel_size[i] - (dim[i] + 2 * padding[i] - ((ceiled_output_dim[i] - 1) * stride[i] + 1)))
for i in range(0, len(padding))]
# ensure padding is not > kernel_size
padding_ceil = [(int(padding_ceil[i]) if padding_ceil[i] < kernel_size[i] - 1 else int(kernel_size[i] - 1))
if ((padding_ceil[i] + 2 * padding[i]) >= (kernel_size[i]))
else
int(padding_ceil[i])
for i in range(0, len(padding_ceil))]
return padding_ceil
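# Worked example for get_pool_ceil_padding above (one spatial dim, illustrative
# values): dim = 6, kernel_size = 3, stride = 2, padding = 0
#   ceiled_output_dim = ceil((6 + 0 - 3) / 2) + 1 = 3
#   padding_ceil      = 3 - (6 + 0 - ((3 - 1) * 2 + 1)) = 2
# The pooling symbolics below then pad the end of that axis by
# padding_ceil + padding = 2, so the exported pool produces the same output
# length (3) as PyTorch's ceil_mode=True pooling.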
def _max_pool(name, tuple_fn, ndims, return_indices):
@parse_args('v', 'is', 'is', 'is', 'is', 'i')
def symbolic_fn(g, input, kernel_size, stride, padding, dilation, ceil_mode):
if set(tuple_fn(dilation)) != {1}:
return _unimplemented(name, "dilation")
if not stride:
stride = kernel_size
padding = tuple(tuple_fn(padding))
if ceil_mode:
padding_ceil = get_pool_ceil_padding(input, kernel_size, stride, padding)
padding = padding + tuple(numpy.add(padding_ceil, padding))
else:
padding = padding * 2
kwargs = {
'kernel_shape_i': tuple_fn(kernel_size),
'pads_i': padding,
'strides_i': tuple_fn(stride),
}
# easy but hacky way to get flattened indices values
# to be used to convert the indices values to non-flattened.
# In ONNX the indices are computed as a flatten 1-D tensor,
# so the values in indices are in [0, N x C x D1 x ... x Dn).
# To convert the indices to the same format used by Pytorch,
# we first execute a maxpool with a kernel and stride of 1 on the same input.
        # This will result in a tensor of indices in which each index will have its own value.
        # Using this tensor as a reference, we extract the first index of each axis and subtract
        # it from each index of this axis in the indices to convert.
        # This step will result in a tensor where each dimension has values of indices within
# the dimension it is in.
# For more information :
# https://github.com/pytorch/pytorch/pull/16455#issuecomment-460776407
if return_indices:
r, indices = g.op("MaxPool", input, outputs=2, **kwargs)
_, flattened_indices = g.op("MaxPool", input, outputs=2,
kernel_shape_i=[1 for _ in range(ndims)],
strides_i=[1 for _ in range(ndims)])
# convert indices to have non-flattened indices values
s = sym_help._slice_helper(g, flattened_indices, axes=[2 + i for i in range(ndims)],
starts=tuple_fn(0), ends=tuple_fn(1))
indices = sub(g, indices, s)
return r, indices
else:
r = g.op("MaxPool", input, outputs=1, **kwargs)
return r
return symbolic_fn
max_pool1d = _max_pool("max_pool1d", _single, 1, return_indices=False)
max_pool2d = _max_pool("max_pool2d", _pair, 2, return_indices=False)
max_pool3d = _max_pool("max_pool3d", _triple, 3, return_indices=False)
max_pool1d_with_indices = _max_pool("max_pool1d_with_indices", _single, 1, return_indices=True)
max_pool2d_with_indices = _max_pool("max_pool2d_with_indices", _pair, 2, return_indices=True)
max_pool3d_with_indices = _max_pool("max_pool3d_with_indices", _triple, 3, return_indices=True)
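# Illustrative example for the index conversion in _max_pool above (the shape
# is hypothetical): for an NCHW input of shape (1, 2, 3, 3), ONNX MaxPool
# indices are flattened over the whole tensor, so channel 1 gets indices in
# [9, 18). The auxiliary kernel-1/stride-1 MaxPool yields every position's own
# flattened index; slicing out its first spatial element per (N, C) slice gives
# 0 for channel 0 and 9 for channel 1, and subtracting that offset brings the
# indices back into the per-slice [0, 9) range PyTorch reports.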
def _avg_pool(name, tuple_fn):
@parse_args('v', 'is', 'is', 'is', 'i', 'i', 'none')
def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None):
if not stride:
stride = kernel_size
padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name)
if ceil_mode:
padding_ceil = get_pool_ceil_padding(input, kernel_size, stride, padding)
if count_include_pad:
input = g.op("Pad", input,
pads_i=((0,) * 2 + padding) * 2,
mode_s='constant',
value_f=0.)
padding = (0,) * len(padding)
if ceil_mode:
padding = padding + tuple(numpy.add(padding_ceil, padding))
else:
padding = padding * 2
output = g.op("AveragePool", input,
kernel_shape_i=tuple_fn(kernel_size),
strides_i=tuple_fn(stride),
pads_i=padding)
return output
return symbolic_fn
avg_pool1d = _avg_pool('avg_pool1d', _single)
avg_pool2d = _avg_pool('avg_pool2d', _pair)
avg_pool3d = _avg_pool('avg_pool3d', _triple)
def _adaptive_pool(name, type, tuple_fn, fn=None):
def symbolic_fn(g, input, output_size):
# _adaptive_pool is supported for cases where output_size is 1 for all dimensions,
# by executing a GlobalPool.
# It is also supported for cases where the output size is a factor of the input size.
# For these cases the stride and kernel size are uniform along all the indices of
# the same dimension, which makes it possible to export it to ONNX.
# for MaxPool, GlobalMaxPool does not return indices,
# so we try using max_poolxd_with_indices, and if it is not possible
# (input is not a complete tensor or output size not factor of input size)
        # then we fall back to GlobalMaxPool and return None for the indices
try:
output_size = _parse_arg(output_size, 'is')
except Exception:
return sym_help._onnx_unsupported('adaptive pooling, since output_size is not constant.')
if output_size == [1] * len(output_size) and type == "AveragePool":
return g.op("GlobalAveragePool", input)
sizes = sym_help._get_tensor_sizes(input)
try:
dim = sizes[2:]
except Exception:
dim = None
if dim is None or any([i is None for i in dim]):
if output_size == [1] * len(output_size):
return g.op("GlobalMaxPool", input), None
return _unimplemented(name, 'input size not accessible')
        # verify that input size % output size == 0 for every dim
mod = [dim[i] % output_size[i] for i in range(0, len(dim))]
if mod != [0] * len(mod):
if output_size == [1] * len(output_size):
return g.op("GlobalMaxPool", input), None
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return _unimplemented(name, 'output size that are not factor of input size')
else:
return sym_help._onnx_unsupported(name + ', since output size is not factor of input size')
k = [int(dim[i] / output_size[i]) for i in range(0, len(dim))]
# call max_poolxd_with_indices to get indices in the output
if type == "MaxPool":
return fn(g, input, k, k, (0,) * len(dim), (1,) * len(dim), False)
output = g.op(type, input,
kernel_shape_i=tuple_fn(k),
strides_i=tuple_fn(k))
return output
return symbolic_fn
adaptive_avg_pool1d = _adaptive_pool('adaptive_avg_pool1d', "AveragePool", _single)
adaptive_avg_pool2d = _adaptive_pool('adaptive_avg_pool2d', "AveragePool", _pair)
adaptive_avg_pool3d = _adaptive_pool('adaptive_avg_pool3d', "AveragePool", _triple)
adaptive_max_pool1d = _adaptive_pool('adaptive_max_pool1d', "MaxPool", _single, max_pool1d_with_indices)
adaptive_max_pool2d = _adaptive_pool('adaptive_max_pool2d', "MaxPool", _pair, max_pool2d_with_indices)
adaptive_max_pool3d = _adaptive_pool('adaptive_max_pool3d', "MaxPool", _triple, max_pool3d_with_indices)
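# Worked example for the _adaptive_pool conversion above (illustrative sizes):
# adaptive_avg_pool2d on a 12 x 12 spatial input with output_size [4, 4] passes
# the factor check (12 % 4 == 0) and is exported as AveragePool with
# kernel_shape = strides = [3, 3]; output_size [1, 1] maps to
# GlobalAveragePool, while a non-factor size such as [5, 5] is rejected.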
# Generate paddings in ONNX order based on pad in pytorch.
# Args:
# dim: the dimension of the tensor.
# pad: the paddings in pytorch.
# The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ...
def _prepare_onnx_paddings(dim, pad):
assert isinstance(dim, int)
# The desired order of paddings is
# dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.
# n is the dimension of input.
# assume zero-dimensions in the beginning
paddings = list(pad[:]) + [0] * (dim * 2 - len(pad))
# reverse order and collate first beginnings and then ends
paddings = paddings[-2::-2] + paddings[-1::-2]
return paddings
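# Worked example for the reordering in _prepare_onnx_paddings above
# (illustrative, dim = 3):
#   pad      = [1, 2, 3, 4]        # PyTorch order: last dim (begin, end), 2nd-to-last (begin, end)
#   paddings = [1, 2, 3, 4, 0, 0]  # zero-filled up to dim * 2 entries
#   begins   = paddings[-2::-2]    -> [0, 3, 1]
#   ends     = paddings[-1::-2]    -> [0, 4, 2]
#   result   = [0, 3, 1, 0, 4, 2]  # ONNX order: x0_begin, x1_begin, x2_begin, x0_end, x1_end, x2_end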
def _convert_padding_node(padding):
padding = sym_help._maybe_get_const(padding, 'is')
if sym_help._is_value(padding) and sym_help._is_packed_list(padding):
input_list = sym_help._unpack_list(padding)
try:
padding = [sym_help._get_const(v, 'i', 'padding') for v in input_list]
except Exception:
return sym_help._onnx_opset_unsupported_detailed('Pad', 9, 11, 'The sizes of the padding must be constant')
return padding
def constant_pad_nd(g, input, padding, value):
mode = "constant"
try:
value = sym_help._get_const(value, 'f', 'value')
except Exception:
return sym_help._onnx_opset_unsupported_detailed('Pad', 9, 11, 'The value for the padding must be constant')
padding = _convert_padding_node(padding)
paddings = _prepare_onnx_paddings(sym_help._get_tensor_rank(input), padding)
return g.op("Pad", input, pads_i=paddings, mode_s=mode, value_f=value)
def reflection_pad(g, input, padding):
mode = "reflect"
padding = _convert_padding_node(padding)
paddings = _prepare_onnx_paddings(sym_help._get_tensor_rank(input), padding)
return g.op("Pad", input, pads_i=paddings, mode_s=mode)
def replication_pad(g, input, padding):
mode = "edge"
padding = _convert_padding_node(padding)
paddings = _prepare_onnx_paddings(sym_help._get_tensor_rank(input), padding)
return g.op("Pad", input, pads_i=paddings, mode_s=mode)
reflection_pad1d = reflection_pad
reflection_pad2d = reflection_pad
reflection_pad3d = reflection_pad
replication_pad1d = replication_pad
replication_pad2d = replication_pad
replication_pad3d = replication_pad
def _interpolate(name, dim, interpolate_mode):
def symbolic_fn(g, input, output_size, *args):
scales, align_corners = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
sym_help._interpolate_warning(interpolate_mode)
align_corners = sym_help._maybe_get_scalar(align_corners)
if align_corners:
return _unimplemented(name, "align_corners == True")
if scales is None:
scales = sym_help._interpolate_size_to_scales(g, input, output_size, dim)
return g.op("Upsample", input, scales, mode_s=interpolate_mode)
return symbolic_fn
upsample_nearest1d = _interpolate('upsample_nearest1d', 3, "nearest")
upsample_nearest2d = _interpolate('upsample_nearest2d', 4, "nearest")
upsample_nearest3d = _interpolate('upsample_nearest3d', 5, "nearest")
upsample_linear1d = _interpolate('upsample_linear1d', 3, "linear")
upsample_bilinear2d = _interpolate('upsample_bilinear2d', 4, "linear")
upsample_trilinear3d = _interpolate('upsample_trilinear3d', 5, "linear")
def __interpolate(g, input, size, scale_factor, mode , align_corners, recompute_scale_factor):
scales, mode = sym_help._interpolate_get_scales_and_mode(g, input, size, scale_factor,
mode , align_corners)
return g.op("Upsample", input, scales, mode_s=mode)
@parse_args('v')
def bitwise_not(g, inp):
if inp.type().scalarType() != 'Bool':
return _unimplemented("bitwise_not", "non-bool tensor")
return g.op("Not", inp)
def wrap_logical_op_with_cast_to(to_type):
def decorator(fn):
def wrap_with_cast(g, input, other):
return g.op("Cast", fn(g, input, other), to_i=sym_help.cast_pytorch_to_onnx[to_type])
return wrap_with_cast
return decorator
def wrap_logical_op_with_cast_to_and_from(to_type):
def decorator(fn):
def wrap_with_cast(g, input, other):
to_cast_func = globals()['_cast_{}'.format(to_type)]
from_cast_func = wrap_logical_op_with_cast_to(input.type().scalarType())(fn)
return from_cast_func(g, to_cast_func(g, input, False), to_cast_func(g, other, False))
return wrap_with_cast
return decorator
def wrap_logical_op_with_negation(func):
def wrap_with_not(g, input, other):
return g.op("Not", func(g, input, other))
return wrap_with_not
def eq(g, self, other):
return g.op("Equal", self, other)
@wrap_logical_op_with_negation
def ne(g, self, other):
return g.op("Equal", self, other)
def gt(g, input, other):
return gt_impl(g, input, other)
def gt_impl(g, input, other):
if input.type().scalarType() is not None and input.type().scalarType() == 'Bool' and \
other.type().scalarType() is not None and other.type().scalarType() == 'Bool':
input = g.op("Cast", input, to_i=sym_help.cast_pytorch_to_onnx['Int'])
other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx['Int'])
return g.op("Greater", input, other)
def lt(g, input, other):
return lt_impl(g, input, other)
def lt_impl(g, input, other):
if input.type().scalarType() is not None and input.type().scalarType() == 'Bool' and \
other.type().scalarType() is not None and other.type().scalarType() == 'Bool':
input = g.op("Cast", input, to_i=sym_help.cast_pytorch_to_onnx['Int'])
other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx['Int'])
return g.op("Less", input, other)
@wrap_logical_op_with_negation
def ge(g, input, other):
return lt_impl(g, input, other)
@wrap_logical_op_with_negation
def le(g, input, other):
return gt_impl(g, input, other)
@wrap_logical_op_with_cast_to_and_from('Bool')
def __and_(g, input, other):
return g.op('And', input, other)
@wrap_logical_op_with_cast_to_and_from('Bool')
def __or_(g, input, other):
return g.op('Or', input, other)
@wrap_logical_op_with_cast_to_and_from('Bool')
def logical_and(g, input, other):
return g.op('And', input, other)
@wrap_logical_op_with_cast_to_and_from('Bool')
def logical_or(g, input, other):
return g.op('Or', input, other)
@wrap_logical_op_with_cast_to_and_from('Bool')
def logical_xor(g, input, other):
return g.op('Xor', input, other)
def __rshift_(g, self, other):
# make sure to cast other to self's type
# (when self is long, make sure that other is not float)
if other.type().scalarType() != self.type().scalarType():
other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
two = g.op('Constant', value_t=torch.tensor(2, dtype=torch.float32))
# exponent (same type as self) has to be float or double in onnx::Pow
if not sym_help._is_fp(self):
other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx['Float'])
two_pow = g.op('Pow', two, other)
two_pow = g.op('Cast', two_pow, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
rshift = g.op('Div', self, two_pow)
return rshift
def __lshift_(g, self, other):
# make sure to cast other to self's type
# (when self is long, make sure that other is not float)
if other.type().scalarType() != self.type().scalarType():
other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
two = g.op('Constant', value_t=torch.tensor(2, dtype=torch.float32))
# exponent (same type as self) has to be float or double in onnx::Pow
if not sym_help._is_fp(self):
other = g.op("Cast", other, to_i=sym_help.cast_pytorch_to_onnx['Float'])
two_pow = g.op('Pow', two, other)
two_pow = g.op('Cast', two_pow, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
lshift = g.op('Mul', self, two_pow)
return lshift
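# Both shift helpers above (__rshift_ and __lshift_) lower the shift to a power
# of two: x >> n becomes Div(x, Pow(2, n)) and x << n becomes Mul(x, Pow(2, n)),
# with casts so that Pow receives a floating-point exponent and the result is
# cast back to self's scalar type; e.g. right-shifting an int64 tensor by 3
# divides it by 8.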
@parse_args('v', 'v', 'v', 'i')
def where(g, condition, self=None, other=None, _outputs=None):
# Assumes that torch.where's first argument takes only Bool and Byte tensors.
if condition.type().scalarType() != 'Bool':
condition = g.op("Cast", condition, to_i=sym_help.cast_pytorch_to_onnx['Bool'])
if self is None:
condition = torch.onnx.symbolic_opset9.nonzero(g, condition)
return sym_help._unbind_helper(g, condition, g.op("Constant", value_t=torch.tensor(1)), _outputs)
return g.op("Where", condition, self, other)
@parse_args('v', 'i', 'none')
def log_softmax(g, input, dim, dtype=None):
# PyTorch dim and ONNX axis have different meanings.
# See Softmax comment for details.
# TODO: remove this as onnx opset 11 spec allows negative axes
input_dim = sym_help._get_tensor_rank(input)
if input_dim is None:
return _unimplemented("dim",
"ONNX and PyTorch use different strategies to split the input. "
"Input rank must be known at export time.")
if dim < 0:
dim = input_dim + dim
is_transpose_required = (input_dim != dim + 1)
# ONNX only supports log_softmax with dim = -1. Transpose must be added before and after log_softmax to support other cases.
if is_transpose_required:
axes = list(range(input_dim))
axes[dim], axes[-1] = axes[-1], axes[dim]
input = g.op("Transpose", input, perm_i=axes)
dim = input_dim - 1
return_op = g.op("LogSoftmax", input, axis_i=dim)
if dtype and dtype.node().kind() != 'prim::Constant':
parsed_dtype = sym_help._get_const(dtype, 'i', 'dtype')
return_op = g.op("Cast", return_op, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
if is_transpose_required:
return_op = g.op("Transpose", return_op, perm_i=axes)
return return_op
@parse_args('v', 'v', 'v', 'is', 'is', 'is', 'i', 'is', 'i', 'i', 'i', 'i', 'i')
def _convolution(g, input, weight, bias, stride, padding, dilation,
transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32):
weight_size = sym_help._get_tensor_sizes(weight)
try:
kernel_shape = weight_size[2:]
except Exception:
kernel_shape = None
if kernel_shape is None or any([i is None for i in kernel_shape]):
raise RuntimeError('Unsupported: ONNX export of convolution for kernel '
'of unknown shape.')
args = [input, weight]
# ONNX only supports 1D bias
if not sym_help._is_none(bias) and sym_help._get_tensor_rank(bias) == 1:
args.append(bias)
kwargs = {"kernel_shape_i": weight_size[2:],
"strides_i": stride,
# NB: ONNX supports asymmetric padding, whereas PyTorch supports only
# symmetric padding
"pads_i": padding + padding,
"dilations_i": dilation,
"group_i": groups}
if any(o != 0 for o in output_padding):
        # ONNX supports both output_shape and output_padding; they are equivalently expressive.
# output_padding is more straightforward, so we use it here.
# output_shape = stride * (input_shape - 1) + output_padding + kernel_shape - padding * 2
assert transposed
assert len(stride) == len(output_padding)
kwargs["output_padding_i"] = output_padding
n = g.op("ConvTranspose" if transposed else "Conv", *args, **kwargs)
if not sym_help._is_none(bias) and sym_help._get_tensor_rank(bias) != 1:
return g.op("Add", n, bias)
else:
return n
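# Note on the padding handling in _convolution above: PyTorch convolutions take
# one symmetric padding value per spatial dim, while ONNX Conv expects explicit
# begin/end pads, so e.g. padding = [1, 2] is exported as pads_i = [1, 2, 1, 2].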
@parse_args('v', 'v', 'v', 'is', 'is', 'is', 'i')
def conv1d(g, input, weight, bias, stride, padding, dilation, groups):
return _convolution(g, input, weight, bias, stride, padding, dilation, False, (), groups, None, None, None, None)
@parse_args('v', 'v', 'v', 'is', 'is', 'is', 'i')
def conv2d(g, input, weight, bias, stride, padding, dilation, groups):
return _convolution(g, input, weight, bias, stride, padding, dilation, False, (), groups, None, None, None, None)
@parse_args('v', 'v', 'v', 'is', 'is', 'is', 'i')
def conv3d(g, input, weight, bias, stride, padding, dilation, groups):
return _convolution(g, input, weight, bias, stride, padding, dilation, False, (), groups, None, None, None, None)
@parse_args('v', 'v', 'v', 'is', 'is', 'is', 'i', 'is')
def conv_transpose1d(g, input, weight, bias, stride, padding, output_padding, groups, dilation):
return _convolution(g, input, weight, bias, stride, padding, dilation, True, output_padding, groups, None, None, None, None)
@parse_args('v', 'v', 'v', 'is', 'is', 'is', 'i', 'is')
def conv_transpose2d(g, input, weight, bias, stride, padding, output_padding, groups, dilation):
return _convolution(g, input, weight, bias, stride, padding, dilation, True, output_padding, groups, None, None, None, None)
@parse_args('v', 'v', 'v', 'is', 'is', 'is', 'i', 'is')
def conv_transpose3d(g, input, weight, bias, stride, padding, output_padding, groups, dilation):
return _convolution(g, input, weight, bias, stride, padding, dilation, True, output_padding, groups, None, None, None, None)
@parse_args('v', 'v', 'v', 'v', 'v', 'i', 'f', 'f', 'i')
def batch_norm(g, input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled):
sym_help.assert_training_mode(training, "batch_norm")
batch_size = sym_help._get_tensor_dim_size(input, 0)
channel_size = sym_help._get_tensor_dim_size(input, 1)
if weight is None or sym_help._is_none(weight):
if channel_size is None:
raise RuntimeError('Unsupported: ONNX export of batch_norm for unknown '
'channel size.')
weight_value = torch.tensor([1.] * channel_size).type(
'torch.' + input.type().scalarType() + 'Tensor')
weight = g.op("Constant", value_t=weight_value)
if bias is None or sym_help._is_none(bias):
if channel_size is None:
raise RuntimeError('Unsupported: ONNX export of batch_norm for unknown '
'channel size.')
bias_value = torch.tensor([0.] * channel_size).type(
'torch.' + input.type().scalarType() + 'Tensor')
bias = g.op("Constant", value_t=bias_value)
# If track_running_stats is set to False batch statistics are instead used during evaluation time
if running_mean is None or sym_help._is_none(running_mean) or running_var is None or sym_help._is_none(running_var):
assert batch_size is not None and channel_size is not None
reshape_in = g.op("Reshape", input,
g.op("Constant", value_t=torch.tensor([batch_size, channel_size, -1], dtype=torch.int64)))
trans_in = g.op('Transpose', reshape_in, perm_i=[0, 2, 1])
running_var, running_mean = _var_mean(g, trans_in,
g.op("Constant", value_t=torch.tensor([0, 1], dtype=torch.int64)),
False, False)
out = g.op("BatchNormalization", input, weight, bias, running_mean, running_var,
epsilon_f=eps,
momentum_f=1 - momentum,
outputs=1 if not sym_help._training_mode else 5)
if not sym_help._training_mode:
return out
else:
res, new_running_mean, new_running_var, saved_mean, saved_var = out
new_running_mean.setType(running_mean.type())
new_running_var.setType(running_var.type())
saved_mean.setDebugName("batch_norm_dead_output-" + saved_mean.debugName())
saved_var.setDebugName("batch_norm_dead_output-" + saved_var.debugName())
return res
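# Note on momentum in batch_norm above: PyTorch weights the new batch statistic
# by `momentum` when updating running stats, while the ONNX BatchNormalization
# attribute weights the existing running value, so the two conventions are
# related by momentum_onnx = 1 - momentum_pytorch (hence momentum_f=1 - momentum).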
@parse_args('v', 'is', 'v', 'v', 'f', 'i')
def layer_norm(g, input, normalized_shape, weight, bias, eps, cudnn_enable):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", input, weight, bias, normalized_shape_i=normalized_shape,
eps_f=eps, cudnn_enable_i=cudnn_enable, operator_s="layer_norm")
axes = [-i for i in range(len(normalized_shape), 0, -1)]
two_cst = sym_help._generate_wrapped_number(g, 2.)
eps_cst = sym_help._generate_wrapped_number(g, eps)
mean = g.op("ReduceMean", input, axes_i=axes)
numerator = sub(g, input, mean)
# variance = e((x - e(x))^2), and (x - e(x)) is the numerator in the layer_norm formula
variance = g.op("ReduceMean", pow(g, numerator, two_cst), axes_i=axes)
denominator = sqrt(g, add(g, variance, eps_cst))
layer_norm = g.op("Div", numerator, denominator)
if not (weight is None or sym_help._is_none(weight)):
layer_norm = mul(g, layer_norm, weight)
if not (bias is None or sym_help._is_none(bias)):
layer_norm = add(g, layer_norm, bias)
return layer_norm
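# The non-ATen path of layer_norm above implements the standard formula over
# the trailing normalized_shape axes:
#   y = (x - E[x]) / sqrt(Var[x] + eps) * weight + bias
# with Var[x] computed as E[(x - E[x])^2] (the biased variance), matching the
# ReduceMean / Sub / Pow / Sqrt / Div sequence emitted here.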
@parse_args('v', 'v', 'v', 'v', 'v', 'i', 'f', 'f', 'i')
def instance_norm(g, input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled):
channel_size = sym_help._get_tensor_dim_size(input, 1)
if weight is None or sym_help._is_none(weight):
if channel_size is None:
raise RuntimeError('Unsupported: ONNX export of instance_norm for unknown '
'channel size.')
weight_value = torch.tensor([1.] * channel_size).type(
'torch.' + input.type().scalarType() + 'Tensor')
weight = g.op("Constant", value_t=weight_value)
if bias is None or sym_help._is_none(bias):
if channel_size is None:
raise RuntimeError('Unsupported: ONNX export of instance_norm for unknown '
'channel size.')
bias_value = torch.tensor([0.] * channel_size).type(
'torch.' + input.type().scalarType() + 'Tensor')
bias = g.op("Constant", value_t=bias_value)
return g.op("InstanceNormalization", input, weight, bias, epsilon_f=eps)
@parse_args('v', 'i', 'i', 'i')
def unfold(g, input, dimension, size, step):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", input, operator_s="unfold", dimension_i=dimension, size_i=size, step_i=step)
sizes = sym_help._get_tensor_sizes(input)
try:
sizedim = sizes[dimension]
except Exception:
sizedim = None
if sizedim is not None:
low_indices = range(0, sizedim, step)
hi_indices = range(size, sizedim + 1, step)
stack = [sym_help._slice_helper(g, input, axes=[dimension], starts=[low], ends=[hi])
for low, hi in zip(low_indices, hi_indices)]
ndim = len(sizes)
perm = list(range(0, ndim))
perm.append(perm.pop(dimension))
unsqueeze = [sym_help._unsqueeze_helper(g, g.op("Transpose", t, perm_i=perm), [dimension]) for t in stack]
return g.op("Concat", *unsqueeze, axis_i=dimension)
else:
return _unimplemented("Unfold", "input size not accessible")
@parse_args('v', 't', 't', 't')
def elu(g, input, alpha, scale, input_scale):
if scale and scale != 1.:
return _unimplemented("scale", "does not support scale in Elu")
if input_scale and input_scale != 1.:
return _unimplemented("input_scale", "does not support input_scale in Elu")
# See Note [Export inplace]
return g.op("Elu", input, alpha_f=sym_help._scalar(alpha))
def selu(g, input):
return g.op("Selu", input)
@parse_args('v', 'i', 'v')
def index_select(g, self, dim, index):
# In case of a scalar index, index_select returns a tensor with the same rank as the input.
# To match this behavior in ONNX, we make index a 1D tensor so that the following gather
# also produces a tensor with the same rank as the input.
return sym_help._select_helper(g, self, dim, index)
def index_put(g, self, indices_list_value, values, accumulate):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
indices_list = sym_help._unpack_list(indices_list_value)
args = [self] + indices_list + [values, accumulate]
return g.op("ATen", *args, operator_s='index_put')
else:
sym_help._onnx_opset_unsupported('index_put', 9, 11)
def index_fill(g, self, dim, index, value):
dim_value = sym_help._parse_arg(dim, 'i')
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", self, index, value, dim_i=dim_value, operator_s="index_fill")
expanded_index_shape, expanded_index = sym_help._index_fill_reshape_helper(g, self, dim, index)
value = sym_help._maybe_get_scalar(value)
value = sym_help._if_scalar_type_as(g, value, self)
expanded_value = expand(g, value, expanded_index_shape, None)
return scatter(g, self, dim, expanded_index, expanded_value)
def index_copy(g, self, dim, index, source):
dim_value = sym_help._parse_arg(dim, 'i')
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", self, index, source, dim_i=dim_value, operator_s="index_copy")
expanded_index_shape, expanded_index = sym_help._index_fill_reshape_helper(g, self, dim, index)
return scatter(g, self, dim, expanded_index, source)
def type_as(g, self, other):
self_dtype = sym_help._try_get_scalar_type(self)
other_dtype = sym_help._try_get_scalar_type(other)
if self_dtype == other_dtype and self_dtype is not None:
return self
if other_dtype is not None:
return g.op("Cast", self, to_i=sym_help.cast_pytorch_to_onnx[other_dtype])
else:
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
# We don't know the type of other, bail by emitting ATen
return g.op("ATen", self, other, operator_s="type_as")
else:
raise RuntimeError('Unsupported: ONNX export of type_as for tensor '
'of unknown dtype.')
@parse_args('v', 'v', 'i', 'f')
def cosine_similarity(g, x1, x2, dim, eps):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", x1, x2, dim_i=dim, eps_f=eps, operator_s="cosine_similarity")
else:
return sym_help._onnx_unsupported('cosine_similarity')
# ignore clone operators that are inserted by PyTorch autograd
def clone(g, input, unused_memory_format):
return input
def abs(g, self):
return g.op("Abs", self)
def log(g, self):
return g.op("Log", self)
def log1p(g, self):
return log(g, add(g, sym_help._if_scalar_type_as(g, torch.ones(1), self), self))
def pow(g, self, exponent):
f_dtype = self_dtype = self.type().scalarType()
if not sym_help._is_fp(self):
f_dtype = 'Float'
self = g.op("Cast", self, to_i=sym_help.cast_pytorch_to_onnx[f_dtype])
if not sym_help._is_fp(exponent):
exponent = g.op("Cast", exponent, to_i=sym_help.cast_pytorch_to_onnx[f_dtype])
pow = g.op("Pow", self, exponent)
return pow
def clamp(g, self, min, max):
# min or max may be None, in which case we need to dispatch to
# Clip separately, as ONNX does not have a None syntax
if sym_help._is_none(min):
return clamp_max(g, self, max)
elif sym_help._is_none(max):
return clamp_min(g, self, min)
else:
min = _parse_arg(min, 'f')
max = _parse_arg(max, 'f')
return g.op("Clip", self, min_f=min, max_f=max)
@parse_args('v', 'f')
def clamp_min(g, self, min):
return g.op("Clip", self, min_f=min)
@parse_args('v', 'f')
def clamp_max(g, self, max):
return g.op("Clip", self, max_f=max)
# torch.max (same for torch.min) actually has two interfaces smashed together:
# torch.max(x, dim, keepdim) and torch.max(x, y)
def max(g, self, dim_or_y=None, keepdim=None):
# torch.max(input)
if dim_or_y is None and keepdim is None:
return g.op("ReduceMax", self, keepdims_i=0)
# torch.max(input, other)
if keepdim is None:
return g.op("Max", self, dim_or_y)
# torch.max(input, dim, keepdim)
else:
dim = sym_help._get_const(dim_or_y, 'i', 'dim')
keepdim = sym_help._get_const(keepdim, 'i', 'keepdim')
max = g.op("ReduceMax", self, axes_i=[dim], keepdims_i=keepdim)
indices = g.op('ArgMax', self, axis_i=dim, keepdims_i=keepdim)
return max, indices
def min(g, self, dim_or_y=None, keepdim=None):
# torch.min(input)
if dim_or_y is None and keepdim is None:
return g.op("ReduceMin", self, keepdims_i=0)
# torch.min(input, other)
if keepdim is None:
return g.op("Min", self, dim_or_y)
# torch.min(input, dim, keepdim)
else:
dim = sym_help._get_const(dim_or_y, 'i', 'dim')
keepdim = sym_help._get_const(keepdim, 'i', 'keepdim')
min = g.op("ReduceMin", self, axes_i=[dim], keepdims_i=keepdim)
indices = g.op('ArgMin', self, axis_i=dim, keepdims_i=keepdim)
return min, indices
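# Illustrative sketch (a hypothetical helper, not referenced by the exporter): the three
# call patterns that the dispatch in max()/min() above distinguishes, shown in eager mode.
def _example_torch_max_interfaces():
    x = torch.tensor([[1., 5.], [3., 2.]])
    y = torch.tensor([[2., 0.], [4., 1.]])
    full_reduce = torch.max(x)                # exported as ReduceMax over all elements
    elementwise = torch.max(x, y)             # exported as elementwise Max
    values, indices = torch.max(x, 1, False)  # exported as a (ReduceMax, ArgMax) pair along dim 1
    return full_reduce, elementwise, values, indices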
def exp(g, self):
return g.op("Exp", self)
@parse_args('v', 'f', 'i')
def dropout(g, input, p, train):
sym_help.assert_training_mode(train, "dropout")
# in eval mode, dropout is a no-op - if the node's train param is set to False, dropout is a no-op
if not sym_help._training_mode:
return input
warnings.warn("Dropout is a training op and should not be exported in inference mode. "
"Make sure to call eval() on the model, and to export it with param training=False.")
r, _ = g.op("Dropout", input, ratio_f=p, outputs=2)
return r
def _unsupported_dropout(name):
@parse_args('v', 'f', 'i')
def feature_dropout(g, input, p, train):
# NB: In inference mode, FeatureDropout is exported as an identity op.
if train:
return _unimplemented(name, "training mode")
return input
return feature_dropout
feature_dropout = _unsupported_dropout("feature_dropout")
alpha_dropout = _unsupported_dropout("alpha_dropout")
feature_alpha_dropout = _unsupported_dropout("feature_alpha_dropout")
# See Note [Export inplace]
dropout_ = dropout
feature_dropout_ = feature_dropout
alpha_dropout_ = alpha_dropout
feature_alpha_dropout_ = feature_alpha_dropout
@parse_args('v', 't', 'is', 'i')
def norm(g, self, p, dim, keepdim):
if p == 1:
f = _reduce_op_symbolic("ReduceL1")
elif p == 2:
f = _reduce_op_symbolic("ReduceL2")
else:
raise RuntimeError("ONNX export only p-norms with p of 1 or 2")
return f(g, self, dim=dim, keepdim=keepdim)
@parse_args('v', 'v', 'v', 'i')
def conv_tbc(g, input, weight, bias, pad):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", input, weight, bias, operator_s="conv_tbc", pad_i=pad)
else:
return sym_help._onnx_unsupported('conv_tbc')
@parse_args('v', 'i', 'i')
def _unique(g, input, sorted, return_inverse):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", input, operator_s="_unique", sorted_i=sorted,
return_inverse_i=return_inverse, outputs=2)
else:
return sym_help._onnx_unsupported('_unique')
@parse_args('v', 'i', 'i', 'i')
def _unique2(g, input, sorted, return_inverse, return_counts):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", input, operator_s="_unique2", sorted_i=sorted,
return_inverse_i=return_inverse, return_counts_i=return_counts,
outputs=3)
else:
sym_help._onnx_opset_unsupported('_unique2', 9, 11)
for k, v in sym_help.cast_pytorch_to_onnx.items():
name = '_cast_{}'.format(k)
globals()[name] = parse_args('v', 'i')(partial(sym_help._cast_func_template, v))
@parse_args('v', 'i', 'v', 'v', 'v', 'v')
def empty(g, sizes, dtype, layout, device, pin_memory=False, memory_format=None):
return zeros(g, sizes, dtype, layout, device, pin_memory)
@parse_args('v', 'i', 'v', 'v', 'v', 'v')
def empty_like(g, input, dtype=None, layout=None, device=None, pin_memory=False, memory_format=None):
return zeros_like(g, input, dtype, layout, device, pin_memory)
def new_empty(g, self, sizes, dtype, layout, device, pin_memory=False):
self_dtype = sym_help._try_get_scalar_type(self)
if dtype is None and self_dtype is not None:
dtype = self_dtype
dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
return empty(g, sizes, dtype, layout, device, pin_memory)
def scalar_tensor(g, scalar, dtype, *options):
dtype = sym_help._get_const(dtype, 'i', 'dtype')
if dtype is None:
dtype = 6 # float
scalar = g.op("Cast", scalar, to_i=sym_help.scalar_type_to_onnx[dtype])
return scalar
def tensor(g, data, dtype=None, device=None, requires_grad=False):
dtype = sym_help._get_const(dtype, 'i', 'dtype')
if sym_help._is_packed_list(data):
if dtype is None:
dtype = sym_help._unpack_list(data)[0].type().scalarType()
dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
input_list = list()
for t in sym_help._unpack_list(data):
shape_reference = g.op("Constant", value_t=torch.LongTensor([1]))
t = g.op("Reshape", t, shape_reference)
t = g.op("Cast", t, to_i=sym_help.scalar_type_to_onnx[dtype])
input_list.append(t)
return g.op("Concat", *input_list, axis_i=0)
else:
if dtype is None:
dtype = data.type().scalarType()
dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
return g.op("Cast", data, to_i=sym_help.scalar_type_to_onnx[dtype])
@parse_args('v', 'i', 'v', 'v', 'v')
def zeros(g, sizes, dtype, layout, device, pin_memory=False):
# NOTE: there is no way to set device, layout and pin_memory in ONNX, so we ignore them
if dtype is None:
dtype = 6 # float
return g.op("ConstantOfShape", sizes,
value_t=torch.tensor([0], dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
@parse_args('v', 'i', 'v', 'v', 'v', 'v')
def zeros_like(g, input, dtype=None, layout=None, device=None, pin_memory=False, memory_format=None):
shape = g.op("Shape", input)
if dtype is None:
dtype = 6 # float
return g.op("ConstantOfShape", shape,
value_t=torch.tensor([0], dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
def new_zeros(g, self, sizes, dtype, layout, device, pin_memory=False):
self_dtype = sym_help._try_get_scalar_type(self)
if dtype is None and self_dtype is not None:
dtype = self_dtype
dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
return zeros(g, sizes, dtype, layout, device, pin_memory)
@parse_args('v', 'i', 'v', 'v', 'v')
def ones(g, sizes, dtype, layout, device, pin_memory=False):
if dtype is None:
dtype = 6 # float
return g.op("ConstantOfShape", sizes,
value_t=torch.tensor([1], dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
@parse_args('v', 'i', 'v', 'v', 'v', 'v')
def ones_like(g, input, dtype=None, layout=None, device=None, pin_memory=False, memory_format=None):
shape = g.op("Shape", input)
if dtype is None:
dtype = 6 # float
return g.op("ConstantOfShape", shape,
value_t=torch.tensor([1], dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
def full(g, sizes, value, dtype, layout, device, pin_memory=False):
const_value = sym_help._maybe_get_const(value, 't')
if sym_help._is_value(const_value):
dtype = 6 if dtype is None else dtype
tmp = zeros(g, sizes, dtype, layout, device)
return add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1)))
else:
dtype = sym_help._get_const(dtype, 'i', 'dtype')
dtype = 6 if dtype is None else dtype
return g.op("ConstantOfShape", sizes,
value_t=torch.tensor([const_value], dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
def full_like(g, input, fill_value, dtype=None, layout=None, device=None, pin_memory=False, memory_format=None):
fill_value = sym_help._maybe_get_const(fill_value, 'f')
if sym_help._is_value(fill_value):
dtype = 6 if dtype is None else dtype
tmp = zeros_like(g, input, dtype, layout, device)
return add(g, tmp, fill_value, g.op("Constant", value_t=torch.tensor(1)))
else:
dtype = sym_help._get_const(dtype, 'i', 'dtype')
dtype = 6 if dtype is None else dtype
shape = g.op("Shape", input)
return g.op("ConstantOfShape", shape,
value_t=torch.tensor([fill_value], dtype=sym_help.scalar_type_to_pytorch_type[dtype]))
def new_full(g, self, size, fill_value, dtype, layout, device, pin_memory=False):
self_dtype = sym_help._try_get_scalar_type(self)
if dtype is None and self_dtype is not None:
dtype = self_dtype
dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
return full(g, size, fill_value, dtype, layout, device, pin_memory)
def eye(g, *args):
if len(args) == 5:
# aten::eye(n, dtype, layout, device, pin_memory)
n, dtype, layout, device, pin_memory = args
dim_size = sym_help._unsqueeze_helper(g, n, [0])
shape = g.op("Concat", dim_size, dim_size, axis_i=0)
tensor = zeros(g, shape, dtype, layout, device)
return g.op("EyeLike", tensor)
elif len(args) == 6:
# aten::eye(n, m, dtype, layout, device, pin_memory)
n, m, dtype, layout, device, pin_memory = args
shape = g.op("Concat", sym_help._unsqueeze_helper(g, n, [0]), sym_help._unsqueeze_helper(g, m, [0]), axis_i=0)
tensor = zeros(g, shape, dtype, layout, device)
return g.op("EyeLike", tensor)
else:
raise NotImplementedError("Unknown aten::eye signature")
def slice(g, self, *args):
if len(args) == 4:
# aten::slice(Tensor self, int dim, int start, int end, int step) -> Tensor
dim, start, end, step = args
step = _parse_arg(step, 'i')
if step != 1:
raise RuntimeError("step!=1 is currently not supported")
if start.node().kind() != 'onnx::Constant' or \
end.node().kind() != 'onnx::Constant' or dim.node().kind() != 'onnx::Constant':
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX:
raise RuntimeError('Unsupported: ONNX export of Slice with dynamic inputs. DynamicSlice '
'is a deprecated experimental op. Please use statically allocated '
'variables or export to a higher opset version.')
else:
start_unsqueezed = sym_help._unsqueeze_helper(g, start, [0])
end_unsqueezed = sym_help._unsqueeze_helper(g, end, [0])
dim_unsqueezed = sym_help._unsqueeze_helper(g, dim, [0])
return g.op("DynamicSlice", self, start_unsqueezed, end_unsqueezed, dim_unsqueezed)
else:
start = _parse_arg(start, 'i')
end = _parse_arg(end, 'i')
dim = _parse_arg(dim, 'i')
return sym_help._slice_helper(g, self, axes=[dim], starts=[start], ends=[end])
elif len(args) == 3:
# aten::slice(t[] l, int start, int end, int step) -> t[]
start, end, step = args
dim = 0
start = _parse_arg(start, 'i')
end = _parse_arg(end, 'i')
return sym_help._slice_helper(g, self, axes=[dim], starts=[start], ends=[end])
else:
raise NotImplementedError("Unknown aten::slice signature")
@parse_args('v', 'f', 'f')
def hardtanh(g, self, min_val, max_val):
return g.op("Clip", self, min_f=min_val, max_f=max_val)
@parse_args('v')
def hardswish(g, self):
input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float)))
hardtanh_ = sym_help._hardtanh_helper(g, input,
g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)),
g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float)))
return g.op("Mul", self, hardtanh_)
def alias(g, self):
return self
@parse_args('v', 'i')
def unsqueeze(g, self, dim):
# Handle negative dim
if dim < 0:
rank = sym_help._get_tensor_rank(self)
if rank is not None:
warnings.warn("ONNX export unsqueeze with negative axis " + str(dim) +
" might cause the onnx model to be incorrect. " +
"Negative axis is not supported in ONNX. " +
"Axis is converted to " + str(dim + rank + 1) +
" based on input shape at export time. " +
"Passing an tensor of different rank in execution will be incorrect.")
dim = dim + rank + 1
else:
return _unimplemented('unsqueeze', 'negative axis with unknown input rank')
return sym_help._unsqueeze_helper(g, self, axes_i=[dim])
@parse_args('v', 'i', 'i', 'none')
def sort(g, self, dim, descending, out=None):
if out is not None:
_unimplemented("Sort", "Out parameter is not supported for sort")
self_sizes = sym_help._get_tensor_sizes(self)
try:
dim_size = self_sizes[dim]
except Exception:
dim_size = None
if dim_size is None:
return _unimplemented("Sort", "input size not accessible")
return g.op("TopK", self, k_i=dim_size, axis_i=dim, outputs=2)
def numel(g, self):
shape = g.op("Shape", self)
return g.op("ReduceProd", shape, keepdims_i=0)
@parse_args('v', 'i', 'i', 'i', 'i', 'none')
def topk(g, self, k, dim, largest, sorted, out=None):
if out is not None:
_unimplemented("TopK", "Out parameter is not supported for topk")
if not largest:
_unimplemented("TopK", "Ascending TopK is not supported")
return g.op("TopK", self, k_i=k, axis_i=dim, outputs=2)
def to(g, self, *args):
# ONNX doesn't have a concept of a device, so we ignore device casts
if len(args) == 4:
if args[0].type().isSubtypeOf(ListType.ofInts()):
# aten::to(Tensor, Device, bool, bool, memory_format)
return self
else:
dtype = sym_help._maybe_get_const(args[0], 'i')
if sym_help._is_value(dtype):
# aten::to(Tensor, Tensor, bool, bool, memory_format)
other = args[0]
dtype = other.type().scalarType()
return g.op("Cast", self, to_i=sym_help.cast_pytorch_to_onnx[dtype])
else:
# aten::to(Tensor, ScalarType, bool, bool, memory_format)
# memory_format is ignored
return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype])
elif len(args) == 5:
# aten::to(Tensor, Device, ScalarType, bool, bool, memory_format)
dtype = sym_help._get_const(args[1], 'i', 'dtype')
# memory_format is ignored
return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype])
elif len(args) == 6:
# aten::to(Tensor, ScalarType, Layout, Device, bool, bool, memory_format) -> Tensor
dtype = sym_help._get_const(args[0], 'i', 'dtype')
# Layout, device and memory_format are ignored
return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype])
elif len(args) == 7:
# aten::to(Tensor, ScalarType, Layout, Device, bool, bool, bool, memory_format) -> Tensor
dtype = sym_help._get_const(args[0], 'i', 'dtype')
# Layout, device and memory_format are ignored
return g.op("Cast", self, to_i=sym_help.scalar_type_to_onnx[dtype])
else:
raise NotImplementedError("Unknown aten::to signature")
def repeat(g, self, repeats):
dtype = 4 # int64
shape_ = ones_like(g, repeats, dtype)
self = g.op("Expand", self, shape_)
return g.op("Tile", self, repeats)
def repeat_interleave(g, self, repeats, dim=None):
input = self
# if dim is None flatten
# By default, use the flattened input array, and return a flat output array
if sym_help._is_none(dim):
input = reshape(g, self, g.op("Constant", value_t=torch.tensor([-1])))
dim = 0
else:
dim = sym_help._maybe_get_scalar(dim)
repeats_dim = sym_help._get_tensor_rank(repeats)
repeats_sizes = sym_help._get_tensor_sizes(repeats)
input_sizes = sym_help._get_tensor_sizes(input)
if repeats_dim is None:
raise RuntimeError('Unsupported: ONNX export of repeat_interleave for unknown '
'repeats rank.')
if repeats_sizes is None:
raise RuntimeError('Unsupported: ONNX export of repeat_interleave for unknown '
'repeats size.')
if input_sizes is None:
raise RuntimeError('Unsupported: ONNX export of repeat_interleave for unknown '
'input size.')
input_sizes_temp = input_sizes.copy()
for idx, input_size in enumerate(input_sizes):
if input_size is None:
input_sizes[idx], input_sizes_temp[idx] = 0, -1
# Cases where repeats is an int or single value tensor
if (repeats_dim == 0 or (repeats_dim == 1 and repeats_sizes[0] == 1)):
if not sym_help._is_tensor(repeats):
repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
if input_sizes[dim] == 0:
raise NotImplementedError("Unsupported repeat_interleave along dimension with unknown input size")
else:
reps = input_sizes[dim]
repeats = expand(g, repeats, g.op("Constant", value_t=torch.tensor([reps])), None)
# Cases where repeats is a 1 dim Tensor
elif repeats_dim == 1:
assert repeats_sizes[0] == input_sizes[dim], "repeats must have the same size as input along dim"
reps = repeats_sizes[0]
else:
raise RuntimeError("repeats must be 0-dim or 1-dim tensor")
final_splits = list()
r_splits = sym_help._repeat_interleave_split_helper(g, repeats, reps, 0)
i_splits = sym_help._repeat_interleave_split_helper(g, input, reps, dim)
input_sizes[dim], input_sizes_temp[dim] = -1, 1
for idx, r_split in enumerate(r_splits):
i_split = unsqueeze(g, i_splits[idx], dim + 1)
r_concat = [g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[:dim + 1])),
r_split,
g.op("Constant", value_t=torch.LongTensor(input_sizes_temp[dim + 1:]))]
r_concat = g.op("Concat", *r_concat, axis_i=0)
i_split = expand(g, i_split, r_concat, None)
i_split = reshape(g, i_split, g.op("Constant", value_t=torch.LongTensor(input_sizes)))
final_splits.append(i_split)
return g.op("Concat", *final_splits, axis_i=dim)
@parse_args('v', 'i')
def pixel_shuffle(g, self, upscale_factor):
dims = sym_help._get_tensor_sizes(self)
if len(dims) != 4:
return _unimplemented("pixel_shuffle", "only support 4d input")
if any([i is None for i in dims[1:]]):
return _unimplemented("pixel_shuffle", "only support static input shape, except for batch size")
output_channel = dims[1] // upscale_factor // upscale_factor
after_view = view(g, self, g.op("Constant", value_t=torch.tensor([-1, output_channel, upscale_factor,
upscale_factor, dims[2], dims[3]])))
after_transpose = g.op("Transpose", after_view, perm_i=[0, 1, 4, 2, 5, 3])
return view(g, after_transpose,
g.op("Constant", value_t=torch.tensor([-1, output_channel, dims[2] * upscale_factor,
dims[3] * upscale_factor])))
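# Worked shape example for the decomposition above (values assumed for illustration):
# with input [N, 16, 4, 4] and upscale_factor=2, output_channel = 16 // 2 // 2 = 4, so
#   view      -> [-1, 4, 2, 2, 4, 4]
#   Transpose -> [-1, 4, 4, 2, 4, 2]   (perm [0, 1, 4, 2, 5, 3])
#   view      -> [-1, 4, 8, 8]
# which matches the [N, 4, 8, 8] output of torch.pixel_shuffle on that input.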
def _generic_rnn(g, variant, input, initial_states, all_weights, has_biases,
num_layers, dropout, train, bidirectional, batch_first=None, batch_sizes=None):
warnings.warn("Exporting a model to ONNX with a batch_size other than 1, " +
"with a variable length with " + variant + " can cause an error " +
"when running the ONNX model with a different batch size. " +
"Make sure to save the model with a batch size of 1, " +
"or define the initial states (h0/c0) as inputs of the model. ")
onnxActivations = ['Relu', 'Tanh', 'Sigmoid', 'Affine', 'LeakyRelu', 'ThresholdedRelu',
'ScaledTanh', 'HardSigmoid', 'Elu', 'Softsign', 'Softplus']
variantToOnnxActivationMap = dict(zip([act_fun.lower() for act_fun in onnxActivations], onnxActivations))
weights_per_layer = 4 if has_biases else 2
# this means that projections are used inside the LSTM, so we need to tell the user that this is not supported
if variant == 'LSTM' and len(all_weights) != num_layers * weights_per_layer * (1 + bidirectional):
return _unimplemented("LSTM", "LSTMs with projections")
assert len(all_weights) == num_layers * weights_per_layer * (1 + bidirectional)
layer_weights = [all_weights[i:i + weights_per_layer] for i in range(0, len(all_weights), weights_per_layer)]
if batch_first:
# batch, seq, feat -> seq, batch, feat
input = g.op('Transpose', input, perm_i=[1, 0, 2])
if dropout and train:
return _unimplemented("RNN/GRU/LSTM", "dropout in training mode")
if variant.startswith('RNN'):
nonlinearity = variantToOnnxActivationMap[variant[4:].lower()]
variant = 'RNN'
w_hh = all_weights[1]
hidden_size = sym_help._get_tensor_dim_size(w_hh, 1)
if hidden_size is None:
return _unimplemented("RNN/GRU/LSTM", "unknown hidden size")
unidirectional = not bidirectional
prev_output = input
h_outs = []
if variant == 'RNN' or variant == 'GRU':
h0 = initial_states
elif variant == 'LSTM':
h0, c0 = initial_states
c_outs = []
sequence_lens = unused(g) if batch_sizes is None else batch_sizes
if variant == 'GRU':
# pytorch is reset, input, hidden
# onnx is input, reset, hidden
reform_permutation = [(1, 2), (0, 1), (2, 3)]
elif variant == 'LSTM':
# pytorch is input, forget, cell, output.
# onnx is input, output, forget, cell.
reform_permutation = [(0, 1), (3, 4), (1, 3)]
def reform_weights(g, w, n, intervals):
slices = [sym_help._slice_helper(g, w, axes=[0], starts=[x * n], ends=[y * n]) for x, y in intervals]
return g.op('Concat', *slices, axis_i=0)
def transform_weights_no_bias(layer_index):
weights = layer_weights[layer_index]
if variant == 'RNN':
weight_ih, weight_hh = weights
elif variant == 'GRU' or variant == 'LSTM':
weight_ih, weight_hh = \
[reform_weights(g, w, hidden_size, reform_permutation) for w in weights]
return tuple(sym_help._unsqueeze_helper(g, x, [0]) for x in (weight_ih, weight_hh))
def transform_weights(layer_index):
weights = layer_weights[layer_index]
if variant == 'RNN':
weight_ih, weight_hh, bias_ih, bias_hh = weights
elif variant == 'GRU' or variant == 'LSTM':
weight_ih, weight_hh, bias_ih, bias_hh = \
[reform_weights(g, w, hidden_size, reform_permutation) for w in weights]
bias_concat = g.op('Concat', bias_ih, bias_hh, axis_i=0)
return tuple(sym_help._unsqueeze_helper(g, x, [0]) for x in (weight_ih, weight_hh, bias_concat))
def retrieve_state(x, start, end):
return x if num_layers == 1 else sym_help._slice_helper(g, x, axes=[0], starts=[start], ends=[end])
for i in range(num_layers):
if unidirectional:
if weights_per_layer == 4:
weight_ih, weight_hh, bias_concat = transform_weights(i)
else:
weight_ih, weight_hh = transform_weights_no_bias(i)
bias_concat = unused(g)
state_indices = i, i + 1
else:
if weights_per_layer == 4:
weight_ih_f, weight_hh_f, bias_f = transform_weights(2 * i)
weight_ih_b, weight_hh_b, bias_b = transform_weights(2 * i + 1)
bias_concat = g.op('Concat', bias_f, bias_b, axis_i=0)
else:
weight_ih_f, weight_hh_f = transform_weights_no_bias(2 * i)
weight_ih_b, weight_hh_b = transform_weights_no_bias(2 * i + 1)
bias_concat = unused(g)
weight_ih = g.op('Concat', weight_ih_f, weight_ih_b, axis_i=0)
weight_hh = g.op('Concat', weight_hh_f, weight_hh_b, axis_i=0)
state_indices = 2 * i, 2 * i + 2
inputs = [prev_output, weight_ih, weight_hh, bias_concat, sequence_lens]
inputs.append(retrieve_state(h0, *state_indices))
if variant == 'LSTM':
inputs.append(retrieve_state(c0, *state_indices))
extra_kwargs = {} if unidirectional else {'direction_s': 'bidirectional'}
if variant == 'RNN':
if bidirectional:
activation = [nonlinearity, nonlinearity]
else:
activation = [nonlinearity]
prev_output, h_out = g.op('RNN', *inputs, outputs=2,
hidden_size_i=hidden_size,
activations_s=activation,
**extra_kwargs)
elif variant == 'GRU':
prev_output, h_out = g.op('GRU', *inputs, outputs=2,
hidden_size_i=hidden_size,
linear_before_reset_i=1,
**extra_kwargs)
elif variant == 'LSTM':
prev_output, h_out, c_out = g.op('LSTM', *inputs, outputs=3,
hidden_size_i=hidden_size,
**extra_kwargs)
if bidirectional:
# The ONNX RNN/GRU/LSTM produce an output of dimensions
# seq_len, num_directions, batch, hidden_size
# We have to convert to match pytorch's expected
# seq_len, batch, num_directions * hidden_size
# by first moving num_directions before hidden_size with
# Transpose, and then combining it with hidden_size
# with Reshape.
prev_output = g.op('Transpose', prev_output, perm_i=[0, 2, 1, 3])
prev_output = g.op('Reshape', prev_output, g.op('Constant', value_t=torch.LongTensor([0, 0, -1])))
else:
prev_output = sym_help._squeeze_helper(g, prev_output, [1])
h_outs.append(h_out)
if variant == 'LSTM':
c_outs.append(c_out)
if batch_first:
# seq, batch, num_directions * hidden_size -> batch, seq, num_directions * hidden_size
prev_output = g.op('Transpose', prev_output, perm_i=[1, 0, 2])
h_outs = h_out if num_layers == 1 else g.op('Concat', *h_outs, axis_i=0)
if variant == 'RNN' or variant == 'GRU':
return prev_output, h_outs
elif variant == 'LSTM':
c_outs = c_out if num_layers == 1 else g.op('Concat', *c_outs, axis_i=0)
return prev_output, h_outs, c_outs
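# Worked example of the gate reordering performed by reform_weights above (illustrative,
# a hypothetical helper not referenced by the exporter): for an LSTM with hidden_size n,
# PyTorch stacks the gate weights along dim 0 in the order [input, forget, cell, output],
# i.e. row blocks [0:n, n:2n, 2n:3n, 3n:4n], while ONNX expects [input, output, forget, cell].
# The intervals (0, 1), (3, 4), (1, 3) in reform_permutation therefore concatenate
# rows [0:n] + [3n:4n] + [n:3n], which the eager sketch below mirrors.
def _example_lstm_gate_reorder(w_ih, hidden_size):
    n = hidden_size
    return torch.cat([w_ih[0:n], w_ih[3 * n:4 * n], w_ih[n:3 * n]], dim=0)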
@parse_args('v', 'v', 'v', 'i', 'i', 'f', 'i', 'i', 'i')
def _lstm_full(g, input, hidden_v, weight_v, has_biases, num_layers, dropout, train, bidirectional, batch_first):
hidden, weight = sym_help._unpack_list(hidden_v), sym_help._unpack_list(weight_v)
return _generic_rnn(g, 'LSTM', input, hidden, weight, has_biases, num_layers,
dropout, train, bidirectional, batch_first)
@parse_args('v', 'v', 'v', 'v', 'i', 'i', 'f', 'i', 'i')
def _lstm_packed(g, input, batch_sizes, hidden_v, weight_v, has_biases, num_layers, dropout, train, bidirectional):
hidden, weight = sym_help._unpack_list(hidden_v), sym_help._unpack_list(weight_v)
return _generic_rnn(g, 'LSTM', input, hidden, weight, has_biases, num_layers,
dropout, train, bidirectional, batch_sizes=batch_sizes)
def lstm(g, *args):
if sym_help._is_tensor_list(args[3]):
return _lstm_packed(g, *args)
else:
return _lstm_full(g, *args)
def _one_hidden_rnn(kind):
@parse_args('v', 'v', 'v', 'i', 'i', 'f', 'i', 'i', 'i')
def _rnn_full(g, input, hidden, weight_v, has_biases, num_layers, dropout, train, bidirectional, batch_first):
weight = sym_help._unpack_list(weight_v)
return _generic_rnn(g, kind, input, hidden, weight, has_biases, num_layers,
dropout, train, bidirectional, batch_first)
@parse_args('v', 'v', 'v', 'v', 'i', 'i', 'f', 'i', 'i')
def _rnn_packed(g, input, batch_sizes, hidden, weight_v, has_biases, num_layers, dropout, train, bidirectional):
weight = sym_help._unpack_list(weight_v)
return _generic_rnn(g, kind, input, hidden, weight, has_biases, num_layers,
dropout, train, bidirectional, batch_sizes=batch_sizes)
def symbolic(g, *args):
if sym_help._is_tensor_list(args[3]):
return _rnn_packed(g, *args)
else:
return _rnn_full(g, *args)
return symbolic
gru = _one_hidden_rnn('GRU')
rnn_tanh = _one_hidden_rnn('RNN_TANH')
rnn_relu = _one_hidden_rnn('RNN_RELU')
@parse_args('v', 'i')
def _dim_arange(g, like, dim):
like_shape = g.op('Shape', like)
stop = g.op("Gather", like_shape, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0)
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("_caffe2::Range", stop)
else:
# aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
return arange(g, stop, 4, None, None, None)
def detach(g, input):
# Erase aten::detach nodes because ONNX is inference only
return input
@parse_args('v', 'i')
def contiguous(g, input, memory_format):
if memory_format > 2: # allowed values are any, preserve and contiguous_format
raise RuntimeError("onnx memory_format support is not implemented")
return input
@parse_args('v', 'v', 'i')
def _pack_padded_sequence(g, input, lengths, batch_first):
# There currently is no PackPadded operator in ONNX. We rely on an
# optimization pass to remove this later. It is an error if all
# PackPadded operators cannot be optimized out.
if batch_first:
input = g.op('Transpose', input, perm_i=[1, 0, 2])
if not lengths.type().isSubtypeOf(torch._C.TensorType.get()):
raise RuntimeError("Lengths must be a Tensor for ONNX export")
# We know it's a TensorType so this check is now safe.
# It's really only necessary because those operators expand to something that
# only works with int32 types in Caffe2...
if lengths.type().scalarType() != 'Int':
lengths = _cast_Int(g, lengths, False) # type: ignore
return g.op("prim::PackPadded", input, lengths, outputs=2)
@parse_args('v', 'v', 'i', 't', 'v')
def _pad_packed_sequence(g, data, batch_sizes, batch_first, padding_value, total_length):
# Ignore total_length as it is not supported in _symbolic_pad_packed_sequence
# It is only useful/used when training with a data_parallel model, so
# it shouldn't be relevant for ONNX anyway
data, lengths = g.op("prim::PadPacked", data, batch_sizes, outputs=2)
if batch_first:
data = g.op('Transpose', data, perm_i=[1, 0, 2])
return data, lengths
def randn(g, shapes, dtype, *options):
dtype = sym_help._get_const(dtype, 'i', 'dtype')
if dtype is None:
dtype = 6 # float
shape = sym_help._maybe_get_const(shapes, "is")
if sym_help._is_value(shape):
shape_const = g.op("ConstantOfShape", shapes,
value_t=torch.tensor([0], dtype=sym_help.scalar_type_to_pytorch_type[6]))
return g.op('RandomNormalLike', shape_const, dtype_i=sym_help.scalar_type_to_onnx[dtype])
return g.op('RandomNormal', shape_i=shape)
def rand(g, shapes, dtype, *options):
dtype = sym_help._get_const(dtype, 'i', 'dtype')
if dtype is None:
dtype = 6 # float
shape = sym_help._maybe_get_const(shapes, "is")
if sym_help._is_value(shape):
shape_const = g.op("ConstantOfShape", shapes,
value_t=torch.tensor([0], dtype=sym_help.scalar_type_to_pytorch_type[6]))
return g.op('RandomUniformLike', shape_const, dtype_i=sym_help.scalar_type_to_onnx[dtype])
return g.op('RandomUniform', shape_i=shape)
def randn_like(g, self, dtype, layout=None, device=None, pin_memory=False, memory_format=None):
dtype = sym_help._get_const(dtype, 'i', 'dtype')
if dtype is None:
dtype = 6 # float
return g.op('RandomNormalLike', self, dtype_i=sym_help.scalar_type_to_onnx[dtype])
def rand_like(g, self, dtype, layout=None, device=None, pin_memory=False, memory_format=None):
dtype = sym_help._get_const(dtype, 'i', 'dtype')
if dtype is None:
dtype = 6 # float
return g.op('RandomUniformLike', self, dtype_i=sym_help.scalar_type_to_onnx[dtype])
@parse_args('v', 'f', 'f', 'i', 'none')
def rrelu(g, input, lower, upper, training, generator):
p = g.op('RandomUniformLike', input, high_f=upper, low_f=lower)
return g.op('PRelu', input, p)
@parse_args('v')
def log_sigmoid(g, input):
p = g.op('Sigmoid', input)
return g.op('Log', p)
@parse_args('v')
def erf(g, input):
return g.op('Erf', input)
@parse_args('v', 'i', 'i')
def flatten(g, input, start_dim, end_dim):
dim = sym_help._get_tensor_rank(input)
if dim is None:
return _unimplemented("dim",
"ONNX and PyTorch use different strategies to split the input. "
"Input rank must be known at export time.")
# TODO: remove this as onnx opset 11 spec allows negative axes
if end_dim < 0:
end_dim = dim + end_dim
# use ONNX's Flatten operator for cases where the output shape is 2D
if start_dim == 1 and end_dim == dim - 1:
return g.op("Flatten", input, axis_i=start_dim)
if start_dim == 0 and end_dim == dim - 2:
return g.op("Flatten", input, axis_i=end_dim + 1)
return sym_help._flatten_helper(g, input, start_dim, end_dim, dim)
# Emitted from `torch.nonzero(x, as_tuple=False)`
@parse_args('v')
def nonzero(g, input):
return t(g, g.op('NonZero', input))
# Emitted from `torch.nonzero(x, as_tuple=True)`
def nonzero_numpy(g, input, _outputs=None):
return unbind(g, nonzero(g, input), 1, _outputs=_outputs)
@parse_args('v')
def isnan(g, input):
output = g.op('IsNaN', input)
return output
def _any(g, input):
input = _cast_Long(g, input, False) # type: ignore
input_sum = sym_help._reducesum_helper(g, input, keepdims_i=0)
return gt(g, input_sum, g.op("Constant", value_t=torch.LongTensor([0])))
def _all(g, input):
return g.op("Not", _any(g, g.op("Not", input)))
@parse_args('v', 'i', 'i', 'i')
def narrow(g, input, dim, start, length):
return sym_help._slice_helper(g, input, axes=[dim], starts=[start], ends=[start + length])
def argmax(g, input, dim, keepdim):
if sym_help._is_none(dim):
flattened = reshape(g, input, g.op("Constant", value_t=torch.tensor([-1])))
return g.op('ArgMax', flattened, axis_i=0, keepdims_i=False)
else:
dim = _parse_arg(dim, 'i')
keepdim = _parse_arg(keepdim, 'i')
return g.op('ArgMax', input, axis_i=dim, keepdims_i=keepdim)
def argmin(g, input, dim, keepdim):
if sym_help._is_none(dim):
flattened = reshape(g, input, g.op("Constant", value_t=torch.tensor([-1])))
return g.op('ArgMin', flattened, axis_i=0, keepdims_i=False)
else:
dim = _parse_arg(dim, 'i')
keepdim = _parse_arg(keepdim, 'i')
return g.op('ArgMin', input, axis_i=dim, keepdims_i=keepdim)
@parse_args('v', 'i', 'v', 'v')
def scatter(g, self, dim, index, src):
src_type = src.type().scalarType()
src = sym_help._maybe_get_scalar(src)
if sym_help._is_value(src):
return g.op("Scatter", self, index, src, axis_i=dim)
else:
# Check if scalar 'src' has same type as self (PyTorch allows different
# type for scalar src (but not when src is tensor)). If not, insert Cast node.
if self.type().scalarType() != src_type:
src = g.op("Cast", src, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])
return g.op("Scatter", self, index, expand_as(g, src, index), axis_i=dim)
@parse_args('v', 'i', 'v', 'v')
def scatter_add(g, self, dim, index, src):
dtype = sym_help._try_get_scalar_type(self)
if dtype is None:
return _unimplemented("scatter_add", "input dtype not accessible")
dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
dtype = sym_help.scalar_type_to_pytorch_type[dtype]
sizes = sym_help._get_tensor_sizes(self, allow_nonstatic=False)
if sizes:
to_add = g.op("Constant", value_t=torch.zeros(sizes, dtype=dtype))
else:
dtype = sym_help.scalar_type_to_pytorch_type.index(dtype)
to_add = zeros_like(g, self, dtype)
to_add = sym_help._scatter_helper(g, to_add, dim, index, src)
return add(g, self, to_add)
def log2(g, self):
_ln2 = 0.693147180559945309
return g.op('Div', log(g, self), g.op('Constant', value_t=torch.tensor([_ln2])))
def prim_shape(g, self):
return g.op('Shape', self)
def prim_max(g, self, other):
return g.op('Max', self, other)
def prim_data(g, self):
return self
def is_floating_point(g, self):
if sym_help._is_fp(self):
return g.op("Constant", value_t=torch.BoolTensor([1]))
return g.op("Constant", value_t=torch.BoolTensor([0]))
def __isnot_(g, self, other):
if sym_help._is_none(other):
if sym_help._is_none(self):
return g.op("Constant", value_t=torch.BoolTensor([0]))
return g.op("Constant", value_t=torch.BoolTensor([1]))
return ne(g, self, other)
# exists to refine the type of the Value
# if x is an optional Tensor, unchecked_cast will cast
# x to Tensor, so the rest of the graph knows that x is a Tensor
# this doesn't do anything in runtime and is a noop in ONNX
def prim_unchecked_cast(g, self):
return self
def prim_dtype(g, self):
dtype = sym_help._try_get_scalar_type(self)
if dtype is None:
dtype = "Float"
dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])
return g.op("Constant", value_t=torch.tensor(dtype))
# tolist is currently supported only for 1D input tensors.
# dim_val and elem_ty_val represent dimension and type annotations
# that need to match dimension and type of the input tensor.
def prim_tolist(g, input, dim_val, elem_ty_val):
dim = sym_help._maybe_get_const(dim_val, 'i')
if dim > 1:
return _unimplemented("prim_tolist", "dim_val > 1")
return input
@parse_args('v', 'i')
def one_hot(g, self, num_classes):
values = g.op("Constant", value_t=torch.LongTensor([0, 1]))
depth = g.op("Constant", value_t=torch.LongTensor([num_classes]))
return g.op("OneHot", self, depth, values, axis_i=-1)
@parse_args('v', 'i', 'v', 'v')
def gather(g, self, dim, index, sparse_grad=False):
if sym_help._maybe_get_const(sparse_grad, 'i'):
return _unimplemented("gather", "sparse_grad == True")
# NOTE: This workaround is needed since GatherElements is only supported
# since opset 11, and Gather in ONNX is not the same as torch.gather.
dtype = self.type().scalarType()
values = g.op("Constant", value_t=torch.LongTensor([0, 1]))
depth = size(g, self, g.op("Constant", value_t=torch.LongTensor([dim])))
index = g.op("Cast", g.op("OneHot", index, depth, values, axis_i=dim), to_i=sym_help.cast_pytorch_to_onnx[dtype])
mul = g.op("Mul", sym_help._unsqueeze_helper(g, self, [dim + 1]), index)
return sym_help._reducesum_helper(g, mul, axes_i=[dim], keepdims_i=0)
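# Illustrative eager sketch (a hypothetical helper, not referenced by the exporter) of the
# one-hot workaround above: gather(self, dim, index) equals the sum over `dim` of
# unsqueeze(self, dim + 1) multiplied by a one-hot encoding of `index` whose class axis is
# inserted at `dim`, mirroring ONNX OneHot(axis=dim). Assumes torch >= 1.7 for torch.movedim.
def _example_gather_via_one_hot(self_t, dim, index):
    import torch.nn.functional as F
    one_hot_idx = F.one_hot(index, self_t.size(dim)).to(self_t.dtype)
    one_hot_idx = torch.movedim(one_hot_idx, -1, dim)
    return (self_t.unsqueeze(dim + 1) * one_hot_idx).sum(dim)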
@parse_args('v', 'is', 'b', 'i')
def _var_mean(g, input, dim, unbiased, keepdim):
if dim is None:
mean = g.op("ReduceMean", input, keepdims_i=0)
t_mean = mean
num_elements = numel(g, input)
else:
mean = g.op("ReduceMean", input, axes_i=dim, keepdims_i=keepdim)
t_mean = g.op("ReduceMean", input, axes_i=dim, keepdims_i=1)
reduced_dims = g.op("Shape", input)
# dim could contain one or multiple dimensions
reduced_dims = g.op("Gather", reduced_dims, g.op("Constant", value_t=torch.tensor(dim)), axis_i=0)
num_elements = g.op("ReduceProd", reduced_dims, keepdims_i=0)
sub_v = g.op("Sub", input, t_mean)
sqr_sub = g.op("Mul", sub_v, sub_v)
keepdim_mean = 0 if dim is None else keepdim
var = g.op("ReduceMean", sqr_sub, axes_i=dim, keepdims_i=keepdim_mean)
# Correct the bias in the variance calculation by dividing by (N - 1) instead of N
if unbiased:
num_elements = g.op("Cast", num_elements, to_i=sym_help.cast_pytorch_to_onnx['Float'])
one = g.op("Constant", value_t=torch.tensor(1, dtype=torch.float))
mul = g.op("Mul", var, num_elements)
var = g.op("Div", mul, g.op("Sub", num_elements, one))
return var, mean
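# Illustrative sketch (a hypothetical helper, not referenced by the exporter) of the Bessel
# correction applied above: the biased variance (mean of squared deviations) is rescaled by
# N / (N - 1) when unbiased=True, matching torch.var(x, unbiased=True) for a full reduction.
def _example_unbiased_variance(x):
    n = float(x.numel())
    biased = ((x - x.mean()) ** 2).mean()
    return biased * n / (n - 1.0)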
# Since position of optional arguments can change for std, this is a hack to find if first argument
# is 'dim' or 'unbiased'. As shown below, 'dim' argument could be listed before 'unbiased' :
# at::std(input, unbiased)
# at::std(input, dim, unbiased, keepdim)
def std(g, input, *args):
if len(args) == 3:
var, _ = _var_mean(g, input, *args)
else:
var, _ = _var_mean(g, input, None, args[0], None)
return g.op("Sqrt", var)
# Since position of optional arguments can change for var, this is a hack to find if first argument
# is 'dim' or 'unbiased'. As shown below, 'dim' argument could be listed before 'unbiased' :
# at::var(input, unbiased)
# at::var(input, dim, unbiased, keepdim)
def var(g, input, *args):
if len(args) == 3:
var, _ = _var_mean(g, input, *args)
else:
var, _ = _var_mean(g, input, None, args[0], None)
return var
# Since position of optional arguments can change for var_mean, this is a hack to find if first argument
# is 'dim' or 'unbiased'. As shown below, 'dim' argument could be listed before 'unbiased' :
# at::var_mean(input, unbiased)
# at::var_mean(input, dim, unbiased, keepdim)
def var_mean(g, input, *args):
if len(args) == 3:
var, mean = _var_mean(g, input, *args)
else:
var, mean = _var_mean(g, input, None, args[0], None)
return var, mean
# Since position of optional arguments can change for std_mean, this is a hack to find if first argument
# is 'dim' or 'unbiased'. As shown below, 'dim' argument could be listed before 'unbiased' :
# at::std_mean(input, unbiased)
# at::std_mean(input, dim, unbiased, keepdim)
def std_mean(g, input, *args):
if len(args) == 3:
var, mean = _var_mean(g, input, *args)
else:
var, mean = _var_mean(g, input, None, args[0], None)
return g.op("Sqrt", var), mean
@parse_args('v', 'is', 'i')
def logsumexp(g, input, dim, keepdim):
return g.op('ReduceLogSumExp', input, axes_i=dim, keepdims_i=keepdim)
def arange(g, *args):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", *args, operator_s="arange")
def _get_arange_dtype(dtype):
dtype = sym_help._maybe_get_const(dtype, 'i')
if sym_help._is_value(dtype):
dtype = 4 # default to int64
return dtype
if len(args) == 2:
# aten::arange(Scalar end, Tensor out)
end = sym_help._unsqueeze_helper(g, args[0], [0])
dtype = 4 # default to int64
arange_tensor = sym_help._squeeze_helper(g, nonzero(g, ones(g, end, dtype, None, None)), [1])
return g.op("Cast", arange_tensor, to_i=sym_help.scalar_type_to_onnx[dtype])
elif len(args) == 4:
# aten::arange(Scalar start, Scalar end, Scalar step, Tensor out)
dtype = 4 # default to int64
step = sym_help._unsqueeze_helper(g, args[2], [0])
end = sym_help._unsqueeze_helper(g, args[1], [0])
start = sym_help._unsqueeze_helper(g, args[0], [0])
range_tensor = g.op("Div", g.op("Sub", end, start), step)
arange_tensor = sym_help._squeeze_helper(g, nonzero(g, ones(g, range_tensor, None, None, None)), [1])
arange_tensor = g.op("Add", g.op("Mul", arange_tensor, step), start)
return g.op("Cast", arange_tensor, to_i=sym_help.scalar_type_to_onnx[dtype])
elif len(args) == 5:
# aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[1])
end = sym_help._unsqueeze_helper(g, args[0], [0])
arange_tensor = sym_help._squeeze_helper(g, nonzero(g, ones(g, end, dtype, *(args[2:]))), [1])
return g.op("Cast", arange_tensor, to_i=sym_help.scalar_type_to_onnx[dtype])
elif len(args) == 6:
# aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[2])
end = sym_help._unsqueeze_helper(g, args[1], [0])
start = sym_help._unsqueeze_helper(g, args[0], [0])
range_tensor = g.op("Sub", end, start)
arange_tensor = g.op("Add", sym_help._squeeze_helper(g, nonzero(g, ones(g, range_tensor, dtype, *(args[3:]))), [1]), start)
return g.op("Cast", arange_tensor, to_i=sym_help.scalar_type_to_onnx[dtype])
elif len(args) == 7:
# aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory)
dtype = _get_arange_dtype(args[3])
step = sym_help._unsqueeze_helper(g, args[2], [0])
end = sym_help._unsqueeze_helper(g, args[1], [0])
start = sym_help._unsqueeze_helper(g, args[0], [0])
range_tensor = g.op("Div", g.op("Sub", end, start), step)
arange_tensor = sym_help._squeeze_helper(g, nonzero(g, ones(g, range_tensor, dtype, *(args[4:]))), [1])
arange_tensor = g.op("Add", g.op("Mul", arange_tensor, step), start)
return g.op("Cast", arange_tensor, to_i=sym_help.scalar_type_to_onnx[dtype])
else:
raise NotImplementedError("Unknown aten::arange signature taking " + str(len(args)) + " arguments.")
def masked_fill(g, self, mask, value):
mask = _cast_Bool(g, mask, False) # type: ignore
value = sym_help._maybe_get_scalar(value)
return g.op('Where', mask, sym_help._if_scalar_type_as(g, value, self), self)
def index(g, self, index):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", self, index, operator_s="index")
if sym_help._is_packed_list(index):
indices = sym_help._unpack_list(index)
else:
indices = [index]
def try_mask_to_index(index):
if not sym_help._is_none(index) and (index.type().scalarType() == "Byte" or index.type().scalarType() == "Bool"):
if sym_help._export_onnx_opset_version < 9:
raise RuntimeError("Exporting masked indices are only supported after ONNX opset 9.")
warnings.warn("Exporting aten::index operator with indices of type Byte. "
"Only 1-D indices are supported. In any other case, "
"this will produce an incorrect ONNX graph.")
index = sym_help._squeeze_helper(g, nonzero(g, index), [1])
return index
indices = [try_mask_to_index(idx) for idx in indices]
if len(indices) == 1:
return sym_help._select_helper(g, self, 0, indices[0], apply_reshape=False)
else:
# Multiple tensors as indices. Each tensor could either be
# 1. prim::Constant()
# representing ":" in python indexing. E.g. tensor[:, :]
# 2. prim::Constant[value=...] or tensor output
# representing advanced indexing. E.g. tensor[[0, 1], [2, 0]].
# For more info on advanced indexing,
# check https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
# Consider a general case of
# t: [x_1, y_1, y_2, ..., x_m, ..., y_n]
# where t is a tensor of rank m+n, {x_i} are axes where tensor index is provided, and {y_i} are axes for ":".
# Same results can be achieved through transposing t into
# t: [x_1, x_2, ..., x_m, y_1, y_2, ..., y_n]
# and use GatherND. However, ONNX does not have GatherND in this opset, so to use 1-d Gather
# we'll need to flatten t and process the tensor indices.
# t: [x_1 * x_2 * ... * x_m, y_1 * y_2 * ... * y_n]
# tensor index = \sum_{i=1}^m (ind_i * \prod_{j=i+1}^m (x_j))
# After gather, reshape and transpose back.
adv_idx_indices = [i for i, idx in enumerate(indices) if not sym_help._is_none(idx)]
if len(adv_idx_indices) == 0:
return self
elif len(adv_idx_indices) == 1:
return index_select(g, self, adv_idx_indices[0], indices[adv_idx_indices[0]])
else:
rank = sym_help._get_tensor_rank(self)
if rank is None:
raise NotImplementedError("Unsupported aten::index operator of advanced indexing on tensor of unknown rank, " +
"try turning on shape and type propagate during export: " +
"torch.onnx._export(..., propagate=True).")
# TODO: If indexing is supported natively in ONNX in future opsets,
# update the warning to recommend exporting with higher opset version.
warnings.warn("Exporting aten::index operator of advanced indexing in opset " +
str(sym_help._export_onnx_opset_version) +
" is achieved by combination of multiple ONNX operators, " +
"including Reshape, Transpose, Concat, and Gather. " +
"If indices include negative values, the exported graph will produce incorrect results.")
adv_idx_count = len(adv_idx_indices)
shape_tensor = _shape_as_tensor(g, self)
dim_tensor_list = [
g.op("Gather", shape_tensor, g.op("Constant", value_t=torch.LongTensor([dim])), axis_i=0) for dim in range(rank)
]
self = g.op("Transpose", self, perm_i=adv_idx_indices + [i for i in range(rank) if i not in adv_idx_indices])
self = g.op("Flatten", self, axis_i=adv_idx_count)
# Note that tensor indices will be broadcasted while accumulating. Thus we get the final subarray shape as well.
cum_adv_index = indices[adv_idx_indices[-1]]
multiplier = dim_tensor_list[adv_idx_indices[-1]]
for i in range(adv_idx_count - 2, -1, -1):
adv_index = g.op("Mul", indices[adv_idx_indices[i]], multiplier)
cum_adv_index = g.op("Add", cum_adv_index, adv_index)
multiplier = g.op("Mul", multiplier, dim_tensor_list[adv_idx_indices[i]])
# perform gather
self = index_select(g, self, 0, cum_adv_index)
cum_adv_index_shape_tensor = _shape_as_tensor(g, cum_adv_index)
# check if all advanced indices are consecutive.
# Refer to https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing
# to understand how the subarray position is decided.
if adv_idx_indices == list(range(adv_idx_indices[0], adv_idx_indices[-1] + 1)):
# unfold regular index axes
folded_adv_idx_shape_list = [g.op("Constant", value_t=torch.LongTensor([-1]))] \
+ [dim_tensor_list[i] for i in range(rank) if i not in adv_idx_indices]
folded_adv_idx_shape = g.op("Concat", *folded_adv_idx_shape_list, axis_i=0)
self = g.op("Reshape", self, folded_adv_idx_shape)
# Transpose folded advanced indexed axis to its original location.
adv_idx_permute = list(range(1, adv_idx_indices[0] + 1)) \
+ [0] + list(range(adv_idx_indices[0] + 1, rank - adv_idx_count + 1))
self = g.op("Transpose", self, perm_i=adv_idx_permute)
# unfold advanced index axes
final_shape_list = [dim_tensor_list[i] for i in range(adv_idx_indices[0])] \
+ [cum_adv_index_shape_tensor] \
+ [dim_tensor_list[i] for i in range(adv_idx_indices[0], rank) if i not in adv_idx_indices]
final_shape = g.op("Concat", *final_shape_list, axis_i=0)
else:
final_shape = g.op(
"Concat",
cum_adv_index_shape_tensor,
*[dim_tensor_list[i] for i in range(rank) if i not in adv_idx_indices],
axis_i=0)
return g.op("Reshape", self, final_shape)
@parse_args('v', 'is', 'i')
def frobenius_norm(g, self, dim=None, keepdim=False):
sqr = g.op('Mul', self, self)
sumsqr = sym_help._reducesum_helper(g, sqr, axes_i=dim, keepdims_i=keepdim)
return g.op('Sqrt', sumsqr)
@parse_args('v', 'i', 'b', 'v')
def multinomial(g, input, num_samples, replacement=False, generator=None):
if generator is not None and not sym_help._is_none(generator):
_unimplemented("Multinomial", "generator is not supported for multinomial")
if not replacement and num_samples > 1:
_unimplemented("Multinomial", "replacement=False when num_samples > 1 is not supported for multinomial")
log_input = log(g, input)
return g.op("Multinomial", log_input,
dtype_i=sym_help.cast_pytorch_to_onnx['Long'],
sample_size_i=num_samples)
def baddbmm(g, self, batch1, batch2, beta, alpha):
dtype = self.type().scalarType()
batch_mul = matmul(g, batch1, batch2)
mul_a = mul(g, batch_mul, g.op("Cast", alpha, to_i=sym_help.cast_pytorch_to_onnx[dtype]))
mul_b = mul(g, self, g.op("Cast", beta, to_i=sym_help.cast_pytorch_to_onnx[dtype]))
return add(g, mul_a, mul_b)
def meshgrid(g, tensor_list):
tensors = [view(g, t, g.op("Constant", value_t=torch.LongTensor([-1]))) for t in sym_help._unpack_list(tensor_list)]
tensors_shape = [g.op("Shape", t) for t in tensors]
out_shape = g.op("Concat", *tensors_shape, axis_i=0)
out = []
for i, t in enumerate(tensors):
shape_i = [g.op("Constant", value_t=torch.ones(1, dtype=torch.int64))] * len(tensors)
shape_i[i] = tensors_shape[i]
t_reshaped = _reshape_from_tensor(g, t, g.op("Concat", *shape_i, axis_i=0))
out.append(g.op("Expand", t_reshaped, out_shape))
return g.op("prim::ListConstruct", *out)
def remainder(g, input, other):
div = g.op("Div", input, other)
if sym_help._is_fp(input) or sym_help._is_fp(other):
div = g.op("Floor", div)
quo = g.op("Mul", div, other)
return g.op("Sub", input, quo)
def gelu(g, self):
_sqrt2 = 1.4142135623730951
erf = g.op('Erf', g.op('Div', self, torch.tensor(_sqrt2, dtype=torch.double)))
erf_plusone = add(g, erf, g.op('Constant', value_t=torch.tensor(1, dtype=torch.double)))
return mul(g, mul(g, self, erf_plusone), g.op('Constant', value_t=torch.tensor(0.5, dtype=torch.double)))
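# Illustrative sketch (a hypothetical helper, not referenced by the exporter) of the exact
# GELU identity built above: gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).
def _example_gelu_reference(x):
    import math
    return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))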
@parse_args('v', 'i', 'v', 'v', 'f', 'i')
def group_norm(g, input, num_groups, weight, bias, eps, cudnn_enabled):
if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", input, weight, bias, num_groups_i=num_groups,
eps_f=eps, cudnn_enabled_i=cudnn_enabled, operator_s="group_norm")
channel_size = sym_help._get_tensor_dim_size(input, 1)
if channel_size is not None:
assert channel_size % num_groups == 0
input_rank = sym_help._get_tensor_rank(input)
if input_rank is None:
return _unimplemented("group_norm", "unknown input rank")
# 0 in the shape list keeps dimension value unchanged.
shape = [0, num_groups, -1]
input_reshaped = g.op('Reshape', input, g.op('Constant', value_t=torch.LongTensor(shape)))
# C is always divisible by num_groups
# Due to the shape difference, we need to apply the weight and bias after
# the instance norm computation and reshape
weight_ = g.op("Constant", value_t=torch.tensor([1.] * num_groups).type(
'torch.' + input.type().scalarType() + 'Tensor'))
bias_ = g.op("Constant", value_t=torch.tensor([0.] * num_groups).type(
'torch.' + input.type().scalarType() + 'Tensor'))
norm_reshaped = g.op("InstanceNormalization", input_reshaped, weight_, bias_, epsilon_f=eps)
norm = g.op('Reshape', norm_reshaped, g.op("Shape", input))
if weight is None or weight.node().mustBeNone():
weight_value = torch.tensor([1.]).type(
'torch.' + input.type().scalarType() + 'Tensor')
weight = g.op("Constant", value_t=weight_value)
if bias is None or bias.node().mustBeNone():
bias_value = torch.tensor([0.]).type(
'torch.' + input.type().scalarType() + 'Tensor')
bias = g.op("Constant", value_t=bias_value)
# Norm has shape [N, C, *] so we reshape weight and bias to [C, *]
axes = list(range(1, input_rank - 1))
return add(g, mul(g, norm, sym_help._unsqueeze_helper(g, weight, axes)), sym_help._unsqueeze_helper(g, bias, axes))
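# Illustrative eager sketch (a hypothetical helper, not referenced by the exporter) of the
# reduction used above: group norm over C channels with G groups equals instance norm on the
# input reshaped to [N, G, -1], reshaped back afterwards; the per-channel affine transform
# applied at the end of the symbolic is omitted here.
def _example_group_norm_via_instance_norm(x, num_groups, eps=1e-5):
    import torch.nn.functional as F
    n = x.shape[0]
    reshaped = x.reshape(n, num_groups, -1)
    return F.instance_norm(reshaped, eps=eps).reshape_as(x)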
@parse_args('v', 'v', 'i')
def _weight_norm(g, weight_v, weight_g, dim):
rank = sym_help._get_tensor_rank(weight_v)
if rank is not None:
# W = g * ((v) / ||v||)
# Compute norm_except_dim for l2 norm. dim = None means over all dims
# torch's weight_norm module sets dim = -1 if it's None.
# This conflicts with the logic for negative axes that access dims backwards
# TODO: Might need a fix in torch's weight_norm module
axes = list(range(rank))
if dim is not None:
if dim < -1:
dim += rank
if dim != -1:
axes.remove(dim)
norm_v = norm(g, weight_v, 2, axes, 1)
div = g.op("Div", weight_v, norm_v)
return g.op("Mul", div, weight_g)
elif sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
return g.op("ATen", weight_v, weight_g, dim_i=dim, operator_s="_weight_norm")
else:
raise RuntimeError('Unsupported: ONNX export of _weight_norm for tensor '
'of unknown rank.')
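# Illustrative sketch (a hypothetical helper, not referenced by the exporter) of the
# decomposition above: weight_norm reconstructs W = g * v / ||v||, where the L2 norm is
# taken over every dim except `dim` (shown here for the common case of a non-negative dim).
def _example_weight_norm_reference(v, g_scale, dim=0):
    norm_dims = [d for d in range(v.dim()) if d != dim]
    return g_scale * v / v.norm(2, dim=norm_dims, keepdim=True)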
def dim(g, self):
'''Implement the dim functionality available for a pytorch tensor in ONNX'''
# ONNX does not support dim directly in this opset so we can use 2 ops to get the info
shape = g.op('Shape', self)
return g.op('Size', shape)
def __getitem_(g, self, i):
return select(g, self, g.op("Constant", value_t=torch.tensor([0])), i)
def take(g, self, index):
self_flattened = g.op('Reshape', self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)))
out = index_select(g, self_flattened, 0, index)
out = reshape_as(g, out, index)
return out
def _kl_div_log_target_impl(g, input, target):
diff_ = sub(g, target, input)
exp_ = exp(g, target)
output = mul(g, exp_, diff_)
return output
def _kl_div_non_log_target_impl(g, input, target):
log_ = log(g, target)
diff_ = sub(g, log_, input)
output_pos = mul(g, target, diff_)
zeros_ = zeros_like(g, output_pos)
mask_ = gt(g, target, g.op("Constant", value_t=torch.tensor(0)))
output = where(g, mask_, output_pos, zeros_)
return output
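# Illustrative sketch (a hypothetical helper, not referenced by the exporter) mirroring the
# two pointwise forms above: with log_target=True the term is exp(target) * (target - input);
# otherwise it is target * (log(target) - input), zeroed where target <= 0.
def _example_kl_div_pointwise(input, target, log_target=False):
    if log_target:
        return torch.exp(target) * (target - input)
    out = target * (torch.log(target) - input)
    return torch.where(target > 0, out, torch.zeros_like(out))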
@parse_args('v', 'v', 'i', 'b')
def kl_div(g, input, target, reduction, log_target):
if log_target:
output = _kl_div_log_target_impl(g, input, target)
else:
output = _kl_div_non_log_target_impl(g, input, target)
if reduction == 0:
return output
elif reduction == 1:
return g.op("ReduceMean", output, keepdims_i=0)
elif reduction == 2:
return sym_help._reducesum_helper(g, output, keepdims_i=0)
else:
return sym_help._onnx_unsupported("kl_div with reduction other than none, mean, or sum.")
@parse_args('v', 'v', 'is', 'i')
def as_strided(g, self, sizes, strides, offset=None):
sizes = sym_help._maybe_get_const(sizes, 'is')
rank = len(strides)
self_1d = g.op("Reshape", self, g.op("Constant", value_t=torch.tensor([-1], dtype=torch.int64)))
ind: Optional[torch.Tensor]
if not sym_help._is_value(sizes):
ind = torch.tensor([0], dtype=torch.long)
for i, (size, stride) in enumerate(zip(sizes, strides)):
r_size = [1] * rank
r_size[i] = -1
ind = ind + torch.arange(size).view(r_size) * stride
if offset:
ind = ind + offset
return g.op("Gather", self_1d, g.op("Constant", value_t=ind))
else:
ind = None
for i, stride in enumerate(strides):
r_size = [1] * rank
r_size[i] = -1
size = select(g, sizes, g.op("Constant", value_t=torch.tensor([0])), g.op("Constant", value_t=torch.tensor(i)))
tmp_ind = g.op("Reshape", arange(g, size, 4, None, None, None), g.op("Constant", value_t=torch.tensor(r_size)))
tmp_ind = g.op("Mul", tmp_ind, g.op("Constant", value_t=torch.tensor([stride])))
if ind is None:
ind = tmp_ind
else:
ind = g.op("Add", ind, tmp_ind)
if offset:
ind = g.op("Add", ind, g.op("Constant", torch.tensor([offset])))
return g.op("Gather", self_1d, ind)
def __derive_index(g, index, start, step):
return g.op("Add", start, g.op("Mul", index, step))
# Source code for aten op can be found here: pytorch/torch/csrc/jit/runtime/register_prim_ops.cpp
# if (step > 0 && lo < hi) {
# push(stack, 1 + (hi - 1 - lo) / step);
# } else if (step < 0 && lo > hi) {
# push(stack, 1 + (lo - 1 - hi) / (0 - step));
# } else {
# push(stack, 0);
# }
def __range_length(g, lo, hi, step):
sub = g.op("Sub", hi, lo)
div = g.op("Ceil", true_divide(g, sub, step))
return g.op("Cast", div, to_i=sym_help.cast_pytorch_to_onnx['Long'])
def linear(g, input, weight, bias):
rank = sym_help._get_tensor_rank(input)
weight = t(g, weight)
if rank == 2 and not bias.node().mustBeNone():
alpha = g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64))
beta = g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64))
output = addmm(g, bias, input, weight, alpha, beta)
else:
output = matmul(g, input, weight)
if not bias.node().mustBeNone():
output = add(g, bias, output)
return output
|
the-stack_0_22652 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds various types of targeting criteria to a given campaign.
To get campaigns, run get_campaigns.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
# Replace the value below with the ID of a feed that has been configured for
# location targeting, meaning it has an ENABLED FeedMapping with criterionType
# of 77. Feeds linked to a GMB account automatically have this FeedMapping.
# If you don't have such a feed, set this value to None.
LOCATION_FEED_ID = 'INSERT_LOCATION_FEED_ID_HERE'
def main(client, campaign_id, location_feed_id=None):
# Initialize appropriate service.
campaign_criterion_service = client.GetService(
'CampaignCriterionService', version='v201809')
# Create locations. The IDs can be found in the documentation or retrieved
# with the LocationCriterionService.
california = {
'xsi_type': 'Location',
'id': '21137'
}
mexico = {
'xsi_type': 'Location',
'id': '2484'
}
# Create languages. The IDs can be found in the documentation or retrieved
# with the ConstantDataService.
english = {
'xsi_type': 'Language',
'id': '1000'
}
spanish = {
'xsi_type': 'Language',
'id': '1003'
}
# Create a negative campaign criterion operation.
negative_campaign_criterion_operand = {
'xsi_type': 'NegativeCampaignCriterion',
'campaignId': campaign_id,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'jupiter cruise'
}
}
criteria = [california, mexico, english, spanish]
if location_feed_id:
# Distance targeting. Area of 10 miles around targets above.
criteria.append({
'xsi_type': 'LocationGroups',
'feedId': location_feed_id,
'matchingFunction': {
'operator': 'IDENTITY',
'lhsOperand': [{
'xsi_type': 'LocationExtensionOperand',
'radius': {
'xsi_type': 'ConstantOperand',
'type': 'DOUBLE',
'unit': 'MILES',
'doubleValue': 10
}
}]
}
})
# Create operations
operations = []
for criterion in criteria:
operations.append({
'operator': 'ADD',
'operand': {
'campaignId': campaign_id,
'criterion': criterion
}
})
# Add the negative campaign criterion.
operations.append({
'operator': 'ADD',
'operand': negative_campaign_criterion_operand
})
# Make the mutate request.
result = campaign_criterion_service.mutate(operations)
# Display the resulting campaign criteria.
for campaign_criterion in result['value']:
print('Campaign criterion with campaign id "%s", criterion id "%s", '
'and type "%s" was added.'
% (campaign_criterion['campaignId'],
campaign_criterion['criterion']['id'],
campaign_criterion['criterion']['type']))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID, LOCATION_FEED_ID)
|
the-stack_0_22654 | # CubETL
# Copyright (c) 2013-2019 Jose Juan Montes
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from dateutil import parser
from urllib import parse
import html
import logging
import mimetypes
import re
import string
from slugify import slugify
# Get an instance of a logger
logger = logging.getLogger(__name__)
def slug(value):
return slugify(value)
def slugu(value):
return slug(value).replace("-", "_")
def labelify(value):
return string.capwords(slugify(value).replace("-", " "))
def re_search(pattern, text, match = 0):
m = re.search(pattern, text)
return m.group(match)
def urlparse(value):
return parse.urlparse(value)
def html_unescape(value):
    # The Python 2 HTMLParser().unescape() helper is gone; the stdlib html module provides the same behaviour.
    return html.unescape(value)
def mimetype_guess(url, strict = False):
return mimetypes.guess_type(url, strict)[0]
def parsebool(value):
if (isinstance(value, bool)): return value
try:
v = value.strip().lower()
if (v == "true"):
return True
elif (v == "false"):
return False
else:
raise Exception("Invalid boolean value '%s' (valid values are 'True' or 'False')" % value)
except Exception as e:
raise Exception("Invalid boolean value '%r' (valid values are 'True' or 'False')" % value)
def extract_date(value, dayfirst, fuzzy=True):
if value is None:
raise ValueError("Tried to extract date from null value.")
datetime = parser.parse(value, dayfirst = dayfirst, fuzzy = fuzzy)
return datetime
def extract_number(value):
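    # Illustrative behaviour, based on the rules implemented below:
    #   "1,234.56"      -> 1234.56   (thousands separator stripped, decimal part kept)
    #   "EUR 1.234.567" -> 1234567   (a repeated separator means thousands grouping -> int)
    #   "1.500"         -> 1.5       (a single separator with no decimal part is treated as a decimal point)
    #   "abc"           -> None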
if value is None: return None
if isinstance(value, int): return value
if isinstance(value, float): return value
text = value
text = re.sub(r'\&\#[0-9A-Fa-f]+', '', text)
text = re.sub(r' +', ' ', text)
_pattern = r"""(?x) # enable verbose mode (which ignores whitespace and comments)
^ # start of the input
[^\d+-\.]* # prefixed junk
(?P<number> # capturing group for the whole number
(?P<sign>[+-])? # sign group (optional)
(?P<integer_part> # capturing group for the integer part
\d{1,3} # leading digits in an int with a thousands separator
(?P<sep> # capturing group for the thousands separator
[ ,.] # the allowed separator characters
)
\d{3} # exactly three digits after the separator
(?: # non-capturing group
(?P=sep) # the same separator again (a backreference)
\d{3} # exactly three more digits
)* # repeated 0 or more times
| # or
\d+ # simple integer (just digits with no separator)
)? # integer part is optional, to allow numbers like ".5"
(?P<decimal_part> # capturing group for the decimal part of the number
(?P<point> # capturing group for the decimal point
(?(sep) # conditional pattern, only tested if sep matched
(?! # a negative lookahead
(?P=sep) # backreference to the separator
)
)
[.,'] # the accepted decimal point characters
)
\d+ # one or more digits after the decimal point
)? # the whole decimal part is optional
)
[^\d]* # suffixed junk
$ # end of the input
"""
match = re.match(_pattern, text)
if match is None or not (match.group("integer_part") or
match.group("decimal_part")): # failed to match
return None # consider raising an exception instead
num_str = match.group("number") # get all of the number, without the junk
sep = match.group("sep")
if sep:
sep_count = num_str.count(sep)
num_str = num_str.replace(sep, "") # remove thousands separators
else:
sep_count = 0
if match.group("decimal_part"):
point = match.group("point")
if point != ".":
num_str = num_str.replace(point, ".") # regularize the decimal point
return float(num_str)
else:
# Special case for 1.500 (we want it to be parsed as float)
if (sep and sep != ' ' and sep_count == 1 ):
return float(match.group("number").replace(sep, ".")) # regularize the decimal point
return int(num_str)
|
the-stack_0_22656 | import os
from datetime import datetime
import gym_minigrid # MUST BE IMPORTED TO SEE ENVIRONMENTS
from gym_minigrid.wrappers import FlatObsWrapper
import torch as th
from stable_baselines3.common.vec_env import DummyVecEnv, VecMonitor
from stable_baselines3 import PPO
from novgrid.utils.parser import getparser
from novgrid.utils.novgrid_utils import make_env
from novgrid.novelty_generation.novelty_wrappers import *
device = th.device('cuda' if th.cuda.is_available() else 'cpu')
def main(args):
# Set up tracking
now = datetime.now()
dt_string = now.strftime("%d-%m-%Y_%H-%M-%S")
log_dir = os.path.abspath('./logs/' + args.saves_logs + '_' + dt_string)
os.makedirs(log_dir)
# Create environments
novelty_wrapper = eval(args.novelty_wrapper)
env_wrappers = [novelty_wrapper, FlatObsWrapper]
env_list = [make_env(args.env, log_dir, env_wrappers, args.novelty_episode) for _ in range(args.num_workers)]
env = VecMonitor(DummyVecEnv(env_list))
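    # DummyVecEnv expects a list of zero-argument constructors; make_env is assumed to return such a
    # thunk here, and VecMonitor adds episode reward/length logging on top of the vectorised env.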
# Set up and create model
model = PPO("MlpPolicy",
env,
learning_rate=args.learning_rate,
verbose=1,
tensorboard_log=log_dir,
device=device)
if args.load_model:
print(f'loading model {args.load_model}')
model.set_parameters(args.load_model)
for exp in range(args.num_exp):
model.learn(
total_timesteps=args.total_timesteps,
tb_log_name='run_{}'.format(exp)
)
model.save(log_dir + '/' + 'run_{}'.format(exp) + '_final_model')
if __name__ == "__main__":
config_args = getparser()
main(config_args)
|
the-stack_0_22657 | # -*- coding: utf-8 -*-
'''
Joyent Cloud Module
===================
The Joyent Cloud module is used to interact with the Joyent cloud system.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/joyent.conf``:
.. code-block:: yaml
my-joyent-config:
driver: joyent
# The Joyent login user
user: fred
# The Joyent user's password
password: saltybacon
# The location of the ssh private key that can log into the new VM
private_key: /root/mykey.pem
# The name of the private key
private_key: mykey
When creating your profiles for the joyent cloud, add the location attribute to
the profile, this will automatically get picked up when performing tasks
associated with that vm. An example profile might look like:
.. code-block:: yaml
joyent_512:
provider: my-joyent-config
size: Extra Small 512 MB
image: centos-6
location: us-east-1
This driver can also be used with the Joyent SmartDataCenter project. More
details can be found at:
.. _`SmartDataCenter`: https://github.com/joyent/sdc
Using SDC requires that an api_host_suffix is set. The default value for this is
`.api.joyentcloud.com`. All characters, including the leading `.`, should be
included:
.. code-block:: yaml
api_host_suffix: .api.myhostname.com
:depends: PyCrypto
'''
# pylint: disable=invalid-name,function-redefined
# Import python libs
from __future__ import absolute_import
import os
import json
import logging
import base64
import pprint
import inspect
import yaml
import datetime
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
# Import salt libs
import salt.ext.six as six
from salt.ext.six.moves import http_client # pylint: disable=import-error,no-name-in-module
import salt.utils.http
import salt.utils.cloud
import salt.config as config
from salt.utils.cloud import is_public_ip
from salt.cloud.libcloudfuncs import node_state
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout,
SaltCloudNotFound,
)
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'joyent'
JOYENT_API_HOST_SUFFIX = '.api.joyentcloud.com'
JOYENT_API_VERSION = '~7.2'
JOYENT_LOCATIONS = {
'us-east-1': 'North Virginia, USA',
'us-west-1': 'Bay Area, California, USA',
'us-sw-1': 'Las Vegas, Nevada, USA',
'eu-ams-1': 'Amsterdam, Netherlands'
}
DEFAULT_LOCATION = 'us-east-1'
# joyent no longer reports on all data centers, so setting this value to true
# causes the list_nodes function to get information on machines from all
# data centers
POLL_ALL_LOCATIONS = True
VALID_RESPONSE_CODES = [
http_client.OK,
http_client.ACCEPTED,
http_client.CREATED,
http_client.NO_CONTENT
]
# Only load in this module if the Joyent configurations are in place
def __virtual__():
'''
Check for Joyent configs
'''
if get_configured_provider() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('user', 'password')
)
def get_image(vm_):
'''
Return the image object to use
'''
images = avail_images()
vm_image = config.get_cloud_config_value('image', vm_, __opts__)
if vm_image and str(vm_image) in images:
images[vm_image]['name'] = images[vm_image]['id']
return images[vm_image]
raise SaltCloudNotFound(
'The specified image, {0!r}, could not be found.'.format(vm_image)
)
def get_size(vm_):
'''
Return the VM's size object
'''
sizes = avail_sizes()
vm_size = config.get_cloud_config_value('size', vm_, __opts__)
if not vm_size:
raise SaltCloudNotFound('No size specified for this VM.')
if vm_size and str(vm_size) in sizes:
return sizes[vm_size]
raise SaltCloudNotFound(
'The specified size, {0!r}, could not be found.'.format(vm_size)
)
def query_instance(vm_=None, call=None):
'''
Query an instance upon creation from the Joyent API
'''
if isinstance(vm_, six.string_types) and call == 'action':
vm_ = {'name': vm_, 'provider': 'joyent'}
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The query_instance action must be called with -a or --action.'
)
salt.utils.cloud.fire_event(
'event',
'querying instance',
'salt/cloud/{0}/querying'.format(vm_['name']),
transport=__opts__['transport']
)
def _query_ip_address():
data = show_instance(vm_['name'], call='action')
if not data:
log.error(
'There was an error while querying Joyent. Empty response'
)
# Trigger a failure in the wait for IP function
return False
if isinstance(data, dict) and 'error' in data:
log.warn(
'There was an error in the query {0}'.format(data['error']) # pylint: disable=E1126
)
# Trigger a failure in the wait for IP function
return False
log.debug('Returned query data: {0}'.format(data))
if 'primaryIp' in data[1]:
return data[1]['primaryIp']
return None
try:
data = salt.utils.cloud.wait_for_ip(
_query_ip_address,
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
pass
#destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(str(exc))
return data
def create(vm_):
'''
Create a single VM from a data dict
CLI Example:
.. code-block:: bash
salt-cloud -p profile_name vm_name
'''
try:
# Check for required profile parameters before sending any API calls.
if vm_['profile'] and config.is_profile_configured(__opts__,
__active_provider_name__ or 'joyent',
vm_['profile']) is False:
return False
except AttributeError:
pass
# Since using "provider: <provider-engine>" is deprecated, alias provider
# to use driver: "driver: <provider-engine>"
if 'provider' in vm_:
vm_['driver'] = vm_.pop('provider')
key_filename = config.get_cloud_config_value(
'private_key', vm_, __opts__, search_global=False, default=None
)
salt.utils.cloud.fire_event(
'event',
'starting create',
'salt/cloud/{0}/creating'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
},
transport=__opts__['transport']
)
log.info(
'Creating Cloud VM {0} in {1}'.format(
vm_['name'],
vm_.get('location', DEFAULT_LOCATION)
)
)
# added . for fqdn hostnames
salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9-.')
kwargs = {
'name': vm_['name'],
'image': get_image(vm_),
'size': get_size(vm_),
'location': vm_.get('location', DEFAULT_LOCATION)
}
salt.utils.cloud.fire_event(
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(vm_['name']),
{'kwargs': kwargs},
transport=__opts__['transport']
)
try:
data = create_node(**kwargs)
except Exception as exc:
log.error(
'Error creating {0} on JOYENT\n\n'
'The following exception was thrown when trying to '
'run the initial deployment: \n{1}'.format(
vm_['name'], str(exc)
),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return False
query_instance(vm_)
data = show_instance(vm_['name'], call='action')
vm_['key_filename'] = key_filename
vm_['ssh_host'] = data[1]['primaryIp']
salt.utils.cloud.bootstrap(vm_, __opts__)
salt.utils.cloud.fire_event(
'event',
'created instance',
'salt/cloud/{0}/created'.format(vm_['name']),
{
'name': vm_['name'],
'profile': vm_['profile'],
'provider': vm_['driver'],
},
transport=__opts__['transport']
)
return data[1]
def create_node(**kwargs):
'''
convenience function to make the rest api call for node creation.
'''
name = kwargs['name']
size = kwargs['size']
image = kwargs['image']
location = kwargs['location']
data = json.dumps({
'name': name,
'package': size['name'],
'image': image['name']
})
try:
ret = query(command='/my/machines', data=data, method='POST',
location=location)
if ret[0] in VALID_RESPONSE_CODES:
return ret[1]
except Exception as exc:
log.error(
'Failed to create node {0}: {1}'.format(name, exc)
)
return {}
def destroy(name, call=None):
'''
destroy a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: array of booleans , true if successfully stopped and true if
successfully removed
CLI Example:
.. code-block:: bash
salt-cloud -d vm_name
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
salt.utils.cloud.fire_event(
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
{'name': name},
transport=__opts__['transport']
)
node = get_node(name)
ret = query(command='my/machines/{0}'.format(node['id']),
location=node['location'], method='DELETE')
salt.utils.cloud.fire_event(
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
{'name': name},
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
return ret[0] in VALID_RESPONSE_CODES
def reboot(name, call=None):
'''
reboot a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: true if successful
CLI Example:
.. code-block:: bash
salt-cloud -a reboot vm_name
'''
node = get_node(name)
ret = take_action(name=name, call=call, method='POST',
command='/my/machines/{0}'.format(node['id']),
location=node['location'], data={'action': 'reboot'})
return ret[0] in VALID_RESPONSE_CODES
def stop(name, call=None):
'''
stop a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: true if successful
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
node = get_node(name)
ret = take_action(name=name, call=call, method='POST',
command='/my/machines/{0}'.format(node['id']),
location=node['location'], data={'action': 'stop'})
return ret[0] in VALID_RESPONSE_CODES
def start(name, call=None):
'''
start a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: true if successful
CLI Example:
.. code-block:: bash
salt-cloud -a start vm_name
'''
node = get_node(name)
ret = take_action(name=name, call=call, method='POST',
command='/my/machines/{0}'.format(node['id']),
location=node['location'], data={'action': 'start'})
return ret[0] in VALID_RESPONSE_CODES
def take_action(name=None, call=None, command=None, data=None, method='GET',
location=DEFAULT_LOCATION):
'''
take action call used by start,stop, reboot
:param name: name given to the machine
:param call: call value in this case is 'action'
:command: api path
:data: any data to be passed to the api, must be in json format
:method: GET,POST,or DELETE
:location: data center to execute the command on
:return: true if successful
'''
caller = inspect.stack()[1][3]
if call != 'action':
raise SaltCloudSystemExit(
'This action must be called with -a or --action.'
)
if data:
data = json.dumps(data)
ret = []
try:
ret = query(command=command, data=data, method=method,
location=location)
log.info('Success {0} for node {1}'.format(caller, name))
except Exception as exc:
if 'InvalidState' in str(exc):
ret = [200, {}]
else:
log.error(
'Failed to invoke {0} node {1}: {2}'.format(caller, name, exc),
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
ret = [100, {}]
return ret
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
def get_location(vm_=None):
'''
Return the joyent data center to use, in this order:
- CLI parameter
- VM parameter
- Cloud profile setting
'''
return __opts__.get(
'location',
config.get_cloud_config_value(
'location',
vm_ or get_configured_provider(),
__opts__,
default=DEFAULT_LOCATION,
search_global=False
)
)
def avail_locations(call=None):
'''
List all available locations
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
for key in JOYENT_LOCATIONS:
ret[key] = {
'name': key,
'region': JOYENT_LOCATIONS[key]
}
# this can be enabled when the bug in the joyent get data centers call is
# corrected, currently only the European dc (new api) returns the correct
# values
# ret = {}
# rcode, datacenters = query(
# command='my/datacenters', location=DEFAULT_LOCATION, method='GET'
# )
# if rcode in VALID_RESPONSE_CODES and isinstance(datacenters, dict):
# for key in datacenters:
# ret[key] = {
# 'name': key,
# 'url': datacenters[key]
# }
return ret
def has_method(obj, method_name):
'''
Find if the provided object has a specific method
'''
if method_name in dir(obj):
return True
log.error(
'Method {0!r} not yet supported!'.format(
method_name
)
)
return False
def key_list(items=None):
'''
convert list to dictionary using the key as the identifier
:param items: array to iterate over
:return: dictionary
'''
if items is None:
items = []
ret = {}
if items and isinstance(items, list):
for item in items:
if 'name' in item:
# added for consistency with old code
if 'id' not in item:
item['id'] = item['name']
ret[item['name']] = item
return ret
def get_node(name):
'''
gets the node from the full node list by name
:param name: name of the vm
:return: node object
'''
nodes = list_nodes()
if name in nodes:
return nodes[name]
return None
def show_instance(name, call=None):
'''
get details about a machine
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: machine information
CLI Example:
.. code-block:: bash
salt-cloud -a show_instance vm_name
'''
node = get_node(name)
ret = query(command='my/machines/{0}'.format(node['id']),
location=node['location'], method='GET')
return ret
def joyent_node_state(id_):
'''
Convert joyent returned state to state common to other data center return
values for consistency
:param id_: joyent state value
:return: state value
'''
states = {'running': 0,
'stopped': 2,
'stopping': 2,
'provisioning': 3,
'deleted': 2,
'unknown': 4}
if id_ not in states:
id_ = 'unknown'
return node_state(states[id_])
def reformat_node(item=None, full=False):
'''
Reformat the returned data from joyent, determine public/private IPs and
strip out fields if necessary to provide either full or brief content.
:param item: node dictionary
:param full: full or brief output
:return: dict
'''
desired_keys = [
'id', 'name', 'state', 'public_ips', 'private_ips', 'size', 'image',
'location'
]
item['private_ips'] = []
item['public_ips'] = []
if 'ips' in item:
for ip in item['ips']:
if is_public_ip(ip):
item['public_ips'].append(ip)
else:
item['private_ips'].append(ip)
# add any undefined desired keys
for key in desired_keys:
if key not in item:
item[key] = None
# remove all the extra key value pairs to provide a brief listing
to_del = []
if not full:
        for key in six.iterkeys(item):  # deletion is deferred to the loop below, so no copy is needed
if key not in desired_keys:
to_del.append(key)
for key in to_del:
del item[key]
if 'state' in item:
item['state'] = joyent_node_state(item['state'])
return item
def list_nodes(full=False, call=None):
'''
list of nodes, keeping only a brief listing
CLI Example:
.. code-block:: bash
salt-cloud -Q
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
ret = {}
if POLL_ALL_LOCATIONS:
for location in JOYENT_LOCATIONS:
result = query(command='my/machines', location=location,
method='GET')
nodes = result[1]
for node in nodes:
if 'name' in node:
node['location'] = location
ret[node['name']] = reformat_node(item=node, full=full)
else:
result = query(command='my/machines', location=DEFAULT_LOCATION,
method='GET')
nodes = result[1]
for node in nodes:
if 'name' in node:
node['location'] = DEFAULT_LOCATION
ret[node['name']] = reformat_node(item=node, full=full)
return ret
def list_nodes_full(call=None):
'''
list of nodes, maintaining all content provided from joyent listings
CLI Example:
.. code-block:: bash
salt-cloud -F
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or --function.'
)
return list_nodes(full=True)
def list_nodes_select(call=None):
'''
Return a list of the VMs that are on the provider, with select fields
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full('function'), __opts__['query.selection'], call,
)
def _get_proto():
'''
Checks configuration to see whether the user has SSL turned on. Default is:
.. code-block:: yaml
use_ssl: True
'''
use_ssl = config.get_cloud_config_value(
'use_ssl',
get_configured_provider(),
__opts__,
search_global=False,
default=True
)
if use_ssl is True:
return 'https'
return 'http'
def avail_images(call=None):
'''
Get list of available images
CLI Example:
.. code-block:: bash
salt-cloud --list-images
Can use a custom URL for images. Default is:
.. code-block:: yaml
image_url: images.joyent.com/image
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
user = config.get_cloud_config_value(
'user', get_configured_provider(), __opts__, search_global=False
)
img_url = config.get_cloud_config_value(
'image_url',
get_configured_provider(),
__opts__,
search_global=False,
default='{0}{1}/{2}/images'.format(DEFAULT_LOCATION, JOYENT_API_HOST_SUFFIX, user)
)
if not img_url.startswith('http://') and not img_url.startswith('https://'):
img_url = '{0}://{1}'.format(_get_proto(), img_url)
rcode, data = query(command='my/images', method='GET')
log.debug(data)
ret = {}
for image in data:
ret[image['name']] = image
return ret
def avail_sizes(call=None):
'''
get list of available packages
CLI Example:
.. code-block:: bash
salt-cloud --list-sizes
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
rcode, items = query(command='/my/packages')
if rcode not in VALID_RESPONSE_CODES:
return {}
return key_list(items=items)
def list_keys(kwargs=None, call=None):
'''
List the keys available
'''
if call != 'function':
log.error(
'The list_keys function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
ret = {}
rcode, data = query(command='my/keys', method='GET')
for pair in data:
ret[pair['name']] = pair['key']
return {'keys': ret}
def show_key(kwargs=None, call=None):
'''
    Show a single SSH key, looked up by its keyname
'''
if call != 'function':
log.error(
            'The show_key function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
rcode, data = query(
command='my/keys/{0}'.format(kwargs['keyname']),
method='GET',
)
return {'keys': {data['name']: data['key']}}
def import_key(kwargs=None, call=None):
'''
    Import an SSH public key from a local keyfile
CLI Example:
.. code-block:: bash
salt-cloud -f import_key joyent keyname=mykey keyfile=/tmp/mykey.pub
'''
if call != 'function':
log.error(
'The import_key function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
if 'keyfile' not in kwargs:
log.error('The location of the SSH keyfile is required.')
return False
if not os.path.isfile(kwargs['keyfile']):
log.error('The specified keyfile ({0}) does not exist.'.format(
kwargs['keyfile']
))
return False
with salt.utils.fopen(kwargs['keyfile'], 'r') as fp_:
kwargs['key'] = fp_.read()
send_data = {'name': kwargs['keyname'], 'key': kwargs['key']}
kwargs['data'] = json.dumps(send_data)
rcode, data = query(
command='my/keys',
method='POST',
data=kwargs['data'],
)
log.debug(pprint.pformat(data))
return {'keys': {data['name']: data['key']}}
def delete_key(kwargs=None, call=None):
'''
    Delete an SSH key, looked up by its keyname
CLI Example:
.. code-block:: bash
salt-cloud -f delete_key joyent keyname=mykey
'''
if call != 'function':
log.error(
            'The delete_key function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'keyname' not in kwargs:
log.error('A keyname is required.')
return False
rcode, data = query(
command='my/keys/{0}'.format(kwargs['keyname']),
method='DELETE',
)
return data
def get_location_path(location=DEFAULT_LOCATION, api_host_suffix=JOYENT_API_HOST_SUFFIX):
'''
create url from location variable
:param location: joyent data center location
:return: url
'''
return '{0}://{1}{2}'.format(_get_proto(), location, api_host_suffix)
def query(action=None,
command=None,
args=None,
method='GET',
location=None,
data=None):
'''
Make a web call to Joyent
'''
user = config.get_cloud_config_value(
'user', get_configured_provider(), __opts__, search_global=False
)
password = config.get_cloud_config_value(
'password', get_configured_provider(), __opts__,
search_global=False
)
verify_ssl = config.get_cloud_config_value(
'verify_ssl', get_configured_provider(), __opts__,
search_global=False, default=True
)
ssh_keyfile = config.get_cloud_config_value(
'private_key', get_configured_provider(), __opts__,
search_global=False, default=True
)
ssh_keyname = config.get_cloud_config_value(
'keyname', get_configured_provider(), __opts__,
search_global=False, default=True
)
if not location:
location = get_location()
api_host_suffix = config.get_cloud_config_value(
'api_host_suffix', get_configured_provider(), __opts__,
search_global=False, default=JOYENT_API_HOST_SUFFIX
)
path = get_location_path(location=location, api_host_suffix=api_host_suffix)
if action:
path += action
if command:
path += '/{0}'.format(command)
log.debug('User: {0!r} on PATH: {1}'.format(user, path))
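    # CloudAPI authenticates with HTTP Signature auth: the Date header value is signed with the
    # account's private RSA key (SHA-256, PKCS#1 v1.5) and sent base64-encoded in the Authorization
    # header together with the key id '/<user>/keys/<keyname>'.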
timenow = datetime.datetime.utcnow()
timestamp = timenow.strftime('%a, %d %b %Y %H:%M:%S %Z').strip()
with salt.utils.fopen(ssh_keyfile, 'r') as kh_:
rsa_key = RSA.importKey(kh_)
rsa_ = PKCS1_v1_5.new(rsa_key)
hash_ = SHA256.new()
hash_.update(timestamp)
signed = base64.b64encode(rsa_.sign(hash_))
keyid = '/{0}/keys/{1}'.format(user, ssh_keyname)
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Api-Version': JOYENT_API_VERSION,
'Date': timestamp,
'Authorization': 'Signature keyId="{0}",algorithm="rsa-sha256" {1}'.format(
keyid,
signed
),
}
if not isinstance(args, dict):
args = {}
# post form data
if not data:
data = json.dumps({})
return_content = None
result = salt.utils.http.query(
path,
method,
params=args,
header_dict=headers,
data=data,
decode=False,
text=True,
status=True,
headers=True,
verify=verify_ssl,
opts=__opts__,
)
log.debug(
'Joyent Response Status Code: {0}'.format(
result['status']
)
)
if 'Content-Length' in result['headers']:
content = result['text']
return_content = yaml.safe_load(content)
return [result['status'], return_content]
|
the-stack_0_22659 | import sys
import textwrap
import pkg_resources
import pip.download
from pip.basecommand import Command
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from distutils.version import StrictVersion, LooseVersion
class SearchCommand(Command):
name = 'search'
usage = '%prog QUERY'
summary = 'Search PyPI'
def __init__(self):
super(SearchCommand, self).__init__()
self.parser.add_option(
'--index',
dest='index',
metavar='URL',
default='http://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
def run(self, options, args):
if not args:
logger.warn('ERROR: Missing required argument (search query).')
return
query = args
index_url = options.index
pypi_hits = self.search(query, index_url)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
def search(self, query, index_url):
pypi = xmlrpclib.ServerProxy(index_url, pip.download.xmlrpclib_transport)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if name not in packages.keys():
packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
else:
packages[name]['versions'].append(version)
# if this is the highest version, replace summary and score
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
# each record has a unique name now, so we will convert the dict into a list sorted by score
package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
return package_list
def print_results(hits, name_column_width=25, terminal_width=None):
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
if terminal_width is not None:
# wrap and indent summary to fit terminal
summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%s - %s' % (name.ljust(name_column_width), summary)
try:
logger.notify(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
logger.indent += 2
try:
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.notify('INSTALLED: %s (latest)' % dist.version)
else:
logger.notify('INSTALLED: %s' % dist.version)
logger.notify('LATEST: %s' % latest)
finally:
logger.indent -= 2
except UnicodeEncodeError:
pass
def compare_versions(version1, version2):
try:
return cmp(StrictVersion(version1), StrictVersion(version2))
# in case of abnormal version number, fall back to LooseVersion
except ValueError:
return cmp(LooseVersion(version1), LooseVersion(version2))
def highest_version(versions):
return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
SearchCommand()
|
the-stack_0_22661 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^list/(?P<category_id>\d+)/(?P<page_num>\d+)/$', views.ListView.as_view(),name='list'),
    # Hot-sales ranking
url(r'^hot/(?P<category_id>\d+)/$', views.HotView.as_view(), name='list'),
url(r'^detail/(?P<sku_id>\d+)/$', views.DetailView.as_view(), name='detail'),
    # Daily visit count for a goods category: detail/visit/(?P<category_id>\d+)/
url(r'^detail/visit/(?P<category_id>\d+)/$', views.DetailVisitView.as_view(), name='detail'),
]
|
the-stack_0_22663 | import os
import sys
from PIL import Image
import numpy as np
# A function that creates initial points for the centroids.
def initialize_K_centroids(X, K):
m = len(X)
return X[np.random.choice(m, K, replace=False), :]
# A function to find the closest centroid for each training example
def find_closest_centroids(X, centroids):
m = len(X)
c = np.zeros(m)
for i in range(m):
distances = np.linalg.norm(X[i] - centroids, axis=1)
c[i] = np.argmin(distances)
return c
# Recompute each centroid as the mean of the examples currently assigned to it
def compute_means(X, idx, K):
_, n = X.shape
centroids = np.zeros((K, n))
for k in range(K):
examples = X[np.where(idx == k)]
mean = [np.mean(column) for column in examples.T]
centroids[k] = mean
return centroids
# Find K-means
def find_k_means(X, K, max_iters=10):
centroids = initialize_K_centroids(X, K)
previous_centroids = centroids
for _ in range(max_iters):
idx = find_closest_centroids(X, centroids)
centroids = compute_means(X, idx, K)
        if (centroids == previous_centroids).all():
            # The centroids aren't moving anymore; return them with the latest assignments so the
            # caller can always unpack (centroids, idx).
            return centroids, idx
else:
previous_centroids = centroids
return centroids, idx
# Get input Image
image_path = './test_goku.png'
# Load Image from the path and return as a Numpy array
def load_image(path):
image = Image.open(path)
return np.asarray(image) / 255
image = load_image(image_path)
w, h, d = image.shape  # NumPy shape order is (rows, cols, channels)
print(F'Image found with shape: {w} x {h} x {d} (rows x cols x channels)')
X = image.reshape((w * h, d))
K = 12 # The desired number of colors in the compressed image
# Get new colors with K-means
colors, _ = find_k_means(X, K, max_iters=20)
idx = find_closest_centroids(X, colors)
# Image Reconstruction
idx = np.array(idx, dtype=np.uint8)
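# colors[idx, :] looks up each pixel's centroid colour; the uint8 cast of idx above is only safe while
# K <= 256 distinct clusters are used.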
X_reconstructed = np.array(colors[idx, :] * 255, dtype=np.uint8).reshape((w, h, d))
compressed_image = Image.fromarray(X_reconstructed)
compressed_image.save('out.png')
# Getting file stats
def convert_bytes(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def file_size(file_path):
if os.path.isfile(file_path):
file_info = os.stat(file_path)
return convert_bytes(file_info.st_size)
print(F"Original Image size is {file_size(image_path)}")
print(F"After Compression, the size is {file_size(r'out.png')}")
|
the-stack_0_22664 | #! /usr/bin/env python3
import sys
import argparse
import simplejson as json
from subprocess import Popen, PIPE
import difflib
def postprocess_sections(sections, postprocessor):
for section in sections:
if section["type"] == "speech":
for turn in section.get("turns", []):
words_full = turn["words"]
if len(words_full) > 0:
words_full_postprocessed = []
words = [w["word"] for w in turn["words"]]
words_str = " ".join(words)
if len(words_str) > 0:
postprocessor.stdin.write((words_str + "\n").encode('utf-8'))
postprocessor.stdin.flush()
words_str_postprocessed = postprocessor.stdout.readline().strip().decode('utf-8')
words_postprocessed = words_str_postprocessed.split()
s = difflib.SequenceMatcher(None, words, words_postprocessed)
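                    # difflib aligns the original and normalised token sequences; only 'equal' and
                    # 'replace' opcodes are expected, because the external normaliser is only allowed
                    # to rewrite words (or word blocks) in place.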
for tag, i1, i2, j1, j2 in s.get_opcodes():
if tag in ["insert", "delete"]:
print("Warning: postprocessor should only replace words (or word blocks), but [%s] detected % tag", file=sys.stderr)
words_full_postprocessed = words_full
break
else:
if tag == "equal":
words_full_postprocessed.extend(words_full[i1:i2])
elif tag == "replace":
new_word = {"word" : " ".join(words_postprocessed[j1:j2])}
new_word["start"] = words_full[i1]["start"]
for key in words_full[i2-1].keys():
if key not in ["word", "start"]:
new_word[key] = words_full[i2-1][key]
if "word_with_punctuation" in new_word:
new_word["word_with_punctuation"] = new_word["word"] + new_word["punctuation"]
new_word["unnormalized_words"] = words_full[i1:i2]
if "confidence" in new_word:
new_word["confidence"] = min([w["confidence"] for w in words_full[i1:i2]])
words_full_postprocessed.append(new_word)
turn["words"] = words_full_postprocessed
turn["unnormalized_transcript"] = turn["transcript"]
if "word_with_punctuation" in turn["words"][0]:
turn["transcript"] = " ".join([w["word_with_punctuation"] for w in turn["words"]])
else:
turn["transcript"] = " ".join([w["word"] for w in turn["words"]])
if __name__ == '__main__':
parser = argparse.ArgumentParser("Postprocesses JSON text using an external program")
parser.add_argument('cmd', help="Normalizer command (pipe)")
parser.add_argument('json')
args = parser.parse_args()
postprocessor = Popen(args.cmd, shell=True, stdin=PIPE, stdout=PIPE)
trans = json.load(open(args.json))
postprocess_sections(trans["sections"], postprocessor)
print(json.dumps(trans, sort_keys=False, indent=4))
|
the-stack_0_22665 | from zope.interface import implementer
from wals3.interfaces import IBlog
from . import wordpress
@implementer(IBlog)
class Blog(object):
def __init__(self, settings, prefix='blog.'):
self.host = settings[prefix + 'host']
self.wp = wordpress.Client(
self.host, settings[prefix + 'user'], settings[prefix + 'password'])
def url(self, path=None):
path = path or '/'
if not path.startswith('/'):
path = '/' + path
return 'http://%s%s' % (self.host, path)
def _set_category(self, **cat):
return list(self.wp.set_categories([cat]).values())[0]
def post_url(self, obj, req, create=False):
res = self.url('%s/' % obj.wp_slug)
if create and not self.wp.get_post_id_from_path(res):
# create categories if missing:
languageCat, chapterCat, areaCat = None, None, None
for cat in self.wp.get_categories():
if cat['name'] == 'Languages':
languageCat = cat['id']
if cat['name'] == 'Chapters':
chapterCat = cat['id']
if cat['name'] == obj.parameter.chapter.area.name:
areaCat = cat['id']
if languageCat is None:
languageCat = self._set_category(name='Languages', slug='languages')
if chapterCat is None:
chapterCat = self._set_category(name='Chapters', slug='chapters')
if areaCat is None:
areaCat = self._set_category(
name=obj.parameter.chapter.area.name,
parent_id=chapterCat)
# now create the post:
categories = [
dict(name=obj.parameter.name, parent_id=areaCat),
dict(name=obj.language.name, parent_id=languageCat)]
self.wp.create_post(
'Datapoint %s' % obj.name,
'Discuss WALS Datapoint <a href="http://%s%s">%s</a>.' % (
req.dataset.domain, req.resource_path(obj), obj.name),
categories=categories,
published=True,
wp_slug=obj.wp_slug)
return res
def feed_path(self, obj, req):
return '%s/feed' % (obj if isinstance(obj, str) else obj.wp_slug,)
|
the-stack_0_22666 | import re
from django.core.management import BaseCommand
from gcutils.bigquery import Client, TableExporter
from gcutils.storage import Client as StorageClient
class Command(BaseCommand):
def handle(self, *args, **kwargs):
self.backup_table("prescribing_v2")
self.backup_table("practice_statistics")
def backup_table(self, table_name):
client = Client("hscic")
sql = "SELECT max(month) FROM {hscic}.%s" % table_name
latest_date = client.query(sql).rows[0][0]
latest_year_and_month = latest_date.strftime("%Y_%m")
table = client.get_table(table_name)
storage_client = StorageClient()
bucket = storage_client.bucket()
year_and_months = set()
prefix_base = "backups/{}/".format(table_name)
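        # Existing backups live under backups/<table_name>/<YYYY_MM>/..., so the months that are
        # already backed up can be read straight out of the blob paths.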
for blob in bucket.list_blobs(prefix=prefix_base):
            match = re.search(r"/(\d{4}_\d{2})/", blob.name)
year_and_months.add(match.groups()[0])
if latest_year_and_month in year_and_months:
print(
"{} table already backed up for {}".format(
table_name, latest_year_and_month
)
)
return
storage_prefix = "{}/{}/{}-".format(
prefix_base, latest_year_and_month, table_name
)
exporter = TableExporter(table, storage_prefix)
exporter.export_to_storage()
|
the-stack_0_22667 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import itertools
from sqlalchemy.orm import contains_eager, joinedload, load_only, raiseload, selectinload, subqueryload, undefer
from indico.core.db import db
from indico.core.db.sqlalchemy.links import LinkType
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.core.db.sqlalchemy.util.queries import get_n_matching
from indico.modules.attachments.models.attachments import Attachment
from indico.modules.attachments.models.folders import AttachmentFolder
from indico.modules.attachments.models.principals import AttachmentFolderPrincipal, AttachmentPrincipal
from indico.modules.categories import Category
from indico.modules.categories.models.principals import CategoryPrincipal
from indico.modules.events import Event
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.principals import ContributionPrincipal
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.events.notes.models.notes import EventNote, EventNoteRevision
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.events.sessions.models.principals import SessionPrincipal
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.search.base import IndicoSearchProvider, SearchTarget
from indico.modules.search.result_schemas import (AttachmentResultSchema, CategoryResultSchema,
ContributionResultSchema, EventNoteResultSchema, EventResultSchema)
from indico.modules.search.schemas import (AttachmentSchema, DetailedCategorySchema, HTMLStrippingContributionSchema,
HTMLStrippingEventNoteSchema, HTMLStrippingEventSchema)
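# The _apply_* helpers below tune SQLAlchemy eager loading so that the ACL and protection checks made
# while paginating search results do not trigger a separate query per row.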
def _apply_acl_entry_strategy(rel, principal):
user_strategy = rel.joinedload('user')
user_strategy.lazyload('*')
user_strategy.load_only('id')
rel.joinedload('local_group').load_only('id')
if principal.allow_networks:
rel.joinedload('ip_network_group').load_only('id')
if principal.allow_category_roles:
rel.joinedload('category_role').load_only('id')
if principal.allow_event_roles:
rel.joinedload('event_role').load_only('id')
if principal.allow_registration_forms:
rel.joinedload('registration_form').load_only('id')
return rel
def _apply_event_access_strategy(rel):
rel.load_only('id', 'category_id', 'access_key', 'protection_mode')
return rel
def _apply_contrib_access_strategy(rel):
rel.load_only('id', 'session_id', 'event_id', 'protection_mode', 'title')
return rel
class InternalSearch(IndicoSearchProvider):
def search(self, query, user=None, page=None, object_types=(), *, admin_override_enabled=False,
**params):
category_id = params.get('category_id')
event_id = params.get('event_id')
if object_types == [SearchTarget.category]:
pagenav, results = self.search_categories(query, user, page, category_id,
admin_override_enabled)
elif object_types == [SearchTarget.event]:
pagenav, results = self.search_events(query, user, page, category_id,
admin_override_enabled)
elif set(object_types) == {SearchTarget.contribution, SearchTarget.subcontribution}:
pagenav, results = self.search_contribs(query, user, page, category_id, event_id,
admin_override_enabled)
elif object_types == [SearchTarget.attachment]:
pagenav, results = self.search_attachments(query, user, page, category_id, event_id,
admin_override_enabled)
elif object_types == [SearchTarget.event_note]:
pagenav, results = self.search_notes(query, user, page, category_id, event_id,
admin_override_enabled)
else:
pagenav, results = {}, []
return {
'total': -1 if results else 0,
'pagenav': pagenav,
'results': results,
}
def _paginate(self, query, page, column, user, admin_override_enabled):
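        # Keyset pagination over `column` (the object id): a positive `page` token means "rows with
        # id < page" (the next page, newest first); a negative token means "rows with id > -page"
        # (the previous page), which is fetched in ascending order and reversed below.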
reverse = False
pagenav = {'prev': None, 'next': None}
if not page:
query = query.order_by(column.desc())
elif page > 0: # next page
query = query.filter(column < page).order_by(column.desc())
# since we asked for a next page we know that a previous page exists
pagenav['prev'] = -(page - 1)
elif page < 0: # prev page
query = query.filter(column > -page).order_by(column)
# since we asked for a previous page we know that a next page exists
pagenav['next'] = -(page - 1)
reverse = True
preloaded_categories = set()
def _preload_categories(objs):
nonlocal preloaded_categories
obj_types = {type(o) for o in objs}
assert len(obj_types) == 1
obj_type = obj_types.pop()
if obj_type == Event:
chain_query = db.session.query(Event.category_chain).filter(Event.id.in_(o.id for o in objs))
elif obj_type == Category:
chain_query = db.session.query(Category.chain_ids).filter(Category.id.in_(o.id for o in objs))
elif obj_type == Contribution:
chain_query = (db.session.query(Event.category_chain)
.join(Contribution.event)
.filter(Contribution.id.in_(o.id for o in objs)))
elif obj_type == Attachment:
chain_query = (db.session.query(Event.category_chain)
.join(Attachment.folder)
.join(AttachmentFolder.event)
.filter(Attachment.id.in_(o.id for o in objs)))
elif obj_type == EventNote:
chain_query = (db.session.query(Event.category_chain)
.join(EventNote.event)
.filter(EventNote.id.in_(o.id for o in objs)))
else:
raise Exception(f'Unhandled object type: {obj_type}')
category_ids = set(itertools.chain.from_iterable(id for id, in chain_query))
query = (
Category.query
.filter(Category.id.in_(category_ids))
.options(load_only('id', 'parent_id', 'protection_mode'))
)
Category.preload_relationships(query, 'acl_entries',
strategy=lambda rel: _apply_acl_entry_strategy(subqueryload(rel),
CategoryPrincipal))
preloaded_categories |= set(query)
def _can_access(obj, allow_effective_protection_mode=True):
if isinstance(obj, (Category, Event, Session, Contribution)):
# more efficient for events/categories/contribs since it avoids climbing up the chain
protection_mode = (obj.effective_protection_mode if allow_effective_protection_mode
else obj.protection_mode)
elif isinstance(obj, Attachment):
# attachments don't have it so we can only skip access checks if they
# are public themselves
protection_mode = obj.protection_mode
elif isinstance(obj, EventNote):
# notes inherit from their parent
return _can_access(obj.object, allow_effective_protection_mode=False)
elif isinstance(obj, SubContribution):
# subcontributions inherit from their contribution
return _can_access(obj.contribution, allow_effective_protection_mode=False)
else:
raise Exception(f'Unexpected object: {obj}')
return (protection_mode == ProtectionMode.public or
obj.can_access(user, allow_admin=admin_override_enabled))
res = get_n_matching(query, self.RESULTS_PER_PAGE + 1, _can_access, prefetch_factor=20,
preload_bulk=_preload_categories)
if len(res) > self.RESULTS_PER_PAGE:
# we queried 1 more so we can see if there are more results available
del res[self.RESULTS_PER_PAGE:]
if reverse:
pagenav['prev'] = -res[-1].id
else:
pagenav['next'] = res[-1].id
if reverse:
res.reverse()
return res, pagenav
def search_categories(self, q, user, page, category_id, admin_override_enabled):
query = Category.query if not category_id else Category.get(category_id).deep_children_query
query = (query
.filter(Category.title_matches(q),
~Category.is_deleted)
.options(undefer('chain'),
undefer(Category.effective_protection_mode),
subqueryload(Category.acl_entries)))
objs, pagenav = self._paginate(query, page, Category.id, user, admin_override_enabled)
res = DetailedCategorySchema(many=True).dump(objs)
return pagenav, CategoryResultSchema(many=True).load(res)
def search_events(self, q, user, page, category_id, admin_override_enabled):
filters = [
Event.title_matches(q),
~Event.is_deleted
]
if category_id is not None:
filters.append(Event.category_chain_overlaps(category_id))
query = (
Event.query
.filter(*filters)
.options(
load_only('id', 'category_id', 'access_key', 'protection_mode'),
undefer(Event.effective_protection_mode),
_apply_acl_entry_strategy(selectinload(Event.acl_entries), EventPrincipal)
)
)
objs, pagenav = self._paginate(query, page, Event.id, user, admin_override_enabled)
query = (
Event.query
.filter(Event.id.in_(e.id for e in objs))
.options(
undefer(Event.detailed_category_chain),
selectinload(Event.person_links).joinedload('person').joinedload('user').load_only('is_system'),
joinedload(Event.own_venue),
joinedload(Event.own_room).options(raiseload('*'), joinedload('location')),
)
)
events_by_id = {e.id: e for e in query}
events = [events_by_id[e.id] for e in objs]
res = HTMLStrippingEventSchema(many=True).dump(events)
return pagenav, EventResultSchema(many=True).load(res)
def search_contribs(self, q, user, page, category_id, event_id, admin_override_enabled):
# XXX: Ideally we would search in subcontributions as well, but our pagination
# does not really work when we do not have a single unique ID
contrib_filters = [
Contribution.title_matches(q) | Contribution.description_matches(q),
~Contribution.is_deleted,
~Event.is_deleted
]
if category_id is not None:
contrib_filters.append(Event.category_chain_overlaps(category_id))
if event_id is not None:
contrib_filters.append(Contribution.event_id == event_id)
query = (
Contribution.query
.filter(*contrib_filters)
.join(Contribution.event)
.options(
load_only('id', 'session_id', 'event_id', 'protection_mode'),
undefer(Contribution.effective_protection_mode),
_apply_acl_entry_strategy(selectinload(Contribution.acl_entries), ContributionPrincipal),
joinedload(Contribution.session).options(
load_only('id', 'protection_mode', 'event_id'),
selectinload(Session.acl_entries)
),
contains_eager('event').options(
_apply_acl_entry_strategy(selectinload(Event.acl_entries), EventPrincipal)
)
)
)
objs, pagenav = self._paginate(query, page, Contribution.id, user, admin_override_enabled)
event_strategy = joinedload(Contribution.event)
event_strategy.joinedload(Event.own_venue)
event_strategy.joinedload(Event.own_room).options(raiseload('*'), joinedload('location'))
event_strategy.undefer(Event.detailed_category_chain)
session_strategy = joinedload(Contribution.session)
session_strategy.joinedload(Session.own_venue)
session_strategy.joinedload(Session.own_room).options(raiseload('*'), joinedload('location'))
session_block_strategy = joinedload(Contribution.session_block)
session_block_strategy.joinedload(SessionBlock.own_venue)
session_block_strategy.joinedload(SessionBlock.own_room).options(raiseload('*'), joinedload('location'))
session_block_session_strategy = session_block_strategy.joinedload(SessionBlock.session)
session_block_session_strategy.joinedload(Session.own_venue)
session_block_session_strategy.joinedload(Session.own_room).options(raiseload('*'), joinedload('location'))
query = (
Contribution.query
.filter(Contribution.id.in_(c.id for c in objs))
.options(
selectinload(Contribution.person_links).joinedload('person').joinedload('user').load_only('is_system'),
event_strategy,
session_strategy,
session_block_strategy,
joinedload(Contribution.type),
joinedload(Contribution.own_venue),
joinedload(Contribution.own_room).options(raiseload('*'), joinedload('location')),
joinedload(Contribution.timetable_entry),
)
)
contribs_by_id = {c.id: c for c in query}
contribs = [contribs_by_id[c.id] for c in objs]
res = HTMLStrippingContributionSchema(many=True).dump(contribs)
return pagenav, ContributionResultSchema(many=True).load(res)
def search_attachments(self, q, user, page, category_id, event_id, admin_override_enabled):
contrib_event = db.aliased(Event)
contrib_session = db.aliased(Session)
subcontrib_contrib = db.aliased(Contribution)
subcontrib_session = db.aliased(Session)
subcontrib_event = db.aliased(Event)
session_event = db.aliased(Event)
attachment_strategy = _apply_acl_entry_strategy(selectinload(Attachment.acl_entries), AttachmentPrincipal)
folder_strategy = contains_eager(Attachment.folder)
folder_strategy.load_only('id', 'protection_mode', 'link_type', 'category_id', 'event_id', 'linked_event_id',
'contribution_id', 'subcontribution_id', 'session_id')
_apply_acl_entry_strategy(folder_strategy.selectinload(AttachmentFolder.acl_entries), AttachmentFolderPrincipal)
# event
event_strategy = folder_strategy.contains_eager(AttachmentFolder.linked_event)
_apply_event_access_strategy(event_strategy)
_apply_acl_entry_strategy(event_strategy.selectinload(Event.acl_entries), EventPrincipal)
# contribution
contrib_strategy = folder_strategy.contains_eager(AttachmentFolder.contribution)
_apply_contrib_access_strategy(contrib_strategy)
_apply_acl_entry_strategy(contrib_strategy.selectinload(Contribution.acl_entries), ContributionPrincipal)
contrib_event_strategy = contrib_strategy.contains_eager(Contribution.event.of_type(contrib_event))
_apply_event_access_strategy(contrib_event_strategy)
_apply_acl_entry_strategy(contrib_event_strategy.selectinload(contrib_event.acl_entries), EventPrincipal)
contrib_session_strategy = contrib_strategy.contains_eager(Contribution.session.of_type(contrib_session))
contrib_session_strategy.load_only('id', 'event_id', 'protection_mode')
_apply_acl_entry_strategy(contrib_session_strategy.selectinload(contrib_session.acl_entries), SessionPrincipal)
# subcontribution
subcontrib_strategy = folder_strategy.contains_eager(AttachmentFolder.subcontribution)
subcontrib_strategy.load_only('id', 'contribution_id', 'title')
subcontrib_contrib_strategy = subcontrib_strategy.contains_eager(
SubContribution.contribution.of_type(subcontrib_contrib)
)
_apply_contrib_access_strategy(subcontrib_contrib_strategy)
_apply_acl_entry_strategy(subcontrib_contrib_strategy
.selectinload(subcontrib_contrib.acl_entries), ContributionPrincipal)
subcontrib_event_strategy = subcontrib_contrib_strategy.contains_eager(
subcontrib_contrib.event.of_type(subcontrib_event)
)
_apply_event_access_strategy(subcontrib_event_strategy)
_apply_acl_entry_strategy(subcontrib_event_strategy.selectinload(subcontrib_event.acl_entries), EventPrincipal)
subcontrib_session_strategy = subcontrib_contrib_strategy.contains_eager(
subcontrib_contrib.session.of_type(subcontrib_session)
)
subcontrib_session_strategy.load_only('id', 'event_id', 'protection_mode')
_apply_acl_entry_strategy(subcontrib_session_strategy.selectinload(subcontrib_session.acl_entries),
SessionPrincipal)
# session
session_strategy = folder_strategy.contains_eager(AttachmentFolder.session)
session_strategy.load_only('id', 'event_id', 'protection_mode')
session_event_strategy = session_strategy.contains_eager(Session.event.of_type(session_event))
_apply_event_access_strategy(session_event_strategy)
session_event_strategy.selectinload(session_event.acl_entries)
_apply_acl_entry_strategy(session_strategy.selectinload(Session.acl_entries), SessionPrincipal)
attachment_filters = [
Attachment.title_matches(q),
~Attachment.is_deleted,
~AttachmentFolder.is_deleted,
AttachmentFolder.link_type != LinkType.category,
db.or_(
AttachmentFolder.link_type != LinkType.event,
~Event.is_deleted,
),
db.or_(
AttachmentFolder.link_type != LinkType.contribution,
~Contribution.is_deleted & ~contrib_event.is_deleted
),
db.or_(
AttachmentFolder.link_type != LinkType.subcontribution,
db.and_(
~SubContribution.is_deleted,
~subcontrib_contrib.is_deleted,
~subcontrib_event.is_deleted,
)
),
db.or_(
AttachmentFolder.link_type != LinkType.session,
~Session.is_deleted & ~session_event.is_deleted
)
]
if category_id is not None:
attachment_filters.append(AttachmentFolder.event.has(Event.category_chain_overlaps(category_id)))
if event_id is not None:
attachment_filters.append(AttachmentFolder.event_id == event_id)
query = (
Attachment.query
.join(Attachment.folder)
.filter(*attachment_filters)
.options(folder_strategy, attachment_strategy, joinedload(Attachment.user).joinedload('_affiliation'))
.outerjoin(AttachmentFolder.linked_event)
.outerjoin(AttachmentFolder.contribution)
.outerjoin(Contribution.event.of_type(contrib_event))
.outerjoin(Contribution.session.of_type(contrib_session))
.outerjoin(AttachmentFolder.subcontribution)
.outerjoin(SubContribution.contribution.of_type(subcontrib_contrib))
.outerjoin(subcontrib_contrib.event.of_type(subcontrib_event))
.outerjoin(subcontrib_contrib.session.of_type(subcontrib_session))
.outerjoin(AttachmentFolder.session)
.outerjoin(Session.event.of_type(session_event))
)
objs, pagenav = self._paginate(query, page, Attachment.id, user, admin_override_enabled)
query = (
Attachment.query
.filter(Attachment.id.in_(a.id for a in objs))
.options(
joinedload(Attachment.folder).options(
joinedload(AttachmentFolder.subcontribution),
joinedload(AttachmentFolder.event).options(
undefer(Event.detailed_category_chain)
)
)
)
)
attachments_by_id = {a.id: a for a in query}
attachments = [attachments_by_id[a.id] for a in objs]
res = AttachmentSchema(many=True).dump(attachments)
return pagenav, AttachmentResultSchema(many=True).load(res)
def search_notes(self, q, user, page, category_id, event_id, admin_override_enabled):
contrib_event = db.aliased(Event)
contrib_session = db.aliased(Session)
subcontrib_contrib = db.aliased(Contribution)
subcontrib_session = db.aliased(Session)
subcontrib_event = db.aliased(Event)
session_event = db.aliased(Event)
note_strategy = load_only('id', 'link_type', 'event_id', 'linked_event_id', 'contribution_id',
'subcontribution_id', 'session_id', 'html')
# event
event_strategy = note_strategy.contains_eager(EventNote.linked_event)
event_strategy.undefer(Event.effective_protection_mode)
_apply_event_access_strategy(event_strategy)
_apply_acl_entry_strategy(event_strategy.selectinload(Event.acl_entries), EventPrincipal)
# contribution
contrib_strategy = note_strategy.contains_eager(EventNote.contribution)
_apply_contrib_access_strategy(contrib_strategy)
_apply_acl_entry_strategy(contrib_strategy.selectinload(Contribution.acl_entries), ContributionPrincipal)
contrib_event_strategy = contrib_strategy.contains_eager(Contribution.event.of_type(contrib_event))
_apply_event_access_strategy(contrib_event_strategy)
_apply_acl_entry_strategy(contrib_event_strategy.selectinload(contrib_event.acl_entries), EventPrincipal)
contrib_session_strategy = contrib_strategy.contains_eager(Contribution.session.of_type(contrib_session))
contrib_session_strategy.load_only('id', 'event_id', 'protection_mode')
_apply_acl_entry_strategy(contrib_session_strategy.selectinload(contrib_session.acl_entries), SessionPrincipal)
# subcontribution
subcontrib_strategy = note_strategy.contains_eager(EventNote.subcontribution)
subcontrib_contrib_strategy = subcontrib_strategy.contains_eager(
SubContribution.contribution.of_type(subcontrib_contrib)
)
_apply_contrib_access_strategy(subcontrib_contrib_strategy)
_apply_acl_entry_strategy(subcontrib_contrib_strategy
.selectinload(subcontrib_contrib.acl_entries), ContributionPrincipal)
subcontrib_event_strategy = subcontrib_contrib_strategy.contains_eager(
subcontrib_contrib.event.of_type(subcontrib_event)
)
_apply_event_access_strategy(subcontrib_event_strategy)
_apply_acl_entry_strategy(subcontrib_event_strategy.selectinload(subcontrib_event.acl_entries), EventPrincipal)
subcontrib_session_strategy = subcontrib_contrib_strategy.contains_eager(
subcontrib_contrib.session.of_type(subcontrib_session)
)
subcontrib_session_strategy.load_only('id', 'event_id', 'protection_mode')
_apply_acl_entry_strategy(subcontrib_session_strategy.selectinload(subcontrib_session.acl_entries),
SessionPrincipal)
# session
session_strategy = note_strategy.contains_eager(EventNote.session)
session_strategy.load_only('id', 'event_id', 'protection_mode')
session_event_strategy = session_strategy.contains_eager(Session.event.of_type(session_event))
_apply_event_access_strategy(session_event_strategy)
session_event_strategy.selectinload(session_event.acl_entries)
_apply_acl_entry_strategy(session_strategy.selectinload(Session.acl_entries), SessionPrincipal)
note_filters = [
EventNote.html_matches(q),
~EventNote.is_deleted,
db.or_(
EventNote.link_type != LinkType.event,
~Event.is_deleted
),
db.or_(
EventNote.link_type != LinkType.contribution,
~Contribution.is_deleted & ~contrib_event.is_deleted
),
db.or_(
EventNote.link_type != LinkType.subcontribution,
db.and_(
~SubContribution.is_deleted,
~subcontrib_contrib.is_deleted,
~subcontrib_event.is_deleted
)
),
db.or_(
EventNote.link_type != LinkType.session,
~Session.is_deleted & ~session_event.is_deleted
)
]
if category_id is not None:
note_filters.append(EventNote.event.has(Event.category_chain_overlaps(category_id)))
if event_id is not None:
note_filters.append(EventNote.event_id == event_id)
query = (
EventNote.query
.filter(*note_filters)
.options(note_strategy)
.outerjoin(EventNote.linked_event)
.outerjoin(EventNote.contribution)
.outerjoin(Contribution.event.of_type(contrib_event))
.outerjoin(Contribution.session.of_type(contrib_session))
.outerjoin(EventNote.subcontribution)
.outerjoin(SubContribution.contribution.of_type(subcontrib_contrib))
.outerjoin(subcontrib_contrib.event.of_type(subcontrib_event))
.outerjoin(subcontrib_contrib.session.of_type(subcontrib_session))
.outerjoin(EventNote.session)
.outerjoin(Session.event.of_type(session_event))
)
objs, pagenav = self._paginate(query, page, EventNote.id, user, admin_override_enabled)
query = (
EventNote.query
.filter(EventNote.id.in_(n.id for n in objs))
.options(
joinedload(EventNote.contribution),
joinedload(EventNote.subcontribution).joinedload(SubContribution.contribution),
joinedload(EventNote.event).options(undefer(Event.detailed_category_chain)),
joinedload(EventNote.current_revision).joinedload(EventNoteRevision.user).joinedload('_affiliation'),
)
)
notes_by_id = {n.id: n for n in query}
notes = [notes_by_id[n.id] for n in objs]
res = HTMLStrippingEventNoteSchema(many=True).dump(notes)
return pagenav, EventNoteResultSchema(many=True).load(res)
|
the-stack_0_22668 | # Python imports
from typing import Any
from typing import Tuple
from typing import Optional
from typing import List
# Deeplodocus imports
from deeplodocus.data.load.source import Source
from deeplodocus.utils.generic_utils import get_module
class SourceWrapper(Source):
"""
AUTHORS:
--------
:author: Alix Leroy
DESCRIPTION:
------------
SourceWrapper class
"""
def __init__(self,
name: str,
module: str,
kwargs: dict,
index: int = -1,
is_loaded: bool = True,
is_transformed: bool = False,
num_instances: Optional[int] = None,
instance_id: int = 0,
instance_indices: Optional[List[int]] = None):
super().__init__(index=index,
is_loaded=is_loaded,
is_transformed=is_transformed,
num_instances=num_instances,
instance_id=instance_id)
# Module wrapped and its origin
module, self.origin = get_module(
module=module,
name=name
)
# Load module
self.module = module(**kwargs)
# Index of the desired source
self.instance_indices = instance_indices
def __getitem__(self, index: int) -> Tuple[Any, bool, bool]:
"""
AUTHORS:
--------
:author: Alix Leroy
DESCRIPTION:
------------
        Get an item from the wrapped source module
        PARAMETERS:
        -----------
        :param index (int): Index of the item to load from the wrapped module
        RETURN:
        -------
        :return item (Tuple[Any, bool, bool]): The item(s), plus the is_loaded and is_transformed flags
"""
# Get the items from the wrapped module
items = self.module.__getitem__(index)
# If some specific items need to be loaded
if self.instance_indices is not None:
items = self.__select_items(items)
# Return the items, is_loaded and is_transformed
return items, self.is_loaded, self.is_transformed
def __select_items(self, items: List[Any]):
"""
AUTHORS:
--------
:author: Alix Leroy
DESCRIPTION:
------------
Select a list of items within an existing list of items
PARAMETERS:
-----------
:param items (List[Any]): List of items to pick from
RETURN:
-------
:return selected_items (List[Any]): The list of selected items
"""
selected_items = []
for index in self.instance_indices:
selected_items.append(items[index])
return selected_items
    def compute_length(self) -> int:
"""
AUTHORS:
--------
:author: Alix Leroy
DESCRIPTION:
------------
        Compute the length of the SourceWrapper instance by querying the length of the wrapped module
PARAMETERS:
-----------
None
RETURN:
-------
:return (int): Return the length of the wrapped source
"""
# Compute the length of the wrapped module
return self.module.__len__()
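    # --- Illustrative usage (editorial sketch, not part of the original file) ---
    # The module path, class name and keyword arguments below are hypothetical;
    # they only show the interface SourceWrapper expects: any class reachable via
    # get_module() that implements __getitem__ and __len__.
    #
    # wrapper = SourceWrapper(
    #     name="MyDataset",                 # hypothetical class name
    #     module="my_package.datasets",     # hypothetical module path
    #     kwargs={"root": "./data"},        # forwarded to MyDataset(**kwargs)
    #     instance_indices=[0, 2],          # keep only the 1st and 3rd returned item
    # )
    # items, is_loaded, is_transformed = wrapper[5]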
|
the-stack_0_22669 | import os
import time
import docker
from docker.models.containers import Container
from ..utils import CONTAINER_NAME, get_logs, remove_previous_container
client = docker.from_env()
def verify_container(container: Container) -> None:
logs = get_logs(container)
assert "Checking for script in /app/prestart.sh" in logs
assert "Running script /app/prestart.sh" in logs
assert (
"Running inside /app/prestart.sh, you could add migrations to this file" in logs
)
assert "Uvicorn running on http://127.0.0.1:80" in logs
def test_env_vars_2() -> None:
name = os.getenv("NAME")
image = f"tiangolo/uvicorn-gunicorn:{name}"
sleep_time = int(os.getenv("SLEEP_TIME", 1))
time.sleep(sleep_time)
remove_previous_container(client)
container = client.containers.run(
image,
name=CONTAINER_NAME,
environment={"HOST": "127.0.0.1"},
ports={"80": "8000"},
detach=True,
command="/start-reload.sh",
)
time.sleep(sleep_time)
verify_container(container)
container.stop()
container.remove()
|
the-stack_0_22670 | # -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trestle href command."""
import os
import pathlib
import sys
from _pytest.monkeypatch import MonkeyPatch
from tests import test_utils
from trestle.cli import Trestle
from trestle.core.models.file_content_type import FileContentType
from trestle.oscal import profile
def test_href_cmd(
tmp_path: pathlib.Path, keep_cwd: pathlib.Path, simplified_nist_profile: profile.Profile, monkeypatch: MonkeyPatch
) -> None:
"""Test basic cmd invocation of href."""
# prepare trestle project dir with the file
models_path, profile_path = test_utils.prepare_trestle_project_dir(
tmp_path,
FileContentType.JSON,
simplified_nist_profile,
test_utils.PROFILES_DIR)
os.chdir(models_path)
# just list the hrefs
cmd_string = 'trestle href -n my_test_model'
monkeypatch.setattr(sys, 'argv', cmd_string.split())
rc = Trestle().run()
assert rc == 0
orig_href = simplified_nist_profile.imports[0].href
new_href = 'trestle://catalogs/my_catalog/catalog.json'
cmd_string = f'trestle href -n my_test_model -hr {new_href}'
monkeypatch.setattr(sys, 'argv', cmd_string.split())
rc = Trestle().run()
assert rc == 0
# confirm new href is correct
new_profile: profile.Profile = profile.Profile.oscal_read(profile_path)
assert new_profile.imports[0].href == new_href
# restore orig href to confirm models are otherwise equivalent
# only thing different should be last-modified
new_profile.imports[0].href = orig_href
assert test_utils.models_are_equivalent(new_profile, simplified_nist_profile)
def test_href_failures(
tmp_path: pathlib.Path, keep_cwd: pathlib.Path, simplified_nist_profile: profile.Profile, monkeypatch: MonkeyPatch
) -> None:
"""Test href failure modes."""
# prepare trestle project dir with the file
models_path, profile_path = test_utils.prepare_trestle_project_dir(
tmp_path,
FileContentType.JSON,
simplified_nist_profile,
test_utils.PROFILES_DIR)
cmd_string = 'trestle href -n my_test_model -hr foobar'
# not in trestle project so fail
monkeypatch.setattr(sys, 'argv', cmd_string.split())
rc = Trestle().run()
assert rc == 5
os.chdir(models_path)
cmd_string = 'trestle href -n my_test_model -hr foobar -i 2'
# add extra import to the profile and ask for import number 2
simplified_nist_profile.imports.append(simplified_nist_profile.imports[0])
simplified_nist_profile.oscal_write(profile_path)
monkeypatch.setattr(sys, 'argv', cmd_string.split())
rc = Trestle().run()
assert rc == 1
|
the-stack_0_22671 | import sys
sys.path.append('core')
import argparse
import os
import cv2
import glob
import numpy as np
import torch
from PIL import Image
from KLens_video import KLens
from frame_utils import writeFlow
import flow_viz
from raft import RAFT
from utils import flow_viz
from utils.utils import InputPadder
DEVICE = 'cuda'
def disparity_assessment(ref_image, neighbor_image, disp, path_ref, path_meas):
# Warping
root_path = "./out_video/"
if not os.path.exists(root_path):
os.makedirs(root_path)
writeFlow(
os.path.join(
root_path,
os.path.basename(
os.path.splitext(path_ref)[0]
)+
"_"+
os.path.basename(
os.path.splitext(path_meas)[0]
)+
".flo"),
disp
)
cv2.imwrite(
os.path.join(
root_path,
os.path.basename(
os.path.splitext(path_ref)[0]
)+
"_"+
os.path.basename(
os.path.splitext(path_meas)[0]
)+
".jpg"
),
flow_viz.flow_to_image(disp)[:,:,[2,1,0]]
)
# def viz(img, flo):
# img = img[0].permute(1,2,0).cpu().numpy()
# flo = flo[0].permute(1,2,0).cpu().numpy()
# map flow to rgb image
# flo = flow_viz.flow_to_image(flo)
# img_flo = np.concatenate([img, flo], axis=0)
# cv2.imwrite('image.jpg', img_flo[:, :, [2,1,0]])
# cv2.waitKey()
def demo(args,data_loader):
model = torch.nn.DataParallel(RAFT(args))
model.load_state_dict(torch.load(args.model))
model = model.module
model.to(DEVICE)
model.eval()
for idx, sample in enumerate(data_loader):
with torch.no_grad():
im0, im1, path_ref,path_meas = sample
print(path_ref)
# images = load_image_list(imagenames)
# for i in range(images.shape[0]-1):
# image1 = images[i,None]
# image2 = images[i+1,None]
# print("before cuda",im1.shape)
im0 = im0.to(DEVICE)
im1 = im1.to(DEVICE)
# print(im0.shape)
# print("after cuda",im1.shape)
flow_low, flow_up = model(im0[:,0,:,:,:], im1[:,0,:,:,:], iters=20, test_mode=True)
for i in range(flow_up.shape[0]):
print(
disparity_assessment(
np.moveaxis(im0.cpu().numpy()[i,:,:,:],0,2),
np.moveaxis(im1.cpu().numpy()[i,:,:,:],0,2),
flow_up[i,:,:,:].permute(1,2,0).cpu().numpy(),
path_ref[i],
path_meas[i]
)
) # print()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', help="restore checkpoint")
parser.add_argument('--path', help="dataset for evaluation")
parser.add_argument('--batch', help="batch_size")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')
args = parser.parse_args()
dataset = KLens()
# print(args.batch)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
shuffle=False,
batch_size=int(args.batch),
num_workers=4,
drop_last=False,
pin_memory=True)
demo(args,data_loader)
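    # --- Illustrative invocation (editorial sketch) ---
    # The script and checkpoint names are placeholders; any RAFT checkpoint that
    # this repository's RAFT class can load should work, e.g.:
    #   python demo_klens_video.py --model=models/raft-things.pth --path=./data --batch=1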
|
the-stack_0_22673 | """
This file offers the methods to automatically retrieve the graph Mesorhizobium sp. WSM3224.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MesorhizobiumSpWsm3224(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Mesorhizobium sp. WSM3224 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Mesorhizobium sp. WSM3224 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MesorhizobiumSpWsm3224",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
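# --- Illustrative usage (editorial sketch, not part of the generated module) ---
# Retrieval downloads and caches the graph on first use; printing the returned
# Graph object gives a short textual report. Exact report contents depend on the
# installed ensmallen version.
#
# if __name__ == "__main__":
#     graph = MesorhizobiumSpWsm3224(directed=False, version="links.v11.5")
#     print(graph)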
|
the-stack_0_22674 | import pandas as pd
import numpy as np
import os
import requests
import time
directory = 'C:/Users/phil_/OneDrive/Documents/GitHub/rocket-league-stats/stat_files/'
playerli = []
# loop through player files and add to data frame
for filename in os.listdir(directory):
if filename.startswith("PLAYER_"):
#print(os.path.join(directory, filename))
df = pd.read_csv(directory+filename, sep=';', index_col=None, header=0)
playerli.append(df)
else:
continue
playersummary = pd.concat(playerli, axis=0, ignore_index=True)
#playersummary
teamli = []
# loop through team files and add to data frame
for filename in os.listdir(directory):
if filename.startswith("TEAM_"):
#print(os.path.join(directory, filename))
df = pd.read_csv(directory+filename, sep=';', index_col=None, header=0)
teamli.append(df)
else:
continue
teamsummary = pd.concat(teamli, axis=0, ignore_index=True)
teamsummary['Count'] = 1
#write overall to csv
teamsummary.to_csv('C:/Users/phil_/OneDrive/Documents/GitHub/rocket-league-stats/stat_files/summary/CLMNTeamSummary.csv', sep=';', encoding='utf-8',index=False)
#game summary
gameresults = teamsummary[['color','Game','Result','franchise name','team name','Week Number','Series Number','Count','League']]
#get unique combinations
series_matchup = gameresults[['color','team name','franchise name','Week Number','Series Number','League']].value_counts().reset_index(name='Games In Series')
#calculate wins for each combination
blue_wins = gameresults[['color','team name','Week Number','Series Number','League']].loc[(gameresults["Result"]=="Win") & (gameresults["color"]=="blue")].value_counts().reset_index(name='Blue Match Wins')
orange_wins = gameresults[['color','team name','Week Number','Series Number','League']].loc[(gameresults["Result"]=="Win") & (gameresults["color"]=="orange")].value_counts().reset_index(name='Orange Match Wins')
#join back to store result
series_matchup = pd.merge(series_matchup,blue_wins,how="left" ,on=['Week Number','Series Number','League'])
series_matchup = pd.merge(series_matchup,orange_wins,how="left" ,on=['Week Number','Series Number','League'])
#fill in NaN with 0
series_matchup['Blue Match Wins'] = series_matchup['Blue Match Wins'].fillna(0)
series_matchup['Orange Match Wins'] = series_matchup['Orange Match Wins'].fillna(0)
#add column for color of series winner
series_matchup['Series Color Winner'] = np.where(series_matchup['Blue Match Wins'] > series_matchup['Orange Match Wins'],'blue','orange')
#add count column for series winner
series_matchup['Series Win Count'] = np.where(series_matchup['color_x'] == series_matchup['Series Color Winner'],1,0)
#add count column for series loser
series_matchup['Series Loss Count'] = np.where(series_matchup['color_x'] != series_matchup['Series Color Winner'],1,0)
#Add rows to account for forfeits. Each series forfeit needs two rows, 1 for winning team, 1 for losing team.
forfeits = []
forfeits.append(['blue','URSAS','BLOOMINGTON','Week 3','Series 4','CLMN',0,'blue','URSAS',0.0,'orange','KINGPINS',0,'blue',1,0])
forfeits.append(['orange','KINGPINS','ST. PAUL','Week 3','Series 4','CLMN',0,'orange','KINGPINS',0.0,'blue','URSAS',0,'blue',0,1])
forfeits.append(['blue','SOAR','ST. CLOUD','Week 4','Series 1','CLMN',0,'blue','SOAR',0.0,'orange','WARDENS',0,'blue',1,0])
forfeits.append(['orange','WARDENS','HIBBING','Week 4','Series 1','CLMN',0,'orange','WARDENS',0.0,'blue','SOAR',0,'blue',0,1])
forfeits.append(['blue','RIFF','ROCHESTER','Week 7','Series 5','CLMN',0,'blue','RIFF',0.0,'orange','SOAR',0,'blue',1,0])
forfeits.append(['orange','SOAR','ST. CLOUD','Week 7','Series 5','CLMN',0,'orange','SOAR',0.0,'blue','RIFF',0,'blue',0,1])
series_matchup = pd.concat([series_matchup, pd.DataFrame(forfeits, columns=series_matchup.columns)], ignore_index=True)
#roll up to 1 row per team with sum of wins/losses
series_matchup = series_matchup.loc[series_matchup["Week Number"] !="Week 0"]
series_matchup = series_matchup.groupby(['franchise name', 'team name_x'])[['Series Win Count', 'Series Loss Count']].sum().sort_values(by=['Series Win Count'], ascending=False).reset_index()
series_matchup
#write overall to csv
series_matchup.to_csv('C:/Users/phil_/OneDrive/Documents/GitHub/rocket-league-stats/stat_files/summary/CLMNSeriesRecord.csv', sep=';', encoding='utf-8',index=False)
playersummary = pd.merge(playersummary, gameresults, on=['color', 'Game'])
playersummary['Count'] = 1
#playersummary[playersummary['Game']=='af5b73e4-322f-43f2-9df9-7b160bfed936']
#take only wins for MVP calculation
playerwins = playersummary[playersummary['Result'] == 'Win']
playerwins = playerwins[['color','score','Game']]
#find max score per color, game
mvpbygame = playerwins.groupby(['color','Game']).max()
#mvpbygame
#join back to playersummary on game, color, maxscore
playersummary = pd.merge(playersummary, mvpbygame,how='left', on=['color', 'Game'])
#add column for MVP where the score matches the max score from winning team
playersummary['MVP'] = np.where(playersummary['score_x'] == playersummary['score_y'],'Yes','No')
#drop column score_y
playersummary = playersummary.drop(columns='score_y')
#rename column score_x to score
playersummary.rename(columns={'score_x':'score'},inplace=True)
#drop column team name_x
playersummary = playersummary.drop(columns='team name_x')
#rename column team name_y to team name
playersummary.rename(columns={'team name_y':'team name'},inplace=True)
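# Editorial note: an alternative to the merge-against-grouped-max pattern above is
# a grouped transform, which computes the per-(color, Game) maximum in place
# (sketch of the general pattern, not a drop-in replacement):
# playerwins['max_score'] = playerwins.groupby(['color', 'Game'])['score'].transform('max')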
#remove players who have retired, or been dropped from CLMN
players_to_remove = []
players_to_remove.append("Thermal")
#update dataframe to only include rows where the player name is not in the above list
playersummary = playersummary[~playersummary['player name'].isin(players_to_remove)]
playersummary
#write overall to csv
playersummary.to_csv('C:/Users/phil_/OneDrive/Documents/GitHub/rocket-league-stats/stat_files/summary/CLMNPlayerSummary.csv', sep=';', encoding='utf-8',index=False)
#get regular season rolled up player stats
regularseasonplayeroverallsummary = playersummary[['franchise name','team name','player name','score','goals','assists','shots']].loc[playersummary['Match Type']=='Regular Season']
regularseasonplayeroverallsummary = regularseasonplayeroverallsummary.groupby(['franchise name', 'team name', 'player name'])[['score', 'goals', 'assists', 'shots']].mean().sort_values(by=['score', 'goals', 'assists', 'shots'], ascending=False).reset_index().round(2)
regularseasonplayeroverallsummary.to_csv('C:/Users/phil_/OneDrive/Documents/GitHub/rocket-league-stats/stat_files/summary/CLMNRegularSeasonOverallPlayerSummary.csv', sep=';', encoding='utf-8',index=False)
#regularseasonplayeroverallsummary
#get regular season rolled up team stats
regularseasonteamoverallsummary = teamsummary[['franchise name','team name','score','goals','assists','shots']].loc[teamsummary['Match Type']=='Regular Season']
regularseasonteamoverallsummary = regularseasonteamoverallsummary.groupby(['franchise name', 'team name'])[['score', 'goals', 'assists', 'shots']].mean().sort_values(by=['score', 'goals', 'assists', 'shots'], ascending=False).reset_index().round(2)
regularseasonteamoverallsummary.to_csv('C:/Users/phil_/OneDrive/Documents/GitHub/rocket-league-stats/stat_files/summary/CLMNRegularSeasonOverallTeamSummary.csv', sep=';', encoding='utf-8',index=False)
regularseasonteamoverallsummary
#regular season games played
regularseasonplayergamesplayed = playersummary.loc[playersummary['Match Type']=='Regular Season']
#count distinct "game"
regularseasonplayergamesplayed = regularseasonplayergamesplayed.groupby(['player name']).Game.nunique().reset_index()
#order data by player name ignoring "case"
regularseasonplayergamesplayed = regularseasonplayergamesplayed.iloc[regularseasonplayergamesplayed['player name'].str.lower().argsort()]
#name the new column
regularseasonplayergamesplayed.rename(columns={'Game':'Games Played'},inplace=True)
regularseasonplayergamesplayed
#write overall to csv
regularseasonplayergamesplayed.to_csv('C:/Users/phil_/OneDrive/Documents/GitHub/rocket-league-stats/stat_files/summary/CLMNRegularSeasonPlayerGamesPlayed.csv', sep=';', encoding='utf-8',index=False) |
the-stack_0_22676 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal, assert_array_less
from astropy import units as u
from astropy.coordinates import Angle
from astropy.modeling import InputParameterError, fitting, models
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import AstropyUserWarning
def test_sigma_constant():
"""
Test that the GAUSSIAN_SIGMA_TO_FWHM constant matches the
gaussian_sigma_to_fwhm constant in astropy.stats. We define
it manually in astropy.modeling to avoid importing from
astropy.stats.
"""
from astropy.modeling.functional_models import GAUSSIAN_SIGMA_TO_FWHM
from astropy.stats.funcs import gaussian_sigma_to_fwhm
assert gaussian_sigma_to_fwhm == GAUSSIAN_SIGMA_TO_FWHM
def test_Trapezoid1D():
"""Regression test for https://github.com/astropy/astropy/issues/1721"""
model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)
xx = np.linspace(0, 4, 8)
yy = model(xx)
yy_ref = [0., 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.]
assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)
def test_Gaussian1D():
model = models.Gaussian1D(4.2, 1.7, stddev=5.1)
x = np.mgrid[0:5]
g = model(x)
g_ref = [3.97302977, 4.16062403, 4.19273985, 4.06574509, 3.79389376]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose(model.fwhm, 12.009582229657841)
def test_Gaussian2D():
"""
Test rotated elliptical Gaussian2D model.
https://github.com/astropy/astropy/pull/2038
"""
model = models.Gaussian2D(4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3,
theta=np.pi/6.)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],
[3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],
[3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],
[3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],
[3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose([model.x_fwhm, model.y_fwhm],
[12.009582229657841, 7.7709061486021325])
def test_Gaussian2DCovariance():
"""
Test rotated elliptical Gaussian2D model when cov_matrix is input.
https://github.com/astropy/astropy/pull/2199
"""
cov_matrix = [[49., -16.], [-16., 9.]]
model = models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],
[8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],
[13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],
[16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],
[14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
# Test bad cov_matrix shape
cov_matrix = [[49., 3.14, -16.],
[3.14, -16., 9.],
[-16, 27, 3.14]]
with pytest.raises(ValueError) as err:
models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
assert str(err.value) == \
"Covariance matrix must be 2x2"
def test_Gaussian2DRotation():
amplitude = 42
x_mean, y_mean = 0, 0
x_stddev, y_stddev = 2, 3
theta = Angle(10, 'deg')
pars = dict(amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev)
rotation = models.Rotation2D(angle=theta.degree)
point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev)
point2 = rotation(*point1)
g1 = models.Gaussian2D(theta=0, **pars)
g2 = models.Gaussian2D(theta=theta.radian, **pars)
value1 = g1(*point1)
value2 = g2(*point2)
assert_allclose(value1, value2)
def test_Gaussian2D_invalid_inputs():
x_stddev = 5.1
y_stddev = 3.3
theta = 10
cov_matrix = [[49., -16.], [-16., 9.]]
# first make sure the valid ones are OK
models.Gaussian2D()
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None)
models.Gaussian2D(cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(theta=0, cov_matrix=cov_matrix)
def test_Gaussian2D_theta():
theta = Angle(90, 'deg')
model1 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta)
theta2 = np.pi / 2.
model2 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta2)
assert model1.theta.quantity.to('radian').value == model2.theta.value
assert model1.bounding_box == model2.bounding_box
assert model1(619.42, 31.314) == model2(619.42, 31.314)
@pytest.mark.parametrize('gamma', (10, -10))
def test_moffat_fwhm(gamma):
ans = 34.641016151377542
kwargs = {'gamma': gamma, 'alpha': 0.5}
m1 = models.Moffat1D(**kwargs)
m2 = models.Moffat2D(**kwargs)
assert_allclose([m1.fwhm, m2.fwhm], ans)
assert_array_less(0, [m1.fwhm, m2.fwhm])
def test_RedshiftScaleFactor():
"""Like ``test_ScaleModel()``."""
# Scale by a scalar
m = models.RedshiftScaleFactor(0.4)
assert m(0) == 0
assert_array_equal(m([1, 2]), [1.4, 2.8])
assert_allclose(m.inverse(m([1, 2])), [1, 2])
# Scale by a list
m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)
assert_array_equal(m(0), 0)
assert_array_equal(m([1, 2], model_set_axis=False),
[[0.5, 1], [1, 2], [1.5, 3]])
assert_allclose(m.inverse(m([1, 2], model_set_axis=False)),
[[1, 2], [1, 2], [1, 2]])
def test_RedshiftScaleFactor_inverse():
m = models.RedshiftScaleFactor(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_RedshiftScaleFactor_inverse_bounding_box():
model = models.RedshiftScaleFactor(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (3, 15)
assert_allclose(inverse_model(model(4, with_bounding_box=True), with_bounding_box=True), 4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_RedshiftScaleFactor_model_levmar_fit():
"""Test fitting RedshiftScaleFactor model with LevMarLSQFitter."""
init_model = models.RedshiftScaleFactor()
x = np.arange(10)
y = 2.7174 * x
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [1.7174])
def test_Ellipse2D():
"""Test Ellipse2D model."""
amplitude = 7.5
x0, y0 = 15, 15
theta = Angle(45, 'deg')
em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)
y, x = np.mgrid[0:30, 0:30]
e = em(x, y)
assert np.all(e[e > 0] == amplitude)
assert e[y0, x0] == amplitude
rotation = models.Rotation2D(angle=theta.degree)
point1 = [2, 0] # Rotation2D center is (0, 0)
point2 = rotation(*point1)
point1 = np.array(point1) + [x0, y0]
point2 = np.array(point2) + [x0, y0]
e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.)
e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)
assert e1(*point1) == e2(*point2)
def test_Ellipse2D_circular():
"""Test that circular Ellipse2D agrees with Disk2D [3736]."""
amplitude = 7.5
radius = 10
size = (radius * 2) + 1
y, x = np.mgrid[0:size, 0:size]
ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius,
theta=0)(x, y)
disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)
assert np.all(ellipse == disk)
def test_Ellipse2D_theta():
theta = Angle(90, 'deg')
model1 = models.Ellipse2D(1, 25, 25, 15, 5, theta=theta)
theta2 = np.pi / 2.
model2 = models.Ellipse2D(1, 25, 25, 15, 5, theta=theta2)
assert model1.theta.quantity.to('radian').value == model2.theta.value
assert model1.bounding_box == model2.bounding_box
assert model1(619.42, 31.314) == model2(619.42, 31.314)
def test_Scale_inverse():
m = models.Scale(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Scale_inverse_bounding_box():
model = models.Scale(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (2, 10)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
def test_Multiply_inverse():
m = models.Multiply(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Multiply_inverse_bounding_box():
model = models.Multiply(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (2, 10)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
def test_Shift_inverse():
m = models.Shift(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Shift_inverse_bounding_box():
model = models.Shift(10)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (11, 15)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
@pytest.mark.skipif('not HAS_SCIPY')
def test_Shift_model_levmar_fit():
"""Test fitting Shift model with LevMarLSQFitter (issue #6103)."""
init_model = models.Shift()
x = np.arange(10)
y = x + 0.1
fitter = fitting.LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)
def test_Shift_model_set_linear_fit():
"""Test linear fitting of Shift model (issue #6103)."""
init_model = models.Shift(offset=[0, 0], n_models=2)
x = np.arange(10)
yy = np.array([x+0.1, x-0.2])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_set_linear_fit(Model):
"""Test linear fitting of Scale model (#6103)."""
init_model = Model(factor=[0, 0], n_models=2)
x = np.arange(-3, 7)
yy = np.array([1.15*x, 0.96*x])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_evaluate_without_units(Model):
m = Model(factor=4*u.m)
kwargs = {'x': 3*u.m, 'y': 7*u.m}
mnu = m.without_units_for_data(**kwargs)
x = np.linspace(-1, 1, 100)
assert_allclose(mnu(x), 4*x)
# https://github.com/astropy/astropy/issues/6178
def test_Ring2D_rout():
# Test with none of r_in, r_out, width specified
m = models.Ring2D(amplitude=1, x_0=1, y_0=1)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 1
# Test with r_in specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=4)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 4
assert m.width.value == 1
# Test with r_out specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=7)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 6
# Error when r_out is too small for default r_in
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=0.5)
assert str(err.value) == "r_in=1 and width=-0.5 must both be >=0"
# Test with width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, width=11)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 11
# Test with r_in and r_out specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 2
assert m.width.value == 3
# Error when r_out is smaller than r_in
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, r_in=4)
assert str(err.value) == "r_in=4 and width=-3 must both be >=0"
# Test with r_in and width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, width=4)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 2
assert m.width.value == 4
# Test with r_out and width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=12, width=7)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 5
assert m.width.value == 7
# Error when width is larger than r_out
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, width=4)
assert str(err.value) == "r_in=-3 and width=4 must both be >=0"
# Test with r_in, r_out, and width all specified
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=8)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 3
assert m.width.value == 8
# error when specifying all
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=7)
assert str(err.value) == "Width must be r_out - r_in"
@pytest.mark.skipif("not HAS_SCIPY")
def test_Voigt1D():
voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
xarr = np.linspace(-5.0, 5.0, num=40)
yarr = voi(xarr)
voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
fitter = fitting.LevMarLSQFitter()
voi_fit = fitter(voi_init, xarr, yarr)
assert_allclose(voi_fit.param_sets, voi.param_sets)
# Invalid method
with pytest.raises(ValueError) as err:
models.Voigt1D(method='test')
assert str(err.value) ==\
"Not a valid method for Voigt1D Faddeeva function: test."
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('algorithm', ('humlicek2', 'wofz'))
def test_Voigt1D_norm(algorithm):
"""Test integral of normalized Voigt profile."""
from scipy.integrate import quad
voi = models.Voigt1D(amplitude_L=1.0/np.pi, x_0=0.0, fwhm_L=2.0, fwhm_G=1.5, method=algorithm)
if algorithm == 'wofz':
atol = 1e-14
else:
atol = 1e-8
assert_allclose(quad(voi, -np.inf, np.inf)[0], 1.0, atol=atol)
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('doppler', (1.e-3, 1.e-2, 0.1, 0.5, 1.0, 2.5, 5.0, 10))
def test_Voigt1D_hum2(doppler):
"""Verify accuracy of Voigt profile in Humlicek approximation to Faddeeva.cc (SciPy)."""
x = np.linspace(-20, 20, 400001)
voi_w = models.Voigt1D(amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler, method='wofz')
vf_w = voi_w(x)
dvda_w = voi_w.fit_deriv(x, x_0=0, amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler)
voi_h = models.Voigt1D(amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler, method='humlicek2')
vf_h = voi_h(x)
dvda_h = voi_h.fit_deriv(x, x_0=0, amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler)
assert_allclose(vf_h, vf_w, rtol=1e-7 * (2 + 1 / np.sqrt(doppler)))
assert_allclose(dvda_h, dvda_w, rtol=1e-9, atol=1e-7 * (1 + 30 / doppler))
@pytest.mark.skipif("not HAS_SCIPY")
def test_KingProjectedAnalytic1D_fit():
km = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=2)
xarr = np.linspace(0.1, 2, 10)
yarr = km(xarr)
km_init = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=1)
fitter = fitting.LevMarLSQFitter()
km_fit = fitter(km_init, xarr, yarr)
assert_allclose(km_fit.param_sets, km.param_sets)
assert_allclose(km_fit.concentration, 0.30102999566398136)
@pytest.mark.parametrize('model', [models.Exponential1D(), models.Logarithmic1D()])
def test_ExponentialAndLogarithmic1D_fit(model):
xarr = np.linspace(0.1, 10., 200)
assert_allclose(xarr, model.inverse(model(xarr)))
@pytest.mark.parametrize('model', [models.Exponential1D(), models.Logarithmic1D()])
def test_ExponentialAndLogarithmic_set_tau(model):
message = "0 is not an allowed value for tau"
with pytest.raises(ValueError) as err:
model.tau = 0
assert str(err.value) == message
def test_Linear1D_inverse():
model = models.Linear1D(slope=4, intercept=-12)
inverse = model.inverse
assert inverse.slope == 1/4
assert inverse.intercept == 3
@pytest.mark.parametrize('trig', [(models.Sine1D, [-0.25, 0.25]),
(models.ArcSine1D, [-0.25, 0.25]),
(models.Cosine1D, [0, 0.5]),
(models.ArcCosine1D, [0, 0.5]),
(models.Tangent1D, [-0.25, 0.25]),
(models.ArcTangent1D, [-0.25, 0.25])])
def test_trig_inverse(trig):
mdl = trig[0]()
lower, upper = trig[1]
x = np.arange(lower, upper, 0.01)
assert_allclose(mdl.inverse(mdl(x)), x, atol=1e-10)
assert_allclose(mdl(mdl.inverse(x)), x, atol=1e-10)
@pytest.mark.skipif('not HAS_SCIPY')
def test_Sersic2D_theta():
theta = Angle(90, 'deg')
model1 = models.Sersic2D(1, 5, 4, 25, 25, 0.5, theta=theta)
theta2 = np.pi / 2.
model2 = models.Sersic2D(1, 5, 4, 25, 25, 0.5, theta=theta2)
assert model1.theta.quantity.to('radian').value == model2.theta.value
assert model1(619.42, 31.314) == model2(619.42, 31.314)
|
the-stack_0_22678 | import cv2
import numpy as np
img1 = cv2.imread("0.png")
img2 = cv2.imread("1.png")
img3 = cv2.imread("3.png")
img4 = cv2.imread("2.png")
img5 = cv2.imread("4.png")
width = img1.shape[1]
height = img1.shape[0]
img6 = np.zeros((height*5, width, 3), np.uint8)
img6[0:height, 0:width] = img1
img6[height:height*2, 0:width] = img2
img6[height*2:height*3, 0:width] = img3
img6[height*3:height*4, 0:width] = img4
img6[height*4:height*5, 0:width] = img5
cv2.imwrite("aaaaa.png", img6)
|
the-stack_0_22681 | # model
# 在之前最优的模型上,在Adobe+Dist646 数据集上finetune
# 4卡训练
#
model = dict(
type='FBA',
backbone=dict(
type='FBAEncoderDecoder',
encoder=dict(type='FBAEncoder', in_channels=11, block='resnet50_GN_WS'),
decoder=dict(type='FBADecoder')),
pretrained='work_dirs/fba/resnet_50_GN_WS_rename.pth',
loss_alpha_l1=dict(type='L1Loss', loss_weight=1),
loss_alpha_comp=dict(type='L1CompositionLoss', loss_weight=1),
loss_alpha_grad=dict(type='GradientLoss', loss_weight=1),
loss_alpha_lap=dict(type='LaplacianLoss', loss_weight=1),
loss_f_l1=dict(type='L1Loss', loss_weight=0.25),
loss_b_l1=dict(type='L1Loss', loss_weight=0.25),
loss_fb_excl=dict(type='GradientExclusionLoss', loss_weight=0.25),
loss_fb_comp=dict(type='L1CompositionLoss', loss_weight=0.25),
loss_f_lap=dict(type='LaplacianLoss', loss_weight=0.25, channel=3),
loss_b_lap=dict(type='LaplacianLoss', loss_weight=0.25, channel=3)
)
train_cfg = dict(train_backbone=True)
test_cfg = dict(metrics=['SAD', 'MSE', 'GRAD', 'CONN'])
# dataset settings
dataset_type = 'AdobeComp1kDataset'
data_root = '/mnt/lustre/share/3darseg/segmentation/matting/'
img_norm_cfg = dict(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], to_rgb=True)
img_norm_cfg_test = dict(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], format='chw')
train_pipeline = [
dict(type='LoadImageFromFile', key='alpha', flag='grayscale'),
dict(type='LoadImageFromFile', key='fg'),
dict(type='LoadImageFromFile', key='bg'),
#dict(type='LoadImageFromFile', key='merged', save_original_img=True),
dict( # 到时候换成更换后的FG
type='CompositeFg',
fg_dirs=[
'/mnt/lustre/share/3darseg/segmentation/matting/st_portrait/fg_restimate',
'/mnt/lustre/share/3darseg/segmentation/matting/st_portrait_2/st_portrait_2_fg_restimate'
],
alpha_dirs=[
'/mnt/lustre/share/3darseg/segmentation/matting/st_portrait/alpha',
'/mnt/lustre/share/3darseg/segmentation/matting/st_portrait_2/st_portrait_2_alpha'
]),
dict(type='Flip', keys=['alpha', 'fg', 'bg']),
    dict(type='RandomJitter'),  # applied to the fg only
dict(type='RandomGamma',keys=['fg', 'bg']),
dict(type='MergeFgAndBg'), # results['ori_merged']
dict(
type='CropAroundUnknown',
keys=['alpha', 'merged', 'ori_merged', 'fg', 'bg'],
crop_sizes=[320, 480, 640]),
dict(
type='Resize',
keys=['alpha', 'merged', 'ori_merged', 'fg', 'bg'],
scale=(320, 320),
keep_ratio=False),
dict(type='GenerateTrimap', kernel_size=(3, 25)),
dict(
type='RescaleToZeroOne',
keys=['merged', 'alpha', 'ori_merged', 'fg', 'bg', 'trimap']),
dict(type='Normalize', keys=['merged'], **img_norm_cfg),
dict(type='FormatTrimap2Channel', key='trimap'), # results['trimap_1channel']
dict(type='FormatTrimap6Channel', key='trimap'), # results['trimap_transformed']
dict(
type='Collect',
keys=['merged', 'alpha', 'trimap', 'trimap_transformed', 'ori_merged', 'fg', 'bg', 'trimap_1channel'],
meta_keys=[]),
dict(
type='ImageToTensor',
keys=['merged', 'alpha', 'trimap', 'trimap_transformed', 'ori_merged', 'fg', 'bg', 'trimap_1channel']),
]
test_pipeline = [
dict(
type='LoadImageFromFile',
key='alpha',
flag='grayscale',
save_original_img=True), # ori_alpha
dict(
type='LoadImageFromFile',
key='trimap',
flag='grayscale',
save_original_img=True), # ori_trimap
dict(
type='LoadImageFromFile',
key='merged',
#channel_order='rgb',
save_original_img=True), # ori_merged
    dict(type='CopyImage', key='trimap'),  # copy the trimap for evaluation, stored as copy_trimap
dict(type='RescaleToZeroOne', keys=['merged', 'trimap', 'ori_merged', 'ori_trimap']),
dict(type='FormatTrimap2Channel', key='trimap'),
dict(type='FormatTrimap2Channel', key='ori_trimap'),
dict(
type='ScaleInput',
keys=['merged', 'trimap', 'ori_merged'],
scale=1.0,
scale_type=4), # INTER_LANCZOS4=4
dict(type='FormatTrimap6Channel', key='trimap'), # results['trimap_transformed']
    dict(type='Normalize', keys=['merged'], **img_norm_cfg),  # TODO: drop the custom GN implementation and use the unified form
# dict(type='ImageToTensor', keys=['merged']),
# dict(type='GroupNoraliseImage', keys=['merged'], **img_norm_cfg_test),
dict(
type='Collect',
keys=['ori_merged','trimap' , 'merged', 'trimap_transformed'],
meta_keys=[
'merged_path', 'merged_ori_shape', 'ori_alpha', 'ori_trimap', 'copy_trimap'
]),
# dict(type='ImageToTensor', keys=['ori_merged','trimap', 'trimap_transformed']),
dict(type='ImageToTensor', keys=['ori_merged','trimap', 'trimap_transformed', 'merged']),
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=4,
drop_last=False,
train=dict(
type=dataset_type,
ann_file=data_root + 'st-portrait-all-fg-restimate_train.json',
data_prefix=data_root,
pipeline=train_pipeline),
# validation
val_samples_per_gpu=1,
val_workers_per_gpu=4,
val=dict(
type=dataset_type,
ann_file=data_root + 'st-portrait-all-fg-restimate_val.json',
data_prefix=data_root,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'st-portrait-all-fg-restimate_val.json',
data_prefix=data_root,
pipeline=test_pipeline))
# optimizer
# optimizers_cfg = dict(type='Adam', lr=1e-5, momentun=0.9, weight_decay=0.0001)
# paramwise_cfg_1 = dict(custom_keys={'conv': dict(lr_mult=1, decay_mult=50), 'bn': dict(lr_mult=1, decay_mult=0.1})
# paramwise_cfg_2 = dict(custom_keys={)})
optimizers = dict(
constructor='DefaultOptimizerConstructor',
type='Adam',
lr=1e-5,
weight_decay=0.0001,
paramwise_cfg=dict(custom_keys={'conv':dict(lr_mult=1, decay_mult=50), 'bn':dict(lr_mult=1, decay_mult=0.1)})
)
# learning policy
#lr_config = dict(policy='Fixed')
lr_config = dict(policy='Step', step=[64000 * 10], gamma=0.1, by_epoch=False)
# checkpoint saving
checkpoint_config = dict(interval=16000, by_epoch=False)
evaluation = dict(interval=16000, save_image=False)
# yapf:disable
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook'),
# dict(type='PaviLoggerHook', init_kwargs=dict(project='dim'))
])
# yapf:enable
# runtime settings
total_iters = 64000*20
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/fba/train'
load_from = 'work_dirs/fba/mx-gn1-gpu6/iter_828000.pth' # best model
resume_from = None
workflow = [('train', 1)]
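# --- Illustrative launch command (editorial sketch) ---
# Assuming the standard MMEditing tool layout, a 4-GPU run of this config would
# look like the following (the config path is a placeholder for wherever this
# file actually lives in the repository):
#   ./tools/dist_train.sh configs/mattors/fba/fba_finetune.py 4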
|
the-stack_0_22682 | import datetime
import json
import logging
from copy import deepcopy
from typing import Any, List, Union
from great_expectations import __version__ as ge_version
from great_expectations.core.evaluation_parameters import (
_deduplicate_evaluation_parameter_dependencies,
)
from great_expectations.core.expectation_configuration import (
ExpectationConfiguration,
ExpectationConfigurationSchema,
)
from great_expectations.core.util import (
convert_to_json_serializable,
ensure_json_serializable,
nested_update,
)
from great_expectations.exceptions import (
DataContextError,
InvalidExpectationConfigurationError,
)
from great_expectations.marshmallow__shade import (
Schema,
ValidationError,
fields,
post_load,
pre_dump,
)
from great_expectations.types import SerializableDictDot
logger = logging.getLogger(__name__)
class ExpectationSuite(SerializableDictDot):
"""
This ExpectationSuite object has create, read, update, and delete functionality for its expectations:
-create: self.add_expectation()
-read: self.find_expectation_indexes()
-update: self.add_expectation() or self.patch_expectation()
-delete: self.remove_expectation()
"""
def __init__(
self,
expectation_suite_name,
expectations=None,
evaluation_parameters=None,
data_asset_type=None,
execution_engine_type=None,
meta=None,
):
self.expectation_suite_name = expectation_suite_name
if expectations is None:
expectations = []
self.expectations = [
ExpectationConfiguration(**expectation)
if isinstance(expectation, dict)
else expectation
for expectation in expectations
]
if evaluation_parameters is None:
evaluation_parameters = {}
self.evaluation_parameters = evaluation_parameters
self.data_asset_type = data_asset_type
self.execution_engine_type = execution_engine_type
if meta is None:
meta = {"great_expectations_version": ge_version}
if (
"great_expectations.__version__" not in meta.keys()
and "great_expectations_version" not in meta.keys()
):
meta["great_expectations_version"] = ge_version
# We require meta information to be serializable, but do not convert until necessary
ensure_json_serializable(meta)
self.meta = meta
def add_citation(
self,
comment,
batch_kwargs=None,
batch_markers=None,
batch_parameters=None,
citation_date=None,
):
if "citations" not in self.meta:
self.meta["citations"] = []
self.meta["citations"].append(
{
"citation_date": citation_date
or datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
),
"batch_kwargs": batch_kwargs,
"batch_markers": batch_markers,
"batch_parameters": batch_parameters,
"comment": comment,
}
)
def isEquivalentTo(self, other):
"""
ExpectationSuite equivalence relies only on expectations and evaluation parameters. It does not include:
- data_asset_name
- expectation_suite_name
- meta
- data_asset_type
"""
if not isinstance(other, self.__class__):
if isinstance(other, dict):
try:
other = expectationSuiteSchema.load(other)
except ValidationError:
logger.debug(
"Unable to evaluate equivalence of ExpectationConfiguration object with dict because "
"dict other could not be instantiated as an ExpectationConfiguration"
)
return NotImplemented
else:
# Delegate comparison to the other instance
return NotImplemented
return len(self.expectations) == len(other.expectations) and all(
[
mine.isEquivalentTo(theirs)
for (mine, theirs) in zip(self.expectations, other.expectations)
]
)
def __eq__(self, other):
"""ExpectationSuite equality ignores instance identity, relying only on properties."""
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return all(
(
self.expectation_suite_name == other.expectation_suite_name,
self.expectations == other.expectations,
self.evaluation_parameters == other.evaluation_parameters,
self.data_asset_type == other.data_asset_type,
self.meta == other.meta,
)
)
def __ne__(self, other):
# By using the == operator, the returned NotImplemented is handled correctly.
return not self == other
def __repr__(self):
return json.dumps(self.to_json_dict(), indent=2)
def __str__(self):
return json.dumps(self.to_json_dict(), indent=2)
def to_json_dict(self):
myself = expectationSuiteSchema.dump(self)
# NOTE - JPC - 20191031: migrate to expectation-specific schemas that subclass result with properly-typed
# schemas to get serialization all-the-way down via dump
myself["expectations"] = convert_to_json_serializable(myself["expectations"])
try:
myself["evaluation_parameters"] = convert_to_json_serializable(
myself["evaluation_parameters"]
)
except KeyError:
pass # Allow evaluation parameters to be missing if empty
myself["meta"] = convert_to_json_serializable(myself["meta"])
return myself
def get_evaluation_parameter_dependencies(self):
dependencies = {}
for expectation in self.expectations:
t = expectation.get_evaluation_parameter_dependencies()
nested_update(dependencies, t)
dependencies = _deduplicate_evaluation_parameter_dependencies(dependencies)
return dependencies
def get_citations(self, sort=True, require_batch_kwargs=False):
citations = self.meta.get("citations", [])
if require_batch_kwargs:
citations = self._filter_citations(citations, "batch_kwargs")
if not sort:
return citations
return self._sort_citations(citations)
def get_table_expectations(self):
"""Return a list of table expectations."""
return [
e
for e in self.expectations
if e.expectation_type.startswith("expect_table_")
]
def get_column_expectations(self):
"""Return a list of column map expectations."""
return [e for e in self.expectations if "column" in e.kwargs]
@staticmethod
def _filter_citations(citations, filter_key):
citations_with_bk = []
for citation in citations:
if filter_key in citation and citation.get(filter_key):
citations_with_bk.append(citation)
return citations_with_bk
@staticmethod
def _sort_citations(citations):
return sorted(citations, key=lambda x: x["citation_date"])
# CRUD methods #
def append_expectation(self, expectation_config):
"""Appends an expectation.
Args:
expectation_config (ExpectationConfiguration): \
The expectation to be added to the list.
Notes:
May want to add type-checking in the future.
"""
self.expectations.append(expectation_config)
def remove_expectation(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
remove_multiple_matches: bool = False,
) -> List[ExpectationConfiguration]:
"""
Args:
expectation_configuration: A potentially incomplete (partial) Expectation Configuration to match against
for the removal of expectations.
match_type: This determines what kwargs to use when matching. Options are 'domain' to match based
on the data evaluated by that expectation, 'success' to match based on all configuration parameters
that influence whether an expectation succeeds based on a given batch of data, and 'runtime' to match
based on all configuration parameters
remove_multiple_matches: If True, will remove multiple matching expectations. If False, will raise a ValueError.
Returns: The list of deleted ExpectationConfigurations
Raises:
ValueError: if no matching expectation is found
ValueError: if more than one match is found and remove_multiple_matches is False
"""
found_expectation_indexes = self.find_expectation_indexes(
expectation_configuration, match_type
)
if len(found_expectation_indexes) < 1:
raise ValueError("No matching expectation was found.")
elif len(found_expectation_indexes) > 1:
if remove_multiple_matches:
removed_expectations = []
for index in sorted(found_expectation_indexes, reverse=True):
removed_expectations.append(self.expectations.pop(index))
return removed_expectations
else:
raise ValueError(
"More than one matching expectation was found. Specify more precise matching criteria,"
"or set remove_multiple_matches=True"
)
else:
return [self.expectations.pop(found_expectation_indexes[0])]
def remove_all_expectations_of_type(
self, expectation_types: Union[List[str], str]
) -> List[ExpectationConfiguration]:
if isinstance(expectation_types, str):
expectation_types = [expectation_types]
removed_expectations = [
expectation
for expectation in self.expectations
if expectation.expectation_type in expectation_types
]
self.expectations = [
expectation
for expectation in self.expectations
if expectation.expectation_type not in expectation_types
]
return removed_expectations
def find_expectation_indexes(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[int]:
"""
Args:
expectation_configuration: A potentially incomplete (partial) Expectation Configuration to match against to
find the index of any matching Expectation Configurations on the suite.
match_type: This determines what kwargs to use when matching. Options are 'domain' to match based
on the data evaluated by that expectation, 'success' to match based on all configuration parameters
that influence whether an expectation succeeds based on a given batch of data, and 'runtime' to match
based on all configuration parameters
Returns: A list of indexes of matching ExpectationConfiguration
Raises:
InvalidExpectationConfigurationError
"""
if not isinstance(expectation_configuration, ExpectationConfiguration):
raise InvalidExpectationConfigurationError(
"Ensure that expectation configuration is valid."
)
match_indexes = []
for idx, expectation in enumerate(self.expectations):
if expectation.isEquivalentTo(expectation_configuration, match_type):
match_indexes.append(idx)
return match_indexes
def find_expectations(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[ExpectationConfiguration]:
found_expectation_indexes = self.find_expectation_indexes(
expectation_configuration, match_type
)
if len(found_expectation_indexes) > 0:
return [
expectation
for idx, expectation in enumerate(self.expectations)
if idx in found_expectation_indexes
]
else:
return []
def patch_expectation(
self,
expectation_configuration: ExpectationConfiguration,
op: str,
path: str,
value: Any,
match_type: str,
) -> ExpectationConfiguration:
"""
Args:
expectation_configuration: A potentially incomplete (partial) Expectation Configuration to match against to
find the expectation to patch.
op: A jsonpatch operation (one of 'add', 'replace', or 'remove') (see http://jsonpatch.com/)
path: A jsonpatch path for the patch operation (see http://jsonpatch.com/)
value: The value to patch (see http://jsonpatch.com/)
match_type: The match type to use for find_expectation_index()
Returns: The patched ExpectationConfiguration
Raises:
ValueError: if no matching expectation is found
ValueError: if more than one matching expectation is found
"""
found_expectation_indexes = self.find_expectation_indexes(
expectation_configuration, match_type
)
if len(found_expectation_indexes) < 1:
raise ValueError("No matching expectation was found.")
elif len(found_expectation_indexes) > 1:
raise ValueError(
"More than one matching expectation was found. Please be more specific with your search "
"criteria"
)
self.expectations[found_expectation_indexes[0]].patch(op, path, value)
return self.expectations[found_expectation_indexes[0]]
def add_expectation(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
overwrite_existing: bool = True,
) -> ExpectationConfiguration:
"""
Args:
expectation_configuration: The ExpectationConfiguration to add or update
match_type: The criteria used to determine whether the Suite already has an ExpectationConfiguration
and so whether we should add or replace.
overwrite_existing: If the expectation already exists, this will overwrite if True and raise an error if
False.
Returns:
The ExpectationConfiguration to add or replace.
Raises:
ValueError: if more than one matching expectation is found
DataContextError: if exactly one match is found and overwrite_existing is False
"""
found_expectation_indexes = self.find_expectation_indexes(
expectation_configuration, match_type
)
if len(found_expectation_indexes) > 1:
raise ValueError(
"More than one matching expectation was found. Please be more specific with your search "
"criteria"
)
elif len(found_expectation_indexes) == 1:
# Currently, we completely replace the expectation_configuration, but we could potentially use patch_expectation
# to update instead. We need to consider how to handle meta in that situation.
# patch_expectation = jsonpatch.make_patch(self.expectations[found_expectation_index] \
# .kwargs, expectation_configuration.kwargs)
# patch_expectation.apply(self.expectations[found_expectation_index].kwargs, in_place=True)
if overwrite_existing:
self.expectations[
found_expectation_indexes[0]
] = expectation_configuration
else:
raise DataContextError(
"A matching ExpectationConfiguration already exists. If you would like to overwrite this "
"ExpectationConfiguration, set overwrite_existing=True"
)
else:
self.append_expectation(expectation_configuration)
return expectation_configuration
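# Illustrative usage sketch: a minimal, hedged example of the CRUD methods above. The
# suite name and the expectation shown are placeholders, not taken from this codebase.
#
#     suite = ExpectationSuite(expectation_suite_name="demo.suite")
#     config = ExpectationConfiguration(
#         expectation_type="expect_column_values_to_not_be_null",
#         kwargs={"column": "user_id"},
#     )
#     suite.add_expectation(config)                           # create / update
#     suite.find_expectation_indexes(config)                  # read -> [0]
#     suite.remove_expectation(config, match_type="domain")   # delete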
class ExpectationSuiteSchema(Schema):
expectation_suite_name = fields.Str()
expectations = fields.List(fields.Nested(ExpectationConfigurationSchema))
evaluation_parameters = fields.Dict(allow_none=True)
data_asset_type = fields.Str(allow_none=True)
meta = fields.Dict()
# NOTE: 20191107 - JPC - we may want to remove clean_empty and update tests to require the other fields;
# doing so could also allow us not to have to make a copy of data in the pre_dump method.
def clean_empty(self, data):
if not hasattr(data, "evaluation_parameters"):
pass
elif len(data.evaluation_parameters) == 0:
del data.evaluation_parameters
if not hasattr(data, "meta"):
pass
elif data.meta is None or data.meta == []:
pass
elif len(data.meta) == 0:
del data.meta
return data
# noinspection PyUnusedLocal
@pre_dump
def prepare_dump(self, data, **kwargs):
data = deepcopy(data)
data.meta = convert_to_json_serializable(data.meta)
data = self.clean_empty(data)
return data
# noinspection PyUnusedLocal
@post_load
def make_expectation_suite(self, data, **kwargs):
return ExpectationSuite(**data)
expectationSuiteSchema = ExpectationSuiteSchema()
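# Illustrative serialization sketch: expectationSuiteSchema can round-trip a suite to
# and from a plain dict. The suite below is a placeholder example.
#
#     suite = ExpectationSuite(expectation_suite_name="demo.suite")
#     as_dict = expectationSuiteSchema.dump(suite)      # marshmallow dump -> dict
#     restored = expectationSuiteSchema.load(as_dict)   # @post_load -> ExpectationSuite
#     assert restored.isEquivalentTo(suite)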
|
the-stack_0_22683 | from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
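# Hedged sketch of the command under test: the patches on
# django.db.utils.ConnectionHandler.__getitem__ and time.sleep above imply a management
# command roughly like the following. The module path and messages are assumptions, not
# taken from this repository.
#
#     # core/management/commands/wait_for_db.py
#     import time
#     from django.db import connections
#     from django.db.utils import OperationalError
#     from django.core.management.base import BaseCommand
#
#     class Command(BaseCommand):
#         """Pause execution until the database is available."""
#
#         def handle(self, *args, **options):
#             self.stdout.write('Waiting for database...')
#             db_conn = None
#             while not db_conn:
#                 try:
#                     db_conn = connections['default']   # triggers __getitem__
#                 except OperationalError:
#                     self.stdout.write('Database unavailable, waiting 1 second...')
#                     time.sleep(1)
#             self.stdout.write(self.style.SUCCESS('Database available!'))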
|
the-stack_0_22684 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__version__ = '0.2.5'
# Some global variables we use
source = None
shutdown_event = None
import math
import time
import os
import sys
import threading
import re
import signal
import socket
# Used for bound_interface
socket_socket = socket.socket
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
from xml.dom import minidom as DOM
ET = None
# Begin import game to handle Python 2 and Python 3
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request, HTTPError, URLError
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
except ImportError:
from optparse import OptionParser as ArgParser
try:
import builtins
except ImportError:
def print_(*args, **kwargs):
"""The new-style print function taken from
https://pypi.python.org/pypi/six/
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
else:
print_ = getattr(builtins, 'print')
del builtins
def bound_socket(*args, **kwargs):
"""Bind socket to a specified source IP address"""
global source
sock = socket_socket(*args, **kwargs)
sock.bind((source, 0))
return sock
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1))
* math.cos(math.radians(lat2)) * math.sin(dlon / 2)
* math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
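# Hedged usage example: distance() implements the haversine great-circle formula. The
# coordinates below (roughly London and Paris) are illustrative; the result should be
# in the neighbourhood of 340 km.
#
#     d = distance((51.5074, -0.1278), (48.8566, 2.3522))
#     print('%0.1f km' % d)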
class FileGetter(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, url, start):
self.url = url
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
self.result = [0]
try:
if (time.time() - self.starttime) <= 10:
f = urlopen(self.url)
while not shutdown_event.isSet():
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
def downloadSpeed(files, quiet=False):
"""Function to launch FileGetter threads and calculate download speeds"""
start = time.time()
def producer(q, files):
for file in files:
thread = FileGetter(file, start)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_files):
while len(finished) < total_files:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(sum(thread.result))
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, files))
cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
start = time.time()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (time.time() - start))
class FilePutter(threading.Thread):
"""Thread class for putting a URL"""
def __init__(self, url, start, size):
self.url = url
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = chars * (int(round(int(size) / 36.0)))
self.data = ('content1=%s' % data[0:int(size) - 9]).encode()
del data
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
try:
if ((time.time() - self.starttime) <= 10 and
not shutdown_event.isSet()):
f = urlopen(self.url, self.data)
f.read(11)
f.close()
self.result = len(self.data)
else:
self.result = 0
except IOError:
self.result = 0
def uploadSpeed(url, sizes, quiet=False):
"""Function to launch FilePutter threads and calculate upload speeds"""
start = time.time()
def producer(q, sizes):
for size in sizes:
thread = FilePutter(url, start, size)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_sizes):
while len(finished) < total_sizes:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(thread.result)
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, sizes))
cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
start = time.time()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (time.time() - start))
def getAttributesByTagName(dom, tagName):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tagName)[0]
return dict(list(elem.attributes.items()))
def getConfig():
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
uh = urlopen('http://www.speedtest.net/speedtest-config.php')
configxml = []
while 1:
configxml.append(uh.read(10240))
if len(configxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
root = ET.fromstring(''.encode().join(configxml))
config = {
'client': root.find('client').attrib,
'times': root.find('times').attrib,
'download': root.find('download').attrib,
'upload': root.find('upload').attrib}
except AttributeError:
root = DOM.parseString(''.join(configxml))
config = {
'client': getAttributesByTagName(root, 'client'),
'times': getAttributesByTagName(root, 'times'),
'download': getAttributesByTagName(root, 'download'),
'upload': getAttributesByTagName(root, 'upload')}
del root
del configxml
return config
def closestServers(client, all=False):
"""Determine the 5 closest speedtest.net servers based on geographic
distance
"""
uh = urlopen('http://www.speedtest.net/speedtest-servers.php')
serversxml = []
while 1:
serversxml.append(uh.read(10240))
if len(serversxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
root = ET.fromstring(''.encode().join(serversxml))
elements = root.getiterator('server')
except AttributeError:
root = DOM.parseString(''.join(serversxml))
elements = root.getElementsByTagName('server')
servers = {}
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
d = distance([float(client['lat']), float(client['lon'])],
[float(attrib.get('lat')), float(attrib.get('lon'))])
attrib['d'] = d
if d not in servers:
servers[d] = [attrib]
else:
servers[d].append(attrib)
del root
del serversxml
del elements
closest = []
for d in sorted(servers.keys()):
for s in servers[d]:
closest.append(s)
if len(closest) == 5 and not all:
break
else:
continue
break
del servers
return closest
def getBestServer(servers):
"""Perform a speedtest.net "ping" to determine which speedtest.net
server has the lowest latency
"""
results = {}
for server in servers:
cum = []
url = os.path.dirname(server['url'])
for i in range(0, 3):
try:
uh = urlopen('%s/latency.txt' % url)
except (HTTPError, URLError):
cum.append(3600)
continue
start = time.time()
text = uh.read(9)
total = time.time() - start
if int(uh.code) == 200 and text == 'test=test'.encode():
cum.append(total)
else:
cum.append(3600)
uh.close()
avg = round((sum(cum) / 3) * 1000000, 3)
results[avg] = server
fastest = sorted(results.keys())[0]
best = results[fastest]
best['latency'] = fastest
return best
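# Hedged flow sketch: the helpers above are combined by speedtest() further below
# roughly as follows. This is a condensed restatement for orientation, not an
# alternative entry point; the 500x500 image size and 250 kB upload size are just two
# of the values used later.
#
#     shutdown_event = threading.Event()
#     config = getConfig()
#     servers = closestServers(config['client'])
#     best = getBestServer(servers)
#     urls = ['%s/random500x500.jpg' % os.path.dirname(best['url'])] * 4
#     dl = downloadSpeed(urls, quiet=True)
#     ul = uploadSpeed(best['url'], [250000] * 25, quiet=True)
#     print_('Download: %0.2f Mbit/s  Upload: %0.2f Mbit/s'
#            % (dl * 8 / 1000 / 1000, ul * 8 / 1000 / 1000))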
def ctrl_c(signum, frame):
"""Catch Ctrl-C key sequence and set a shutdown_event for our threaded
operations
"""
global shutdown_event
shutdown_event.set()
raise SystemExit('\nCancelling...')
def version():
"""Print the version"""
raise SystemExit(__version__)
def speedtest():
"""Run the full speedtest.net test"""
global shutdown_event, source
shutdown_event = threading.Event()
signal.signal(signal.SIGINT, ctrl_c)
description = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
try:
parser.add_argument = parser.add_option
except AttributeError:
pass
parser.add_argument('--share', action='store_true',
help='Generate and provide a URL to the speedtest.net '
'share results image')
parser.add_argument('--simple', action='store_true',
help='Suppress verbose output, only show basic '
'information')
parser.add_argument('--list', action='store_true',
help='Display a list of speedtest.net servers '
'sorted by distance')
parser.add_argument('--server', help='Specify a server ID to test against')
parser.add_argument('--mini', help='URL of the Speedtest Mini server')
parser.add_argument('--source', help='Source IP address to bind to')
parser.add_argument('--version', action='store_true',
help='Show the version number and exit')
options = parser.parse_args()
if isinstance(options, tuple):
args = options[0]
else:
args = options
del options
# Print the version and exit
if args.version:
version()
# If specified bind to a specific IP address
if args.source:
source = args.source
socket.socket = bound_socket
if not args.simple:
print_('Retrieving speedtest.net configuration...')
try:
config = getConfig()
except URLError:
print_('Cannot retrieve speedtest configuration')
sys.exit(1)
if not args.simple:
print_('Retrieving speedtest.net server list...')
if args.list or args.server:
servers = closestServers(config['client'], True)
if args.list:
serverList = []
for server in servers:
line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
'[%(d)0.2f km]' % server)
serverList.append(line)
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_('\n'.join(serverList).encode('utf-8', 'ignore'))
except NameError:
print_('\n'.join(serverList))
except IOError:
pass
sys.exit(0)
else:
servers = closestServers(config['client'])
if not args.simple:
print_('Testing from %(isp)s (%(ip)s)...' % config['client'])
if args.server:
try:
best = getBestServer(filter(lambda x: x['id'] == args.server,
servers))
except IndexError:
print_('Invalid server ID')
sys.exit(1)
elif args.mini:
name, ext = os.path.splitext(args.mini)
if ext:
url = os.path.dirname(args.mini)
else:
url = args.mini
urlparts = urlparse(url)
try:
f = urlopen(args.mini)
except:
print_('Invalid Speedtest Mini URL')
sys.exit(1)
else:
text = f.read()
f.close()
extension = re.findall('upload_extension: "([^"]+)"', text.decode())
if not urlparts or not extension:
print_('Please provide the full URL of your Speedtest Mini server')
sys.exit(1)
servers = [{
'sponsor': 'Speedtest Mini',
'name': urlparts[1],
'd': 0,
'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
'latency': 0,
'id': 0
}]
try:
best = getBestServer(servers)
except:
best = servers[0]
else:
if not args.simple:
print_('Selecting best server based on ping...')
best = getBestServer(servers)
if not args.simple:
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best).encode('utf-8', 'ignore'))
except NameError:
print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best)
else:
print_('Ping: %(latency)s ms' % best)
sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
urls = []
for size in sizes:
for i in range(0, 4):
urls.append('%s/random%sx%s.jpg' %
(os.path.dirname(best['url']), size, size))
if not args.simple:
print_('Testing download speed', end='')
dlspeed = downloadSpeed(urls, args.simple)
if not args.simple:
print_()
print_('Download: %0.2f Mbit/s' % ((dlspeed / 1000 / 1000) * 8))
sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
sizes = []
for size in sizesizes:
for i in range(0, 25):
sizes.append(size)
if not args.simple:
print_('Testing upload speed', end='')
ulspeed = uploadSpeed(best['url'], sizes, args.simple)
if not args.simple:
print_()
print_('Upload: %0.2f Mbit/s' % ((ulspeed / 1000 / 1000) * 8))
if args.share and args.mini:
print_('Cannot generate a speedtest.net share results image while '
'testing against a Speedtest Mini server')
elif args.share:
dlspeedk = int(round((dlspeed / 1000) * 8, 0))
ping = int(round(best['latency'], 0))
ulspeedk = int(round((ulspeed / 1000) * 8, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
apiData = [
'download=%s' % dlspeedk,
'ping=%s' % ping,
'upload=%s' % ulspeedk,
'promo=',
'startmode=%s' % 'pingselect',
'recommendedserverid=%s' % best['id'],
'accuracy=%s' % 1,
'serverid=%s' % best['id'],
'hash=%s' % md5(('%s-%s-%s-%s' %
(ping, ulspeedk, dlspeedk, '297aae72'))
.encode()).hexdigest()]
req = Request('http://www.speedtest.net/api/api.php',
data='&'.join(apiData).encode())
req.add_header('Referer', 'http://c.speedtest.net/flash/speedtest.swf')
f = urlopen(req)
response = f.read()
code = f.code
f.close()
if int(code) != 200:
print_('Could not submit results to speedtest.net')
sys.exit(1)
qsargs = parse_qs(response.decode())
resultid = qsargs.get('resultid')
if not resultid or len(resultid) != 1:
print_('Could not submit results to speedtest.net')
sys.exit(1)
print_('Share results: http://www.speedtest.net/result/%s.png' %
resultid[0])
def main():
try:
speedtest()
except KeyboardInterrupt:
print_('\nCancelling...')
if __name__ == '__main__':
main()
# vim:ts=4:sw=4:expandtab
|
the-stack_0_22686 | from projects.tutorials.object_nav_ithor_dagger_then_ppo_one_object import (
ObjectNavThorDaggerThenPPOExperimentConfig,
)
from allenact.utils.viz_utils import (
VizSuite,
TrajectoryViz,
AgentViewViz,
ActorViz,
TensorViz1D,
)
from allenact_plugins.ithor_plugin.ithor_viz import ThorViz
class ObjectNavThorDaggerThenPPOVizExperimentConfig(
ObjectNavThorDaggerThenPPOExperimentConfig
):
"""A simple object navigation experiment in THOR.
Training with DAgger and then PPO + using viz for test.
"""
TEST_SAMPLES_IN_SCENE = 4
@classmethod
def tag(cls):
return "ObjectNavThorDaggerThenPPOViz"
viz = None
def get_viz(self, mode):
if self.viz is not None:
return self.viz
self.viz = VizSuite(
mode=mode,
base_trajectory=TrajectoryViz(
path_to_target_location=None, path_to_rot_degrees=("rotation",),
),
egeocentric=AgentViewViz(max_video_length=100),
action_probs=ActorViz(figsize=(3.25, 10), fontsize=18),
taken_action_logprobs=TensorViz1D(),
episode_mask=TensorViz1D(rollout_source=("masks",)),
thor_trajectory=ThorViz(
path_to_target_location=None, figsize=(8, 8), viz_rows_cols=(448, 448),
),
)
return self.viz
def machine_params(self, mode="train", **kwargs):
params = super().machine_params(mode, **kwargs)
if mode == "test":
params.set_visualizer(self.get_viz(mode))
return params
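# Hedged usage note: machine_params() only attaches the VizSuite when mode == "test",
# so the visualizations add no overhead during training. An assumed, minimal check:
#
#     cfg = ObjectNavThorDaggerThenPPOVizExperimentConfig()
#     test_params = cfg.machine_params("test")    # visualizer attached via set_visualizer()
#     train_params = cfg.machine_params("train")  # no visualizer attached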
|
the-stack_0_22687 | """Initial Migration
Revision ID: 3bd8233a6058
Revises:
Create Date: 2019-09-18 10:44:13.586778
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3bd8233a6058'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('users')
op.drop_table('roles')
# ### end Alembic commands ###
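# Hedged model sketch: models that would produce this auto-generated migration,
# assuming a Flask-SQLAlchemy setup (the real application models are not part of this
# file, so names and relationships are assumptions):
#
#     class Role(db.Model):
#         __tablename__ = 'roles'
#         id = db.Column(db.Integer, primary_key=True)
#         name = db.Column(db.String(255))
#         users = db.relationship('User', backref='role', lazy='dynamic')
#
#     class User(db.Model):
#         __tablename__ = 'users'
#         id = db.Column(db.Integer, primary_key=True)
#         username = db.Column(db.String(255))
#         role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))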
|
the-stack_0_22688 | #!/usr/bin/env python
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist, set_random_seed
from mmcv.utils import get_git_hash
from mmocr import __version__
from mmocr.apis import train_detector
from mmocr.datasets import build_dataset
from mmocr.models import build_detector
from mmocr.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector.')
parser.add_argument('config', help='Train config file path.')
parser.add_argument('--work-dir', help='The dir to save logs and models.')
parser.add_argument(
'--load-from', help='The checkpoint file to load from.')
parser.add_argument(
'--resume-from', help='The checkpoint file to resume from.')
parser.add_argument(
'--no-validate',
action='store_true',
help='Whether not to evaluate the checkpoint during training.')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='Number of gpus to use '
'(only applicable to non-distributed training).')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training).')
parser.add_argument('--seed', type=int, default=None, help='Random seed.')
parser.add_argument(
'--deterministic',
action='store_true',
help='Whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='Override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='Override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be of the form of either '
'key="[a,b]" or key=a,b .The argument also allows nested list/tuple '
'values, e.g. key="[(a,b),(c,d)]". Note that the quotation marks '
'are necessary and that no white space is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='Options for job launcher.')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--mc-config',
type=str,
default='',
help='Memory cache config for image loading speed-up during training.')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# update mc config
if args.mc_config:
mc = Config.fromfile(args.mc_config)
if isinstance(cfg.data.train, list):
for i in range(len(cfg.data.train)):
cfg.data.train[i].pipeline[0].update(
file_client_args=mc['mc_file_client_args'])
else:
cfg.data.train.pipeline[0].update(
file_client_args=mc['mc_file_client_args'])
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
if cfg.data.train['type'] == 'ConcatDataset':
train_pipeline = cfg.data.train['datasets'][0].pipeline
else:
train_pipeline = cfg.data.train.pipeline
if val_dataset['type'] == 'ConcatDataset':
for dataset in val_dataset['datasets']:
dataset.pipeline = train_pipeline
else:
val_dataset.pipeline = train_pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmocr_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
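# Hedged CLI usage examples: the config path is illustrative; every flag shown is
# defined in parse_args() above.
#
#     # single-GPU training with a fixed seed and a custom work dir
#     python train.py configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py \
#         --work-dir work_dirs/dbnet_r18 --seed 0 --deterministic
#
#     # override individual config values on the command line
#     python train.py <config.py> --cfg-options data.samples_per_gpu=8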
|
the-stack_0_22689 | import os
import torch
import numpy as np
import scipy.misc as m
from torch.utils import data
from ptsemseg.utils import recursive_glob
from ptsemseg.augmentations import Compose, RandomHorizontallyFlip, RandomRotate, Scale
class cityscapesLoader(data.Dataset):
"""cityscapesLoader
https://www.cityscapes-dataset.com
Data is derived from CityScapes, and can be downloaded from here:
https://www.cityscapes-dataset.com/downloads/
Many Thanks to @fvisin for the loader repo:
https://github.com/fvisin/dataset_loaders/blob/master/dataset_loaders/images/cityscapes.py
"""
colors = [ # [ 0, 0, 0],
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[152, 251, 152],
[0, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
]
label_colours = dict(zip(range(19), colors))
mean_rgb = {
"pascal": [103.939, 116.779, 123.68],
"cityscapes": [0.0, 0.0, 0.0],
} # pascal mean for PSPNet and ICNet pre-trained model
def __init__(
self,
root,
split="train",
is_transform=False,
img_size=(512, 1024),
augmentations=None,
img_norm=False,
version="pascal",
test_mode=False,
):
"""__init__
:param root:
:param split:
:param is_transform:
:param img_size:
:param augmentations
"""
self.root = root
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.img_norm = img_norm
self.n_classes = 19
self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
self.mean = np.array(self.mean_rgb[version])
self.files = {}
self.images_base = os.path.join(self.root, "leftImg8bit", self.split)
self.annotations_base = os.path.join(self.root, "gtFine", self.split)
self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".png")
self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
self.valid_classes = [
7,
8,
11,
12,
13,
17,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
31,
32,
33,
]
self.class_names = [
"unlabelled",
"road",
"sidewalk",
"building",
"wall",
"fence",
"pole",
"traffic_light",
"traffic_sign",
"vegetation",
"terrain",
"sky",
"person",
"rider",
"car",
"truck",
"bus",
"train",
"motorcycle",
"bicycle",
]
self.ignore_index = 250
self.class_map = dict(zip(self.valid_classes, range(19)))
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
def __len__(self):
"""__len__"""
return len(self.files[self.split])
def __getitem__(self, index):
"""__getitem__
:param index:
"""
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(
self.annotations_base,
img_path.split(os.sep)[-2],
os.path.basename(img_path)[:-15] + "gtFine_labelIds.png",
)
img = m.imread(img_path)
img = np.array(img, dtype=np.uint8)
lbl = m.imread(lbl_path)
lbl = self.encode_segmap(np.array(lbl, dtype=np.uint8))
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.is_transform:
img, lbl = self.transform(img, lbl)
return img, lbl
def transform(self, img, lbl):
"""transform
:param img:
:param lbl:
"""
img = m.imresize(img, (self.img_size[0], self.img_size[1])) # uint8 with RGB mode
img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float64)
img -= self.mean
if self.img_norm:
# Resize scales images from 0 to 255, thus we need
# to divide by 255.0
img = img.astype(float) / 255.0
# NHWC -> NCHW
img = img.transpose(2, 0, 1)
classes = np.unique(lbl)
lbl = lbl.astype(float)
lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), "nearest", mode="F")
lbl = lbl.astype(int)
if not np.all(classes == np.unique(lbl)):
print("WARN: resizing labels yielded fewer classes")
if not np.all(np.unique(lbl[lbl != self.ignore_index]) < self.n_classes):
print("after det", classes, np.unique(lbl))
raise ValueError("Segmentation map contained invalid class values")
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def decode_segmap(self, temp):
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, self.n_classes):
r[temp == l] = self.label_colours[l][0]
g[temp == l] = self.label_colours[l][1]
b[temp == l] = self.label_colours[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
return rgb
def encode_segmap(self, mask):
# Put all void classes to zero
for _voidc in self.void_classes:
mask[mask == _voidc] = self.ignore_index
for _validc in self.valid_classes:
mask[mask == _validc] = self.class_map[_validc]
return mask
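# Hedged usage example: encode_segmap() maps raw Cityscapes label ids onto the 19
# training classes (void classes become ignore_index = 250) and decode_segmap() turns
# a train-id map back into an RGB image. local_path is assumed to point at a
# Cityscapes root, as in the __main__ block below.
#
#     loader = cityscapesLoader(local_path, is_transform=False)
#     raw = np.array([[7, 26, 0]], dtype=np.uint8)   # road, car, void
#     enc = loader.encode_segmap(raw.copy())         # -> [[0, 13, 250]]
#     rgb = loader.decode_segmap(enc)                # (1, 3, 3) float image in [0, 1]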
if __name__ == "__main__":
import matplotlib.pyplot as plt
augmentations = Compose([Scale(2048), RandomRotate(10), RandomHorizontallyFlip(0.5)])
local_path = "/datasets01/cityscapes/112817/"
dst = cityscapesLoader(local_path, is_transform=True, augmentations=augmentations)
bs = 4
trainloader = data.DataLoader(dst, batch_size=bs, num_workers=0)
for i, data_samples in enumerate(trainloader):
imgs, labels = data_samples
import pdb
pdb.set_trace()
imgs = imgs.numpy()[:, ::-1, :, :]
imgs = np.transpose(imgs, [0, 2, 3, 1])
f, axarr = plt.subplots(bs, 2)
for j in range(bs):
axarr[j][0].imshow(imgs[j])
axarr[j][1].imshow(dst.decode_segmap(labels.numpy()[j]))
plt.show()
a = input()
if a == "ex":
break
else:
plt.close()
|
the-stack_0_22690 | """Test praw.models.listing.generator."""
from ... import IntegrationTest
class TestListingGenerator(IntegrationTest):
def test_exhaust_items_with_before(self):
with self.use_cassette():
submissions = list(
self.reddit.redditor("spez").top(
limit=None, params={"before": "3cxedn"}
)
)
assert len(submissions) > 100
def test_exhaust_items(self):
with self.use_cassette():
submissions = list(self.reddit.redditor("spez").top(limit=None))
assert len(submissions) > 100
def test_no_items(self):
with self.use_cassette():
submissions = list(self.reddit.redditor("spez").top("hour"))
assert len(submissions) == 0
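# Hedged usage sketch: outside the cassette-based test harness, the same
# ListingGenerator behaviour looks roughly like this (a configured `reddit` instance is
# assumed):
#
#     for submission in reddit.redditor("spez").top(limit=None):
#         ...  # paginates until the listing is exhausted
#
#     recent = list(reddit.redditor("spez").top("hour"))  # may well be empty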
|
the-stack_0_22691 | """
A swarm plot showing the population change from 2010 to 2017 as a
function of the number of homicides over that period.
"""
from .. import datasets as gv_data
from . import default_style, palette
import pandas as pd
import geopandas as gpd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
def _load_data():
"""
Load the data and calculate the population change.
"""
# Load homicides and census tracts
homicides = gv_data.PoliceHomicides.get()
tracts = gv_data.CensusTracts2010.get()
# Total number of homicides by census tract
N_homicides = (
gpd.sjoin(homicides, tracts, op="within", how="left")
.groupby(["census_tract_id"])
.size()
.rename("num_homicides")
)
# Population numbers
pop = []
for year in range(2010, 2018):
df = gv_data.Population.get(year=year)
df["year"] = year
pop.append(df)
Y = pd.concat(pop).set_index(["census_tract_id"])
# Calculate population change
pop_change = (
Y.query("year == 2017")["total_population"]
- Y.query("year == 2010")["total_population"]
)
# Merge tracts with population change and
Y = pd.merge(
tracts,
pd.concat([pop_change, N_homicides], axis=1).reset_index(),
on="census_tract_id",
how="left",
)
Y["num_homicides"] = Y["num_homicides"].fillna(0)
# Calculate the homicide bin
Y["bins"] = pd.cut(Y["num_homicides"], [-1, 7, 15, 24, 36, 64])
# Sign of the population change
Y["Sign"] = np.where(
Y["total_population"] > 0, "Population Growth", "Population Loss"
)
return Y
def plot(fig_num, outfile):
"""
A swarm plot showing the population change from 2010 to 2017 as a
function of the number of homicides over that period.
"""
# Load the data
data = _load_data()
def get_ylabel(x):
sign = ""
if x > 0:
sign = "+"
if x < 0:
sign = "\u2212"
return sign + "{:,.0f}".format(abs(x))
with plt.style.context(default_style):
# Initialize
fig, ax = plt.subplots(
figsize=(6.4, 4.5),
gridspec_kw=dict(left=0.13, bottom=0.15, top=0.82, right=0.98),
)
ax.set_ylim(-3100, 3100)
# Plot the swarm plot
colors = sns.color_palette("RdYlGn", 7, desat=0.8).as_hex()
sns.swarmplot(
x="bins",
y="total_population",
data=data,
hue="Sign",
palette=[colors[-1], colors[0]],
alpha=1.0,
ax=ax,
size=4,
edgecolor="none",
)
# Add a line at y = 0
ax.axhline(y=0, c=palette["sidewalk"], lw=2, zorder=1)
# Format y axis
ax.set_ylabel("Population Change Since 2010", weight="bold", fontsize=11)
ax.set_yticklabels([get_ylabel(x) for x in ax.get_yticks()], fontsize=11)
# Format the x axis
ax.set_xlabel("Total Homicides Since 2010", weight="bold", fontsize=11)
ax.set_xticklabels(
["Less than 7", "7 to 15", "15 to 24", "24 to 36", "More than 36"],
fontsize=11,
)
# Add the legend
leg = ax.legend(
title="Census Tracts With:",
fontsize=10,
frameon=True,
facecolor="white",
framealpha=1,
edgecolor="none",
loc="upper right",
bbox_to_anchor=(1, 1.05),
bbox_transform=ax.transAxes,
)
title = leg.get_title()
title.set_weight("bold")
title.set_fontsize(11)
# Add title
fig.text(
0.005,
0.99,
f"Figure {fig_num}",
weight="bold",
fontsize=10,
ha="left",
va="top",
)
fig.text(
0.005,
0.96,
"Population Change and Number of Homicides since 2010 by Census Tract",
weight="bold",
fontsize=12,
ha="left",
va="top",
)
fig.text(
0.005,
0.92,
"Areas that experienced the most homicides were more likely to have seen a population decline",
fontsize=10,
ha="left",
va="top",
style="italic",
)
# Add the footnote
footnote = r"$\bf{Sources}$: American Community Survey 5-Year estimates, Police Department"
fig.text(
0.005,
0.002,
footnote,
fontsize=8,
color=palette["dark-gray"],
ha="left",
va="bottom",
)
# Save!
plt.savefig(outfile, dpi=300)
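# Hedged usage note: the figure number and output path below are placeholders.
#
#     plot(fig_num=12, outfile="population_change_vs_homicides.png")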
|
the-stack_0_22692 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import sys
import os
import shutil
import six
import click
from importlib import import_module
from rqalpha.utils.click_helper import Date
from rqalpha.utils.config import parse_config, dump_config
CONTEXT_SETTINGS = {
'default_map': {
'run': {
}
}
}
@click.group(context_settings=CONTEXT_SETTINGS)
@click.option('-v', '--verbose', count=True)
@click.help_option('-h', '--help')
@click.pass_context
def cli(ctx, verbose):
ctx.obj["VERBOSE"] = verbose
def inject_mod_commands():
from rqalpha.utils.config import get_mod_conf
from rqalpha.mod import SYSTEM_MOD_LIST
from rqalpha.utils.package_helper import import_mod
mod_config = get_mod_conf()
for mod_name, config in six.iteritems(mod_config['mod']):
if 'lib' in config:
lib_name = config["lib"]
else:
lib_name = "rqalpha_mod_{}".format(mod_name)
if not config['enabled']:
continue
try:
if mod_name in SYSTEM_MOD_LIST:
# inject system mod
import_mod("rqalpha.mod." + lib_name)
else:
# inject third part mod
import_mod(lib_name)
except Exception as e:
pass
def entry_point():
inject_mod_commands()
cli(obj={})
@cli.command()
@click.option('-d', '--data-bundle-path', default=os.path.expanduser('~/.rqalpha'), type=click.Path(file_okay=False))
@click.option('--locale', 'locale', type=click.STRING, default="zh_Hans_CN")
def update_bundle(data_bundle_path, locale):
"""
Sync Data Bundle
"""
from rqalpha import main
main.update_bundle(data_bundle_path, locale)
@cli.command()
@click.help_option('-h', '--help')
# -- Base Configuration
@click.option('-d', '--data-bundle-path', 'base__data_bundle_path', type=click.Path(exists=True))
@click.option('-f', '--strategy-file', 'base__strategy_file', type=click.Path(exists=True))
@click.option('-s', '--start-date', 'base__start_date', type=Date())
@click.option('-e', '--end-date', 'base__end_date', type=Date())
@click.option('-bm', '--benchmark', 'base__benchmark', type=click.STRING, default=None)
@click.option('-mm', '--margin-multiplier', 'base__margin_multiplier', type=click.FLOAT)
@click.option('-a', '--account', 'base__accounts', nargs=2, multiple=True, help="set account type with starting cash")
@click.option('--position', 'base__init_positions', type=click.STRING, help="set init position")
@click.option('-fq', '--frequency', 'base__frequency', type=click.Choice(['1d', '1m', 'tick']))
@click.option('-rt', '--run-type', 'base__run_type', type=click.Choice(['b', 'p', 'r']), default="b")
@click.option('-rp', '--round-price', 'base__round_price', is_flag=True)
@click.option('-mk', '--market', 'base__market', type=click.Choice(['cn', 'hk']), default=None)
@click.option('--resume', 'base__resume_mode', is_flag=True)
@click.option('--source-code', 'base__source_code')
# -- Extra Configuration
@click.option('-l', '--log-level', 'extra__log_level', type=click.Choice(['verbose', 'debug', 'info', 'error', 'none']))
@click.option('--disable-user-system-log', 'extra__user_system_log_disabled', is_flag=True, help='disable user system log stdout')
@click.option('--disable-user-log', 'extra__user_log_disabled', is_flag=True, help='disable user log stdout')
@click.option('--logger', 'extra__logger', nargs=2, multiple=True, help='config logger, e.g. --logger system_log debug')
@click.option('--locale', 'extra__locale', type=click.Choice(['cn', 'en']), default="cn")
@click.option('--extra-vars', 'extra__context_vars', type=click.STRING, help="override context vars")
@click.option("--enable-profiler", "extra__enable_profiler", is_flag=True, help="add line profiler to profile your strategy")
@click.option('--config', 'config_path', type=click.STRING, help="config file path")
# -- Mod Configuration
@click.option('-mc', '--mod-config', 'mod_configs', nargs=2, multiple=True, type=click.STRING, help="mod extra config")
def run(**kwargs):
"""
Start to run a strategy
"""
config_path = kwargs.get('config_path', None)
if config_path is not None:
config_path = os.path.abspath(config_path)
kwargs.pop('config_path')
if not kwargs.get('base__securities', None):
kwargs.pop('base__securities', None)
from rqalpha import main
source_code = kwargs.get("base__source_code")
cfg = parse_config(kwargs, config_path=config_path, click_type=True, source_code=source_code)
source_code = cfg.base.source_code
results = main.run(cfg, source_code=source_code)
# store results into ipython when running in ipython
from rqalpha.utils import is_run_from_ipython
if results is not None and is_run_from_ipython():
import IPython
from rqalpha.utils import RqAttrDict
ipy = IPython.get_ipython()
report = results.get("sys_analyser", {})
ipy.user_global_ns["results"] = results
ipy.user_global_ns["report"] = RqAttrDict(report)
if results is None:
sys.exit(1)
@cli.command()
@click.option('-d', '--directory', default="./", type=click.Path(), required=True)
def examples(directory):
"""
Generate example strategies to target folder
"""
source_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "examples")
try:
shutil.copytree(source_dir, os.path.join(directory, "examples"))
except OSError as e:
if e.errno == errno.EEXIST:
six.print_("Folder examples is exists.")
@cli.command()
@click.option('-v', '--verbose', is_flag=True)
def version(**kwargs):
"""
Output Version Info
"""
from rqalpha import version_info
six.print_("Current Version: ", version_info)
@cli.command()
@click.option('-d', '--directory', default="./", type=click.Path(), required=True)
def generate_config(directory):
"""
Generate default config file
"""
default_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.yml")
target_config_path = os.path.abspath(os.path.join(directory, 'config.yml'))
shutil.copy(default_config, target_config_path)
six.print_("Config file has been generated in", target_config_path)
# For Mod Cli
@cli.command(context_settings=dict(
ignore_unknown_options=True,
))
@click.help_option('-h', '--help')
@click.argument('cmd', nargs=1, type=click.Choice(['list', 'enable', 'disable', 'install', 'uninstall']))
@click.argument('params', nargs=-1)
def mod(cmd, params):
"""
Mod management command
rqalpha mod list \n
rqalpha mod install xxx \n
rqalpha mod uninstall xxx \n
rqalpha mod enable xxx \n
rqalpha mod disable xxx \n
"""
def list(params):
"""
List all mod configuration
"""
from tabulate import tabulate
from rqalpha.utils.config import get_mod_conf
mod_config = get_mod_conf()
table = []
for mod_name, mod in six.iteritems(mod_config['mod']):
table.append([
mod_name,
("enabled" if mod['enabled'] else "disabled")
])
headers = [
"name",
"status"
]
six.print_(tabulate(table, headers=headers, tablefmt="psql"))
six.print_("You can use `rqalpha mod list/install/uninstall/enable/disable` to manage your mods")
def install(params):
"""
Install third-party Mod
"""
try:
from pip._internal import main as pip_main
from pip._internal.commands.install import InstallCommand
except ImportError:
from pip import main as pip_main
from pip.commands.install import InstallCommand
params = [param for param in params]
options, mod_list = InstallCommand().parse_args(params)
mod_list = [mod_name for mod_name in mod_list if mod_name != "."]
params = ["install"] + params
for mod_name in mod_list:
mod_name_index = params.index(mod_name)
if mod_name.startswith("rqalpha_mod_sys_"):
six.print_('System Mod can not be installed or uninstalled')
return
if "rqalpha_mod_" in mod_name:
lib_name = mod_name
else:
lib_name = "rqalpha_mod_" + mod_name
params[mod_name_index] = lib_name
# Install Mod
installed_result = pip_main(params)
# Export config
from rqalpha.utils.config import load_yaml, user_mod_conf_path
user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
if installed_result == 0:
# A return code of 0 means the installation succeeded
if len(mod_list) == 0:
"""
This branch mainly supports local Mod debugging via `pip install -e .`; the following conditions must be met:
1. The `rqalpha mod install -e .` command is run from the root directory of the custom Mod
2. The Mod must contain a `setup.py` file (otherwise `pip install -e .` could not install it either)
3. The Mod package must be named according to the RQAlpha convention:
* the package name must start with `rqalpha-mod-`, e.g. `rqalpha-mod-xxx-yyy`
* the importable library name must start with `rqalpha_mod_` and match the rest of the package name with `-` replaced by `_`, e.g. `rqalpha_mod_xxx_yyy`
"""
mod_name = _detect_package_name_from_dir(params)
mod_name = mod_name.replace("-", "_").replace("rqalpha_mod_", "")
mod_list.append(mod_name)
for mod_name in mod_list:
if "rqalpha_mod_" in mod_name:
mod_name = mod_name.replace("rqalpha_mod_", "")
if "==" in mod_name:
mod_name = mod_name.split('==')[0]
user_conf['mod'][mod_name] = {}
user_conf['mod'][mod_name]['enabled'] = False
dump_config(user_mod_conf_path(), user_conf)
return installed_result
def uninstall(params):
"""
Uninstall third-party Mod
"""
try:
from pip._internal import main as pip_main
from pip._internal.commands.uninstall import UninstallCommand
except ImportError:
# be compatible with pip < 10.0
from pip import main as pip_main
from pip.commands.uninstall import UninstallCommand
params = [param for param in params]
options, mod_list = UninstallCommand().parse_args(params)
params = ["uninstall"] + params
for mod_name in mod_list:
mod_name_index = params.index(mod_name)
if mod_name.startswith("rqalpha_mod_sys_"):
six.print_('System Mod can not be installed or uninstalled')
return
if "rqalpha_mod_" in mod_name:
lib_name = mod_name
else:
lib_name = "rqalpha_mod_" + mod_name
params[mod_name_index] = lib_name
# Uninstall Mod
uninstalled_result = pip_main(params)
# Remove Mod Config
from rqalpha.utils.config import user_mod_conf_path, load_yaml
user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
for mod_name in mod_list:
if "rqalpha_mod_" in mod_name:
mod_name = mod_name.replace("rqalpha_mod_", "")
del user_conf['mod'][mod_name]
dump_config(user_mod_conf_path(), user_conf)
return uninstalled_result
def enable(params):
"""
enable mod
"""
mod_name = params[0]
if "rqalpha_mod_" in mod_name:
mod_name = mod_name.replace("rqalpha_mod_", "")
# check whether is installed
module_name = "rqalpha_mod_" + mod_name
if module_name.startswith("rqalpha_mod_sys_"):
module_name = "rqalpha.mod." + module_name
try:
import_module(module_name)
except ImportError:
installed_result = install([module_name])
if installed_result != 0:
return
from rqalpha.utils.config import user_mod_conf_path, load_yaml
user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
try:
user_conf['mod'][mod_name]['enabled'] = True
except KeyError:
user_conf['mod'][mod_name] = {'enabled': True}
dump_config(user_mod_conf_path(), user_conf)
def disable(params):
"""
disable mod
"""
mod_name = params[0]
if "rqalpha_mod_" in mod_name:
mod_name = mod_name.replace("rqalpha_mod_", "")
from rqalpha.utils.config import user_mod_conf_path, load_yaml
user_conf = load_yaml(user_mod_conf_path()) if os.path.exists(user_mod_conf_path()) else {'mod': {}}
try:
user_conf['mod'][mod_name]['enabled'] = False
except KeyError:
user_conf['mod'][mod_name] = {'enabled': False}
dump_config(user_mod_conf_path(), user_conf)
locals()[cmd](params)
def _detect_package_name_from_dir(params):
setup_path = os.path.join(os.path.abspath(params[-1]), 'setup.py')
if not os.path.exists(setup_path):
return None
return os.path.split(os.path.dirname(setup_path))[1]
if __name__ == '__main__':
entry_point()
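# Hedged CLI usage examples: the strategy path, dates and benchmark are placeholders;
# every option shown is declared by the click decorators above.
#
#     rqalpha update_bundle
#     rqalpha run -f strategy.py -s 2016-01-01 -e 2016-12-31 \
#         --account stock 100000 --benchmark 000300.XSHG
#     rqalpha mod list
#     rqalpha mod enable sys_analyser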
|
the-stack_0_22693 | from django.conf import settings
class PayPalSettingsError(Exception):
"""Raised when settings be bad."""
TEST = getattr(settings, "PAYPAL_TEST", True)
RECEIVER_EMAIL = settings.PAYPAL_RECEIVER_EMAIL
# API Endpoints.
POSTBACK_ENDPOINT = "https://www.paypal.com/cgi-bin/webscr"
SANDBOX_POSTBACK_ENDPOINT = "https://www.sandbox.paypal.com/cgi-bin/webscr"
# Images
IMAGE = getattr(settings, "PAYPAL_IMAGE", "http://images.paypal.com/images/x-click-but01.gif")
SUBSCRIPTION_IMAGE = "https://www.paypal.com/en_US/i/btn/btn_subscribeCC_LG.gif"
SANDBOX_IMAGE = getattr(settings, "PAYPAL_SANDBOX_IMAGE", "https://www.sandbox.paypal.com/en_US/i/btn/btn_buynowCC_LG.gif")
SUBSCRIPTION_SANDBOX_IMAGE = "https://www.sandbox.paypal.com/en_US/i/btn/btn_subscribeCC_LG.gif"
|
the-stack_0_22694 | # -*- coding: utf-8 -*-
"""Main module."""
import re
import numpy as np
import matplotlib.pyplot as plt
def reverberation_time_energy_decay_curve(
energy_decay_curve,
times,
T='T20',
normalize=True,
plot=False):
"""Estimate the reverberation time from a given energy decay curve according
to the ISO standard 3382 _[1].
Parameters
----------
energy_decay_curve : ndarray, double
Energy decay curve. The time needs to be the arrays last dimension.
times : ndarray, double
Time vector corresponding to each sample of the EDC.
T : 'T20', 'T30', 'T40', 'T50', 'T60', 'EDT', 'LDT'
Decay interval to be used for the reverberation time extrapolation. EDT
corresponds to the early decay time extrapolated from the interval
[0, -10] dB, LDT corresponds to the late decay time extrapolated from
the interval [-25, -35] dB.
normalize : bool, True
Normalize the EDC to the steady state energy level
plot : bool, False
Plot the estimated extrapolation line for visual inspection of the
results.
Returns
-------
reverberation_time : double
The reverberation time
References
----------
.. [1] ISO 3382, Acoustics - Measurement of the reverberation time of
rooms with reference to other acoustical parameters.
"""
intervals = [20, 30, 40, 50, 60]
if T == 'EDT':
upper = -0.1
lower = -10.1
elif T == 'LDT':
upper = -25.
lower = -35.
else:
        try:
            interval = int(re.findall(r'\d+', T)[0])
        except IndexError:
            raise ValueError(
                "{} is not a valid interval for the regression.".format(T))
        if interval not in intervals:
            raise ValueError(
                "{} is not a valid interval for the regression.".format(T))
        upper = -5
        lower = -np.double(interval) + upper
if normalize:
energy_decay_curve /= energy_decay_curve[0]
edc_db = 10*np.log10(np.abs(energy_decay_curve))
idx_upper = np.nanargmin(np.abs(upper - edc_db))
idx_lower = np.nanargmin(np.abs(lower - edc_db))
A = np.vstack(
[times[idx_upper:idx_lower], np.ones(idx_lower - idx_upper)]).T
gradient, const = np.linalg.lstsq(
A, edc_db[..., idx_upper:idx_lower], rcond=None)[0]
reverberation_time = -60 / gradient
if plot:
plt.figure()
plt.plot(
times,
edc_db,
label='edc')
plt.plot(
times,
times * gradient + const,
label='regression',
linestyle='-.')
ax = plt.gca()
ax.set_ylim((-95, 5))
reverberation_time = -60 / gradient
ax.set_xlim((-0.05, 2*reverberation_time))
plt.grid(True)
plt.legend()
ax.set_ylabel('EDC [dB]')
ax.set_xlabel('Time [s]')
return reverberation_time
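# Usage sketch (illustrative, not part of the original module): estimate T20
# from a synthetic, perfectly exponential energy decay constructed to have a
# reverberation time of 1 s.
def _example_reverberation_time():
    fs = 1000
    times = np.arange(0, 2 * fs) / fs
    edc = 10.0 ** (-6.0 * times)   # 10*log10(edc) falls 60 dB per second -> T60 = 1 s
    return reverberation_time_energy_decay_curve(edc, times, T='T20')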
def schroeder_integration(impulse_response, is_energy=False):
"""Calculate the Schroeder integral of a room impulse response _[3]. The
result is the energy decay curve for the given room impulse response.
.. math:
\\langle e^2(t) \\rangle = N\\cdot \\int_{t}^{\\infty} h^2(\\tau) \\mathrm{d} \\tau
Parameters
----------
impulse_response : ndarray, double
Room impulse response as array
is_energy : boolean, optional
Whether the input represents energy data or sound pressure values.
Returns
-------
energy_decay_curve : ndarray, double
The energy decay curve
Reference
---------
.. [3] M. R. Schroeder, “New Method of Measuring Reverberation Time,”
The Journal of the Acoustical Society of America, vol. 37, no. 6,
pp. 1187–1187, 1965.
"""
if not is_energy:
data = np.abs(impulse_response)**2
else:
data = impulse_response.copy()
ndim = data.ndim
data = np.atleast_2d(data)
energy_decay_curve = np.fliplr(np.nancumsum(np.fliplr(data), axis=-1))
if ndim < energy_decay_curve.ndim:
energy_decay_curve = np.squeeze(energy_decay_curve)
return energy_decay_curve
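# Usage sketch (illustrative, not part of the original module): backward
# integrate a synthetic exponentially decaying noise "impulse response" and
# feed the resulting EDC into the reverberation time estimator above.
def _example_schroeder_pipeline():
    fs = 1000
    times = np.arange(0, 2 * fs) / fs
    rir = np.random.default_rng(0).standard_normal(times.size) * np.exp(-3.0 * times)
    edc = schroeder_integration(rir)
    return reverberation_time_energy_decay_curve(edc, times, T='T30')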
def energy_decay_curve_analytic(
surfaces, alphas, volume, times, source=None,
receiver=None, method='eyring', c=343.4, frequency=None,
air_absorption=True):
"""Calculate the energy decay curve analytically by using Eyring's or
Sabine's equation _[2].
Parameters
----------
surfaces : ndarray, double
Surface areas of all surfaces in the room
alphas : ndarray, double
Absorption coefficients corresponding to each surface
volume : double
Room volume
times : ndarray, double
Time vector for which the decay curve is calculated
source : Coordinates
Coordinate object with the source coordinates
receiver : Coordinates
Coordinate object with the receiver coordinates
method : 'eyring', 'sabine'
Use either Eyring's or Sabine's equation
c : double
Speed of sound
frequency : double, optional
Center frequency of the respective octave band. This is only used for
the air absorption calculation.
Returns
-------
energy_decay_curve : ndarray, double
The energy decay curve
References
----------
.. [2] H. Kuttruff, Room acoustics, 4th Ed. Taylor & Francis, 2009.
"""
alphas = np.asarray(alphas)
surfaces = np.asarray(surfaces)
surface_room = np.sum(surfaces)
alpha_mean = np.sum(surfaces*alphas) / surface_room
if air_absorption:
m = air_attenuation_coefficient(frequency)
else:
m = 0
if all([source, receiver]):
dist_source_receiver = np.linalg.norm(
source.cartesian - receiver.cartesian)
delay_direct = dist_source_receiver / c
else:
delay_direct = 0
if method == 'eyring':
energy_decay_curve = np.exp(
-c*(times - delay_direct) *
((-surface_room * np.log(1 - alpha_mean) / 4 / volume) + m))
elif method == 'sabine':
energy_decay_curve = np.exp(
-c*(times - delay_direct) *
((surface_room * alpha_mean / 4 / volume) + m))
else:
raise ValueError("The method has to be either 'eyring' or 'sabine'.")
return energy_decay_curve
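# Usage sketch (illustrative, not part of the original module): analytic EDC
# for a small shoebox-like room via Eyring's equation; air absorption is
# disabled so that no octave-band centre frequency has to be supplied.
def _example_analytic_edc():
    surfaces = [20.0, 20.0, 15.0, 15.0, 12.0, 12.0]   # m^2, hypothetical room
    alphas = [0.1, 0.1, 0.2, 0.2, 0.3, 0.3]
    volume = 60.0                                     # m^3
    times = np.linspace(0, 1, 1000)
    return energy_decay_curve_analytic(
        surfaces, alphas, volume, times, method='eyring', air_absorption=False)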
def air_attenuation_coefficient(
frequency,
temperature=20,
humidity=50,
atmospheric_pressure=101325):
"""Calculate the attenuation coefficient m for the absorption caused
by friction with the surrounding air.
Parameters
----------
frequency : double
The frequency for which the attenuation coefficient is calculated.
When processing in fractional octave bands use the center frequency.
temperature : double
Temperature in degrees Celsius.
humidity : double
Humidity in percent.
atmospheric_pressure : double
Atmospheric pressure.
Returns
-------
attenuation_coefficient : double
The resulting attenuation coefficient.
"""
roomTemperatureKelvin = temperature + 273.16
referencePressureKPa = 101.325
pressureKPa = atmospheric_pressure/1000.0
# determine molar concentration of water vapor
tmp = (( 10.79586 * (1.0 - (273.16/roomTemperatureKelvin) )) -
(5.02808 * np.log10((roomTemperatureKelvin/273.16)) ) +
(1.50474 * 0.0001 * (1.0 - 10.0 ** (-8.29692*((roomTemperatureKelvin/273.16) - 1.0)))) +
(0.42873 * 0.001 * (-1.0 + 10.0 ** (-4.76955*(1.0 - (273.16/roomTemperatureKelvin))))) - 2.2195983)
molarConcentrationWaterVaporPercent = (humidity * 10.0 ** tmp) / (pressureKPa/referencePressureKPa)
# determine relaxation frequencies of oxygen and nitrogen
relaxationFrequencyOxygen = ((pressureKPa/referencePressureKPa) *
(24.0 + (4.04 * 10000.0 * molarConcentrationWaterVaporPercent *
((0.02 + molarConcentrationWaterVaporPercent) / (0.391 + molarConcentrationWaterVaporPercent)))))
relaxationFrequencyNitrogen = ((pressureKPa/referencePressureKPa) *
((roomTemperatureKelvin / 293.16) ** (-0.5)) *
(9.0 + 280.0 * molarConcentrationWaterVaporPercent *
np.exp(-4.17 * (( (roomTemperatureKelvin / 293.16) ** (-0.3333333)) - 1.0))))
airAbsorptionCoeff = (((frequency**2) *
((1.84 * 10.0**(-11.0) * (referencePressureKPa / pressureKPa) * (roomTemperatureKelvin/293.16)**0.5) +
((roomTemperatureKelvin/293.16)**(-2.5) * (
((1.278 * 0.01 * np.exp( (-2239.1/roomTemperatureKelvin))) /
(relaxationFrequencyOxygen + ((frequency**2)/relaxationFrequencyOxygen))) +
((1.068 * 0.1 * np.exp((-3352.0/roomTemperatureKelvin))/
(relaxationFrequencyNitrogen + ((frequency**2)/relaxationFrequencyNitrogen)))))))
)* (20.0 / np.log(10.0)) / ((np.log10(np.exp(1.0))) * 10.0)) # Neper/m -> dB/m
return airAbsorptionCoeff
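# Usage sketch (illustrative, not part of the original module): attenuation
# coefficient at 1 kHz for the default atmospheric conditions defined above.
def _example_air_attenuation():
    return air_attenuation_coefficient(1000)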
|
the-stack_0_22698 | # Lint as: python3
r"""Code example for a custom model, using PyTorch.
This demo shows how to use a custom model with LIT, in just a few lines of code.
We'll use a transformers model, with a minimal amount of code to implement the
LIT API. Compared to models/glue_models.py, this has fewer features, but the
code is more readable.
This demo is equivalent in functionality to simple_tf2_demo.py, but uses PyTorch
instead of TensorFlow 2. The models behave identically as far as LIT is
concerned, and the implementation is quite similar - to see changes, run:
git diff --no-index simple_tf2_demo.py simple_pytorch_demo.py
The transformers library can load weights from either,
so you can use any saved model compatible with the underlying model class
(AutoModelForSequenceClassification). To train something for this demo, you can:
- Use quickstart_sst_demo.py, and set --model_path to somewhere durable
- Or: Use tools/glue_trainer.py
- Or: Use any fine-tuning code that works with transformers, such as
https://github.com/huggingface/transformers#quick-tour-of-the-fine-tuningusage-scripts
To run locally:
python -m lit_nlp.examples.simple_pytorch_demo \
--port=5432 --model_path=/path/to/saved/model
Then navigate to localhost:5432 to access the demo UI.
NOTE: this demo still uses TensorFlow Datasets (which depends on TensorFlow) to
load the data. However, the output of glue.SST2Data is just NumPy arrays and
plain Python data, and you can easily replace this with a different library or
directly loading from CSV.
"""
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
# Use the regular GLUE data loaders, because these are very simple already.
from lit_nlp.examples.datasets import glue
from lit_nlp.lib import utils
import torch
import transformers
# NOTE: additional flags defined in server_flags.py
FLAGS = flags.FLAGS
flags.DEFINE_string(
"model_path", None,
"Path to trained model, in standard transformers format, e.g. as "
"saved by model.save_pretrained() and tokenizer.save_pretrained()")
def _from_pretrained(cls, *args, **kw):
"""Load a transformers model in PyTorch, with fallback to TF2/Keras weights."""
try:
return cls.from_pretrained(*args, **kw)
except OSError as e:
logging.warning("Caught OSError loading model: %s", e)
logging.warning(
"Re-trying to convert from TensorFlow checkpoint (from_tf=True)")
return cls.from_pretrained(*args, from_tf=True, **kw)
class SimpleSentimentModel(lit_model.Model):
"""Simple sentiment analysis model."""
LABELS = ["0", "1"] # negative, positive
def __init__(self, model_name_or_path):
self.tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path)
model_config = transformers.AutoConfig.from_pretrained(
model_name_or_path,
num_labels=2,
output_hidden_states=True,
output_attentions=True,
)
# This is a just a regular PyTorch model.
self.model = _from_pretrained(
transformers.AutoModelForSequenceClassification,
model_name_or_path,
config=model_config)
self.model.eval()
##
# LIT API implementation
def max_minibatch_size(self):
# This tells lit_model.Model.predict() how to batch inputs to
# predict_minibatch().
# Alternately, you can just override predict() and handle batching yourself.
return 32
def predict_minibatch(self, inputs):
# Preprocess to ids and masks, and make the input batch.
encoded_input = self.tokenizer.batch_encode_plus(
[ex["sentence"] for ex in inputs],
return_tensors="pt",
add_special_tokens=True,
max_length=128,
pad_to_max_length=True)
# Check and send to cuda (GPU) if available
if torch.cuda.is_available():
self.model.cuda()
for tensor in encoded_input:
encoded_input[tensor] = encoded_input[tensor].cuda()
# Run a forward pass.
with torch.no_grad(): # remove this if you need gradients.
logits, embs, unused_attentions = self.model(**encoded_input)
# Post-process outputs.
batched_outputs = {
"probas": torch.nn.functional.softmax(logits, dim=-1),
"input_ids": encoded_input["input_ids"],
"ntok": torch.sum(encoded_input["attention_mask"], dim=1),
"cls_emb": embs[-1][:, 0], # last layer, first token
}
# Return as NumPy for further processing.
detached_outputs = {k: v.numpy() for k, v in batched_outputs.items()}
# Unbatch outputs so we get one record per input example.
for output in utils.unbatch_preds(detached_outputs):
ntok = output.pop("ntok")
output["tokens"] = self.tokenizer.convert_ids_to_tokens(
output.pop("input_ids")[1:ntok - 1])
yield output
def input_spec(self) -> lit_types.Spec:
return {
"sentence": lit_types.TextSegment(),
"label": lit_types.CategoryLabel(vocab=self.LABELS, required=False)
}
def output_spec(self) -> lit_types.Spec:
return {
"tokens": lit_types.Tokens(),
"probas": lit_types.MulticlassPreds(parent="label", vocab=self.LABELS),
"cls_emb": lit_types.Embeddings()
}
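# Minimal usage sketch (illustrative, not part of the original demo). The model
# path is hypothetical and must point to a trained checkpoint saved in the
# standard transformers format:
def _example_single_prediction(model_path="/tmp/sst2_model"):
  model = SimpleSentimentModel(model_path)
  preds = list(model.predict([{"sentence": "a great movie"}]))
  return preds[0]["probas"]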
def main(_):
# Load the model we defined above.
models = {"sst": SimpleSentimentModel(FLAGS.model_path)}
# Load SST-2 validation set from TFDS.
datasets = {"sst_dev": glue.SST2Data("validation")}
# Start the LIT server. See server_flags.py for server options.
lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
lit_demo.serve()
if __name__ == "__main__":
app.run(main)
|
the-stack_0_22699 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
def xyline(x, y):
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
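# Note: `segments = xyline(x, y)` would be equivalent to the manual
# points/segments reshaping performed below.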
x = np.linspace(0, 3 * np.pi, 500)
y = np.sin(x)
dydx = np.cos(0.5 * (x[:-1] + x[1:])) # first derivative
g = np.linspace(0.1, 0.7, 10)
colors = np.array([0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.2, 0.3, 0.4])
rs = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.7, 1.0, 1.5, 2.0])
# Create a set of line segments so that we can color them individually
# This creates the points as a N x 1 x 2 array so that we can stack points
# together easily to get the segments. The segments array for line collection
# needs to be (numlines) x (points per line) x 2 (for x and y)
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
points = np.array([rs, g]).T.reshape(-1, 1, 2)
s2 = np.concatenate([points[:-1], points[1:]], axis=1)
fig = plt.figure()
ax = fig.add_subplot(111)
norm = plt.Normalize(dydx.min(), dydx.max())
lc = LineCollection(segments, cmap='Reds', norm=norm)
lc.set_array(dydx)
lc.set_linewidth(2)
line = ax.add_collection(lc)
lc = LineCollection(s2, cmap='Reds', norm=norm)
lc.set_array(colors)
lc.set_linewidth(2)
ll2 = ax.add_collection(lc)
fig.colorbar(ll2, ax=ax)
ax.set_xlim(-0.2,6)
ax.set_ylim(g.min() - 1, g.max() + 1)
'''
# Create a continuous norm to map from data points to colors
norm = plt.Normalize(dydx.min(), dydx.max())
lc = LineCollection(segments, cmap='viridis', norm=norm)
# Set the values used for colormapping
lc.set_array(dydx)
lc.set_linewidth(2)
line = axs[0].add_collection(lc)
fig.colorbar(line, ax=axs[0])
# Use a boundary norm instead
cmap = ListedColormap(['r', 'g', 'b'])
norm = BoundaryNorm([-1, -0.5, 0.5, 1], cmap.N)
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(dydx)
lc.set_linewidth(2)
line = axs[1].add_collection(lc)
fig.colorbar(line, ax=axs[1])
'''
|
the-stack_0_22701 | import warnings
from sqlalchemy.sql.elements import ClauseElement
from sqlalchemy.sql.base import ImmutableColumnCollection
from sqlalchemy import sql
from ..translate import (
SqlColumn, SqlColumnAgg,
win_cumul, AggOver, CumlOver, RankOver, warn_arg_default, win_absent
)
from ..dialects.sqlite import SqliteColumn
from ..dialects.mysql import MysqlColumn
from ..dialects.bigquery import BigqueryColumn
from siuba.dply.vector import (
#cumall, cumany, cummean,
desc,
dense_rank, percent_rank, min_rank, cume_dist,
row_number,
#ntile,
between,
coalesce,
lead, lag,
n,
n_distinct,
na_if,
#near,
nth, first, last
)
from ..translate import SiubaSqlRuntimeWarning
warnings.simplefilter('once', SiubaSqlRuntimeWarning)
# desc ------------------------------------------------------------------------
@desc.register(ClauseElement)
def _desc_sql(x) -> ClauseElement:
"""
Example:
>>> print(desc(sql.column('a')))
a DESC
"""
return x.desc()
# ranking functions -----------------------------------------------------------
# note: here we don't use the decorator syntax, but maybe we should for
# consistency
# TODO: remove repetition in rank definitions
def _sql_rank_over(rank_func, col, partition, nulls_last):
# partitioning ensures aggregates that use total length are correct,
# e.g. percent rank, cume_dist and friends, by separating NULLs into their
# own partition
over_clause = RankOver(
rank_func(),
order_by = col if not nulls_last else col.nullslast(),
partition_by = col.isnot(None) if partition else None
)
return sql.case({col.isnot(None): over_clause})
def _sql_rank(func_name, partition = False, nulls_last = False):
rank_func = getattr(sql.func, func_name)
def f(col, na_option = None) -> RankOver:
if na_option == "keep":
return _sql_rank_over(rank_func, col, partition, nulls_last)
warn_arg_default(func_name, 'na_option', None, "keep")
return RankOver(rank_func(), order_by = col)
return f
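# Illustrative note: the registrations below produce translators such that,
# e.g., dense_rank(sql.column('a')) renders roughly as
# "dense_rank() OVER (ORDER BY a)", while passing na_option="keep" wraps the
# window call in a CASE expression so that NULL inputs stay unranked.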
dense_rank .register(ClauseElement, _sql_rank("dense_rank"))
percent_rank.register(ClauseElement, _sql_rank("percent_rank"))
cume_dist .register(ClauseElement, _sql_rank("cume_dist", partition = True))
min_rank .register(ClauseElement, _sql_rank("rank", partition = True))
dense_rank .register(SqliteColumn, win_absent("DENSE_RANK"))
percent_rank.register(SqliteColumn, win_absent("PERCENT_RANK"))
cume_dist .register(SqliteColumn, win_absent("CUME_DIST"))
min_rank .register(SqliteColumn, win_absent("MIN_RANK"))
# partition everything, since MySQL puts NULLs first
# see: https://stackoverflow.com/q/1498648/1144523
dense_rank .register(MysqlColumn, _sql_rank("dense_rank", partition = True))
percent_rank.register(MysqlColumn, _sql_rank("percent_rank", partition = True))
cume_dist .register(MysqlColumn, _sql_rank("cume_dist", partition = True))
min_rank .register(MysqlColumn, _sql_rank("rank", partition = True))
# partition everything, since MySQL puts NULLs first
# see: https://stackoverflow.com/q/1498648/1144523
dense_rank .register(BigqueryColumn, _sql_rank("dense_rank", nulls_last = True))
percent_rank.register(BigqueryColumn, _sql_rank("percent_rank", nulls_last = True))
# row_number ------------------------------------------------------------------
@row_number.register(ClauseElement)
def _row_number_sql(col) -> CumlOver:
"""
Example:
>>> print(row_number(sql.column('a')))
row_number() OVER ()
"""
return CumlOver(sql.func.row_number())
row_number.register(SqliteColumn, win_absent("ROW_NUMBER"))
# between ---------------------------------------------------------------------
@between.register(ClauseElement)
def _between_sql(x, left, right, default = None) -> ClauseElement:
"""
Example:
>>> print(between(sql.column('a'), 1, 2))
a BETWEEN :a_1 AND :a_2
>>> print(between(sql.column('a'), 1, 2, default = False))
coalesce(a BETWEEN :a_1 AND :a_2, :coalesce_1)
"""
if default is not False:
# TODO: warn
pass
if default is None:
return x.between(left, right)
return sql.functions.coalesce(x.between(left, right), default)
# coalesce --------------------------------------------------------------------
@coalesce.register(ClauseElement)
def _coalesce_sql(x, *args) -> ClauseElement:
"""
Example:
>>> print(coalesce(sql.column('a'), sql.column('b')))
coalesce(a, b)
>>> coalesce(1, sql.column('a'))
Traceback (most recent call last):
...
TypeError: ...
"""
return sql.functions.coalesce(x, *args)
# lead and lag ----------------------------------------------------------------
@lead.register(ClauseElement)
def _lead_sql(x, n = 1, default = None) -> ClauseElement:
"""
Example:
>>> print(lead(sql.column('a'), 2, 99))
lead(a, :lead_1, :lead_2) OVER ()
"""
f = win_cumul("lead", rows=None)
return f(x, n, default)
@lag.register(ClauseElement)
def _lag_sql(x, n = 1, default = None) -> ClauseElement:
"""
Example:
>>> print(lag(sql.column('a'), 2, 99))
lag(a, :lag_1, :lag_2) OVER ()
"""
f = win_cumul("lag", rows=None)
return f(x, n , default)
# n ---------------------------------------------------------------------------
@n.register(ClauseElement)
@n.register(ImmutableColumnCollection)
def _n_sql(x) -> ClauseElement:
"""
Example:
>>> print(n(sql.column('a')))
count(*) OVER ()
"""
return AggOver(sql.func.count())
@n.register(SqlColumnAgg)
def _n_sql_agg(x) -> ClauseElement:
"""
Example:
>>> from siuba.sql.translate import SqlColumnAgg
>>> print(n(SqlColumnAgg('x')))
count(*)
"""
return sql.func.count()
n.register(SqliteColumn, win_absent("N"))
row_number.register(SqliteColumn, win_absent("ROW_NUMBER"))
# n_distinct ------------------------------------------------------------------
@n_distinct.register(ClauseElement)
def _n_distinct_sql(x) -> ClauseElement:
"""
Example:
>>> print(n_distinct(sql.column('a')) )
count(distinct(a))
"""
return sql.func.count(sql.func.distinct(x))
# na_if -----------------------------------------------------------------------
@na_if.register(ClauseElement)
def _na_if_sql(x, y) -> ClauseElement:
"""
Example:
>>> print(na_if(sql.column('x'), 2))
nullif(x, :nullif_1)
"""
return sql.func.nullif(x, y)
# nth, first, last ------------------------------------------------------------
# note: first and last wrap around nth, so are not dispatchers.
# this may need to change this in the future, since this means they won't
# show their own name, when you print, e.g. first(_.x)
@nth.register(ClauseElement)
def _nth_sql(x, n, order_by = None, default = None) -> ClauseElement:
if default is not None:
raise NotImplementedError("default argument not implemented")
if n < 0 and order_by is None:
raise NotImplementedError(
"must explicitly pass order_by when using last or nth with "
"n < 0 in SQL."
)
if n < 0:
# e.g. -1 in python is 0, -2 is 1.
n = abs(n + 1)
order_by = order_by.desc()
# note the adjustment for 1-based index in SQL
return CumlOver(
sql.func.nth_value(x, n + 1),
order_by = order_by,
rows = (None, None)
)
@nth.register(SqlColumnAgg)
def _nth_sql_agg(x, n, order_by = None, default = None) -> ClauseElement:
raise NotImplementedError("nth, first, and last not available in summarize")
|
the-stack_0_22702 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import jsonschema
import st2common
import st2tests
from st2common.bootstrap.policiesregistrar import PolicyRegistrar
from st2common.bootstrap.policiesregistrar import register_policy_types
from st2common.bootstrap.policiesregistrar import register_policies
import st2common.bootstrap.policiesregistrar as policies_registrar
from st2common.persistence.policy import Policy
from st2common.persistence.policy import PolicyType
from st2tests.base import CleanDbTestCase
from st2tests.fixturesloader import get_fixtures_packs_base_path
__all__ = [
'PoliciesRegistrarTestCase'
]
class PoliciesRegistrarTestCase(CleanDbTestCase):
def setUp(self):
super(PoliciesRegistrarTestCase, self).setUp()
# Register common policy types
register_policy_types(st2common)
def test_register_policy_types(self):
self.assertEqual(register_policy_types(st2tests), 2)
type1 = PolicyType.get_by_ref('action.concurrency')
self.assertEqual(type1.name, 'concurrency')
self.assertEqual(type1.resource_type, 'action')
type2 = PolicyType.get_by_ref('action.mock_policy_error')
self.assertEqual(type2.name, 'mock_policy_error')
self.assertEqual(type2.resource_type, 'action')
def test_register_all_policies(self):
policies_dbs = Policy.get_all()
self.assertEqual(len(policies_dbs), 0)
packs_base_path = get_fixtures_packs_base_path()
count = policies_registrar.register_policies(packs_base_paths=[packs_base_path])
# Verify PolicyDB objects have been created
policies_dbs = Policy.get_all()
policies = {
policies_db.name: {
'pack': policies_db.pack,
'type': policies_db.policy_type,
'parameters': policies_db.parameters
}
for policies_db in policies_dbs
}
expected_policies = {
'test_policy_1': {
'pack': 'dummy_pack_1',
'type': 'action.concurrency',
'parameters': {
'action': 'delay',
'threshold': 3
}
},
'test_policy_3': {
'pack': 'dummy_pack_1',
'type': 'action.retry',
'parameters': {
'retry_on': 'timeout',
'max_retry_count': 5
}
},
'cancel_on_concurrency': {
'pack': 'mistral_tests',
'type': 'action.concurrency',
'parameters': {
'action': 'cancel',
'threshold': 3
}
},
'cancel_on_concurrency_by_attr': {
'pack': 'mistral_tests',
'type': 'action.concurrency.attr',
'parameters': {
'action': 'cancel',
'threshold': 1,
'attributes': ['friend']
}
},
'sequential.retry_on_failure': {
'pack': 'orquesta_tests',
'type': 'action.retry',
'parameters': {
'retry_on': 'failure',
'max_retry_count': 1
}
}
}
self.assertEqual(len(expected_policies), count)
self.assertEqual(len(expected_policies), len(policies_dbs))
self.assertDictEqual(expected_policies, policies)
def test_register_policies_from_pack(self):
pack_dir = os.path.join(get_fixtures_packs_base_path(), 'dummy_pack_1')
self.assertEqual(register_policies(pack_dir=pack_dir), 2)
p1 = Policy.get_by_ref('dummy_pack_1.test_policy_1')
self.assertEqual(p1.name, 'test_policy_1')
self.assertEqual(p1.pack, 'dummy_pack_1')
self.assertEqual(p1.resource_ref, 'dummy_pack_1.local')
self.assertEqual(p1.policy_type, 'action.concurrency')
# Verify that a default value for parameter "action" which isn't provided in the file is set
self.assertEqual(p1.parameters['action'], 'delay')
p2 = Policy.get_by_ref('dummy_pack_1.test_policy_2')
self.assertEqual(p2, None)
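        # For reference (reconstructed from the assertions above, not copied
        # from the fixture pack), dummy_pack_1/policies/policy_1.yaml presumably
        # resembles:
        #
        #   name: test_policy_1
        #   policy_type: action.concurrency
        #   resource_ref: dummy_pack_1.local
        #   parameters:
        #     threshold: 3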
def test_register_policy_invalid_policy_type_references(self):
# Policy references an invalid (inexistent) policy type
registrar = PolicyRegistrar()
policy_path = os.path.join(get_fixtures_packs_base_path(),
'dummy_pack_1/policies/policy_2.yaml')
expected_msg = 'Referenced policy_type "action.mock_policy_error" doesnt exist'
self.assertRaisesRegexp(ValueError, expected_msg, registrar._register_policy,
pack='dummy_pack_1', policy=policy_path)
def test_make_sure_policy_parameters_are_validated_during_register(self):
# Policy where specified parameters fail schema validation
registrar = PolicyRegistrar()
policy_path = os.path.join(get_fixtures_packs_base_path(),
'dummy_pack_2/policies/policy_3.yaml')
expected_msg = '100 is greater than the maximum of 5'
self.assertRaisesRegexp(jsonschema.ValidationError, expected_msg,
registrar._register_policy, pack='dummy_pack_2',
policy=policy_path)
|
the-stack_0_22703 | import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROD = False
USE_SSL = False
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# FIXME: We need to change this to mysql, instead of sqlite.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'),
'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'),
},
}
# The default values for these two settings seem to cause issues with apache
CACHE_BACKEND = 'dummy://'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# django-mailer uses a different settings attribute
MAILER_EMAIL_BACKEND = EMAIL_BACKEND
# Configure these for your outgoing email host
# EMAIL_HOST = 'smtp.my-company.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'djangomail'
# EMAIL_HOST_PASSWORD = 'top-secret!'
HORIZON_CONFIG = {
'dashboards': ('nova', 'syspanel', 'settings',),
'default_dashboard': 'nova',
'user_home': 'openstack_dashboard.views.user_home',
}
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True
}
OPENSTACK_HOST = "127.0.0.1"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
# FIXME: this is only needed until keystone fixes its GET /tenants call
# so that it doesn't return everything for admins
OPENSTACK_KEYSTONE_ADMIN_URL = "http://%s:35357/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
SWIFT_PAGINATE_LIMIT = 100
# If you have external monitoring links, eg:
# EXTERNAL_MONITORING = [
# ['Nagios','http://foo.com'],
# ['Ganglia','http://bar.com'],
# ]
#LOGGING = {
# 'version': 1,
# # When set to True this will disable all logging except
# # for loggers specified in this configuration dictionary. Note that
# # if nothing is specified here and disable_existing_loggers is True,
# # django.db.backends will still log unless it is disabled explicitly.
# 'disable_existing_loggers': False,
# 'handlers': {
# 'null': {
# 'level': 'DEBUG',
# 'class': 'django.utils.log.NullHandler',
# },
# 'console': {
# # Set the level to "DEBUG" for verbose output logging.
# 'level': 'INFO',
# 'class': 'logging.StreamHandler',
# },
# },
# 'loggers': {
# # Logging from django.db.backends is VERY verbose, send to null
# # by default.
# 'django.db.backends': {
# 'handlers': ['null'],
# 'propagate': False,
# },
# 'horizon': {
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'novaclient': {
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'keystoneclient': {
# 'handlers': ['console'],
# 'propagate': False,
# },
# 'nose.plugins.manager': {
# 'handlers': ['console'],
# 'propagate': False,
# }
# }
#}
|
the-stack_0_22707 | # mock.py
# Test tools for mocking and patching.
# Maintained by Michael Foord
# Backport for other versions of Python available from
# https://pypi.org/project/mock
__all__ = (
'Mock',
'MagicMock',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'NonCallableMock',
'NonCallableMagicMock',
'mock_open',
'PropertyMock',
'seal',
)
__version__ = '1.0'
import inspect
import pprint
import sys
import builtins
from types import ModuleType
from functools import wraps, partial
_builtins = {name for name in dir(builtins) if not name.startswith('_')}
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
FILTER_DIR = True
# Workaround for issue #12370
# Without this, the __class__ properties wouldn't be set correctly
_safe_super = super
def _is_instance_mock(obj):
# can't use isinstance on Mock objects because they override __class__
# The base class for all mocks is NonCallableMock
return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
return (
isinstance(obj, BaseExceptions) or
isinstance(obj, type) and issubclass(obj, BaseExceptions)
)
def _get_signature_object(func, as_instance, eat_self):
"""
Given an arbitrary, possibly callable object, try to create a suitable
signature object.
Return a (reduced func, signature) tuple, or None.
"""
if isinstance(func, type) and not as_instance:
# If it's a type and should be modelled as a type, use __init__.
try:
func = func.__init__
except AttributeError:
return None
# Skip the `self` argument in __init__
eat_self = True
elif not isinstance(func, FunctionTypes):
# If we really want to model an instance of the passed type,
# __call__ should be looked up, not __init__.
try:
func = func.__call__
except AttributeError:
return None
if eat_self:
sig_func = partial(func, None)
else:
sig_func = func
try:
return func, inspect.signature(sig_func)
except ValueError:
# Certain callable types are not supported by inspect.signature()
return None
def _check_signature(func, mock, skipfirst, instance=False):
sig = _get_signature_object(func, instance, skipfirst)
if sig is None:
return
func, sig = sig
def checksig(_mock_self, *args, **kwargs):
sig.bind(*args, **kwargs)
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
# we explicitly don't copy func.__dict__ into this copy as it would
# expose original attributes that should be mocked
for attribute in (
'__name__', '__doc__', '__text_signature__',
'__module__', '__defaults__', '__kwdefaults__',
):
try:
setattr(funcopy, attribute, getattr(func, attribute))
except AttributeError:
pass
def _callable(obj):
if isinstance(obj, type):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, type):
# already an instance
return getattr(obj, '__call__', None) is not None
# *could* be broken by a class overriding __mro__ or __dict__ via
# a metaclass
for base in (obj,) + obj.__mro__:
if base.__dict__.get('__call__') is not None:
return True
return False
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original.
if not _callable(original):
return
skipfirst = isinstance(original, type)
result = _get_signature_object(original, instance, skipfirst)
if result is None:
return mock
func, sig = result
def checksig(*args, **kwargs):
sig.bind(*args, **kwargs)
_copy_func_details(func, checksig)
name = original.__name__
if not name.isidentifier():
name = 'funcopy'
context = {'_checksig_': checksig, 'mock': mock}
src = """def %s(*args, **kwargs):
_checksig_(*args, **kwargs)
return mock(*args, **kwargs)""" % name
exec (src, context)
funcopy = context[name]
_setup_func(funcopy, mock)
return funcopy
def _setup_func(funcopy, mock):
funcopy.mock = mock
# can't use isinstance with mocks
if not _is_instance_mock(mock):
return
def assert_called_with(*args, **kwargs):
return mock.assert_called_with(*args, **kwargs)
def assert_called(*args, **kwargs):
return mock.assert_called(*args, **kwargs)
def assert_not_called(*args, **kwargs):
return mock.assert_not_called(*args, **kwargs)
def assert_called_once(*args, **kwargs):
return mock.assert_called_once(*args, **kwargs)
def assert_called_once_with(*args, **kwargs):
return mock.assert_called_once_with(*args, **kwargs)
def assert_has_calls(*args, **kwargs):
return mock.assert_has_calls(*args, **kwargs)
def assert_any_call(*args, **kwargs):
return mock.assert_any_call(*args, **kwargs)
def reset_mock():
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
mock.reset_mock()
ret = funcopy.return_value
if _is_instance_mock(ret) and not ret is mock:
ret.reset_mock()
funcopy.called = False
funcopy.call_count = 0
funcopy.call_args = None
funcopy.call_args_list = _CallList()
funcopy.method_calls = _CallList()
funcopy.mock_calls = _CallList()
funcopy.return_value = mock.return_value
funcopy.side_effect = mock.side_effect
funcopy._mock_children = mock._mock_children
funcopy.assert_called_with = assert_called_with
funcopy.assert_called_once_with = assert_called_once_with
funcopy.assert_has_calls = assert_has_calls
funcopy.assert_any_call = assert_any_call
funcopy.reset_mock = reset_mock
funcopy.assert_called = assert_called
funcopy.assert_not_called = assert_not_called
funcopy.assert_called_once = assert_called_once
mock._mock_delegate = funcopy
def _is_magic(name):
return '__%s__' % name[2:-2] == name
class _SentinelObject(object):
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
def __reduce__(self):
return 'sentinel.%s' % self.name
class _Sentinel(object):
"""Access attributes to return a named object, usable as a sentinel."""
def __init__(self):
self._sentinels = {}
def __getattr__(self, name):
if name == '__bases__':
# Without this help(unittest.mock) raises an exception
raise AttributeError
return self._sentinels.setdefault(name, _SentinelObject(name))
def __reduce__(self):
return 'sentinel'
sentinel = _Sentinel()
DEFAULT = sentinel.DEFAULT
_missing = sentinel.MISSING
_deleted = sentinel.DELETED
def _copy(value):
if type(value) in (dict, list, tuple, set):
return type(value)(value)
return value
_allowed_names = {
'return_value', '_mock_return_value', 'side_effect',
'_mock_side_effect', '_mock_parent', '_mock_new_parent',
'_mock_name', '_mock_new_name'
}
def _delegating_property(name):
_allowed_names.add(name)
_the_name = '_mock_' + name
def _get(self, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
return getattr(self, _the_name)
return getattr(sig, name)
def _set(self, value, name=name, _the_name=_the_name):
sig = self._mock_delegate
if sig is None:
self.__dict__[_the_name] = value
else:
setattr(sig, name, value)
return property(_get, _set)
class _CallList(list):
def __contains__(self, value):
if not isinstance(value, list):
return list.__contains__(self, value)
len_value = len(value)
len_self = len(self)
if len_value > len_self:
return False
for i in range(0, len_self - len_value + 1):
sub_list = self[i:i+len_value]
if sub_list == value:
return True
return False
def __repr__(self):
return pprint.pformat(list(self))
def _check_and_set_parent(parent, value, name, new_name):
if not _is_instance_mock(value):
return False
if ((value._mock_name or value._mock_new_name) or
(value._mock_parent is not None) or
(value._mock_new_parent is not None)):
return False
_parent = parent
while _parent is not None:
# setting a mock (value) as a child or return value of itself
# should not modify the mock
if _parent is value:
return False
_parent = _parent._mock_new_parent
if new_name:
value._mock_new_parent = parent
value._mock_new_name = new_name
if name:
value._mock_parent = parent
value._mock_name = name
return True
# Internal class to identify if we wrapped an iterator object or not.
class _MockIter(object):
def __init__(self, obj):
self.obj = iter(obj)
def __iter__(self):
return self
def __next__(self):
return next(self.obj)
class Base(object):
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
class NonCallableMock(Base):
"""A non-callable version of `Mock`"""
def __new__(cls, *args, **kw):
# every instance has its own class
# so we can create magic methods on the
# class without stomping on other mocks
new = type(cls.__name__, (cls,), {'__doc__': cls.__doc__})
instance = object.__new__(new)
return instance
def __init__(
self, spec=None, wraps=None, name=None, spec_set=None,
parent=None, _spec_state=None, _new_name='', _new_parent=None,
_spec_as_instance=False, _eat_self=None, unsafe=False, **kwargs
):
if _new_parent is None:
_new_parent = parent
__dict__ = self.__dict__
__dict__['_mock_parent'] = parent
__dict__['_mock_name'] = name
__dict__['_mock_new_name'] = _new_name
__dict__['_mock_new_parent'] = _new_parent
__dict__['_mock_sealed'] = False
if spec_set is not None:
spec = spec_set
spec_set = True
if _eat_self is None:
_eat_self = parent is not None
self._mock_add_spec(spec, spec_set, _spec_as_instance, _eat_self)
__dict__['_mock_children'] = {}
__dict__['_mock_wraps'] = wraps
__dict__['_mock_delegate'] = None
__dict__['_mock_called'] = False
__dict__['_mock_call_args'] = None
__dict__['_mock_call_count'] = 0
__dict__['_mock_call_args_list'] = _CallList()
__dict__['_mock_mock_calls'] = _CallList()
__dict__['method_calls'] = _CallList()
__dict__['_mock_unsafe'] = unsafe
if kwargs:
self.configure_mock(**kwargs)
_safe_super(NonCallableMock, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state
)
def attach_mock(self, mock, attribute):
"""
Attach a mock as an attribute of this one, replacing its name and
parent. Calls to the attached mock will be recorded in the
`method_calls` and `mock_calls` attributes of this one."""
mock._mock_parent = None
mock._mock_new_parent = None
mock._mock_name = ''
mock._mock_new_name = None
setattr(self, attribute, mock)
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
def _mock_add_spec(self, spec, spec_set, _spec_as_instance=False,
_eat_self=False):
_spec_class = None
_spec_signature = None
if spec is not None and not _is_list(spec):
if isinstance(spec, type):
_spec_class = spec
else:
_spec_class = _get_class(spec)
res = _get_signature_object(spec,
_spec_as_instance, _eat_self)
_spec_signature = res and res[1]
spec = dir(spec)
__dict__ = self.__dict__
__dict__['_spec_class'] = _spec_class
__dict__['_spec_set'] = spec_set
__dict__['_spec_signature'] = _spec_signature
__dict__['_mock_methods'] = spec
def __get_return_value(self):
ret = self._mock_return_value
if self._mock_delegate is not None:
ret = self._mock_delegate.return_value
if ret is DEFAULT:
ret = self._get_child_mock(
_new_parent=self, _new_name='()'
)
self.return_value = ret
return ret
def __set_return_value(self, value):
if self._mock_delegate is not None:
self._mock_delegate.return_value = value
else:
self._mock_return_value = value
_check_and_set_parent(self, value, None, '()')
__return_value_doc = "The value to be returned when the mock is called."
return_value = property(__get_return_value, __set_return_value,
__return_value_doc)
@property
def __class__(self):
if self._spec_class is None:
return type(self)
return self._spec_class
called = _delegating_property('called')
call_count = _delegating_property('call_count')
call_args = _delegating_property('call_args')
call_args_list = _delegating_property('call_args_list')
mock_calls = _delegating_property('mock_calls')
def __get_side_effect(self):
delegated = self._mock_delegate
if delegated is None:
return self._mock_side_effect
sf = delegated.side_effect
if (sf is not None and not callable(sf)
and not isinstance(sf, _MockIter) and not _is_exception(sf)):
sf = _MockIter(sf)
delegated.side_effect = sf
return sf
def __set_side_effect(self, value):
value = _try_iter(value)
delegated = self._mock_delegate
if delegated is None:
self._mock_side_effect = value
else:
delegated.side_effect = value
side_effect = property(__get_side_effect, __set_side_effect)
def reset_mock(self, visited=None,*, return_value=False, side_effect=False):
"Restore the mock object to its initial state."
if visited is None:
visited = []
if id(self) in visited:
return
visited.append(id(self))
self.called = False
self.call_args = None
self.call_count = 0
self.mock_calls = _CallList()
self.call_args_list = _CallList()
self.method_calls = _CallList()
if return_value:
self._mock_return_value = DEFAULT
if side_effect:
self._mock_side_effect = None
for child in self._mock_children.values():
if isinstance(child, _SpecState):
continue
child.reset_mock(visited)
ret = self._mock_return_value
if _is_instance_mock(ret) and ret is not self:
ret.reset_mock(visited)
def configure_mock(self, **kwargs):
"""Set attributes on the mock through keyword arguments.
Attributes plus return values and side effects can be set on child
mocks using standard dot notation and unpacking a dictionary in the
method call:
>>> attrs = {'method.return_value': 3, 'other.side_effect': KeyError}
>>> mock.configure_mock(**attrs)"""
for arg, val in sorted(kwargs.items(),
# we sort on the number of dots so that
# attributes are set before we set attributes on
# attributes
key=lambda entry: entry[0].count('.')):
args = arg.split('.')
final = args.pop()
obj = self
for entry in args:
obj = getattr(obj, entry)
setattr(obj, final, val)
def __getattr__(self, name):
if name in {'_mock_methods', '_mock_unsafe'}:
raise AttributeError(name)
elif self._mock_methods is not None:
if name not in self._mock_methods or name in _all_magics:
raise AttributeError("Mock object has no attribute %r" % name)
elif _is_magic(name):
raise AttributeError(name)
if not self._mock_unsafe:
if name.startswith(('assert', 'assret')):
raise AttributeError(name)
result = self._mock_children.get(name)
if result is _deleted:
raise AttributeError(name)
elif result is None:
wraps = None
if self._mock_wraps is not None:
# XXXX should we get the attribute without triggering code
# execution?
wraps = getattr(self._mock_wraps, name)
result = self._get_child_mock(
parent=self, name=name, wraps=wraps, _new_name=name,
_new_parent=self
)
self._mock_children[name] = result
elif isinstance(result, _SpecState):
result = create_autospec(
result.spec, result.spec_set, result.instance,
result.parent, result.name
)
self._mock_children[name] = result
return result
def _extract_mock_name(self):
_name_list = [self._mock_new_name]
_parent = self._mock_new_parent
last = self
dot = '.'
if _name_list == ['()']:
dot = ''
seen = set()
while _parent is not None:
last = _parent
_name_list.append(_parent._mock_new_name + dot)
dot = '.'
if _parent._mock_new_name == '()':
dot = ''
_parent = _parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
if id(_parent) in seen:
break
seen.add(id(_parent))
_name_list = list(reversed(_name_list))
_first = last._mock_name or 'mock'
if len(_name_list) > 1:
if _name_list[1] not in ('()', '().'):
_first += '.'
_name_list[0] = _first
return ''.join(_name_list)
def __repr__(self):
name = self._extract_mock_name()
name_string = ''
if name not in ('mock', 'mock.'):
name_string = ' name=%r' % name
spec_string = ''
if self._spec_class is not None:
spec_string = ' spec=%r'
if self._spec_set:
spec_string = ' spec_set=%r'
spec_string = spec_string % self._spec_class.__name__
return "<%s%s%s id='%s'>" % (
type(self).__name__,
name_string,
spec_string,
id(self)
)
def __dir__(self):
"""Filter the output of `dir(mock)` to only useful members."""
if not FILTER_DIR:
return object.__dir__(self)
extras = self._mock_methods or []
from_type = dir(type(self))
from_dict = list(self.__dict__)
from_type = [e for e in from_type if not e.startswith('_')]
from_dict = [e for e in from_dict if not e.startswith('_') or
_is_magic(e)]
return sorted(set(extras + from_type + from_dict +
list(self._mock_children)))
def __setattr__(self, name, value):
if name in _allowed_names:
# property setters go through here
return object.__setattr__(self, name, value)
elif (self._spec_set and self._mock_methods is not None and
name not in self._mock_methods and
name not in self.__dict__):
raise AttributeError("Mock object has no attribute '%s'" % name)
elif name in _unsupported_magics:
msg = 'Attempting to set unsupported magic method %r.' % name
raise AttributeError(msg)
elif name in _all_magics:
if self._mock_methods is not None and name not in self._mock_methods:
raise AttributeError("Mock object has no attribute '%s'" % name)
if not _is_instance_mock(value):
setattr(type(self), name, _get_method(name, value))
original = value
value = lambda *args, **kw: original(self, *args, **kw)
else:
# only set _new_name and not name so that mock_calls is tracked
# but not method calls
_check_and_set_parent(self, value, None, name)
setattr(type(self), name, value)
self._mock_children[name] = value
elif name == '__class__':
self._spec_class = value
return
else:
if _check_and_set_parent(self, value, name, name):
self._mock_children[name] = value
if self._mock_sealed and not hasattr(self, name):
mock_name = f'{self._extract_mock_name()}.{name}'
raise AttributeError(f'Cannot set {mock_name}')
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name in _all_magics and name in type(self).__dict__:
delattr(type(self), name)
if name not in self.__dict__:
# for magic methods that are still MagicProxy objects and
# not set on the instance itself
return
if name in self.__dict__:
object.__delattr__(self, name)
obj = self._mock_children.get(name, _missing)
if obj is _deleted:
raise AttributeError(name)
if obj is not _missing:
del self._mock_children[name]
self._mock_children[name] = _deleted
def _format_mock_call_signature(self, args, kwargs):
name = self._mock_name or 'mock'
return _format_call_signature(name, args, kwargs)
def _format_mock_failure_message(self, args, kwargs):
message = 'Expected call: %s\nActual call: %s'
expected_string = self._format_mock_call_signature(args, kwargs)
call_args = self.call_args
if len(call_args) == 3:
call_args = call_args[1:]
actual_string = self._format_mock_call_signature(*call_args)
return message % (expected_string, actual_string)
def _call_matcher(self, _call):
"""
Given a call (or simply an (args, kwargs) tuple), return a
comparison key suitable for matching with other calls.
This is a best effort method which relies on the spec's signature,
if available, or falls back on the arguments themselves.
"""
sig = self._spec_signature
if sig is not None:
if len(_call) == 2:
name = ''
args, kwargs = _call
else:
name, args, kwargs = _call
try:
return name, sig.bind(*args, **kwargs)
except TypeError as e:
return e.with_traceback(None)
else:
return _call
def assert_not_called(_mock_self):
"""assert that the mock was never called.
"""
self = _mock_self
if self.call_count != 0:
msg = ("Expected '%s' to not have been called. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
def assert_called(_mock_self):
"""assert that the mock was called at least once
"""
self = _mock_self
if self.call_count == 0:
msg = ("Expected '%s' to have been called." %
self._mock_name or 'mock')
raise AssertionError(msg)
def assert_called_once(_mock_self):
"""assert that the mock was called only once.
"""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected '%s' to have been called once. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
def assert_called_with(_mock_self, *args, **kwargs):
"""assert that the mock was called with the specified arguments.
Raises an AssertionError if the args and keyword args passed in are
different to the last call to the mock."""
self = _mock_self
if self.call_args is None:
expected = self._format_mock_call_signature(args, kwargs)
raise AssertionError('Expected call: %s\nNot called' % (expected,))
def _error_message():
msg = self._format_mock_failure_message(args, kwargs)
return msg
expected = self._call_matcher((args, kwargs))
actual = self._call_matcher(self.call_args)
if expected != actual:
cause = expected if isinstance(expected, Exception) else None
raise AssertionError(_error_message()) from cause
def assert_called_once_with(_mock_self, *args, **kwargs):
"""assert that the mock was called exactly once and that that call was
with the specified arguments."""
self = _mock_self
if not self.call_count == 1:
msg = ("Expected '%s' to be called once. Called %s times." %
(self._mock_name or 'mock', self.call_count))
raise AssertionError(msg)
return self.assert_called_with(*args, **kwargs)
def assert_has_calls(self, calls, any_order=False):
"""assert the mock has been called with the specified calls.
The `mock_calls` list is checked for the calls.
If `any_order` is False (the default) then the calls must be
sequential. There can be extra calls before or after the
specified calls.
If `any_order` is True then the calls can be in any order, but
they must all appear in `mock_calls`."""
expected = [self._call_matcher(c) for c in calls]
cause = expected if isinstance(expected, Exception) else None
all_calls = _CallList(self._call_matcher(c) for c in self.mock_calls)
if not any_order:
if expected not in all_calls:
raise AssertionError(
'Calls not found.\nExpected: %r\n'
'Actual: %r' % (_CallList(calls), self.mock_calls)
) from cause
return
all_calls = list(all_calls)
not_found = []
for kall in expected:
try:
all_calls.remove(kall)
except ValueError:
not_found.append(kall)
if not_found:
raise AssertionError(
'%r does not contain all of %r in its call list, '
'found %r instead' % (self._mock_name or 'mock',
tuple(not_found), all_calls)
) from cause
def assert_any_call(self, *args, **kwargs):
"""assert the mock has been called with the specified arguments.
The assert passes if the mock has *ever* been called, unlike
`assert_called_with` and `assert_called_once_with` that only pass if
the call is the most recent one."""
expected = self._call_matcher((args, kwargs))
actual = [self._call_matcher(c) for c in self.call_args_list]
if expected not in actual:
cause = expected if isinstance(expected, Exception) else None
expected_string = self._format_mock_call_signature(args, kwargs)
raise AssertionError(
'%s call not found' % expected_string
) from cause
def _get_child_mock(self, **kw):
"""Create the child mocks for attributes and return value.
By default child mocks will be the same type as the parent.
Subclasses of Mock may want to override this to customize the way
child mocks are made.
For non-callable mocks the callable variant will be used (rather than
any custom subclass)."""
_type = type(self)
if not issubclass(_type, CallableMixin):
if issubclass(_type, NonCallableMagicMock):
klass = MagicMock
elif issubclass(_type, NonCallableMock) :
klass = Mock
else:
klass = _type.__mro__[1]
if self._mock_sealed:
attribute = "." + kw["name"] if "name" in kw else "()"
mock_name = self._extract_mock_name() + attribute
raise AttributeError(mock_name)
return klass(**kw)
def _try_iter(obj):
if obj is None:
return obj
if _is_exception(obj):
return obj
if _callable(obj):
return obj
try:
return iter(obj)
except TypeError:
# XXXX backwards compatibility
# but this will blow up on first call - so maybe we should fail early?
return obj
class CallableMixin(Base):
def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
wraps=None, name=None, spec_set=None, parent=None,
_spec_state=None, _new_name='', _new_parent=None, **kwargs):
self.__dict__['_mock_return_value'] = return_value
_safe_super(CallableMixin, self).__init__(
spec, wraps, name, spec_set, parent,
_spec_state, _new_name, _new_parent, **kwargs
)
self.side_effect = side_effect
def _mock_check_sig(self, *args, **kwargs):
# stub method that can be replaced with one with a specific signature
pass
def __call__(_mock_self, *args, **kwargs):
# can't use self in-case a function / method we are mocking uses self
# in the signature
_mock_self._mock_check_sig(*args, **kwargs)
return _mock_self._mock_call(*args, **kwargs)
def _mock_call(_mock_self, *args, **kwargs):
self = _mock_self
self.called = True
self.call_count += 1
_new_name = self._mock_new_name
_new_parent = self._mock_new_parent
_call = _Call((args, kwargs), two=True)
self.call_args = _call
self.call_args_list.append(_call)
self.mock_calls.append(_Call(('', args, kwargs)))
seen = set()
skip_next_dot = _new_name == '()'
do_method_calls = self._mock_parent is not None
name = self._mock_name
while _new_parent is not None:
this_mock_call = _Call((_new_name, args, kwargs))
if _new_parent._mock_new_name:
dot = '.'
if skip_next_dot:
dot = ''
skip_next_dot = False
if _new_parent._mock_new_name == '()':
skip_next_dot = True
_new_name = _new_parent._mock_new_name + dot + _new_name
if do_method_calls:
if _new_name == name:
this_method_call = this_mock_call
else:
this_method_call = _Call((name, args, kwargs))
_new_parent.method_calls.append(this_method_call)
do_method_calls = _new_parent._mock_parent is not None
if do_method_calls:
name = _new_parent._mock_name + '.' + name
_new_parent.mock_calls.append(this_mock_call)
_new_parent = _new_parent._mock_new_parent
# use ids here so as not to call __hash__ on the mocks
_new_parent_id = id(_new_parent)
if _new_parent_id in seen:
break
seen.add(_new_parent_id)
ret_val = DEFAULT
effect = self.side_effect
if effect is not None:
if _is_exception(effect):
raise effect
if not _callable(effect):
result = next(effect)
if _is_exception(result):
raise result
if result is DEFAULT:
result = self.return_value
return result
ret_val = effect(*args, **kwargs)
if (self._mock_wraps is not None and
self._mock_return_value is DEFAULT):
return self._mock_wraps(*args, **kwargs)
if ret_val is DEFAULT:
ret_val = self.return_value
return ret_val
class Mock(CallableMixin, NonCallableMock):
"""
Create a new `Mock` object. `Mock` takes several optional arguments
that specify the behaviour of the Mock object:
* `spec`: This can be either a list of strings or an existing object (a
class or instance) that acts as the specification for the mock object. If
you pass in an object then a list of strings is formed by calling dir on
the object (excluding unsupported magic attributes and methods). Accessing
any attribute not in this list will raise an `AttributeError`.
If `spec` is an object (rather than a list of strings) then
`mock.__class__` returns the class of the spec object. This allows mocks
to pass `isinstance` tests.
* `spec_set`: A stricter variant of `spec`. If used, attempting to *set*
or get an attribute on the mock that isn't on the object passed as
`spec_set` will raise an `AttributeError`.
* `side_effect`: A function to be called whenever the Mock is called. See
the `side_effect` attribute. Useful for raising exceptions or
dynamically changing return values. The function is called with the same
arguments as the mock, and unless it returns `DEFAULT`, the return
value of this function is used as the return value.
If `side_effect` is an iterable then each call to the mock will return
the next value from the iterable. If any of the members of the iterable
are exceptions they will be raised instead of returned.
* `return_value`: The value returned when the mock is called. By default
this is a new Mock (created on first access). See the
`return_value` attribute.
* `wraps`: Item for the mock object to wrap. If `wraps` is not None then
calling the Mock will pass the call through to the wrapped object
(returning the real result). Attribute access on the mock will return a
Mock object that wraps the corresponding attribute of the wrapped object
(so attempting to access an attribute that doesn't exist will raise an
`AttributeError`).
If the mock has an explicit `return_value` set then calls are not passed
to the wrapped object and the `return_value` is returned instead.
* `name`: If the mock has a name then it will be used in the repr of the
mock. This can be useful for debugging. The name is propagated to child
mocks.
Mocks can also be called with arbitrary keyword arguments. These will be
used to set attributes on the mock after it is created.
"""
def _dot_lookup(thing, comp, import_path):
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
def _importer(target):
components = target.split('.')
import_path = components.pop(0)
thing = __import__(import_path)
for comp in components:
import_path += ".%s" % comp
thing = _dot_lookup(thing, comp, import_path)
return thing
def _is_started(patcher):
# XXXX horrible
return hasattr(patcher, 'is_local')
class _patch(object):
attribute_name = None
_active_patches = []
def __init__(
self, getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
):
if new_callable is not None:
if new is not DEFAULT:
raise ValueError(
"Cannot use 'new' and 'new_callable' together"
)
if autospec is not None:
raise ValueError(
"Cannot use 'autospec' and 'new_callable' together"
)
self.getter = getter
self.attribute = attribute
self.new = new
self.new_callable = new_callable
self.spec = spec
self.create = create
self.has_local = False
self.spec_set = spec_set
self.autospec = autospec
self.kwargs = kwargs
self.additional_patchers = []
def copy(self):
patcher = _patch(
self.getter, self.attribute, self.new, self.spec,
self.create, self.spec_set,
self.autospec, self.new_callable, self.kwargs
)
patcher.attribute_name = self.attribute_name
patcher.additional_patchers = [
p.copy() for p in self.additional_patchers
]
return patcher
def __call__(self, func):
if isinstance(func, type):
return self.decorate_class(func)
return self.decorate_callable(func)
def decorate_class(self, klass):
for attr in dir(klass):
if not attr.startswith(patch.TEST_PREFIX):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
patcher = self.copy()
setattr(klass, attr, patcher(attr_value))
return klass
def decorate_callable(self, func):
if hasattr(func, 'patchings'):
func.patchings.append(self)
return func
@wraps(func)
def patched(*args, **keywargs):
extra_args = []
entered_patchers = []
exc_info = tuple()
try:
for patching in patched.patchings:
arg = patching.__enter__()
entered_patchers.append(patching)
if patching.attribute_name is not None:
keywargs.update(arg)
elif patching.new is DEFAULT:
extra_args.append(arg)
args += tuple(extra_args)
return func(*args, **keywargs)
except:
if (patching not in entered_patchers and
_is_started(patching)):
# the patcher may have been started, but an exception
# raised whilst entering one of its additional_patchers
entered_patchers.append(patching)
# Pass the exception to __exit__
exc_info = sys.exc_info()
# re-raise the exception
raise
finally:
for patching in reversed(entered_patchers):
patching.__exit__(*exc_info)
patched.patchings = [self]
return patched
def get_original(self):
target = self.getter()
name = self.attribute
original = DEFAULT
local = False
try:
original = target.__dict__[name]
except (AttributeError, KeyError):
original = getattr(target, name, DEFAULT)
else:
local = True
if name in _builtins and isinstance(target, ModuleType):
self.create = True
if not self.create and original is DEFAULT:
raise AttributeError(
"%s does not have the attribute %r" % (target, name)
)
return original, local
def __enter__(self):
"""Perform the patch."""
new, spec, spec_set = self.new, self.spec, self.spec_set
autospec, kwargs = self.autospec, self.kwargs
new_callable = self.new_callable
self.target = self.getter()
# normalise False to None
if spec is False:
spec = None
if spec_set is False:
spec_set = None
if autospec is False:
autospec = None
if spec is not None and autospec is not None:
raise TypeError("Can't specify spec and autospec")
if ((spec is not None or autospec is not None) and
spec_set not in (True, None)):
raise TypeError("Can't provide explicit spec_set *and* spec or autospec")
original, local = self.get_original()
if new is DEFAULT and autospec is None:
inherit = False
if spec is True:
# set spec to the object we are replacing
spec = original
if spec_set is True:
spec_set = original
spec = None
elif spec is not None:
if spec_set is True:
spec_set = spec
spec = None
elif spec_set is True:
spec_set = original
if spec is not None or spec_set is not None:
if original is DEFAULT:
raise TypeError("Can't use 'spec' with create=True")
if isinstance(original, type):
# If we're patching out a class and there is a spec
inherit = True
Klass = MagicMock
_kwargs = {}
if new_callable is not None:
Klass = new_callable
elif spec is not None or spec_set is not None:
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if _is_list(this_spec):
not_callable = '__call__' not in this_spec
else:
not_callable = not callable(this_spec)
if not_callable:
Klass = NonCallableMagicMock
if spec is not None:
_kwargs['spec'] = spec
if spec_set is not None:
_kwargs['spec_set'] = spec_set
# add a name to mocks
if (isinstance(Klass, type) and
issubclass(Klass, NonCallableMock) and self.attribute):
_kwargs['name'] = self.attribute
_kwargs.update(kwargs)
new = Klass(**_kwargs)
if inherit and _is_instance_mock(new):
# we can only tell if the instance should be callable if the
# spec is not a list
this_spec = spec
if spec_set is not None:
this_spec = spec_set
if (not _is_list(this_spec) and not
_instance_callable(this_spec)):
Klass = NonCallableMagicMock
_kwargs.pop('name')
new.return_value = Klass(_new_parent=new, _new_name='()',
**_kwargs)
elif autospec is not None:
# spec is ignored, new *must* be default, spec_set is treated
# as a boolean. Should we check spec is not None and that spec_set
# is a bool?
if new is not DEFAULT:
raise TypeError(
"autospec creates the mock for you. Can't specify "
"autospec and new."
)
if original is DEFAULT:
raise TypeError("Can't use 'autospec' with create=True")
spec_set = bool(spec_set)
if autospec is True:
autospec = original
new = create_autospec(autospec, spec_set=spec_set,
_name=self.attribute, **kwargs)
elif kwargs:
# can't set keyword args when we aren't creating the mock
# XXXX If new is a Mock we could call new.configure_mock(**kwargs)
raise TypeError("Can't pass kwargs to a mock we aren't creating")
new_attr = new
self.temp_original = original
self.is_local = local
setattr(self.target, self.attribute, new_attr)
if self.attribute_name is not None:
extra_args = {}
if self.new is DEFAULT:
extra_args[self.attribute_name] = new
for patching in self.additional_patchers:
arg = patching.__enter__()
if patching.new is DEFAULT:
extra_args.update(arg)
return extra_args
return new
def __exit__(self, *exc_info):
"""Undo the patch."""
if not _is_started(self):
raise RuntimeError('stop called on unstarted patcher')
if self.is_local and self.temp_original is not DEFAULT:
setattr(self.target, self.attribute, self.temp_original)
else:
delattr(self.target, self.attribute)
if not self.create and (not hasattr(self.target, self.attribute) or
self.attribute in ('__doc__', '__module__',
'__defaults__', '__annotations__',
'__kwdefaults__')):
# needed for proxy objects like django settings
setattr(self.target, self.attribute, self.temp_original)
del self.temp_original
del self.is_local
del self.target
for patcher in reversed(self.additional_patchers):
if _is_started(patcher):
patcher.__exit__(*exc_info)
def start(self):
"""Activate a patch, returning any created mock."""
result = self.__enter__()
self._active_patches.append(self)
return result
def stop(self):
"""Stop an active patch."""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
pass
return self.__exit__()
def _get_target(target):
try:
target, attribute = target.rsplit('.', 1)
except (TypeError, ValueError):
raise TypeError("Need a valid target to patch. You supplied: %r" %
(target,))
getter = lambda: _importer(target)
return getter, attribute
def _patch_object(
target, attribute, new=DEFAULT, spec=None,
create=False, spec_set=None, autospec=None,
new_callable=None, **kwargs
):
"""
patch the named member (`attribute`) on an object (`target`) with a mock
object.
`patch.object` can be used as a decorator, class decorator or a context
manager. Arguments `new`, `spec`, `create`, `spec_set`,
`autospec` and `new_callable` have the same meaning as for `patch`. Like
`patch`, `patch.object` takes arbitrary keyword arguments for configuring
the mock object it creates.
When used as a class decorator `patch.object` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
getter = lambda: target
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
def _patch_multiple(target, spec=None, create=False, spec_set=None,
autospec=None, new_callable=None, **kwargs):
"""Perform multiple patches in a single call. It takes the object to be
patched (either as an object or a string to fetch the object by importing)
and keyword arguments for the patches::
with patch.multiple(settings, FIRST_PATCH='one', SECOND_PATCH='two'):
...
Use `DEFAULT` as the value if you want `patch.multiple` to create
mocks for you. In this case the created mocks are passed into a decorated
function by keyword, and a dictionary is returned when `patch.multiple` is
used as a context manager.
`patch.multiple` can be used as a decorator, class decorator or a context
manager. The arguments `spec`, `spec_set`, `create`,
`autospec` and `new_callable` have the same meaning as for `patch`. These
arguments will be applied to *all* patches done by `patch.multiple`.
When used as a class decorator `patch.multiple` honours `patch.TEST_PREFIX`
for choosing which methods to wrap.
"""
if type(target) is str:
getter = lambda: _importer(target)
else:
getter = lambda: target
if not kwargs:
raise ValueError(
'Must supply at least one keyword argument with patch.multiple'
)
# need to wrap in a list for python 3, where items is a view
items = list(kwargs.items())
attribute, new = items[0]
patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
patcher.attribute_name = attribute
for attribute, new in items[1:]:
this_patcher = _patch(
getter, attribute, new, spec, create, spec_set,
autospec, new_callable, {}
)
this_patcher.attribute_name = attribute
patcher.additional_patchers.append(this_patcher)
return patcher
def patch(
target, new=DEFAULT, spec=None, create=False,
spec_set=None, autospec=None, new_callable=None, **kwargs
):
"""
`patch` acts as a function decorator, class decorator or a context
manager. Inside the body of the function or with statement, the `target`
is patched with a `new` object. When the function/with statement exits
the patch is undone.
If `new` is omitted, then the target is replaced with a
`MagicMock`. If `patch` is used as a decorator and `new` is
omitted, the created mock is passed in as an extra argument to the
decorated function. If `patch` is used as a context manager the created
mock is returned by the context manager.
`target` should be a string in the form `'package.module.ClassName'`. The
`target` is imported and the specified object replaced with the `new`
object, so the `target` must be importable from the environment you are
calling `patch` from. The target is imported when the decorated function
is executed, not at decoration time.
The `spec` and `spec_set` keyword arguments are passed to the `MagicMock`
if patch is creating one for you.
In addition you can pass `spec=True` or `spec_set=True`, which causes
patch to pass in the object being mocked as the spec/spec_set object.
`new_callable` allows you to specify a different class, or callable object,
that will be called to create the `new` object. By default `MagicMock` is
used.
A more powerful form of `spec` is `autospec`. If you set `autospec=True`
then the mock will be created with a spec from the object being replaced.
All attributes of the mock will also have the spec of the corresponding
attribute of the object being replaced. Methods and functions being
mocked will have their arguments checked and will raise a `TypeError` if
they are called with the wrong signature. For mocks replacing a class,
their return value (the 'instance') will have the same spec as the class.
Instead of `autospec=True` you can pass `autospec=some_object` to use an
arbitrary object as the spec instead of the one being replaced.
By default `patch` will fail to replace attributes that don't exist. If
you pass in `create=True`, and the attribute doesn't exist, patch will
create the attribute for you when the patched function is called, and
delete it again afterwards. This is useful for writing tests against
attributes that your production code creates at runtime. It is off by
default because it can be dangerous. With it switched on you can write
passing tests against APIs that don't actually exist!
Patch can be used as a `TestCase` class decorator. It works by
decorating each test method in the class. This reduces the boilerplate
code when your test methods share a common patchings set. `patch` finds
tests by looking for method names that start with `patch.TEST_PREFIX`.
By default this is `test`, which matches the way `unittest` finds tests.
You can specify an alternative prefix by setting `patch.TEST_PREFIX`.
Patch can be used as a context manager, with the with statement. Here the
patching applies to the indented block after the with statement. If you
use "as" then the patched object will be bound to the name after the
"as"; very useful if `patch` is creating a mock object for you.
`patch` takes arbitrary keyword arguments. These will be passed to
the `Mock` (or `new_callable`) on construction.
`patch.dict(...)`, `patch.multiple(...)` and `patch.object(...)` are
available for alternate use-cases.
"""
getter, attribute = _get_target(target)
return _patch(
getter, attribute, new, spec, create,
spec_set, autospec, new_callable, kwargs
)
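# --- Illustrative usage sketch (added annotation, not part of the original
# mock source). 'os.getcwd' is only an example target; any importable
# attribute path works the same way.
def _example_patch_usage():
    import os
    with patch('os.getcwd', return_value='/tmp/fake') as fake_getcwd:
        # Inside the block os.getcwd is the MagicMock created by patch.
        assert os.getcwd() == '/tmp/fake'
    # On exit the original function is restored; the mock keeps its call history.
    fake_getcwd.assert_called_once_with()
    return fake_getcwd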
class _patch_dict(object):
"""
Patch a dictionary, or dictionary like object, and restore the dictionary
to its original state after the test.
`in_dict` can be a dictionary or a mapping like container. If it is a
mapping then it must at least support getting, setting and deleting items
plus iterating over keys.
`in_dict` can also be a string specifying the name of the dictionary, which
will then be fetched by importing it.
`values` can be a dictionary of values to set in the dictionary. `values`
can also be an iterable of `(key, value)` pairs.
If `clear` is True then the dictionary will be cleared before the new
values are set.
`patch.dict` can also be called with arbitrary keyword arguments to set
values in the dictionary::
with patch.dict('sys.modules', mymodule=Mock(), other_module=Mock()):
...
`patch.dict` can be used as a context manager, decorator or class
decorator. When used as a class decorator `patch.dict` honours
`patch.TEST_PREFIX` for choosing which methods to wrap.
"""
def __init__(self, in_dict, values=(), clear=False, **kwargs):
if isinstance(in_dict, str):
in_dict = _importer(in_dict)
self.in_dict = in_dict
# support any argument supported by dict(...) constructor
self.values = dict(values)
self.values.update(kwargs)
self.clear = clear
self._original = None
def __call__(self, f):
if isinstance(f, type):
return self.decorate_class(f)
@wraps(f)
def _inner(*args, **kw):
self._patch_dict()
try:
return f(*args, **kw)
finally:
self._unpatch_dict()
return _inner
def decorate_class(self, klass):
for attr in dir(klass):
attr_value = getattr(klass, attr)
if (attr.startswith(patch.TEST_PREFIX) and
hasattr(attr_value, "__call__")):
decorator = _patch_dict(self.in_dict, self.values, self.clear)
decorated = decorator(attr_value)
setattr(klass, attr, decorated)
return klass
def __enter__(self):
"""Patch the dict."""
self._patch_dict()
def _patch_dict(self):
values = self.values
in_dict = self.in_dict
clear = self.clear
try:
original = in_dict.copy()
except AttributeError:
# dict like object with no copy method
# must support iteration over keys
original = {}
for key in in_dict:
original[key] = in_dict[key]
self._original = original
if clear:
_clear_dict(in_dict)
try:
in_dict.update(values)
except AttributeError:
# dict like object with no update method
for key in values:
in_dict[key] = values[key]
def _unpatch_dict(self):
in_dict = self.in_dict
original = self._original
_clear_dict(in_dict)
try:
in_dict.update(original)
except AttributeError:
for key in original:
in_dict[key] = original[key]
def __exit__(self, *args):
"""Unpatch the dict."""
self._unpatch_dict()
return False
start = __enter__
stop = __exit__
def _clear_dict(in_dict):
try:
in_dict.clear()
except AttributeError:
keys = list(in_dict)
for key in keys:
del in_dict[key]
def _patch_stopall():
"""Stop all active patches. LIFO to unroll nested patches."""
for patch in reversed(_patch._active_patches):
patch.stop()
patch.object = _patch_object
patch.dict = _patch_dict
patch.multiple = _patch_multiple
patch.stopall = _patch_stopall
patch.TEST_PREFIX = 'test'
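# --- Illustrative usage sketch (added annotation, not part of the original
# mock source) for the attribute-style entry points attached above. The
# targets ('os', 'os.environ') are examples only.
def _example_patch_variants():
    import os
    with patch.object(os, 'sep', '!'):
        assert os.sep == '!'
    with patch.dict('os.environ', {'EXAMPLE_KEY': '1'}):
        assert os.environ['EXAMPLE_KEY'] == '1'
    with patch.multiple(os, sep='/', pathsep=':'):
        assert (os.sep, os.pathsep) == ('/', ':')
    # All three patches are undone once their blocks exit.
    return os.sep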
magic_methods = (
"lt le gt ge eq ne "
"getitem setitem delitem "
"len contains iter "
"hash str sizeof "
"enter exit "
# we added divmod and rdivmod here instead of numerics
# because there is no idivmod
"divmod rdivmod neg pos abs invert "
"complex int float index "
"round trunc floor ceil "
"bool next "
"fspath "
)
numerics = (
"add sub mul matmul div floordiv mod lshift rshift and xor or pow truediv"
)
inplace = ' '.join('i%s' % n for n in numerics.split())
right = ' '.join('r%s' % n for n in numerics.split())
# not including __prepare__, __instancecheck__, __subclasscheck__
# (as they are metaclass methods)
# __del__ is not supported at all as it causes problems if it exists
_non_defaults = {
'__get__', '__set__', '__delete__', '__reversed__', '__missing__',
'__reduce__', '__reduce_ex__', '__getinitargs__', '__getnewargs__',
'__getstate__', '__setstate__', '__getformat__', '__setformat__',
'__repr__', '__dir__', '__subclasses__', '__format__',
'__getnewargs_ex__',
}
def _get_method(name, func):
"Turns a callable object (like a mock) into a real function"
def method(self, *args, **kw):
return func(self, *args, **kw)
method.__name__ = name
return method
_magics = {
'__%s__' % method for method in
' '.join([magic_methods, numerics, inplace, right]).split()
}
_all_magics = _magics | _non_defaults
_unsupported_magics = {
'__getattr__', '__setattr__',
    '__init__', '__new__', '__prepare__',
'__instancecheck__', '__subclasscheck__',
'__del__'
}
_calculate_return_value = {
'__hash__': lambda self: object.__hash__(self),
'__str__': lambda self: object.__str__(self),
'__sizeof__': lambda self: object.__sizeof__(self),
'__fspath__': lambda self: f"{type(self).__name__}/{self._extract_mock_name()}/{id(self)}",
}
_return_values = {
'__lt__': NotImplemented,
'__gt__': NotImplemented,
'__le__': NotImplemented,
'__ge__': NotImplemented,
'__int__': 1,
'__contains__': False,
'__len__': 0,
'__exit__': False,
'__complex__': 1j,
'__float__': 1.0,
'__bool__': True,
'__index__': 1,
}
def _get_eq(self):
def __eq__(other):
ret_val = self.__eq__._mock_return_value
if ret_val is not DEFAULT:
return ret_val
if self is other:
return True
return NotImplemented
return __eq__
def _get_ne(self):
def __ne__(other):
if self.__ne__._mock_return_value is not DEFAULT:
return DEFAULT
if self is other:
return False
return NotImplemented
return __ne__
def _get_iter(self):
def __iter__():
ret_val = self.__iter__._mock_return_value
if ret_val is DEFAULT:
return iter([])
# if ret_val was already an iterator, then calling iter on it should
# return the iterator unchanged
return iter(ret_val)
return __iter__
_side_effect_methods = {
'__eq__': _get_eq,
'__ne__': _get_ne,
'__iter__': _get_iter,
}
def _set_return_value(mock, method, name):
fixed = _return_values.get(name, DEFAULT)
if fixed is not DEFAULT:
method.return_value = fixed
return
    return_calculator = _calculate_return_value.get(name)
    if return_calculator is not None:
        try:
            return_value = return_calculator(mock)
except AttributeError:
# XXXX why do we return AttributeError here?
# set it as a side_effect instead?
return_value = AttributeError(name)
method.return_value = return_value
return
side_effector = _side_effect_methods.get(name)
if side_effector is not None:
method.side_effect = side_effector(mock)
class MagicMixin(object):
def __init__(self, *args, **kw):
self._mock_set_magics() # make magic work for kwargs in init
_safe_super(MagicMixin, self).__init__(*args, **kw)
self._mock_set_magics() # fix magic broken by upper level init
def _mock_set_magics(self):
these_magics = _magics
if getattr(self, "_mock_methods", None) is not None:
these_magics = _magics.intersection(self._mock_methods)
remove_magics = set()
remove_magics = _magics - these_magics
for entry in remove_magics:
if entry in type(self).__dict__:
# remove unneeded magic methods
delattr(self, entry)
# don't overwrite existing attributes if called a second time
these_magics = these_magics - set(type(self).__dict__)
_type = type(self)
for entry in these_magics:
setattr(_type, entry, MagicProxy(entry, self))
class NonCallableMagicMock(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
class MagicMock(MagicMixin, Mock):
"""
MagicMock is a subclass of Mock with default implementations
of most of the magic methods. You can use MagicMock without having to
configure the magic methods yourself.
If you use the `spec` or `spec_set` arguments then *only* magic
methods that exist in the spec will be created.
Attributes and the return value of a `MagicMock` will also be `MagicMocks`.
"""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
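# --- Illustrative usage sketch (added annotation, not part of the original
# mock source): MagicMock preconfigures the magic methods declared above, so
# the instance supports len(), iteration, containment and truth testing.
def _example_magicmock_usage():
    m = MagicMock()
    m.__len__.return_value = 3
    m.__iter__.return_value = iter(['a', 'b', 'c'])
    assert len(m) == 3
    assert list(m) == ['a', 'b', 'c']
    assert 'missing' not in m          # __contains__ defaults to False
    assert bool(m)                     # __bool__ defaults to True
    return m.mock_calls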
class MagicProxy(object):
def __init__(self, name, parent):
self.name = name
self.parent = parent
def __call__(self, *args, **kwargs):
m = self.create_mock()
return m(*args, **kwargs)
def create_mock(self):
entry = self.name
parent = self.parent
m = parent._get_child_mock(name=entry, _new_name=entry,
_new_parent=parent)
setattr(parent, entry, m)
_set_return_value(parent, m, entry)
return m
def __get__(self, obj, _type=None):
return self.create_mock()
class _ANY(object):
"A helper object that compares equal to everything."
def __eq__(self, other):
return True
def __ne__(self, other):
return False
def __repr__(self):
return '<ANY>'
ANY = _ANY()
def _format_call_signature(name, args, kwargs):
message = '%s(%%s)' % name
formatted_args = ''
args_string = ', '.join([repr(arg) for arg in args])
kwargs_string = ', '.join([
'%s=%r' % (key, value) for key, value in sorted(kwargs.items())
])
if args_string:
formatted_args = args_string
if kwargs_string:
if formatted_args:
formatted_args += ', '
formatted_args += kwargs_string
return message % formatted_args
class _Call(tuple):
"""
A tuple for holding the results of a call to a mock, either in the form
`(args, kwargs)` or `(name, args, kwargs)`.
If args or kwargs are empty then a call tuple will compare equal to
a tuple without those values. This makes comparisons less verbose::
_Call(('name', (), {})) == ('name',)
_Call(('name', (1,), {})) == ('name', (1,))
_Call(((), {'a': 'b'})) == ({'a': 'b'},)
The `_Call` object provides a useful shortcut for comparing with call::
_Call(((1, 2), {'a': 3})) == call(1, 2, a=3)
_Call(('foo', (1, 2), {'a': 3})) == call.foo(1, 2, a=3)
If the _Call has no name then it will match any name.
"""
def __new__(cls, value=(), name='', parent=None, two=False,
from_kall=True):
args = ()
kwargs = {}
_len = len(value)
if _len == 3:
name, args, kwargs = value
elif _len == 2:
first, second = value
if isinstance(first, str):
name = first
if isinstance(second, tuple):
args = second
else:
kwargs = second
else:
args, kwargs = first, second
elif _len == 1:
value, = value
if isinstance(value, str):
name = value
elif isinstance(value, tuple):
args = value
else:
kwargs = value
if two:
return tuple.__new__(cls, (args, kwargs))
return tuple.__new__(cls, (name, args, kwargs))
def __init__(self, value=(), name=None, parent=None, two=False,
from_kall=True):
self.name = name
self.parent = parent
self.from_kall = from_kall
def __eq__(self, other):
if other is ANY:
return True
try:
len_other = len(other)
except TypeError:
return False
self_name = ''
if len(self) == 2:
self_args, self_kwargs = self
else:
self_name, self_args, self_kwargs = self
other_name = ''
if len_other == 0:
other_args, other_kwargs = (), {}
elif len_other == 3:
other_name, other_args, other_kwargs = other
elif len_other == 1:
value, = other
if isinstance(value, tuple):
other_args = value
other_kwargs = {}
elif isinstance(value, str):
other_name = value
other_args, other_kwargs = (), {}
else:
other_args = ()
other_kwargs = value
elif len_other == 2:
# could be (name, args) or (name, kwargs) or (args, kwargs)
first, second = other
if isinstance(first, str):
other_name = first
if isinstance(second, tuple):
other_args, other_kwargs = second, {}
else:
other_args, other_kwargs = (), second
else:
other_args, other_kwargs = first, second
else:
return False
if self_name and other_name != self_name:
return False
# this order is important for ANY to work!
return (other_args, other_kwargs) == (self_args, self_kwargs)
__ne__ = object.__ne__
def __call__(self, *args, **kwargs):
if self.name is None:
return _Call(('', args, kwargs), name='()')
name = self.name + '()'
return _Call((self.name, args, kwargs), name=name, parent=self)
def __getattr__(self, attr):
if self.name is None:
return _Call(name=attr, from_kall=False)
name = '%s.%s' % (self.name, attr)
return _Call(name=name, parent=self, from_kall=False)
def count(self, *args, **kwargs):
return self.__getattr__('count')(*args, **kwargs)
def index(self, *args, **kwargs):
return self.__getattr__('index')(*args, **kwargs)
def __repr__(self):
if not self.from_kall:
name = self.name or 'call'
if name.startswith('()'):
name = 'call%s' % name
return name
if len(self) == 2:
name = 'call'
args, kwargs = self
else:
name, args, kwargs = self
if not name:
name = 'call'
elif not name.startswith('()'):
name = 'call.%s' % name
else:
name = 'call%s' % name
return _format_call_signature(name, args, kwargs)
def call_list(self):
"""For a call object that represents multiple calls, `call_list`
returns a list of all the intermediate calls as well as the
final call."""
vals = []
thing = self
while thing is not None:
if thing.from_kall:
vals.append(thing)
thing = thing.parent
return _CallList(reversed(vals))
call = _Call(from_kall=False)
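# --- Illustrative usage sketch (added annotation, not part of the original
# mock source): comparing recorded calls against the `call` helper and `ANY`.
def _example_call_usage():
    m = MagicMock()
    m.connection.cursor().execute('SELECT 1')
    expected = call.connection.cursor().execute('SELECT 1').call_list()
    assert m.mock_calls == expected
    m2 = MagicMock(return_value=None)
    m2('x', key=object())
    m2.assert_called_once_with('x', key=ANY)   # ANY matches the opaque object
    return expected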
def create_autospec(spec, spec_set=False, instance=False, _parent=None,
_name=None, **kwargs):
"""Create a mock object using another object as a spec. Attributes on the
mock will use the corresponding attribute on the `spec` object as their
spec.
Functions or methods being mocked will have their arguments checked
to check that they are called with the correct signature.
If `spec_set` is True then attempting to set attributes that don't exist
on the spec object will raise an `AttributeError`.
If a class is used as a spec then the return value of the mock (the
instance of the class) will have the same spec. You can use a class as the
spec for an instance object by passing `instance=True`. The returned mock
will only be callable if instances of the mock are callable.
`create_autospec` also takes arbitrary keyword arguments that are passed to
the constructor of the created mock."""
if _is_list(spec):
# can't pass a list instance to the mock constructor as it will be
# interpreted as a list of strings
spec = type(spec)
is_type = isinstance(spec, type)
_kwargs = {'spec': spec}
if spec_set:
_kwargs = {'spec_set': spec}
elif spec is None:
# None we mock with a normal mock without a spec
_kwargs = {}
if _kwargs and instance:
_kwargs['_spec_as_instance'] = True
_kwargs.update(kwargs)
Klass = MagicMock
if inspect.isdatadescriptor(spec):
# descriptors don't have a spec
# because we don't know what type they return
_kwargs = {}
elif not _callable(spec):
Klass = NonCallableMagicMock
elif is_type and instance and not _instance_callable(spec):
Klass = NonCallableMagicMock
_name = _kwargs.pop('name', _name)
_new_name = _name
if _parent is None:
# for a top level object no _new_name should be set
_new_name = ''
mock = Klass(parent=_parent, _new_parent=_parent, _new_name=_new_name,
name=_name, **_kwargs)
if isinstance(spec, FunctionTypes):
# should only happen at the top level because we don't
# recurse for functions
mock = _set_signature(mock, spec)
else:
_check_signature(spec, mock, is_type, instance)
if _parent is not None and not instance:
_parent._mock_children[_name] = mock
if is_type and not instance and 'return_value' not in kwargs:
mock.return_value = create_autospec(spec, spec_set, instance=True,
_name='()', _parent=mock)
for entry in dir(spec):
if _is_magic(entry):
# MagicMock already does the useful magic methods for us
continue
# XXXX do we need a better way of getting attributes without
# triggering code execution (?) Probably not - we need the actual
# object to mock it so we would rather trigger a property than mock
# the property descriptor. Likewise we want to mock out dynamically
# provided attributes.
# XXXX what about attributes that raise exceptions other than
# AttributeError on being fetched?
# we could be resilient against it, or catch and propagate the
# exception when the attribute is fetched from the mock
try:
original = getattr(spec, entry)
except AttributeError:
continue
kwargs = {'spec': original}
if spec_set:
kwargs = {'spec_set': original}
if not isinstance(original, FunctionTypes):
new = _SpecState(original, spec_set, mock, entry, instance)
mock._mock_children[entry] = new
else:
parent = mock
if isinstance(spec, FunctionTypes):
parent = mock.mock
skipfirst = _must_skip(spec, entry, is_type)
kwargs['_eat_self'] = skipfirst
new = MagicMock(parent=parent, name=entry, _new_name=entry,
_new_parent=parent,
**kwargs)
mock._mock_children[entry] = new
_check_signature(original, new, skipfirst=skipfirst)
# so functions created with _set_signature become instance attributes,
# *plus* their underlying mock exists in _mock_children of the parent
# mock. Adding to _mock_children may be unnecessary where we are also
# setting as an instance attribute?
if isinstance(new, FunctionTypes):
setattr(mock, entry, new)
return mock
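# --- Illustrative usage sketch (added annotation, not part of the original
# mock source): create_autospec enforces the spec'd signature. The function
# `greet` below is hypothetical.
def _example_autospec_usage():
    def greet(name, punctuation='!'):
        return 'hello ' + name + punctuation
    mock_greet = create_autospec(greet, return_value='hi')
    assert mock_greet('world') == 'hi'
    mock_greet.assert_called_once_with('world')
    try:
        mock_greet()                   # missing required argument
    except TypeError:
        return mock_greet              # signature enforcement worked
    raise AssertionError('autospec should have rejected the bad call')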
def _must_skip(spec, entry, is_type):
"""
Return whether we should skip the first argument on spec's `entry`
attribute.
"""
if not isinstance(spec, type):
if entry in getattr(spec, '__dict__', {}):
# instance attribute - shouldn't skip
return False
spec = spec.__class__
for klass in spec.__mro__:
result = klass.__dict__.get(entry, DEFAULT)
if result is DEFAULT:
continue
if isinstance(result, (staticmethod, classmethod)):
return False
elif isinstance(getattr(result, '__get__', None), MethodWrapperTypes):
# Normal method => skip if looked up on type
# (if looked up on instance, self is already skipped)
return is_type
else:
return False
# shouldn't get here unless function is a dynamically provided attribute
# XXXX untested behaviour
return is_type
def _get_class(obj):
try:
return obj.__class__
except AttributeError:
# it is possible for objects to have no __class__
return type(obj)
class _SpecState(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
)
MethodWrapperTypes = (
type(ANY.__eq__.__get__),
)
file_spec = None
def _iterate_read_data(read_data):
# Helper for mock_open:
# Retrieve lines from read_data via a generator so that separate calls to
# readline, read, and readlines are properly interleaved
sep = b'\n' if isinstance(read_data, bytes) else '\n'
data_as_list = [l + sep for l in read_data.split(sep)]
if data_as_list[-1] == sep:
# If the last line ended in a newline, the list comprehension will have an
# extra entry that's just a newline. Remove this.
data_as_list = data_as_list[:-1]
else:
        # If the data didn't end in a newline, the emulated file has no
        # trailing newline on its last line, so strip the newline that the
        # list comprehension above appended.
data_as_list[-1] = data_as_list[-1][:-1]
for line in data_as_list:
yield line
def mock_open(mock=None, read_data=''):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
    `read_data` is a string for the `read`, `readline`, and `readlines` methods
    of the file handle to return.  This is an empty string by default.
"""
def _readlines_side_effect(*args, **kwargs):
if handle.readlines.return_value is not None:
return handle.readlines.return_value
return list(_state[0])
def _read_side_effect(*args, **kwargs):
if handle.read.return_value is not None:
return handle.read.return_value
return type(read_data)().join(_state[0])
def _readline_side_effect():
yield from _iter_side_effect()
while True:
yield type(read_data)()
def _iter_side_effect():
if handle.readline.return_value is not None:
while True:
yield handle.readline.return_value
for line in _state[0]:
yield line
global file_spec
if file_spec is None:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
if mock is None:
mock = MagicMock(name='open', spec=open)
handle = MagicMock(spec=file_spec)
handle.__enter__.return_value = handle
_state = [_iterate_read_data(read_data), None]
handle.write.return_value = None
handle.read.return_value = None
handle.readline.return_value = None
handle.readlines.return_value = None
handle.read.side_effect = _read_side_effect
_state[1] = _readline_side_effect()
handle.readline.side_effect = _state[1]
handle.readlines.side_effect = _readlines_side_effect
handle.__iter__.side_effect = _iter_side_effect
def reset_data(*args, **kwargs):
_state[0] = _iterate_read_data(read_data)
if handle.readline.side_effect == _state[1]:
# Only reset the side effect if the user hasn't overridden it.
_state[1] = _readline_side_effect()
handle.readline.side_effect = _state[1]
return DEFAULT
mock.side_effect = reset_data
mock.return_value = handle
return mock
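# --- Illustrative usage sketch (added annotation, not part of the original
# mock source): mock_open combined with patch to stub out builtins.open. The
# path '/fake/path' is hypothetical.
def _example_mock_open_usage():
    m = mock_open(read_data='first\nsecond\n')
    with patch('builtins.open', m):
        with open('/fake/path') as fh:
            lines = fh.readlines()
    m.assert_called_once_with('/fake/path')
    assert lines == ['first\n', 'second\n']
    return m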
class PropertyMock(Mock):
"""
A mock intended to be used as a property, or other descriptor, on a class.
`PropertyMock` provides `__get__` and `__set__` methods so you can specify
a return value when it is fetched.
Fetching a `PropertyMock` instance from an object calls the mock, with
no args. Setting it calls the mock with the value being set.
"""
def _get_child_mock(self, **kwargs):
return MagicMock(**kwargs)
def __get__(self, obj, obj_type):
return self()
def __set__(self, obj, val):
self(val)
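# --- Illustrative usage sketch (added annotation, not part of the original
# mock source): descriptors are looked up on the type, so the PropertyMock is
# attached to a throwaway class here.
def _example_property_mock_usage():
    prop = PropertyMock(return_value=42)
    Thing = type('Thing', (object,), {'value': prop})
    obj = Thing()
    assert obj.value == 42             # __get__ calls the mock with no args
    obj.value = 99                     # __set__ calls the mock with the value
    prop.assert_called_with(99)
    return prop.mock_calls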
def seal(mock):
"""Disable the automatic generation of child mocks.
Given an input Mock, seals it to ensure no further mocks will be generated
when accessing an attribute that was not already defined.
The operation recursively seals the mock passed in, meaning that
the mock itself, any mocks generated by accessing one of its attributes,
and all assigned mocks without a name or spec will be sealed.
"""
mock._mock_sealed = True
for attr in dir(mock):
try:
m = getattr(mock, attr)
except AttributeError:
continue
if not isinstance(m, NonCallableMock):
continue
if m._mock_new_parent is mock:
seal(m)
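# --- Illustrative usage sketch (added annotation, not part of the original
# mock source): sealing blocks implicit creation of new child mocks.
def _example_seal_usage():
    m = Mock()
    m.existing_child.return_value = 1
    seal(m)
    assert m.existing_child() == 1     # children defined before sealing still work
    try:
        m.brand_new_child()            # attribute access raises AttributeError
    except AttributeError:
        return True
    raise AssertionError('sealed mock should refuse new children')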
|
the-stack_0_22708 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
try_get,
)
class TwentyMinutenIE(InfoExtractor):
IE_NAME = "20min"
_VALID_URL = r"""(?x)
https?://
(?:www\.)?20min\.ch/
(?:
videotv/*\?.*?\bvid=|
videoplayer/videoplayer\.html\?.*?\bvideoId@
)
(?P<id>\d+)
"""
_TESTS = [
{
"url": "http://www.20min.ch/videotv/?vid=469148&cid=2",
"md5": "e7264320db31eed8c38364150c12496e",
"info_dict": {
"id": "469148",
"ext": "mp4",
"title": "85 000 Franken für 15 perfekte Minuten",
"thumbnail": r"re:https?://.*\.jpg$",
},
},
{
"url": "http://www.20min.ch/videoplayer/videoplayer.html?params=client@twentyDE|videoId@523629",
"info_dict": {
"id": "523629",
"ext": "mp4",
"title": "So kommen Sie bei Eis und Schnee sicher an",
"description": "md5:117c212f64b25e3d95747e5276863f7d",
"thumbnail": r"re:https?://.*\.jpg$",
},
"params": {
"skip_download": True,
},
},
{
"url": "http://www.20min.ch/videotv/?cid=44&vid=468738",
"only_matching": True,
},
]
@staticmethod
def _extract_urls(webpage):
return [
m.group("url")
for m in re.finditer(
                r'<iframe[^>]+src=(["\'])(?P<url>(?:(?:https?:)?//)?(?:www\.)?20min\.ch/videoplayer/videoplayer\.html\?.*?\bvideoId@\d+.*?)\1',
webpage,
)
]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
"http://api.20min.ch/video/%s/show" % video_id, video_id
)["content"]
title = video["title"]
formats = [
{
"format_id": format_id,
"url": "http://podcast.20min-tv.ch/podcast/20min/%s%s.mp4"
% (video_id, p),
"quality": quality,
}
for quality, (format_id, p) in enumerate([("sd", ""), ("hd", "h")])
]
self._sort_formats(formats)
description = video.get("lead")
thumbnail = video.get("thumbnail")
def extract_count(kind):
return try_get(
video, lambda x: int_or_none(x["communityobject"]["thumbs_%s" % kind])
)
like_count = extract_count("up")
dislike_count = extract_count("down")
return {
"id": video_id,
"title": title,
"description": description,
"thumbnail": thumbnail,
"like_count": like_count,
"dislike_count": dislike_count,
"formats": formats,
}
|
the-stack_0_22710 | from datetime import datetime, timedelta
from django.conf import settings
from celery.schedules import crontab
from celery.task import periodic_task, task
from celery.utils.log import get_task_logger
from corehq.util.metrics import metrics_gauge_task
from dimagi.utils.couch import get_redis_lock
from dimagi.utils.couch.undo import DELETED_SUFFIX
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.motech.models import RequestLog
from corehq.motech.repeaters.const import (
CHECK_REPEATERS_INTERVAL,
CHECK_REPEATERS_KEY,
RECORD_FAILURE_STATE,
RECORD_PENDING_STATE,
)
from corehq.motech.repeaters.dbaccessors import (
get_overdue_repeat_record_count,
iterate_repeat_records,
)
from corehq.privileges import DATA_FORWARDING, ZAPIER_INTEGRATION
from corehq.util.datadog.gauges import (
datadog_bucket_timer,
datadog_counter,
)
from corehq.util.datadog.utils import make_buckets_from_timedeltas
from corehq.util.soft_assert import soft_assert
_check_repeaters_buckets = make_buckets_from_timedeltas(
timedelta(seconds=10),
timedelta(minutes=1),
timedelta(minutes=5),
timedelta(hours=1),
timedelta(hours=5),
timedelta(hours=10),
)
_soft_assert = soft_assert(to='@'.join(('nhooper', 'dimagi.com')))
logging = get_task_logger(__name__)
@periodic_task(
run_every=crontab(day_of_month=27),
queue=settings.CELERY_PERIODIC_QUEUE,
)
def clean_logs():
"""
Drop MOTECH logs older than 90 days.
Runs on the 27th of every month.
"""
ninety_days_ago = datetime.now() - timedelta(days=90)
RequestLog.objects.filter(timestamp__lt=ninety_days_ago).delete()
@periodic_task(
run_every=CHECK_REPEATERS_INTERVAL,
queue=settings.CELERY_PERIODIC_QUEUE,
)
def check_repeaters():
start = datetime.utcnow()
six_hours_sec = 6 * 60 * 60
six_hours_later = start + timedelta(seconds=six_hours_sec)
# Long timeout to allow all waiting repeat records to be iterated
check_repeater_lock = get_redis_lock(
CHECK_REPEATERS_KEY,
timeout=six_hours_sec,
name=CHECK_REPEATERS_KEY,
)
if not check_repeater_lock.acquire(blocking=False):
datadog_counter("commcare.repeaters.check.locked_out")
return
try:
with datadog_bucket_timer(
"commcare.repeaters.check.processing",
tags=[],
timing_buckets=_check_repeaters_buckets,
):
for record in iterate_repeat_records(start):
if datetime.utcnow() > six_hours_later:
_soft_assert(False, "I've been iterating repeat records for six hours. I quit!")
break
datadog_counter("commcare.repeaters.check.attempt_forward")
record.attempt_forward_now()
finally:
check_repeater_lock.release()
@task(serializer='pickle', queue=settings.CELERY_REPEAT_RECORD_QUEUE)
def process_repeat_record(repeat_record):
# A RepeatRecord should ideally never get into this state, as the
# domain_has_privilege check is also triggered in the create_repeat_records
# in signals.py. But if it gets here, forcefully cancel the RepeatRecord.
# todo reconcile ZAPIER_INTEGRATION and DATA_FORWARDING
# they each do two separate things and are priced differently,
# but use the same infrastructure
if not (domain_has_privilege(repeat_record.domain, ZAPIER_INTEGRATION)
or domain_has_privilege(repeat_record.domain, DATA_FORWARDING)):
repeat_record.cancel()
repeat_record.save()
if (
repeat_record.state == RECORD_FAILURE_STATE and
repeat_record.overall_tries >= repeat_record.max_possible_tries
):
repeat_record.cancel()
repeat_record.save()
return
if repeat_record.cancelled:
return
repeater = repeat_record.repeater
if not repeater:
repeat_record.cancel()
repeat_record.save()
return
try:
if repeater.paused:
# postpone repeat record by 1 day so that these don't get picked in each cycle and
# thus clogging the queue with repeat records with paused repeater
repeat_record.postpone_by(timedelta(days=1))
return
if repeater.doc_type.endswith(DELETED_SUFFIX):
if not repeat_record.doc_type.endswith(DELETED_SUFFIX):
repeat_record.doc_type += DELETED_SUFFIX
repeat_record.save()
elif repeat_record.state == RECORD_PENDING_STATE or repeat_record.state == RECORD_FAILURE_STATE:
repeat_record.fire()
except Exception:
logging.exception('Failed to process repeat record: {}'.format(repeat_record._id))
repeaters_overdue = metrics_gauge_task(
'commcare.repeaters.overdue',
get_overdue_repeat_record_count,
run_every=crontab() # every minute
)
|
the-stack_0_22711 | import json
import logging
import logging.config
import os
import yaml
__author__ = "Dimi Balaouras"
__copyright__ = "Copyright 2016, Dimi Balaouras - Stek.io"
__license__ = "Apache License 2.0, see LICENSE for more details."
def load_config(config_files_csv):
"""
Loads configuration from multiple files provided via a comma separated set of files.
    The order of loading is preserved: later files override properties set by earlier files.
:param config_files_csv: A comma separated list of config files
:return: A dictionary with the loaded configuration
"""
# Initialize the config file
config = {}
# Get a list of files
config_files = [config_file.strip() for config_file in config_files_csv.split(',')]
# Update the config dictionary
for config_file in config_files:
add_config = load_config_file(config_file)
if add_config:
config = merge_dicts(config, add_config)
return config
def load_config_file(config_file):
"""
Loads a config file formatted either in yaml or json
:param config_file: The path to the config file
:return: The configuration dictionary
"""
# Initialize the return object
config = None
# Extract the file extension
filename, extension = os.path.splitext(config_file)
# Pick the right deserializer
try:
if not os.path.isfile(config_file):
raise IOError("%s is not a file." % config_file)
deserializer = {
".yaml": yaml,
".json": json
}[extension]
        # Deserialize the config, closing the file handle when done
        with open(config_file) as config_fh:
            config = deserializer.load(config_fh)
except KeyError:
# TODO: use a logger her
print("Invalid configuration file type: %s" % extension)
return config
def get_logger(global_config, module_name, default_level=logging.DEBUG):
"""
Retrieves a logger from the global config if one exists for the given module; otherwise, it creates one.
:param global_config:
:param module_name:
:param default_level: The default logging level
:return:
"""
# Try using the config first
if "python-logging" in global_config:
logging.config.dictConfig(global_config["python-logging"])
logger = logging.getLogger(module_name)
else:
# Create a custom logger
logger = logging.getLogger(module_name)
logger.setLevel(default_level)
ch = logging.StreamHandler()
ch.setLevel(default_level)
# Create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Add formatter to ch
ch.setFormatter(formatter)
# Add a logger
logger.addHandler(ch)
return logger
def merge_dicts(origin, patch):
"""
Merge two dictionaries, w/o overwriting missing keys
:param origin: The origin dictionary
:param patch: The dictionary containing the diffs
:return: The result of the merge: a new dictionary
"""
for key in patch:
if key in origin:
if isinstance(origin[key], dict) and isinstance(patch[key], dict):
merge_dicts(origin[key], patch[key])
else:
origin[key] = patch[key]
else:
origin[key] = patch[key]
return origin
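# --- Illustrative usage sketch (added comment, not part of the original
# module): merge_dicts performs a recursive merge, mutating and returning the
# origin dictionary. The keys below are hypothetical.
def _example_merge_dicts():
    origin = {"db": {"host": "localhost", "port": 5432}, "debug": False}
    overrides = {"db": {"port": 6432}, "debug": True}
    merged = merge_dicts(origin, overrides)
    assert merged == {"db": {"host": "localhost", "port": 6432}, "debug": True}
    return merged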
class Struct:
"""
Class used as a convertor between objects and dictionaries
"""
def __init__(self, **entries):
self.__dict__.update(entries)
def dict_to_object(dict):
"""
Convert a dictionary to object
:param dict: The dictionary to convert
:return: the converted object
"""
return Struct(**dict)
def print_logo():
"""
Prints WebHawk's Logo in Ascii Art
"""
logo = """
=====================================================
_ __ __ __ __ __
| | /| / /__ / / / // /__ __ __/ /__
| |/ |/ / -_) _ \/ _ / _ \`/ |/|/ / '_/
|__/|__/\__/_.__/_//_/\_,_/|__,__/_/\_\\
WebHook MicroFramework
=====================================================
"""
print(logo)
|
the-stack_0_22712 | """ Tests for exerciser module
"""
from os.path import dirname, join as pjoin
from textwrap import dedent
from rnbgrader import load, loads
from rnbgrader.nbparser import RNotebook
from rmdex.exerciser import (make_check_exercise, make_exercise, make_solution,
get_marks, check_marks, check_chunk_marks,
question_chunks, MARK_RE, template2exercise,
template2solution, MarkupError, read_utf8)
import pytest
HERE = dirname(__file__)
SOLUTION_FNAME = pjoin(HERE, 'solution.Rmd')
EXERCISE_FNAME = pjoin(HERE, 'exercise.Rmd')
SOLUTION_STR = read_utf8(SOLUTION_FNAME)
EXERCISE_STR = read_utf8(EXERCISE_FNAME)
TEMPLATE_FNAME = pjoin(HERE, 'template.Rmd')
T_EXERCISE_FNAME = pjoin(HERE, 'template_exercise.Rmd')
T_SOLUTION_FNAME = pjoin(HERE, 'template_solution.Rmd')
TEMPLATE_STR = read_utf8(TEMPLATE_FNAME)
T_EXERCISE_STR = read_utf8(T_EXERCISE_FNAME)
T_SOLUTION_STR = read_utf8(T_SOLUTION_FNAME)
FMFT_FNAME = pjoin(HERE, 'fix_my_fors_template.Rmd')
FMFE_FNAME = pjoin(HERE, 'fix_my_fors.Rmd')
FMFS_FNAME = pjoin(HERE, 'fix_my_fors_solution.Rmd')
FMFT_STR = read_utf8(FMFT_FNAME)
FMFE_STR = read_utf8(FMFE_FNAME)
FMFS_STR = read_utf8(FMFS_FNAME)
def test_make_check_exercise():
assert make_check_exercise(SOLUTION_STR) == EXERCISE_STR
def test_make_exercise():
nb = load(SOLUTION_FNAME)
check_marks(nb.nb_str)
exercise = make_exercise(SOLUTION_STR)
assert exercise == EXERCISE_STR
check_marks(exercise)
check_chunk_marks(question_chunks(loads(exercise)))
nb = load(TEMPLATE_FNAME)
exercise = make_exercise(TEMPLATE_STR)
assert exercise == T_EXERCISE_STR
nb = load(FMFT_FNAME)
exercise = make_exercise(FMFT_STR)
assert exercise == FMFE_STR
def test_make_solution():
# No changes for the basic example (no #<- lines)
solution = make_solution(SOLUTION_STR)
assert solution == SOLUTION_STR
# The Python example does have #<- lines.
solution = make_solution(TEMPLATE_STR)
assert solution == T_SOLUTION_STR
solution = make_solution(FMFT_STR)
assert solution == FMFS_STR
def test_question_chunks():
nb = load(SOLUTION_FNAME)
chunks = question_chunks(nb)
assert len(chunks) == 15
nb = loads("""\
Some text
```{python}
# Not question
a = 1
```
More text.
```{r}
# Still not question
b <- 2
```
Another line of text.
```{r}
#- This is a question.
c <- 3
```
Text.
Continues.
```{python}
# This is a question too.
#<- print("hello")
```
Typing is easy but boring.
```{r}
# This is not question, again.
d <- 4
```
```{python}
#- This is a question, again.
e <- 4
```
""")
chunks = question_chunks(nb)
assert len(chunks) == 3
assert [c.code for c in chunks] == [
'#- This is a question.\nc <- 3\n',
'# This is a question too.\n#<- print("hello")\n',
'#- This is a question, again.\ne <- 4\n']
assert [c.language for c in chunks] == ['r', 'python', 'python']
def test_null_solution():
# A notebook with no question cells doesn't result in an error.
nb = RNotebook.from_string('')
check_marks(nb.nb_str, 0)
def test_check_marks():
nb = load(SOLUTION_FNAME)
q_chunks = question_chunks(nb)
check_chunk_marks(q_chunks)
def test_marks_re():
assert MARK_RE.match(
'#- 5 marks / 100 (total 95 so far).').groups() == ('5', '100', '95')
def test_marks():
assert get_marks('#- 5 marks / 100 (total 95 so far).') == (5, 100, 95)
def test_template2exercise():
t2e = template2exercise
assert t2e('#- foo\n#- bar') == '#- foo\n#- bar\n'
assert t2e('#- foo\na = 1\n#- bar') == '#- foo\n#- bar\n'
assert t2e('#- foo\na = 1\n# bar') == '#- foo\n'
assert t2e('#- foo\n#<- a = ?\n# bar') == '#- foo\na = ?\n'
assert t2e('#- foo\n#<- a = ?\n#- bar') == '#- foo\na = ?\n#- bar\n'
assert (t2e('#- foo\n #<- a = ?\n#- bar') ==
'#- foo\n a = ?\n#- bar\n')
with pytest.raises(MarkupError): # No space after #<-
t2e('#- foo\n#<-a = ?\n# bar\n')
# With space suffix, marker adds a blank line to the solution.
assert t2e('#- foo\n#<- \n# bar') == '#- foo\n\n'
with pytest.raises(MarkupError): # No closing #<-
t2e('#- foo\n#<-\n# bar\n')
# With a closing marker - include solution code in exercise.
assert (t2e('#- foo\n#<-\n# bar\na = 1\n#<-\n') ==
'#- foo\n# bar\na = 1\n')
# Check stuff after both chunk still gets stripped.
assert (t2e(
'#- foo\n#<-\n# bar\na = 1\n#<-\nb = 2\n') ==
'#- foo\n# bar\na = 1\n')
# And that one-line #<- still works.
assert (t2e(
'#- foo\n#<-\n# bar\na = 1\n#<-\n#<- b = 2\n') ==
'#- foo\n# bar\na = 1\nb = 2\n')
# Test a second chunk.
assert (t2e(
'#- foo\n#<-\n# bar\na = 1\n#<-\nb = 2\n'
'#<-\nc = 2\nd=3\n#<-\ne = 4\n') ==
('#- foo\n# bar\na = 1\n'
'c = 2\nd=3\n'))
# Test both-line
assert t2e('#- foo\n#<--\na = 1\n#- bar') == '#- foo\na = 1\n#- bar\n'
assert (t2e('#- foo\n#<--\na = 1\n#- bar\n#<--\n# baz') ==
'#- foo\na = 1\n#- bar\n# baz\n')
# Test both-line error
with pytest.raises(MarkupError):
t2e('#- foo\n#<--\na = 1\n#- bar\n#<--')
# Mix both-line and both-section
assert (t2e(
'#- foo\n#<--\nq = 99\n#<-\n# bar\na = 1\n#<-\nb = 2\n'
'#<-\nc = 2\nd=3\n#<-\ne = 4\n') ==
'#- foo\nq = 99\n# bar\na = 1\nc = 2\nd=3\n')
# both-line ignored inside both-section
assert (t2e(
'#- foo\n#<-\n#<--\n# bar\na = 1\n#<-\nb = 2\n'
'#<-\nc = 2\nd=3\n#<-\ne = 4\n') ==
'#- foo\n#<--\n# bar\na = 1\nc = 2\nd=3\n')
# both-section ignored after both-line
with pytest.raises(MarkupError):
t2e('#- foo\n#<--#<-\n# bar\na = 1\n<-\nb = 2\n'
'#<-\nc = 2\nd=3\n#<-\ne = 4\n')
assert (t2e(
'#- foo\n#<--\n#<-\n# bar\na = 1\nb = 2\n'
'#<-\nc = 2\nd=3\n#<-\ne = 4\n') ==
'#- foo\n#<-\nc = 2\nd=3\n')
# Both to end marker - everything included
assert (t2e(
'#- foo\n#<->\n#<- ...\n# bar\na = 1\nb = 2\n') ==
'#- foo\n#<- ...\n# bar\na = 1\nb = 2\n')
def test_readme_example():
inp_str = dedent("""
#- Here you will do a simple assignment.
#- More description of the assignment.
#- 5 marks / 100 (total 10 marks so far).
# This comment gets stripped from the exercise version of the cell.
# Also this one. The next line adds the text after #<- to the exercise.
#<- my_variable = ...
# This comment and the next code line do not appear in the exercise.
my_variable = 10
#<-
# This comment does appear in the exercise, as well as the following code.
another_variable = 11
print("Something")
#<-
#<--
# This line follows the both-line marker, and appears in the exercise.
# This line does not.
# Starting at the previous line, we resume normal service. This and
# the next line of comments do not appear in the exercise.
#
# The following marker causes everything to the end of the cell/chunk
# to appear in both exercise and solution:
#<->
print('This line appears in the exercise and solution')
print('as does this line')
""")
exp_str = dedent("""
#- Here you will do a simple assignment.
#- More description of the assignment.
#- 5 marks / 100 (total 10 marks so far).
my_variable = ...
# This comment does appear in the exercise, as well as the following code.
another_variable = 11
print("Something")
# This line follows the both-line marker, and appears in the exercise.
print('This line appears in the exercise and solution')
print('as does this line')
""")
assert template2exercise(inp_str).strip() == exp_str.strip()
exp_soln_str = dedent("""
#- Here you will do a simple assignment.
#- More description of the assignment.
#- 5 marks / 100 (total 10 marks so far).
# This comment gets stripped from the exercise version of the cell.
# Also this one. The next line adds the text after #<- to the exercise.
# This comment and the next code line do not appear in the exercise.
my_variable = 10
# This comment does appear in the exercise, as well as the following code.
another_variable = 11
print("Something")
# This line follows the both-line marker, and appears in the exercise.
# This line does not.
# Starting at the previous line, we resume normal service. This and
# the next line of comments do not appear in the exercise.
#
# The following marker causes everything to the end of the cell/chunk
# to appear in both exercise and solution:
print('This line appears in the exercise and solution')
print('as does this line')
""")
assert template2solution(inp_str).strip() == exp_soln_str.strip()
|
the-stack_0_22715 | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Jens Krüger <[email protected]>
#
# *****************************************************************************
name = 'test_puma setup'
includes = ['stdsystem']
devices = dict(
phi = device('nicos_mlz.puma.devices.comb_ax.CombAxis',
motor = device('nicos.devices.generic.VirtualMotor',
unit = 'deg',
abslimits = (-5, 116.1),
),
obs = [],
precision = 0.005,
offset = 0.,
maxtries = 5,
loopdelay = 0.02,
fix_ax = device('nicos.devices.generic.VirtualMotor',
unit = 'deg',
abslimits = (-15., 355.),
),
iscomb = False,
),
af = device('nicos_mlz.puma.devices.focus.FocusAxis',
motor = device('nicos.devices.generic.VirtualMotor',
unit = 'deg',
abslimits = (-55, 55),
curvalue = 4.92,
),
obs = [],
uplimit = 5,
lowlimit = -5,
flatpos = 4.92,
startpos = 4,
precision = 0.25,
maxtries = 15,
loopdelay = 0.02,
),
polyswitch = device('nicos.devices.generic.ManualSwitch',
states = [0, 1],
),
mtt = device('nicos_mlz.puma.devices.mtt.MttAxis',
motor = device('nicos.devices.generic.VirtualMotor',
unit = 'deg',
abslimits = (-110.1, 0),
),
io_flag = 'polyswitch',
polyswitch = 'polyswitch',
obs = [],
precision = 0.011,
offset = 0.0,
maxtries = 1,
backlash = -0.1,
loopdelay = 0.02,
polysleep = 0.02,
),
ath = device('nicos.devices.generic.VirtualMotor',
unit = 'deg',
abslimits = (0, 60),
precision = 0.05,
),
cad = device('nicos_mlz.puma.devices.coupledaxis.PumaCoupledAxis',
tt = device('nicos.devices.generic.VirtualMotor',
unit = 'deg',
abslimits = (-117, 117),
precision = 0.05,
),
th = 'ath',
fmtstr = '%.3f',
unit = 'deg',
precision = .1,
),
rd6_cad = device('nicos_mlz.puma.devices.StackedAxis',
bottom = 'cad',
top = 'rd6',
),
)
|
the-stack_0_22716 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 2018
@author: toshiki.ishikawa
"""
import os
import sys
import gc
import utils
import numpy as np
import pandas as pd
from tqdm import tqdm
# from datetime import datetime, date
import datetime
from sklearn.preprocessing import LabelEncoder
from multiprocessing import cpu_count, Pool
utils.start(__file__)
#==============================================================================
NTHREAD = cpu_count()
PREF = 'f231'
SUMMARY = 30
KEY = 'card_id'
stats = ['sum']
# =============================================================================
#
# =============================================================================
PATH = os.path.join('..', 'data')
new_merchant_transactions = pd.read_csv(os.path.join(PATH, 'new_merchant_transactions.csv'))
# new_merchant_transactions['purchase_amount'] = np.log1p(new_merchant_transactions['purchase_amount'] - new_merchant_transactions['purchase_amount'].min())
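# NOTE (added): the division/offset below appears to be the community-derived
# de-anonymization of the Elo `purchase_amount` column back to an approximate
# currency value; the constants are empirical, not documented by the data provider.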
new_merchant_transactions['purchase_amount'] = np.round(new_merchant_transactions['purchase_amount'] / 0.00150265118 + 497.06,2)
# =============================================================================
#
# =============================================================================
def aggregate(args):
prefix, index, columns, values = args['prefix'], args['index'], args['columns'], args['values']
pt = new_merchant_transactions.pivot_table(
index=index,
columns=columns,
values=values,
aggfunc=stats).reset_index()
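    # The pivot yields a 3-level column MultiIndex (aggfunc, value, month_lag),
    # e.g. ('sum', 'purchase_amount', 1); it is flattened below into names such
    # as 'new_sum_purchase_amount_1' (illustrative; exact order depends on pandas).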
pt.columns = [f'{c[0]}_{c[1]}_{c[2]}'.strip('_') for c in pt.columns]
pt = pt.add_prefix(prefix)
pt = pt.rename(columns={prefix+KEY: KEY})
pt.to_pickle(f'../feature/{PREF}.pkl')
return
# =============================================================================
#
# =============================================================================
if __name__ == '__main__':
argss = [
{
'prefix': 'new_',
'index': 'card_id',
'columns': 'month_lag',
'values': ['purchase_amount']
}
]
pool = Pool(NTHREAD)
callback = pool.map(aggregate, argss)
pool.close()
#==============================================================================
utils.end(__file__)
|
the-stack_0_22717 | """Core managing the usage of preprocessors for preprocessing.
Usage example:
# Create a core for preprocessing, with PCA as a dimensional reduction
    # algorithm, and attach an identity preprocessor
core = PreprocessingCore(ReductionAlgorithm.PCA, 10)
core.attach(PreprocessorsTypes.IDENTITY)
# Process data
preprocessed_data = core.preprocess([[1, 2, 0, 4], [0, 1, 3, 6]])
"""
import typing
import joblib
import numpy as np
import pandas
import scipy.sparse
from modules.configuration.folder_structure import Files
from modules.configuration.parameters import Packages
from modules.features.types import ExtractorsTypes
from modules.preprocessing.preprocessors import (Counter, CountVectorizer,
GroupCounter, Identity,
NGrams, Preprocessor,
SameLengthImputer)
from modules.preprocessing.types import (Charset, PreprocessorsTypes,
ReductionAlgorithm)
from modules.utils.configuration_manager import ConfigurationManager
from modules.utils.types import ConfigurationSpaces
from sklearn.base import BaseEstimator
from sklearn.decomposition import NMF, PCA, FastICA
from sklearn.preprocessing import Binarizer, KBinsDiscretizer, MinMaxScaler
class PreprocessingCore:
"""Class for preprocessing data by applying preprocessors."""
_is_loaded: bool
_extractors_config: typing.Any
_preprocessors_config: typing.Any
_preprocessors: typing.List[Preprocessor]
_columns_to_be_filled: list
_last_scalar_model: MinMaxScaler
_preprocessors_output_lengths: list
_reduction_model: BaseEstimator
_reduction_algorithm: str
_reduction_components_count: float
def __init__(self, algorithm: ReductionAlgorithm,
components_count: int) -> None:
"""Initializes the PreprocessingCore instance.
Args:
algorithm (ReductionAlgorithm): Dimensionality reduction algorithm
components_count (int): Number of components to return. If the
algorithm is PCA, then this parameter can be the minimum
variance of the returned components.
"""
configuration = ConfigurationManager()
self._extractors_config = configuration.get_space(
ConfigurationSpaces.FEATURES)
self._preprocessors_config = configuration.get_space(
ConfigurationSpaces.PREPROCESSING)
self._reduction_algorithm = algorithm
self._reduction_components_count = components_count
self._is_loaded = False
self._preprocessors = []
self._columns_to_be_filled = []
self._last_scalar_model = None
self._preprocessors_output_lengths = []
self._reduction_model = None
def attach(self,
preprocessor_type: PreprocessorsTypes,
parent_extractor_type: ExtractorsTypes = None) -> None:
"""Attaches a preprocessor to the core.
Args:
preprocessor_type (PreprocessorsTypes): Type of the preprocessor
parent_extractor_type (ExtractorsTypes): Type of the parent
extractor. Defaults to None, in case of a preprocessor that
does not require special arguments.
"""
# Check what arguments are needed for the current preprocessor
arguments = {}
if preprocessor_type == PreprocessorsTypes.N_GRAMS:
charset = Charset[self._preprocessors_config["ngrams"]
["valid_charset"]]
arguments = {
"n":
self._preprocessors_config["ngrams"]["n"],
"to_lowercase":
self._preprocessors_config["ngrams"]["to_lowercase"],
"valid_charset":
charset
}
elif preprocessor_type == PreprocessorsTypes.GROUP_COUNTER:
if (parent_extractor_type in [
ExtractorsTypes.STATIC_OPCODES,
ExtractorsTypes.DYNAMIC_OPCODES
]):
arguments = {
"categories":
self._extractors_config["opcodes"]["categories"],
"allow_multiple_categories":
self._extractors_config["opcodes"]
["allow_multiple_categories"],
"verbose":
self._extractors_config["opcodes"]["verbose"],
"min_ignored_percent":
self._extractors_config["opcodes"]["min_ignored_percent"]
}
elif (parent_extractor_type in [
ExtractorsTypes.STATIC_APIS, ExtractorsTypes.DYNAMIC_APIS
]):
arguments = {
"categories":
self._extractors_config["apis"]["categories"],
"allow_multiple_categories":
self._extractors_config["apis"]
["allow_multiple_categories"],
"verbose":
self._extractors_config["apis"]["verbose"],
"min_ignored_percent":
self._extractors_config["apis"]["min_ignored_percent"]
}
# Create the preprocessor
preprocessor = None
if preprocessor_type == PreprocessorsTypes.IDENTITY:
preprocessor = Identity()
elif preprocessor_type == PreprocessorsTypes.BINARIZER:
preprocessor = Binarizer()
elif preprocessor_type == PreprocessorsTypes.K_BINS_DISCRETIZER:
preprocessor = KBinsDiscretizer()
# Save this column in case of imputation needs
self._columns_to_be_filled.append(len(self._preprocessors))
elif preprocessor_type == PreprocessorsTypes.COUNTER:
preprocessor = Counter()
elif preprocessor_type == PreprocessorsTypes.COUNT_VECTORIZER:
preprocessor = CountVectorizer()
elif preprocessor_type == PreprocessorsTypes.N_GRAMS:
preprocessor = NGrams(**arguments)
elif preprocessor_type == PreprocessorsTypes.GROUP_COUNTER:
preprocessor = GroupCounter(**arguments)
elif preprocessor_type == PreprocessorsTypes.SAME_LENGTH_IMPUTER:
preprocessor = SameLengthImputer()
self._preprocessors.append(preprocessor)
def _impute_values(self,
matrix: np.array,
desired_length: int = 0) -> np.array:
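        """Imputes values so that every cell has a consistent vector length.

        Added docstring: with the default ``desired_length`` of 0, only the
        columns registered in ``self._columns_to_be_filled`` are imputed,
        column by column; otherwise all sample vectors are brought to
        ``desired_length``.
        """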
if desired_length == 0:
imputed_features_df = pandas.DataFrame(matrix)
for column_id in self._columns_to_be_filled:
# Apply the imputer to each column
column = imputed_features_df.iloc[:, column_id].values
imputed_values = SameLengthImputer().fit_transform(column)
# Insert the imputed value into the cell
for index, value in enumerate(imputed_values):
imputed_features_df.at[index, column_id] = list(value)
return imputed_features_df.values
# If the desired length is set, then ensure that each vector has that
# length
return SameLengthImputer(desired_length).fit_transform(matrix)
def preprocess(self, matrix: np.array) -> typing.Tuple[np.array, np.array]:
"""Preprocesses the given features.
Args:
matrix (np.array): Raw features
Returns:
typing.Tuple[np.array, np.array]: Tuple of preprocessed and reduced
features
"""
# Impute values for some preprocessors
matrix = self._impute_values(matrix)
# Apply the preprocessors manually
processed_features = []
for index, preprocessor in enumerate(self._preprocessors):
features = [line[index] for line in matrix]
if self._is_loaded:
try:
current_preprocessed = preprocessor.transform(features)
except ValueError:
# If there is a difference between features count, pad the
# vectors
features = self._impute_values(features,
preprocessor.n_features_in_)
current_preprocessed = preprocessor.transform(features)
else:
current_preprocessed = preprocessor.fit_transform(features)
processed_features.append(current_preprocessed)
# Transpose the matrix of features to let each line represent a sample
processed_features = list(map(list, zip(*processed_features)))
# Drop the array and sparse matrix representations
converted_features = []
length_already_stored = bool(self._preprocessors_output_lengths)
for sample_id, _ in enumerate(processed_features):
current_features = []
for feature_id in range(len(processed_features[sample_id])):
feature = processed_features[sample_id][feature_id]
if isinstance(feature, scipy.sparse.csr.csr_matrix):
current_features.extend(feature.toarray()[0])
elif isinstance(feature, list):
current_features.extend(feature)
else:
current_features.append(feature)
# Save the lengths if they are not already set
if not length_already_stored:
if isinstance(feature, scipy.sparse.csr.csr_matrix):
length = feature.shape[1]
elif isinstance(feature, list):
length = len(feature)
else:
length = 1
self._preprocessors_output_lengths.append(length)
converted_features.append(current_features)
# Apply a scalar
if self._is_loaded:
converted_features = self._last_scalar_model.transform(
converted_features)
else:
# If the core is not loaded from dumped models, then create a new
# scalar, fit it and transform the data
self._last_scalar_model = MinMaxScaler()
converted_features = self._last_scalar_model.fit_transform(
converted_features)
# Create a model if one is not loaded
if not self._is_loaded:
if self._reduction_algorithm == ReductionAlgorithm.PCA:
self._reduction_model = PCA(
n_components=self._reduction_components_count)
elif self._reduction_algorithm == ReductionAlgorithm.FAST_ICA:
self._reduction_model = FastICA(
n_components=self._reduction_components_count)
elif self._reduction_algorithm == ReductionAlgorithm.NMF:
self._reduction_model = NMF(
n_components=self._reduction_components_count)
reduced_features = self._reduction_model.fit_transform(
converted_features)
else:
reduced_features = self._reduction_model.transform(
converted_features)
return (converted_features, reduced_features)
def dump(self, model_name: str) -> None:
"""Dumps the preprocessors and the scalar to files.
Args:
model_name (str): Name of the trained model
"""
# Dump each preprocessor
for index, preprocessor in enumerate(self._preprocessors):
model_filename = Files.MODEL_PREPROCESSOR_MODEL_FMT.format(
model_name, index)
joblib.dump(preprocessor, model_filename)
# Dump the scalar
filename = Files.MODEL_PREPROCESSOR_MODEL_FMT.format(
model_name, Packages.Models.Training.SCALAR_MODEL_NAME)
joblib.dump(self._last_scalar_model, filename)
# Dump the dimensionality reduction model
reduction_model_path = Files.MODEL_REDUCTION_MODEL_FMT.format(
model_name)
joblib.dump(self._reduction_model, reduction_model_path)
def load(self, model_name: str, preprocessors_count: int) -> None:
"""Loads the preprocessor and the scalar from a file.
Args:
model_name (str): Name of the trained model
preprocessors_count (int): Number of saved preprocessors
"""
# Load each preprocessor
for preprocessor_id in range(preprocessors_count):
model_filename = Files.MODEL_PREPROCESSOR_MODEL_FMT.format(
model_name, preprocessor_id)
self._preprocessors.append(joblib.load(model_filename))
# Load the scalar
scalar_model_filename = Files.MODEL_PREPROCESSOR_MODEL_FMT.format(
model_name, Packages.Models.Training.SCALAR_MODEL_NAME)
self._last_scalar_model = joblib.load(scalar_model_filename)
# Load the dimensionality reduction model
reduction_model_path = Files.MODEL_REDUCTION_MODEL_FMT.format(
model_name)
self._reduction_model = joblib.load(reduction_model_path)
self._is_loaded = True
def split_preprocessed_features(
self, features: np.array) -> typing.List[typing.List]:
"""Group the preprocessed features by their parent preprocessor.
Args:
features (np.array): Preprocessed features
Returns:
typing.List[typing.List]: Grouped features
"""
if not self._preprocessors_output_lengths:
return None
current_position = 0
returned_list = []
for length in self._preprocessors_output_lengths:
returned_list.append(features[current_position:current_position
+ length])
current_position += length
return returned_list
|
the-stack_0_22718 | #!python3
import argparse
import pandas as pd
import numpy as np
from collections import Counter
from stat_simulated import open_ali_file
import os
from codons import codon_table
def at_gc(filepath, third_pos):
species, alignment = open_ali_file(filepath)
size = len(alignment[0])
if third_pos:
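        # Added note: keep only the third codon position of codons whose amino
        # acid is identical across all (non-gap) sequences, i.e. roughly the
        # synonymous sites implied by the "_4F" suffix of the output file.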
ali_array = np.array([list(s) for s in alignment])
alignment = []
for site in range(int(ali_array.shape[1] / 3)):
set_aa = set([codon_table["".join(c)] for c in ali_array[:, 3 * site:3 * site + 3]])
set_aa.discard("-")
if len(set_aa) == 1:
alignment.append("".join(ali_array[:, (3 * site) + 2]))
at = 0.0
gc = 0.0
for seq in alignment:
count = Counter(seq)
at += count["A"] + count["T"]
gc += count["C"] + count["G"]
return at / gc, size
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', required=False, default='../OrthoMam/singlegene_alignments', type=str,
dest="input")
parser.add_argument('-o', '--only_third_pos', required=False, default=False, type=bool, dest="only_third_pos")
args = parser.parse_args()
files = os.listdir(args.input)
ali_at_gc = {}
for n, file in enumerate(files):
ali_at_gc[file] = at_gc(args.input + "/" + file, args.only_third_pos)
print("{0:.2f}% completed ({1})".format(100 * n / len(files), file))
sorted_ali = list(sorted(ali_at_gc, key=lambda k: ali_at_gc[k][0], reverse=True))
df = pd.DataFrame({"CDS": sorted_ali, "AT/GC": [ali_at_gc[k][0] for k in sorted_ali],
"Sites": [ali_at_gc[k][1] for k in sorted_ali]})
df.to_csv("../OrthoMam/AT_GC{0}.tsv".format("_4F" if args.only_third_pos else ""), index=False, sep="\t")
|
the-stack_0_22719 | #
# Created on Mon Jan 31 2022 10:02:26 AM
# Author: Ashwin De Silva ([email protected])
# Objective: Kernel Density Network
#
# import standard libraries
from .base import KernelDensityGraph
from sklearn.utils.validation import check_array, check_X_y
import numpy as np
from scipy.stats import multivariate_normal
from sklearn.covariance import LedoitWolf
class kdn(KernelDensityGraph):
def __init__(
self,
network,
weighting=True,
k=1.0,
T=1e-3,
h=0.33,
verbose=True,
):
r"""[summary]
Parameters
----------
network : tf.keras.Model()
trained neural network model
weighting : bool, optional
use weighting if true, by default True
k : float, optional
bias control parameter, by default 1
T : float, optional
neighborhood size control parameter, by default 1e-3
h : float, optional
variational parameters of the weighting, by default 0.33
verbose : bool, optional
print internal data, by default True
"""
super().__init__()
self.polytope_means = {}
self.polytope_covs = {}
self.polytope_samples = {}
self.class_priors = {}
self.network = network
self.weighting = weighting
self.k = k
self.h = h
self.T = T
self.bias = {}
self.verbose = verbose
# total number of layers in the NN
self.total_layers = len(self.network.layers)
# get the sizes of each layer
self.network_shape = []
for layer in network.layers:
self.network_shape.append(layer.output_shape[-1])
# total number of units in the network (up to the penultimate layer)
self.num_neurons = sum(self.network_shape) - self.network_shape[-1]
# get the weights and biases of the trained MLP
self.weights = {}
self.biases = {}
for i in range(len(self.network.layers)):
weight, bias = self.network.layers[i].get_weights()
self.weights[i], self.biases[i] = weight, bias.reshape(1, -1)
def _get_polytope_ids(self, X):
r"""
Obtain the polytope ID of each input sample
Parameters
----------
X : ndarray
Input data matrix.
"""
polytope_ids_tmp = []
last_activations = X
# Iterate through neural network manually, getting node activations at each step
for l in range(self.total_layers):
weights, bias = self.weights[l], self.biases[l]
preactivation = np.matmul(last_activations, weights) + bias
if l == self.total_layers - 1:
binary_preactivation = (preactivation > 0.5).astype("int")
else:
binary_preactivation = (preactivation > 0).astype("int")
if (
l < self.total_layers - 1
): # record the activation patterns only upto the penultimate layer
polytope_ids_tmp.append(binary_preactivation)
last_activations = preactivation * binary_preactivation
# Concatenate all activations for given observation
polytope_ids_tmp = np.concatenate(polytope_ids_tmp, axis=1)
polytope_ids = [
np.tensordot(
polytope_ids_tmp,
2 ** np.arange(0, np.shape(polytope_ids_tmp)[1]),
axes=1,
)
]
self.num_neurons = polytope_ids_tmp.shape[
1
] # get the number of total FC neurons under consideration
return polytope_ids[0]
def _get_activation_pattern(self, polytope_id):
r"""get the ReLU activation pattern given the polytope ID
Parameters
----------
polytope_id : int
polytope identifier
Returns
-------
ndarray
ReLU activation pattern (binary) corresponding to the given polytope ID
"""
binary_string = np.binary_repr(polytope_id, width=self.num_neurons)[::-1]
return np.array(list(binary_string)).astype("int")
def compute_weights(self, X_, polytope_id):
"""compute weights based on the global network linearity measure
Parameters
----------
X_ : ndarray
Input data matrix
polytope_id : int
            reference polytope identifier
Returns
-------
ndarray
weights of each input sample in the input data matrix
"""
M_ref = self._get_activation_pattern(polytope_id)
start = 0
A = X_
A_ref = X_
d = 0
for l in range(len(self.network_shape) - 1):
end = start + self.network_shape[l]
M_l = M_ref[start:end]
start = end
W, B = self.weights[l], self.biases[l]
pre_A = A @ W + B
A = np.maximum(0, pre_A)
pre_A_ref = A_ref @ W + B
A_ref = pre_A_ref @ np.diag(M_l)
d += np.linalg.norm(A - A_ref, axis=1, ord=2)
return np.exp(-d / self.h)
def fit(self, X, y):
r"""
        Fits the kernel density network.
Parameters
----------
X : ndarray
Input data matrix.
y : ndarray
Output (i.e. response) data matrix.
"""
X, y = check_X_y(X, y)
self.labels = np.unique(y)
feature_dim = X.shape[1]
for label in self.labels:
self.polytope_means[label] = []
self.polytope_covs[label] = []
self.polytope_samples[label] = []
X_ = X[np.where(y == label)[0]] # data having the current label
# get class prior probability
self.class_priors[label] = len(X_) / len(X)
# get polytope ids and unique polytope ids
polytope_ids = self._get_polytope_ids(X_)
unique_polytope_ids = np.unique(polytope_ids)
for polytope in unique_polytope_ids:
weights = self.compute_weights(X_, polytope)
if not self.weighting:
weights[weights < 1] = 0
weights[weights < self.T] = 0 # set very small weights to zero
points_with_nonzero_weights = len(np.where(weights > 0)[0])
if points_with_nonzero_weights < 2:
continue
# apply weights to the data
X_tmp = X_.copy()
polytope_mean_ = np.average(
X_tmp, axis=0, weights=weights
) # compute the weighted average of the samples
X_tmp -= polytope_mean_ # center the data
sqrt_weights = np.sqrt(weights).reshape(-1, 1) @ np.ones(
feature_dim
).reshape(1, -1)
X_tmp *= sqrt_weights # scale the centered data with the square root of the weights
covariance_model = LedoitWolf(assume_centered=True)
covariance_model.fit(X_tmp)
polytope_cov_ = (
covariance_model.covariance_ * len(weights) / sum(weights)
)
polytope_samples_ = len(
np.where(polytope_ids == polytope)[0]
) # count the number of points in the polytope
# store the mean, covariances, and polytope sample size
self.polytope_means[label].append(polytope_mean_)
self.polytope_covs[label].append(polytope_cov_)
self.polytope_samples[label].append(polytope_samples_)
## calculate bias for each label
likelihoods = np.zeros((np.size(X_, 0)), dtype=float)
for polytope, _ in enumerate(self.polytope_means[label]):
likelihoods += np.nan_to_num(
self.polytope_samples[label][polytope]
* self._compute_pdf(X_, label, polytope)
)
likelihoods /= sum(self.polytope_samples[label])
self.bias[label] = np.min(likelihoods) / (
self.k * sum(self.polytope_samples[label])
)
def _compute_pdf(self, X, label, polytope_id):
r"""compute the likelihood for the given data
Parameters
----------
X : ndarray
Input data matrix
label : int
class label
        polytope_id : int
polytope identifier
Returns
-------
ndarray
likelihoods
"""
polytope_mean = self.polytope_means[label][polytope_id]
polytope_cov = self.polytope_covs[label][polytope_id]
var = multivariate_normal(
mean=polytope_mean, cov=polytope_cov, allow_singular=True
)
likelihood = var.pdf(X)
return likelihood
def predict_proba(self, X, return_likelihoods=False):
r"""
        Calculate posteriors using the kernel density network.
Parameters
----------
X : ndarray
Input data matrix.
"""
X = check_array(X)
likelihoods = np.zeros((np.size(X, 0), len(self.labels)), dtype=float)
priors = np.zeros((len(self.labels), 1))
for ii, label in enumerate(self.labels):
priors[ii] = self.class_priors[label]
for polytope, _ in enumerate(self.polytope_means[label]):
likelihoods[:, ii] += np.nan_to_num(
self.polytope_samples[label][polytope]
* self._compute_pdf(X, label, polytope)
)
likelihoods[:, ii] = likelihoods[:, ii] / sum(self.polytope_samples[label])
likelihoods[:, ii] += min(self.bias.values())
proba = (
likelihoods.T * priors / (np.sum(likelihoods * priors.T, axis=1) + 1e-100)
).T
if return_likelihoods:
return proba, likelihoods
else:
return proba
def predict_proba_nn(self, X):
r"""
Calculate posteriors using the vanilla NN
Parameters
----------
X : ndarray
Input data matrix.
"""
X = check_array(X)
proba = self.network.predict(X)
return proba
def predict(self, X):
r"""
        Perform inference using the kernel density network.
Parameters
----------
X : ndarray
Input data matrix.
"""
return np.argmax(self.predict_proba(X), axis=1)
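
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original module). The toy
# data, layer sizes and variable names below are hypothetical, and because of
# the relative `.base` import this guard only runs when the package is
# importable; it is a sketch of the intended workflow, not a prescribed API.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tensorflow as tf
    from sklearn.datasets import make_blobs

    # Small 2-class problem and a tiny MLP for the KDN to condition on.
    X, y = make_blobs(n_samples=500, centers=2, n_features=3, random_state=0)
    net = tf.keras.Sequential([
        tf.keras.layers.Dense(8, activation="relu", input_shape=(3,)),
        tf.keras.layers.Dense(8, activation="relu"),
        tf.keras.layers.Dense(2, activation="softmax"),
    ])
    net.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
    net.fit(X, y, epochs=10, verbose=0)

    # Fit the KDN on the same data and compare its posteriors with the MLP's.
    model = kdn(network=net)
    model.fit(X, y)
    print(model.predict_proba(X[:3]))
    print(model.predict_proba_nn(X[:3]))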
|
the-stack_0_22720 | # Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import zlib
import yaml
import boto3 # pylint: disable=import-error
import jinja2 # pylint: disable=import-error
periodic_template = """
- name: {{job_name}}
cron: '{{cron}}'
labels:
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
decorate: true
decoration_config:
timeout: {{job_timeout}}
extra_refs:
- org: kubernetes
repo: kops
base_ref: master
workdir: true
path_alias: k8s.io/kops
spec:
containers:
- command:
- runner.sh
args:
- bash
- -c
- |
make test-e2e-install
kubetest2 kops \\
-v 2 \\
--up --down \\
--cloud-provider=aws \\
--create-args="{{create_args}}" \\
{%- if kops_feature_flags %}
--env=KOPS_FEATURE_FLAGS={{kops_feature_flags}} \\
{%- endif %}
--kops-version-marker={{kops_deploy_url}} \\
{%- if publish_version_marker %}
--publish-version-marker={{publish_version_marker}} \\
{%- endif %}
--kubernetes-version={{k8s_deploy_url}} \\
{%- if terraform_version %}
--terraform-version={{terraform_version}} \\
{%- endif %}
--test=kops \\
-- \\
--ginkgo-args="--debug" \\
--test-args="-test.timeout={{test_timeout}} -num-nodes=0" \\
{%- if test_package_bucket %}
--test-package-bucket={{test_package_bucket}} \\
{%- endif %}
{%- if test_package_dir %}
--test-package-dir={{test_package_dir}} \\
{%- endif %}
--test-package-marker={{marker}} \\
--parallel={{test_parallelism}} \\
{%- if focus_regex %}
--focus-regex="{{focus_regex}}" \\
{%- endif %}
--skip-regex="{{skip_regex}}"
env:
- name: KUBE_SSH_KEY_PATH
value: /etc/aws-ssh/aws-ssh-private
- name: KUBE_SSH_USER
value: {{kops_ssh_user}}
image: gcr.io/k8s-testimages/kubekins-e2e:v20210402-0a23031-master
imagePullPolicy: Always
resources:
limits:
memory: 3Gi
requests:
cpu: "2"
memory: 3Gi
"""
presubmit_template = """
- name: {{job_name}}
branches:
- master
{%- if run_if_changed %}
run_if_changed: '{{run_if_changed}}'
{%- endif %}
always_run: {{always_run}}
skip_report: {{skip_report}}
labels:
preset-service-account: "true"
preset-aws-ssh: "true"
preset-aws-credential: "true"
preset-bazel-scratch-dir: "true"
preset-bazel-remote-cache-enabled: "true"
preset-dind-enabled: "true"
decorate: true
decoration_config:
timeout: {{job_timeout}}
path_alias: k8s.io/kops
spec:
containers:
- image: gcr.io/k8s-testimages/kubekins-e2e:v20210402-0a23031-master
imagePullPolicy: Always
command:
- runner.sh
args:
- bash
- -c
- |
make test-e2e-install
kubetest2 kops \\
-v 2 \\
--up --build --down \\
--cloud-provider=aws \\
--create-args="{{create_args}}" \\
--kubernetes-version={{k8s_deploy_url}} \\
--kops-binary-path=/home/prow/go/src/k8s.io/kops/bazel-bin/cmd/kops/linux-amd64/kops \\
{%- if terraform_version %}
--terraform-version={{terraform_version}} \\
{%- endif %}
--test=kops \\
-- \\
--ginkgo-args="--debug" \\
--test-args="-test.timeout={{test_timeout}} -num-nodes=0" \\
{%- if test_package_bucket %}
--test-package-bucket={{test_package_bucket}} \\
{%- endif %}
{%- if test_package_dir %}
--test-package-dir={{test_package_dir}} \\
{%- endif %}
--test-package-marker={{marker}} \\
--parallel={{test_parallelism}} \\
{%- if focus_regex %}
--focus-regex="{{focus_regex}}" \\
{%- endif %}
--skip-regex="{{skip_regex}}"
securityContext:
privileged: true
env:
- name: KUBE_SSH_KEY_PATH
value: /etc/aws-ssh/aws-ssh-private
- name: KUBE_SSH_USER
value: {{kops_ssh_user}}
- name: GOPATH
value: /home/prow/go
resources:
requests:
cpu: "2"
memory: "6Gi"
"""
# We support rapid focus on a few tests of high concern
# This should be used for temporary tests we are evaluating,
# and ideally linked to a bug, and removed once the bug is fixed
run_hourly = [
]
run_daily = [
'kops-grid-scenario-public-jwks',
'kops-grid-scenario-arm64',
'kops-grid-scenario-aws-cloud-controller-manager',
'kops-grid-scenario-serial-test-for-timeout',
'kops-grid-scenario-terraform',
]
# These are job tab names of unsupported grid combinations
skip_jobs = [
]
def simple_hash(s):
# & 0xffffffff avoids python2/python3 compatibility
return zlib.crc32(s.encode()) & 0xffffffff
def build_cron(key, runs_per_day):
runs_per_week = 0
minute = simple_hash("minutes:" + key) % 60
hour = simple_hash("hours:" + key) % 24
day_of_week = simple_hash("day_of_week:" + key) % 7
if runs_per_day > 0:
hour_denominator = 24 / runs_per_day
hour_offset = simple_hash("hours:" + key) % hour_denominator
return "%d %d-23/%d * * *" % (minute, hour_offset, hour_denominator), (runs_per_day * 7)
# run Ubuntu 20.04 (Focal) jobs more frequently
if "u2004" in key:
runs_per_week += 7
return "%d %d * * *" % (minute, hour), runs_per_week
# run hotlist jobs more frequently
if key in run_hourly:
runs_per_week += 24 * 7
return "%d * * * *" % (minute), runs_per_week
if key in run_daily:
runs_per_week += 7
return "%d %d * * *" % (minute, hour), runs_per_week
runs_per_week += 1
return "%d %d * * %d" % (minute, hour, day_of_week), runs_per_week
def replace_or_remove_line(s, pattern, new_str):
keep = []
for line in s.split('\n'):
if pattern in line:
if new_str:
line = line.replace(pattern, new_str)
keep.append(line)
else:
keep.append(line)
return '\n'.join(keep)
def should_skip_newer_k8s(k8s_version, kops_version):
if kops_version is None:
return False
if k8s_version is None:
return True
return float(k8s_version) > float(kops_version)
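# Note (added): comparing versions as floats works for the two-component "1.x"
# markers used in this file (e.g. 1.20 > 1.19) but would not order arbitrary
# version strings correctly.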
def k8s_version_info(k8s_version):
test_package_bucket = ''
test_package_dir = ''
if k8s_version == 'latest':
marker = 'latest.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release/release/latest.txt"
elif k8s_version == 'ci':
marker = 'latest.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release-dev/ci/latest.txt"
test_package_bucket = 'kubernetes-release-dev'
test_package_dir = 'ci'
elif k8s_version == 'stable':
marker = 'stable.txt'
k8s_deploy_url = "https://storage.googleapis.com/kubernetes-release/release/stable.txt"
elif k8s_version:
marker = f"stable-{k8s_version}.txt"
k8s_deploy_url = f"https://storage.googleapis.com/kubernetes-release/release/stable-{k8s_version}.txt" # pylint: disable=line-too-long
else:
raise Exception('missing required k8s_version')
return marker, k8s_deploy_url, test_package_bucket, test_package_dir
def create_args(kops_channel, networking, container_runtime, extra_flags, kops_image):
args = f"--channel={kops_channel} --networking=" + (networking or "kubenet")
if container_runtime:
args += f" --container-runtime={container_runtime}"
image_overridden = False
if extra_flags:
for arg in extra_flags:
if "--image=" in arg:
image_overridden = True
args = args + " " + arg
if not image_overridden:
args = f"--image='{kops_image}' {args}"
return args.strip()
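# Example (added, illustrative): create_args('alpha', 'calico', 'containerd',
# None, '<ami>') returns
# "--image='<ami>' --channel=alpha --networking=calico --container-runtime=containerd".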
def latest_aws_image(owner, name):
client = boto3.client('ec2', region_name='us-east-1')
response = client.describe_images(
Owners=[owner],
Filters=[
{
'Name': 'name',
'Values': [
name,
],
},
],
)
images = {}
for image in response['Images']:
images[image['CreationDate']] = image['ImageLocation']
return images[sorted(images, reverse=True)[0]]
distro_images = {
'amzn2': latest_aws_image('137112412989', 'amzn2-ami-hvm-*-x86_64-gp2'),
'centos7': latest_aws_image('125523088429', 'CentOS 7.*x86_64'),
'centos8': latest_aws_image('125523088429', 'CentOS 8.*x86_64'),
'deb9': latest_aws_image('379101102735', 'debian-stretch-hvm-x86_64-gp2-*'),
'deb10': latest_aws_image('136693071363', 'debian-10-amd64-*'),
'flatcar': latest_aws_image('075585003325', 'Flatcar-stable-*-hvm'),
'rhel7': latest_aws_image('309956199498', 'RHEL-7.*_HVM_*-x86_64-0-Hourly2-GP2'),
'rhel8': latest_aws_image('309956199498', 'RHEL-8.*_HVM-*-x86_64-0-Hourly2-GP2'),
'u1804': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*'), # pylint: disable=line-too-long
'u2004': latest_aws_image('099720109477', 'ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*'), # pylint: disable=line-too-long
}
distros_ssh_user = {
'amzn2': 'ec2-user',
'centos7': 'centos',
'centos8': 'centos',
'deb9': 'admin',
'deb10': 'admin',
'flatcar': 'core',
'rhel7': 'ec2-user',
'rhel8': 'ec2-user',
'u1804': 'ubuntu',
'u2004': 'ubuntu',
}
##############
# Build Test #
##############
# Returns a string representing the periodic prow job and the number of job invocations per week
def build_test(cloud='aws',
distro='u2004',
networking=None,
container_runtime='docker',
k8s_version='latest',
kops_channel='alpha',
kops_version=None,
publish_version_marker=None,
name_override=None,
feature_flags=(),
extra_flags=None,
extra_dashboards=None,
terraform_version=None,
test_parallelism=25,
test_timeout_minutes=60,
skip_override=None,
focus_regex=None,
runs_per_day=0):
# pylint: disable=too-many-statements,too-many-branches,too-many-arguments
if kops_version is None:
# TODO: Move to kops-ci/markers/master/ once validated
kops_deploy_url = "https://storage.googleapis.com/kops-ci/bin/latest-ci-updown-green.txt"
elif kops_version.startswith("https://"):
kops_deploy_url = kops_version
kops_version = None
else:
kops_deploy_url = f"https://storage.googleapis.com/kops-ci/markers/release-{kops_version}/latest-ci-updown-green.txt" # pylint: disable=line-too-long
# https://github.com/cilium/cilium/blob/71cfb265d53b63a2be3806fb3fd4425fa36262ff/Documentation/install/system_requirements.rst#centos-foot
if networking == "cilium" and distro not in ["u2004", "deb10", "rhel8"]:
return None
if should_skip_newer_k8s(k8s_version, kops_version):
return None
kops_image = distro_images[distro]
kops_ssh_user = distros_ssh_user[distro]
marker, k8s_deploy_url, test_package_bucket, test_package_dir = k8s_version_info(k8s_version)
args = create_args(kops_channel, networking, container_runtime, extra_flags, kops_image)
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler|Services.*functioning.*NodePort|Services.*rejected.*endpoints|Services.*affinity' # pylint: disable=line-too-long
if networking == "cilium":
# https://github.com/cilium/cilium/issues/10002
skip_regex += r'|TCP.CLOSE_WAIT'
if skip_override is not None:
skip_regex = skip_override
suffix = ""
if cloud and cloud != "aws":
suffix += "-" + cloud
if networking:
suffix += "-" + networking
if distro:
suffix += "-" + distro
if k8s_version:
suffix += "-k" + k8s_version.replace("1.", "")
if kops_version:
suffix += "-ko" + kops_version.replace("1.", "")
if container_runtime:
suffix += "-" + container_runtime
tab = name_override or (f"kops-grid{suffix}")
if tab in skip_jobs:
return None
job_name = f"e2e-{tab}"
cron, runs_per_week = build_cron(tab, runs_per_day)
tmpl = jinja2.Template(periodic_template)
job = tmpl.render(
job_name=job_name,
cron=cron,
kops_ssh_user=kops_ssh_user,
create_args=args,
k8s_deploy_url=k8s_deploy_url,
kops_deploy_url=kops_deploy_url,
test_parallelism=str(test_parallelism),
job_timeout=str(test_timeout_minutes + 30) + 'm',
test_timeout=str(test_timeout_minutes) + 'm',
marker=marker,
skip_regex=skip_regex,
kops_feature_flags=','.join(feature_flags),
terraform_version=terraform_version,
test_package_bucket=test_package_bucket,
test_package_dir=test_package_dir,
focus_regex=focus_regex,
publish_version_marker=publish_version_marker,
)
spec = {
'cloud': cloud,
'networking': networking,
'distro': distro,
'k8s_version': k8s_version,
'kops_version': kops_version,
'container_runtime': container_runtime,
'kops_channel': kops_channel,
}
if feature_flags:
spec['feature_flags'] = ','.join(feature_flags)
if extra_flags:
spec['extra_flags'] = ' '.join(extra_flags)
jsonspec = json.dumps(spec, sort_keys=True)
dashboards = [
'sig-cluster-lifecycle-kops',
'google-aws',
'kops-kubetest2',
f"kops-distro-{distro}",
f"kops-k8s-{k8s_version or 'latest'}",
f"kops-{kops_version or 'latest'}",
]
if extra_dashboards:
dashboards.extend(extra_dashboards)
annotations = {
'testgrid-dashboards': ', '.join(sorted(dashboards)),
'testgrid-days-of-results': '90',
'testgrid-tab-name': tab,
}
for (k, v) in spec.items():
annotations[f"test.kops.k8s.io/{k}"] = v or ""
extra = yaml.dump({'annotations': annotations}, width=9999, default_flow_style=False)
output = f"\n# {jsonspec}\n{job.strip()}\n"
for line in extra.splitlines():
output += f" {line}\n"
return output, runs_per_week
# Returns a string representing a presubmit prow job YAML
def presubmit_test(cloud='aws',
distro='u2004',
networking=None,
container_runtime='docker',
k8s_version='latest',
kops_channel='alpha',
name=None,
tab_name=None,
feature_flags=(),
extra_flags=None,
extra_dashboards=None,
test_parallelism=25,
test_timeout_minutes=60,
skip_override=None,
focus_regex=None,
run_if_changed=None,
skip_report=False,
always_run=False):
# pylint: disable=too-many-statements,too-many-branches,too-many-arguments
kops_image = distro_images[distro]
kops_ssh_user = distros_ssh_user[distro]
marker, k8s_deploy_url, test_package_bucket, test_package_dir = k8s_version_info(k8s_version)
args = create_args(kops_channel, networking, container_runtime, extra_flags, kops_image)
tmpl = jinja2.Template(presubmit_template)
job = tmpl.render(
job_name=name,
kops_ssh_user=kops_ssh_user,
create_args=args,
k8s_deploy_url=k8s_deploy_url,
test_parallelism=str(test_parallelism),
job_timeout=str(test_timeout_minutes + 30) + 'm',
test_timeout=str(test_timeout_minutes) + 'm',
marker=marker,
skip_regex=skip_override,
kops_feature_flags=','.join(feature_flags),
test_package_bucket=test_package_bucket,
test_package_dir=test_package_dir,
focus_regex=focus_regex,
run_if_changed=run_if_changed,
skip_report='true' if skip_report else 'false',
always_run='true' if always_run else 'false',
)
spec = {
'cloud': cloud,
'networking': networking,
'distro': distro,
'k8s_version': k8s_version,
'container_runtime': container_runtime,
'kops_channel': kops_channel,
}
if feature_flags:
spec['feature_flags'] = ','.join(feature_flags)
if extra_flags:
spec['extra_flags'] = ' '.join(extra_flags)
jsonspec = json.dumps(spec, sort_keys=True)
dashboards = [
'presubmits-kops',
'kops-presubmits',
'sig-cluster-lifecycle-kops',
'kops-kubetest2',
f"kops-distro-{distro}",
f"kops-k8s-{k8s_version or 'latest'}",
]
if extra_dashboards:
dashboards.extend(extra_dashboards)
annotations = {
'testgrid-dashboards': ', '.join(sorted(dashboards)),
'testgrid-days-of-results': '90',
'testgrid-tab-name': tab_name,
}
for (k, v) in spec.items():
annotations[f"test.kops.k8s.io/{k}"] = v or ""
extra = yaml.dump({'annotations': annotations}, width=9999, default_flow_style=False)
output = f"\n# {jsonspec}{job}\n"
for line in extra.splitlines():
output += f" {line}\n"
return output
####################
# Grid Definitions #
####################
networking_options = [
None,
'calico',
'cilium',
'flannel',
'kopeio',
]
distro_options = [
'amzn2',
'deb9',
'deb10',
'flatcar',
'rhel7',
'rhel8',
'u1804',
'u2004',
]
k8s_versions = [
#"latest", # disabled until we're ready to test 1.21
"1.18",
"1.19",
"1.20"
]
kops_versions = [
None, # maps to latest
"1.19",
"1.20",
]
container_runtimes = [
"docker",
"containerd",
]
############################
# kops-periodics-grid.yaml #
############################
def generate_grid():
results = []
# pylint: disable=too-many-nested-blocks
for container_runtime in container_runtimes:
for networking in networking_options:
for distro in distro_options:
for k8s_version in k8s_versions:
for kops_version in kops_versions:
results.append(
build_test(cloud="aws",
distro=distro,
extra_dashboards=['kops-grid'],
k8s_version=k8s_version,
kops_version=kops_version,
networking=networking,
container_runtime=container_runtime)
)
return filter(None, results)
#############################
# kops-periodics-misc2.yaml #
#############################
def generate_misc():
u2004_arm = distro_images['u2004'].replace('amd64', 'arm64')
results = [
# A one-off scenario testing arm64
build_test(name_override="kops-grid-scenario-arm64",
cloud="aws",
distro="u2004",
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large",
f"--image={u2004_arm}"],
extra_dashboards=['kops-misc']),
# A special test for JWKS
build_test(name_override="kops-grid-scenario-public-jwks",
cloud="aws",
distro="u2004",
feature_flags=["UseServiceAccountIAM", "PublicJWKS"],
extra_flags=['--api-loadbalancer-type=public'],
extra_dashboards=['kops-misc']),
# A special test for AWS Cloud-Controller-Manager
build_test(name_override="kops-grid-scenario-aws-cloud-controller-manager",
cloud="aws",
distro="u2004",
k8s_version="1.19",
feature_flags=["EnableExternalCloudController,SpecOverrideFlag"],
extra_flags=['--override=cluster.spec.cloudControllerManager.cloudProvider=aws',
'--override=cluster.spec.cloudConfig.awsEBSCSIDriver.enabled=true'],
extra_dashboards=['provider-aws-cloud-provider-aws', 'kops-misc']),
build_test(name_override="kops-grid-scenario-terraform",
container_runtime='containerd',
k8s_version="1.20",
terraform_version="0.14.6",
extra_dashboards=['kops-misc']),
build_test(name_override="kops-aws-misc-ha-euwest1",
k8s_version="stable",
networking="calico",
kops_channel="alpha",
runs_per_day=24,
extra_flags=["--master-count=3", "--zones=eu-west-1a,eu-west-1b,eu-west-1c"],
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-release",
k8s_version="latest",
container_runtime="containerd",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large",
f"--image={u2004_arm}"],
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-ci",
k8s_version="ci",
container_runtime="containerd",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large",
f"--image={u2004_arm}"],
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler', # pylint: disable=line-too-long
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-arm64-conformance",
k8s_version="ci",
container_runtime="containerd",
networking="calico",
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large",
f"--image={u2004_arm}"],
skip_override=r'\[Slow\]|\[Serial\]|\[Flaky\]',
focus_regex=r'\[Conformance\]|\[NodeConformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-amd64-conformance",
k8s_version="ci",
container_runtime="containerd",
distro='u2004',
kops_channel="alpha",
runs_per_day=3,
extra_flags=["--node-size=c5.large",
"--master-size=c5.large"],
skip_override=r'\[Slow\]|\[Serial\]|\[Flaky\]',
focus_regex=r'\[Conformance\]|\[NodeConformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-aws-misc-updown",
k8s_version="stable",
container_runtime="containerd",
networking="calico",
distro='u2004',
kops_channel="alpha",
kops_version="https://storage.googleapis.com/kops-ci/bin/latest-ci.txt",
publish_version_marker="gs://kops-ci/bin/latest-ci-updown-green.txt",
runs_per_day=24,
extra_flags=["--node-size=c5.large",
"--master-size=c5.large"],
skip_override=r'',
focus_regex=r'\[k8s.io\]\sNetworking.*\[Conformance\]',
extra_dashboards=["kops-misc"]),
build_test(name_override="kops-grid-scenario-cilium10-arm64",
cloud="aws",
networking="cilium",
distro="u2004",
kops_channel="alpha",
runs_per_day=1,
extra_flags=["--zones=eu-central-1a",
"--node-size=m6g.large",
"--master-size=m6g.large",
"--override=cluster.spec.networking.cilium.version=v1.10.0-rc0",
f"--image={u2004_arm}"],
extra_dashboards=['kops-misc']),
build_test(name_override="kops-grid-scenario-cilium10-amd64",
cloud="aws",
networking="cilium",
distro="u2004",
kops_channel="alpha",
runs_per_day=1,
extra_flags=["--zones=eu-central-1a",
"--override=cluster.spec.networking.cilium.version=v1.10.0-rc0"],
extra_dashboards=['kops-misc']),
]
return results
###############################
# kops-periodics-distros.yaml #
###############################
def generate_distros():
distros = ['debian9', 'debian10', 'ubuntu1804', 'ubuntu2004', 'centos7', 'centos8',
'amazonlinux2', 'rhel7', 'rhel8', 'flatcar']
results = []
for distro in distros:
distro_short = distro.replace('ubuntu', 'u').replace('debian', 'deb').replace('amazonlinux', 'amzn') # pylint: disable=line-too-long
results.append(
build_test(distro=distro_short,
networking='calico',
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name_override=f"kops-aws-distro-image{distro}",
extra_dashboards=['kops-distros'],
runs_per_day=3,
skip_override=r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
)
)
return results
#######################################
# kops-periodics-network-plugins.yaml #
#######################################
def generate_network_plugins():
plugins = ['amazon-vpc', 'calico', 'canal', 'cilium', 'flannel', 'kopeio', 'kuberouter', 'weave'] # pylint: disable=line-too-long
results = []
skip_base = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler'# pylint: disable=line-too-long
for plugin in plugins:
networking_arg = plugin
skip_regex = skip_base
if plugin == 'amazon-vpc':
networking_arg = 'amazonvpc'
if plugin == 'cilium':
skip_regex += r'|should.set.TCP.CLOSE_WAIT'
else:
skip_regex += r'|Services.*functioning.*NodePort'
if plugin in ['calico', 'canal', 'weave', 'cilium']:
skip_regex += r'|Services.*rejected.*endpoints'
if plugin == 'kuberouter':
skip_regex += r'|load-balancer|hairpin|affinity\stimeout|service\.kubernetes\.io|CLOSE_WAIT' # pylint: disable=line-too-long
networking_arg = 'kube-router'
results.append(
build_test(
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name_override=f"kops-aws-cni-{plugin}",
networking=networking_arg,
extra_flags=['--node-size=t3.large'],
extra_dashboards=['kops-network-plugins'],
runs_per_day=3,
skip_override=skip_regex
)
)
return results
################################
# kops-periodics-versions.yaml #
################################
def generate_versions():
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
results = [
build_test(
container_runtime='containerd',
k8s_version='ci',
kops_channel='alpha',
name_override='kops-aws-k8s-latest',
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=24,
# This version marker is only used by the k/k presubmit job
publish_version_marker='gs://kops-ci/bin/latest-ci-green.txt',
skip_override=skip_regex
)
]
for version in ['1.20', '1.19', '1.18', '1.17', '1.16', '1.15']:
distro = 'deb9' if version in ['1.17', '1.16', '1.15'] else 'u2004'
if version == '1.15':
skip_regex += r'|Services.*rejected.*endpoints'
results.append(
build_test(
container_runtime='containerd',
distro=distro,
k8s_version=version,
kops_channel='alpha',
name_override=f"kops-aws-k8s-{version.replace('.', '-')}",
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=8,
skip_override=skip_regex
)
)
return results
######################
# kops-pipeline.yaml #
######################
def generate_pipeline():
results = []
focus_regex = r'\[k8s.io\]\sNetworking.*\[Conformance\]'
for version in ['master', '1.20', '1.19']:
branch = version if version == 'master' else f"release-{version}"
publish_version_marker = f"gs://kops-ci/markers/{branch}/latest-ci-updown-green.txt"
kops_version = f"https://storage.googleapis.com/k8s-staging-kops/kops/releases/markers/{branch}/latest-ci.txt" # pylint: disable=line-too-long
results.append(
build_test(
container_runtime='containerd',
k8s_version=version.replace('master', 'latest'),
kops_version=kops_version,
kops_channel='alpha',
name_override=f"kops-pipeline-updown-kops{version.replace('.', '')}",
networking='calico',
extra_dashboards=['kops-versions'],
runs_per_day=24,
skip_override=r'\[Slow\]|\[Serial\]',
focus_regex=focus_regex,
publish_version_marker=publish_version_marker,
)
)
return results
########################################
# kops-presubmits-network-plugins.yaml #
########################################
def generate_presubmits_network_plugins():
plugins = {
'amazonvpc': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.amazon-vpc-routed-eni\/|pkg\/model\/(firewall|components\/kubeproxy|iam\/iam_builder).go|nodeup\/pkg\/model\/(context|kubelet).go|upup\/pkg\/fi\/cloudup\/defaults.go)', # pylint: disable=line-too-long
'calico': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.projectcalico\.org\/|pkg\/model\/(firewall.go|pki.go|iam\/iam_builder.go)|nodeup\/pkg\/model\/networking\/calico.go)', # pylint: disable=line-too-long
'canal': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.projectcalico\.org\.canal\/|nodeup\/pkg\/model\/networking\/(flannel|canal).go)', # pylint: disable=line-too-long
'cilium': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.cilium\.io\/|pkg\/model\/(firewall|components\/cilium|iam\/iam_builder).go|nodeup\/pkg\/model\/(context|networking\/cilium).go|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'flannel': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.flannel\/|nodeup\/pkg\/model\/(sysctls|networking\/flannel).go|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'kuberouter': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.kuberouter\/|upup\/pkg\/fi\/cloudup\/template_functions.go)', # pylint: disable=line-too-long
'weave': r'^(upup\/models\/cloudup\/resources\/addons\/networking\.weave\/|upup\/pkg\/fi\/cloudup\/template_functions.go)' # pylint: disable=line-too-long
}
results = []
skip_base = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
for plugin, run_if_changed in plugins.items():
networking_arg = plugin
skip_regex = skip_base
if plugin == 'cilium':
skip_regex += r'|should.set.TCP.CLOSE_WAIT'
else:
skip_regex += r'|Services.*functioning.*NodePort'
if plugin in ['calico', 'canal', 'weave', 'cilium']:
skip_regex += r'|Services.*rejected.*endpoints'
if plugin == 'kuberouter':
skip_regex += r'|load-balancer|hairpin|affinity\stimeout|service\.kubernetes\.io|CLOSE_WAIT' # pylint: disable=line-too-long
networking_arg = 'kube-router'
if plugin in ['canal', 'flannel']:
skip_regex += r'|up\sand\sdown|headless|service-proxy-name'
results.append(
presubmit_test(
container_runtime='containerd',
k8s_version='stable',
kops_channel='alpha',
name=f"pull-kops-e2e-cni-{plugin}",
tab_name=f"e2e-{plugin}",
networking=networking_arg,
extra_flags=['--node-size=t3.large'],
extra_dashboards=['kops-network-plugins'],
skip_override=skip_regex,
run_if_changed=run_if_changed,
skip_report=False,
always_run=False,
)
)
return results
############################
# kops-presubmits-e2e.yaml #
############################
def generate_presubmits_e2e():
skip_regex = r'\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|\[HPA\]|Dashboard|RuntimeClass|RuntimeHandler' # pylint: disable=line-too-long
return [
presubmit_test(
container_runtime='docker',
k8s_version='1.20',
kops_channel='stable',
name='pull-kops-e2e-kubernetes-aws',
tab_name='e2e-docker',
always_run=True,
skip_override=skip_regex,
),
presubmit_test(
container_runtime='docker',
k8s_version='1.20',
kops_channel='stable',
name='pull-kops-e2e-k8s-containerd',
networking='calico',
tab_name='e2e-containerd',
always_run=True,
skip_override=skip_regex,
),
]
########################
# YAML File Generation #
########################
periodics_files = {
'kops-periodics-distros.yaml': generate_distros,
'kops-periodics-grid.yaml': generate_grid,
'kops-periodics-misc2.yaml': generate_misc,
'kops-periodics-network-plugins.yaml': generate_network_plugins,
'kops-periodics-versions.yaml': generate_versions,
'kops-periodics-pipeline.yaml': generate_pipeline,
}
presubmits_files = {
'kops-presubmits-network-plugins.yaml': generate_presubmits_network_plugins,
'kops-presubmits-e2e.yaml': generate_presubmits_e2e,
}
def main():
for filename, generate_func in periodics_files.items():
print(f"Generating {filename}")
output = []
runs_per_week = 0
job_count = 0
for res in generate_func():
output.append(res[0])
runs_per_week += res[1]
job_count += 1
output.insert(0, "# Test jobs generated by build_jobs.py (do not manually edit)\n")
output.insert(1, f"# {job_count} jobs, total of {runs_per_week} runs per week\n")
output.insert(2, "periodics:\n")
with open(filename, 'w') as fd:
fd.write(''.join(output))
for filename, generate_func in presubmits_files.items():
print(f"Generating {filename}")
output = []
job_count = 0
for res in generate_func():
output.append(res)
job_count += 1
output.insert(0, "# Test jobs generated by build_jobs.py (do not manually edit)\n")
output.insert(1, f"# {job_count} jobs\n")
output.insert(2, "presubmits:\n")
output.insert(3, " kubernetes/kops:\n")
with open(filename, 'w') as fd:
fd.write(''.join(output))
if __name__ == "__main__":
main()
|
the-stack_0_22721 | """This module contains the general information for SwFcZone ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SwFcZoneConsts:
LC_ALLOCATED = "allocated"
LC_AVAILABLE = "available"
LC_DEALLOCATED = "deallocated"
LC_REPURPOSED = "repurposed"
OPER_STATE_ACTIVE = "active"
OPER_STATE_APPLIED = "applied"
OPER_STATE_APPLY_PENDING = "apply-pending"
OPER_STATE_APPLYING = "applying"
OPER_STATE_CREATE_FAILED = "create-failed"
OPER_STATE_CREATED = "created"
OPER_STATE_DELETED = "deleted"
OPER_STATE_NOT_ACTIVE = "not-active"
OPER_STATE_NOT_APPLIED = "not-applied"
OPER_STATE_ZONE_MERGE_FAILURE = "zone-merge-failure"
class SwFcZone(ManagedObject):
"""This is SwFcZone class."""
consts = SwFcZoneConsts()
naming_props = set([u'identity'])
mo_meta = MoMeta("SwFcZone", "swFcZone", "zone-[identity]", VersionMeta.Version211a, "InputOutput", 0x3f, [], ["read-only"], [u'swZoneInitiatorMember'], [u'swZoneTargetMember'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"identity": MoPropertyMeta("identity", "identity", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, 0x8, 1, 510, None, [], []),
"lc": MoPropertyMeta("lc", "lc", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["allocated", "available", "deallocated", "repurposed"], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{1,64}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["active", "applied", "apply-pending", "applying", "create-failed", "created", "deleted", "not-active", "not-applied", "zone-merge-failure"], []),
"peer_dn": MoPropertyMeta("peer_dn", "peerDn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"identity": "identity",
"lc": "lc",
"name": "name",
"operState": "oper_state",
"peerDn": "peer_dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, identity, **kwargs):
self._dirty_mask = 0
self.identity = identity
self.child_action = None
self.id = None
self.lc = None
self.name = None
self.oper_state = None
self.peer_dn = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "SwFcZone", parent_mo_or_dn, **kwargs)
|
the-stack_0_22722 | """
Python mapping for the QTKit framework.
This module does not contain docstrings for the wrapped code, check Apple's
documentation for details on how to use these functions and classes.
"""
import sys
import objc
import Cocoa
import Quartz
from QTKit import _metadata, _QTKit
sys.modules["QTKit"] = mod = objc.ObjCLazyModule(
"QTKit",
"com.apple.QTKit",
objc.pathForFramework("/System/Library/Frameworks/QTKit.framework"),
_metadata.__dict__,
None,
{
"__doc__": __doc__,
"__path__": __path__,
"__loader__": globals().get("__loader__", None),
"objc": objc,
},
(Cocoa, Quartz),
)
import sys
del sys.modules["QTKit._metadata"]
del sys.modules["QTKit._QTKit"]
|
the-stack_0_22725 | def isgore(a,b):
nbr1=int(a+b)
nbr2=int(b+a)
return nbr1>=nbr2
def largest_number(a):
res = ""
while len(a)!=0:
mx=0
for x in a:
if isgore(str(x),str(mx)):
mx=x
res+=str(mx)
a.remove(mx)
return res
n = int(input())
a = list(map(int, input().split()))
print(largest_number(a))
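# Worked example: with n = 2 and a = [21, 2], the greedy pass first selects 2
# because isgore("2", "21") compares 221 >= 212 (placing 2 first wins), so the
# printed result is "221".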
|
the-stack_0_22726 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature utils.
Includes 2D relative position.
"""
from typing import Union
import tensorflow as tf
from etcmodel import feature_utils as etc_feature_utils
class MmtRelativePositionGenerator(object):
"""Generates `relative_att_ids` for image and text."""
def __init__(self,
num_patch_per_row: int,
num_core_layers: int,
text_relative_pos_max_distance: int):
"""instantiates an object to generate relative attention IDs for Mmt model.
Args:
num_patch_per_row: the value is image size divided by patch size. We assume
all images and patches are square. For example, image size is 224 and
patch size is 16. The number of patches per row will be 224 / 16 = 14.
num_core_layers: the radius (except ID 0) of fine-grained 2D attention.
For example, if the num_core_layers is 1, then we have 9 IDs.
-
5 6 7 | num_core_layers = 1
8 0 1 -
2 3 4
If the num_core_layers is 2, then we have 25 IDs.
-
13 14 15 16 17 | num_core_layers = 2
18 19 20 21 22 |
23 24 0 1 2 -
3 4 5 6 7
8 9 10 11 12
text_relative_pos_max_distance: the maximum radius for 1D attention (text).
"""
if num_patch_per_row <= 0:
raise ValueError('`num_patch_per_row` must be positive.')
if num_core_layers <= 0:
raise ValueError('`num_core_layers` must be positive.')
if text_relative_pos_max_distance < 0:
raise ValueError('`text_relative_pos_max_distance` must be non-negative.')
self._num_patch_per_row = num_patch_per_row
# The number of core layers (radius from the top layer to the center) of
# fine-grained position ids. The minimum is 1 which will result in 9 ids.
self._num_core_layers = num_core_layers
# core_layer_diameter will be 3 in the case shown above.
self._core_layer_diameter = num_core_layers * 2 + 1
# 1D relative position IDs for text.
text_max_id = text_relative_pos_max_distance * 2 + 1
# Gives the same IDs for all patches when using 1D text relative IDs.
self._image_part_id = (self._num_patch_per_row ** 2 +
len(self.direction_config) + text_max_id)
# Gives the same IDs for all text when using 2D patch relative IDs.
self._text_part_id = self._image_part_id + 1
self._base_tensor = self.create_base_tensor()
self._text_relative_generator = etc_feature_utils.RelativePositionGenerator(
text_relative_pos_max_distance)
def create_base_tensor(self):
"""Creates the base tensor for all patches.
We use a kernel sliding on the base tensor to get the 2d relative position
ids.
"""
r = self._num_core_layers
d = self._core_layer_diameter
n = self._num_patch_per_row - self._num_core_layers
num_center_ids = d ** 2
center = tf.range(num_center_ids)
center = tf.roll(center, shift=d*r+r, axis=0)
center = tf.reshape(center, (d, d))
center = tf.pad(center, paddings=[[n, n], [n, n]])
base_tensor = center
for idx, dn in enumerate(self.direction_config.values(), start=d*d):
dn_tensor = tf.fill(dn['fill'], idx)
dn_tensor = tf.pad(dn_tensor, paddings=dn['paddings'])
base_tensor += dn_tensor
return base_tensor
def make_relative_att_ids(self,
seq_len: Union[int, tf.Tensor],
batch_size: int):
"""Makes relative attention ids.
Includes 1D for text and 2D for image.
For image 2D relative IDs, we use the base tensor as the auxiliary tensor.
Let's use the base_tensor as a toy example shown below.
base_tensor = tf.Tensor(
[[16 9 9 9 10]
[15 5 6 7 11]
[15 8 0 1 11]
[15 2 3 4 11]
[14 13 13 13 12]], shape=(5, 5), dtype=int32)
If the image has 9 (3x3) patches A-I.
A B C
D E F
G H I
We position each patch at 0 of the base tensor and crop the region of the
base tensor that corresponds to the rest of the patches' positions.
For example, the 2D relative position attention ids of the patch A will be:
0 1 11
3 4 11
13 13 12
The 2D relative position attention ids of the patch B will be:
8 0 1
2 3 4
13 13 13
The 2D relative position attention ids of the patch H will be:
9 9 9
5 6 7
8 0 1
"""
image_seq_len = self._num_patch_per_row ** 2
text_seq_len = seq_len - image_seq_len
image_relative_att_ids = []
for x in range(self._num_patch_per_row):
for y in range(self._num_patch_per_row):
begin = [self._num_patch_per_row - x, self._num_patch_per_row - y]
size = [self._num_patch_per_row, self._num_patch_per_row]
ids = tf.slice(self._base_tensor, begin, size)
ids = tf.reshape(ids, (-1,))
image_relative_att_ids.append(ids)
image_relative_att_ids = tf.stack(image_relative_att_ids)
image_relative_att_ids = tf.pad(image_relative_att_ids,
paddings=[[0, 0], [0, text_seq_len]],
constant_values=self._text_part_id)
image_relative_att_ids = tf.expand_dims(image_relative_att_ids, axis=0)
text_relative_att_ids = self._text_relative_generator.make_relative_att_ids(
text_seq_len,
batch_size=batch_size)
text_relative_att_ids = tf.pad(text_relative_att_ids,
paddings=[[0, 0], [0, 0], [image_seq_len, 0]],
constant_values=self._image_part_id)
return tf.concat([image_relative_att_ids, text_relative_att_ids], axis=1)
@property
def direction_config(self):
"""Creates direction configurations for 8 directions.
Toy example:
base_tensor = tf.Tensor(
[[16 9 9 9 10]
[15 5 6 7 11]
[15 8 0 1 11]
[15 2 3 4 11]
[14 13 13 13 12]], shape=(5, 5), dtype=int32)
1. Fine-grained IDs.
5 6 7 ^
8 0 1 | d = core_layer_diameter
2 3 4 v
2. Coarse-grained IDs.
The other 8 directions in total are as follows.
top: 9.
top-right: 10.
right: 11.
bottom-right 12.
bottom: 13.
bottom-left: 14.
left: 15.
top-left: 16.
"""
d = self._core_layer_diameter
m = self._num_patch_per_row + self._num_core_layers + 1
n = self._num_patch_per_row - self._num_core_layers
direction_config = {
'top': {
'fill': [n, d],
'paddings': [[0, m], [n, n]],
},
'top_right': {
'fill': [n, n],
'paddings': [[0, m], [m, 0]],
},
'right': {
'fill': [d, n],
'paddings': [[n, n], [m, 0]],
},
'right_bottom': {
'fill': [n, n],
'paddings': [[m, 0], [m, 0]],
},
'bottom': {
'fill': [n, d],
'paddings': [[m, 0], [n, n]],
},
'bottom_left': {
'fill': [n, n],
'paddings': [[m, 0], [0, m]],
},
'left': {
'fill': [d, n],
'paddings': [[n, n], [0, m]],
},
'top_left': {
'fill': [n, n],
'paddings': [[0, m], [0, m]],
}
}
return direction_config
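# Illustrative usage sketch (added for clarity; values are placeholders and the
# exact output shape also depends on the ETC RelativePositionGenerator imported
# above). With 224x224 images split into 16x16 patches there are 14 patches per
# row, so the image occupies 14 * 14 = 196 positions and the remaining
# `seq_len - 196` positions are text:
# generator = MmtRelativePositionGenerator(
#     num_patch_per_row=14,
#     num_core_layers=2,
#     text_relative_pos_max_distance=12)
# rel_att_ids = generator.make_relative_att_ids(seq_len=196 + 20, batch_size=1)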
|
the-stack_0_22728 | import numpy as np
import random
import copy
from collections import namedtuple, deque
from .model import Actor, Critic
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 128 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-4 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, random_seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
def step(self, state, action, reward, next_state, done):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
self.memory.add(state, action, reward, next_state, done)
# Learn, if enough samples are available in memory
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
self.noise.reset()
def learn(self, experiences, gamma):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Compute critic loss
Q_expected = self.critic_local(states, actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class OUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size, seed, mu=0., theta=0.15, sigma=0.2):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.seed = random.seed(seed)
self.reset()
def reset(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.array([random.random() for i in range(len(x))])
self.state = x + dx
return self.state
class ReplayBuffer:
"""Fixed-size buffer to store experience tuples."""
def __init__(self, action_size, buffer_size, batch_size, seed):
"""Initialize a ReplayBuffer object.
Params
======
buffer_size (int): maximum size of buffer
batch_size (int): size of each training batch
"""
self.action_size = action_size
self.memory = deque(maxlen=buffer_size) # internal memory (deque)
self.batch_size = batch_size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
"""Randomly sample a batch of experiences from memory."""
experiences = random.sample(self.memory, k=self.batch_size)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).float().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states, actions, rewards, next_states, dones)
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
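# Illustrative usage sketch (not part of the original module; assumes a
# Gym-style environment `env` whose observation/action sizes match the Actor
# and Critic networks imported from .model):
# agent = Agent(state_size=33, action_size=4, random_seed=2)
# state = env.reset()
# for t in range(1000):
#     action = agent.act(state)                             # noisy action from the local actor
#     next_state, reward, done, _ = env.step(action)
#     agent.step(state, action, reward, next_state, done)   # store transition and maybe learn
#     state = next_state
#     if done:
#         break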
|
the-stack_0_22729 | from __future__ import absolute_import
# System modules
import os
import sys
# Third-party modules
import six
# LLDB Modules
import lldb
from .lldbtest import *
from . import lldbutil
from lldbsuite.test.decorators import *
@skipIfRemote
@skipIfWindows # llvm.org/pr22274: need a pexpect replacement for windows
class PExpectTest(TestBase):
NO_DEBUG_INFO_TESTCASE = True
PROMPT = "(lldb) "
def expect_prompt(self):
self.child.expect_exact(self.PROMPT)
def launch(self, executable=None, extra_args=None, timeout=30, dimensions=None):
logfile = getattr(sys.stdout, 'buffer',
sys.stdout) if self.TraceOn() else None
args = ['--no-lldbinit', '--no-use-colors']
for cmd in self.setUpCommands():
args += ['-O', cmd]
if executable is not None:
args += ['--file', executable]
if extra_args is not None:
args.extend(extra_args)
env = dict(os.environ)
env["TERM"]="vt100"
import pexpect
self.child = pexpect.spawn(
lldbtest_config.lldbExec, args=args, logfile=logfile,
timeout=timeout, dimensions=dimensions, env=env)
self.expect_prompt()
for cmd in self.setUpCommands():
self.child.expect_exact(cmd)
self.expect_prompt()
if executable is not None:
self.child.expect_exact("target create")
self.child.expect_exact("Current executable set to")
self.expect_prompt()
def expect(self, cmd, substrs=None):
self.assertNotIn('\n', cmd)
self.child.sendline(cmd)
# If 'substrs' is a string then this code would just check that every
# character of the string is in the output.
assert not isinstance(substrs, six.string_types), \
"substrs must be a collection of strings"
if substrs is not None:
for s in substrs:
self.child.expect_exact(s)
self.expect_prompt()
def quit(self, gracefully=True):
self.child.sendeof()
self.child.close(force=not gracefully)
self.child = None
def cursor_forward_escape_seq(self, chars_to_move):
"""
Returns the escape sequence to move the cursor forward/right
by a certain amount of characters.
"""
return b"\x1b\[" + str(chars_to_move).encode("utf-8") + b"C"
|
the-stack_0_22731 | from __future__ import absolute_import
from kombu.transport.virtual.scheduling import FairCycle
from kombu.tests.utils import TestCase
class MyEmpty(Exception):
pass
def consume(fun, n):
r = []
for i in range(n):
r.append(fun())
return r
class test_FairCycle(TestCase):
def test_cycle(self):
resources = ['a', 'b', 'c', 'd', 'e']
def echo(r, timeout=None):
return r
# cycle should be ['a', 'b', 'c', 'd', 'e', ... repeat]
cycle = FairCycle(echo, resources, MyEmpty)
for i in range(len(resources)):
self.assertEqual(cycle.get(), (resources[i],
resources[i]))
for i in range(len(resources)):
self.assertEqual(cycle.get(), (resources[i],
resources[i]))
def test_cycle_breaks(self):
resources = ['a', 'b', 'c', 'd', 'e']
def echo(r):
if r == 'c':
raise MyEmpty(r)
return r
cycle = FairCycle(echo, resources, MyEmpty)
self.assertEqual(
consume(cycle.get, len(resources)),
[('a', 'a'), ('b', 'b'), ('d', 'd'),
('e', 'e'), ('a', 'a')],
)
self.assertEqual(
consume(cycle.get, len(resources)),
[('b', 'b'), ('d', 'd'), ('e', 'e'),
('a', 'a'), ('b', 'b')],
)
cycle2 = FairCycle(echo, ['c', 'c'], MyEmpty)
with self.assertRaises(MyEmpty):
consume(cycle2.get, 3)
def test_cycle_no_resources(self):
cycle = FairCycle(None, [], MyEmpty)
cycle.pos = 10
with self.assertRaises(MyEmpty):
cycle._next()
def test__repr__(self):
self.assertTrue(repr(FairCycle(lambda x: x, [1, 2, 3], MyEmpty)))
|
the-stack_0_22733 | # -*- coding: utf-8 -*-
"""
Bone Age Assessment BoNet train routine.
"""
# Standard lib imports
import os
import csv
import glob
import time
import argparse
import warnings
import pandas as pd
import os.path as osp
# PyTorch imports
import torch
import torch.nn as nn
import torch.optim as optim
import horovod.torch as hvd
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
# Other imports
from tqdm import tqdm
import pdb
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
# Dataloading-related settings
parser.add_argument('--heatmaps', default=False, action='store_true',
help='Train model with gaussian heatmaps')
parser.add_argument('--cropped', default=False, action='store_true',
help='Train model with cropped images according to bbox')
parser.add_argument('--dataset', default='RSNA', type=str,choices=['RSNA','RHPE'],
help='Dataset to perform training')
parser.add_argument('--data-train', default='data/train/', type=str,
help='path to train data folder')
parser.add_argument('--ann-path-train', default='train.csv', type=str,
help='path to BAA annotations file')
parser.add_argument('--rois-path-train', default='train.json',
type=str, help='path to ROIs annotations in coco format')
parser.add_argument('--data-val', default='data/val/', type=str,
help='path to val data folder')
parser.add_argument('--ann-path-val', default='val.csv', type=str,
help='path to BAA annotations file')
parser.add_argument('--rois-path-val', default='val.json',
type=str, help='path to ROIs annotations in coco format')
parser.add_argument('--save-folder', default='TRAIN/new_test/',
help='location to save checkpoint models')
parser.add_argument('--snapshot', default='boneage_bonet_weights.pth',
help='path to weight snapshot file')
parser.add_argument('--optim-snapshot', type=str,
default='boneage_bonet_optim.pth',
help='path to optimizer state snapshot')
parser.add_argument('--eval-first', default=False, action='store_true',
help='evaluate model weights before training')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
# Training procedure settings
parser.add_argument('--batch-size', default=1, type=int,
help='Batch size for training')
parser.add_argument('--epochs', type=int, default=20,
help='upper epoch limit')
parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float,
help='initial learning rate')
parser.add_argument('--patience', default=2, type=int,
help='patience epochs for LR decreasing')
parser.add_argument('--start-epoch', type=int, default=1,
help='epoch number to resume')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--log-interval', type=int, default=30, metavar='N',
help='report interval')
parser.add_argument('--gpu', type=str, default='2,3')
args = parser.parse_args()
args_dict = vars(args)
print('Argument list to program')
print('\n'.join(['--{0} {1}'.format(arg, args_dict[arg])
for arg in args_dict]))
print('\n\n')
torch.manual_seed(args.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
# Horovod settings
hvd.init()
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(hvd.size())
args.distributed = hvd.size() > 1
args.rank = hvd.rank()
args.size = hvd.size()
# CREATE THE NETWORK ARCHITECTURE AND LOAD THE BEST MODEL
if args.heatmaps:
from models.bonet_heatmap import BoNet
else:
from models.bonet import BoNet
net = BoNet()
if args.rank == 0:
print('---> Number of params: {}'.format(
sum([p.data.nelement() for p in net.parameters()])))
if osp.exists(args.snapshot):
model_to_load=args.snapshot
else:
model_to_load=args.save_folder+'/'+args.snapshot
if osp.exists(model_to_load) and args.rank == 0:
print('Loading state dict from: {0}'.format(model_to_load))
snapshot_dict = torch.load(model_to_load, map_location=lambda storage, loc: storage)
weights= net.state_dict()
new_snapshot_dict=snapshot_dict.copy()
for key in snapshot_dict:
if key not in weights.keys():
new_key='inception_v3.'+key
new_snapshot_dict[new_key]=snapshot_dict[key]
new_snapshot_dict.pop(key)
net.load_state_dict(new_snapshot_dict)
net = net.to(device)
# Criterion
criterion = nn.L1Loss()
# Optimizer
optimizer = optim.Adam(net.parameters(), lr=args.lr * args.size)
annealing = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.8, patience=args.patience, cooldown=5,
min_lr=0.00001, eps=0.00001, verbose=True)
if osp.exists(args.optim_snapshot):
optim_to_load=args.optim_snapshot
else:
optim_to_load=args.save_folder+'/'+args.optim_snapshot
if osp.exists(optim_to_load):
print('loading optim snapshot from {}'.format(optim_to_load))
optimizer.load_state_dict(torch.load(optim_to_load, map_location=lambda storage,
loc: storage))
# Horovod
hvd.broadcast_parameters(net.state_dict(), root_rank=0)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=net.named_parameters())
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
group = optimizer.param_groups[0]
group['betas'] = (float(group['betas'][0]), float(group['betas'][1]))
# Dataloaders
train_transform = transforms.Compose([transforms.Resize((500, 500)),
transforms.RandomAffine(
20, translate=(0.2, 0.2),
scale=(1, 1.2)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()])
val_transform = transforms.Compose([transforms.Resize((500, 500)),
transforms.ToTensor()])
if args.heatmaps:
from data.data_loader import Boneage_HeatmapDataset as Dataset
else:
from data.data_loader import BoneageDataset as Dataset
train_dataset = Dataset(args.data_train, args.ann_path_train,args.rois_path_train,
img_transform=train_transform,crop=args.cropped,dataset=args.dataset)
val_dataset = Dataset(args.data_val, args.ann_path_val,args.rois_path_val,
img_transform=val_transform,crop=args.cropped,dataset=args.dataset)
# Data samplers
train_sampler = None
val_sampler = None
if args.distributed:
train_sampler = DistributedSampler(train_dataset,
num_replicas=args.size,
rank=args.rank)
val_sampler = DistributedSampler(val_dataset,
num_replicas=args.size,
rank=args.rank)
train_loader = DataLoader(train_dataset,
shuffle=(train_sampler is None),
sampler=train_sampler,
batch_size=args.batch_size,
num_workers=args.workers)
val_loader = DataLoader(val_dataset,
shuffle=(val_sampler is None),
sampler=val_sampler,
batch_size=1,
num_workers=args.workers)
def main():
print('Train begins...')
best_val_loss = None
# Find best model in validation
if osp.exists(osp.join(args.save_folder, 'train.csv')):
with open(osp.join(args.save_folder, 'train.csv')) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
val_list = []
for row in csv_reader:
val_list.append(float(row[2]))
best_val_loss = min(val_list)
if args.eval_first:
val_loss = evaluate()
try:
out_file = open(os.path.join(args.save_folder, 'train.csv'), 'a+')
for epoch in range(args.start_epoch, args.epochs + 1):
if args.distributed:
train_sampler.set_epoch(epoch)
val_sampler.set_epoch(epoch)
if args.rank == 0:
epoch_start_time = time.time()
train_loss = train(epoch)
annealing.step(train_loss)
val_loss = evaluate()
if args.rank == 0:
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s '
'| epoch loss {:.6f} |'.format(
epoch, time.time() - epoch_start_time, train_loss))
print('-' * 89)
out_file.write('{}, {}, {}\n'.format(epoch, train_loss, val_loss))
out_file.flush()
# keep the checkpoint with the lowest validation loss; the parentheses avoid
# the original `or`/`and` precedence pitfall
if (best_val_loss is None or val_loss < best_val_loss) and args.rank == 0:
best_val_loss = val_loss
filename = osp.join(args.save_folder, 'boneage_bonet_weights.pth')
torch.save(net.state_dict(), filename)
out_file.close()
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
def train(epoch):
net.train()
total_loss = AverageMeter()
epoch_loss_stats = AverageMeter()
time_stats = AverageMeter()
loss = 0
optimizer.zero_grad()
for (batch_idx, (imgs, bone_ages, genders, _)) in enumerate(train_loader):
imgs = imgs.to(device)
bone_ages = bone_ages.to(device)
genders = genders.to(device)
start_time = time.time()
outputs = net(imgs, genders)
loss = criterion(outputs.squeeze(), bone_ages)
loss.backward()
optimizer.step()
loss = metric_average(loss.item(), 'loss')
time_stats.update(time.time() - start_time, 1)
total_loss.update(loss, 1)
epoch_loss_stats.update(loss, 1)
optimizer.zero_grad()
if (batch_idx % args.log_interval == 0) and args.rank == 0:
elapsed_time = time_stats.avg
print(' [{:5d}] ({:5d}/{:5d}) | ms/batch {:.4f} |'
' loss {:.6f} | avg loss {:.6f} | lr {:.7f}'.format(
epoch, batch_idx, len(train_loader),
elapsed_time * 1000, total_loss.avg,
epoch_loss_stats.avg,
optimizer.param_groups[0]['lr']))
total_loss.reset()
epoch_total_loss = epoch_loss_stats.avg
args.resume_iter = 0
if args.rank == 0:
filename = 'boneage_bonet_snapshot.pth'
filename = osp.join(args.save_folder, filename)
torch.save(net.state_dict(), filename)
optim_filename = 'boneage_bonet_optim.pth'
optim_filename = osp.join(args.save_folder, optim_filename)
torch.save(optimizer.state_dict(), optim_filename)
return epoch_total_loss
def evaluate():
net.eval()
epoch_total_loss = AverageMeter()
for (batch_idx, (imgs, bone_ages, genders, _)) in enumerate(val_loader):
imgs = imgs.to(device)
bone_ages = bone_ages.to(device)
genders = genders.to(device)
with torch.no_grad():
outputs = net(imgs, genders)
loss = criterion(outputs.squeeze(), bone_ages)
loss = metric_average(loss.item(), 'loss')
epoch_total_loss.update(loss, 1)
epoch_total_loss = epoch_total_loss.avg
if args.rank == 0:
print('Val loss: {:.5f}'.format(epoch_total_loss))
return epoch_total_loss
def metric_average(val, name):
tensor = torch.tensor(val)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
main()
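# Example invocation sketch (paths and hyperparameters are illustrative; the
# flags match the argparse definitions above, and multi-GPU runs are normally
# launched through horovodrun, e.g. `horovodrun -np 2 python <this_script> ...`):
# python train.py --dataset RSNA \
#     --data-train data/train/ --ann-path-train train.csv --rois-path-train train.json \
#     --data-val data/val/ --ann-path-val val.csv --rois-path-val val.json \
#     --save-folder TRAIN/new_test/ --batch-size 8 --epochs 20 --gpu 0,1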
|
the-stack_0_22738 | # -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
import warnings
import os
import contextlib
from os import path
import declarative
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import numpy as np
from .colors import color_array
try:
org_mode
except NameError:
org_mode = False
def save_figure_MP(fig, fname, *args, **kwargs):
"""
After pickling to a subprocess the figure's canvas is destroyed, so create a
new one here because matplotlib does not restore it automatically.
"""
if fig.canvas is None:
canvas = FigureCanvas(fig)
fig.set_canvas(canvas)
return fig.savefig(fname, *args, **kwargs)
class SaveToken(declarative.OverridableObject):
aps = None
fbasename = None
kwargs = {}
def __lshift__(self, other):
self.aps(self.fbasename, fig_or_fbunch = other, **self.kwargs)
return other
def __rlshift__(self, other):
self.aps(self.fbasename, fig_or_fbunch = other, **self.kwargs)
return other
def __rshift__(self, other):
self.aps(self.fbasename, fig_or_fbunch = other, **self.kwargs)
return other
def __rrshift__(self, other):
self.aps(self.fbasename, fig_or_fbunch = other, **self.kwargs)
return other
def mpl_autorasterize(fig):
children_current = fig.get_children()
children_ever = set()
while children_current:
child = children_current.pop()
if child in children_ever:
continue
else:
children_ever.add(child)
try:
more_children = child.get_children()
except AttributeError:
pass
else:
children_current.extend(more_children)
try:
xdat = child.get_xdata()
if len(xdat) > 100:
child.set_rasterized(True)
child.set_antialiased(True)
continue
except AttributeError:
pass
try:
paths = child.get_paths()
for p in paths:
if len(p.vertices) > 100:
child.set_rasterized(True)
child.set_antialiased(True)
#print(child, len(xdat))
except AttributeError:
pass
class AutoPlotSaver(declarative.OverridableObject):
max_width_in = None
max_height_in = None
save_dpi = 400
org_dpi = 100
org_subfolder = None
rasterize_auto = True
formats = declarative.DeepBunch()
formats.pdf.use = True
formats.jpg.use = False
formats.jpg.dpi = 200
formats.jpg.facecolorize = True
formats.png.use = False
embed = False
save_show = True
fixname = True
_pool = None
_last_async_result = None
@contextlib.contextmanager
def pool(self, workers = 4):
"""
Runs figure saving through a multiprocessing pool for the duration of the
context manager and waits for all pending saves to finish before exiting.
"""
import multiprocessing
wasnone = False
if self._pool is None:
if workers > 1:
asavefig._pool = multiprocessing.Pool(workers)
asavefig._last_async_result = []
wasnone = True
yield
if asavefig._last_async_result is not None:
for result in asavefig._last_async_result:
result.get()
asavefig._last_async_result = []
if wasnone:
if asavefig._pool is not None:
asavefig._pool.close()
asavefig._pool.join()
asavefig._pool = None
asavefig._last_async_result = None
def __call__(
self,
fbasename,
fig_or_fbunch = None,
fixname = None,
):
if fig_or_fbunch is None:
return SaveToken(
aps = self,
fbasename = fbasename,
kwargs = dict(
fixname = fixname,
),
)
fixname = fixname if fixname is not None else self.fixname
try:
fig = fig_or_fbunch.fig
formats = fig_or_fbunch.get("formats", None)
#the "get" method unwraps the deepbunch
formats = declarative.DeepBunch(formats)
if not formats:
formats = self.formats
save_show = fig_or_fbunch.get("save_show", None)
#and needed since show may be empty DeepBunch
if not save_show and save_show is not False:
save_show = self.save_show
except AttributeError:
fig = fig_or_fbunch
save_show = self.save_show
formats = self.formats
w, h = fig.get_size_inches()
if self.max_width_in is not None and w > self.max_width_in:
new_w = self.max_width_in
new_h = float(h)/float(w) * new_w
fig.set_size_inches(new_w, new_h)
#this silly bit reduces the formats to only the one specified
fbase, fext = path.splitext(fbasename)
if fext:
fbasename = fbase
#cut off the dot
fext = fext[1:]
single_formats = declarative.DeepBunch()
#apply any settings stored in this object or the plot itself
single_formats[fext].update_recursive(self.formats[fext])
single_formats[fext].update_recursive(formats[fext])
#force usage of this single format!
single_formats[fext].use = True
formats = single_formats
if self.rasterize_auto:
mpl_autorasterize(fig)
subfolder = ''
if self.org_subfolder:
subfolder = self.org_subfolder
fbasepath, fbasefname = path.split(fbasename)
if '_' in fbasefname and fixname:
warnings.warn("Image name contains '_' which will be changed to '-' to fix nbsphinx export")
fbasefname = fbasefname.replace('_', '-')
fbasename = path.join(fbasepath, fbasefname)
fbasename = path.join(subfolder, fbasename)
dirname = path.dirname(fbasename)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
global org_mode
used_png = False
for fmt, fB in formats.items():
if fmt == 'png' and (org_mode or self.org_subfolder):
#to avoide the elif
pass
elif not fB.use:
continue
if fmt == 'png':
used_png = True
if fB.dpi:
dpi = fB.dpi
else:
dpi = self.save_dpi
kwargs = dict()
if fB.facecolorize:
kwargs['facecolor'] = fig.get_facecolor()
if self._pool is None:
fig.savefig(
fbasename + '.' + fmt,
dpi = dpi,
bbox_inches = 'tight',
#tight_layout=True,
pad_inches = 0.05,
transparent=True,
quality = 50,
**kwargs
)
else:
mydir = os.getcwd()
self._last_async_result.append(
self._pool.apply_async(
save_figure_MP,
args = (
fig,
os.path.join(mydir, fbasename + '.' + fmt),
),
kwds = dict(
dpi = dpi,
bbox_inches = 'tight',
pad_inches = 0.05,
transparent = True,
quality = 50,
#tight_layout =True,
**kwargs
),
)
)
if used_png:
fname = fbasename + '.png'
if org_mode:
print("figure: {0}".format(fname))
print("[[file:{0}]]".format(fname))
if not self.embed:
if save_show:
try:
import IPython.display
import time
#IPython.display.display(IPython.display.Image(filename=fname, embed=False))
#html_bit = '<img src="{1}/../{0}?{1}">'.format(fname, int(time.time()))
#IPython.display.display(IPython.display.HTML(html_bit))
ftype_md = []
for fmt, fB in formats.items():
if fB.use or fmt == 'png':
md = "[{ftype}]({fbasename}.{ftype})".format(
ftype = fmt,
fbasename = fbasename
)
ftype_md.append(md)
markdown_bit = ''.format(
fname,
int(time.time()),
fbasename = fbasename,
)
IPython.display.display(IPython.display.Markdown(markdown_bit + "\n" + ", ".join(ftype_md)))
plt.close(fig)
except ImportError:
pass
else:
plt.close(fig)
else:
if save_show:
try:
import IPython.display
import time
IPython.display.display(IPython.display.Image("{0}".format(fname)))
plt.close(fig)
except ImportError:
pass
else:
plt.close(fig)
fig.set_dpi(mpl.rcParams['figure.dpi'])
return
asavefig = AutoPlotSaver()
def patchify_axes(ax, plotname, check_log_Y = False):
oldplot = getattr(ax, plotname)
def plot(X, Y, *args, **kwargs):
Y = np.asarray(Y)
b = np.broadcast(X, Y)
if check_log_Y and np.all(Y <= 0):
return
if b.shape != Y.shape:
Y = np.ones(X.shape) * Y
return oldplot(X, Y, *args, **kwargs)
plot.__name__ = oldplot.__name__
plot.__doc__ = oldplot.__doc__
setattr(ax, plotname, plot)
def patch_axes(ax):
patchify_axes(ax, 'plot')
patchify_axes(ax, 'loglog', check_log_Y = True)
patchify_axes(ax, 'semilogy', check_log_Y = True)
patchify_axes(ax, 'semilogx')
def mplfigB(
Nrows = 1,
Ncols = 1,
size_in = (None, None),
size_in_base = (None, None),
size_in_dW_dH = (3, 1),
x_by_col = False,
prop_cycle = color_array,
):
if isinstance(Nrows, (list, tuple)):
rownames = Nrows
Nrows = len(rownames)
else:
rownames = None
width_in, height_in = size_in
size_in_base_W, size_in_base_H = size_in_base
if size_in_base_W is None:
size_in_base_W = mpl.rcParams['figure.figsize'][0]
if size_in_base_H is None:
size_in_base_H = mpl.rcParams['figure.figsize'][1]
if width_in is None:
width_in = size_in_base_W + Ncols * size_in_dW_dH[0]
if height_in is None:
height_in = size_in_base_H + Nrows * size_in_dW_dH[1]
axB = declarative.Bunch()
axB.fig = plt.figure()
axB.fig.set_size_inches(width_in, height_in)
global asavefig
def save(rootname, **kwargs):
axB << asavefig(rootname, **kwargs)
axB.save = save
N = 0
axB.ax_grid_colrow = []
for idx_col in range(Ncols):
ax_list = []
axB.ax_grid_colrow.append([])
for idx_row in range(Nrows):
if x_by_col:
if idx_row != 0:
sharex = axB.ax_grid_colrow[idx_col][0]
else:
sharex = None
else:
sharex = None
ax = axB.fig.add_subplot(Nrows, Ncols, idx_row + idx_col*Nrows + 1, sharex = sharex)
if prop_cycle is not None:
ax.set_prop_cycle(
color = prop_cycle
)
#patch_axes(ax)
ax_list.append(ax)
ax.grid(b=True)
ax.grid(b=True, which = 'minor', color = (.9, .9, .9), lw = .5)
axB.ax_grid_colrow[idx_col].append(ax)
axB["ax{0}_{1}".format(idx_row, idx_col)] = ax
axB["ax{0}".format(N)] = ax
if rownames is not None:
axB[rownames[N]] = ax
N += 1
if idx_col == 0:
if idx_row == 0:
axB.ax_top = ax
if idx_row == Nrows-1:
axB.ax_bottom = ax
axB['ax_list'] = ax_list
axB['ax_list_{0}'.format(idx_col)] = ax_list
return axB
asavefig = AutoPlotSaver()
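# Illustrative usage sketch (filenames are placeholders): the module-level
# `asavefig` above is the saver invoked by the Bunch returned from mplfigB.
# axB = mplfigB(Nrows=2)
# axB.ax0.plot([0, 1, 2], [0, 1, 4])
# axB.ax1.semilogy([1, 2, 3], [1.0, 0.1, 0.01])
# axB.save('example_plot')   # writes example_plot.pdf with the default formats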
|
the-stack_0_22740 | import numpy as np
from perceptron import Perceptron
X_train = np.array([
[0, 0, 0],
[0, 1, 0],
[1, 0, 1],
[1, 1, 1],
[0, 1, 1],
])
y_train = np.array([0, 0, 1, 1, 0])
X_test = np.array([
[0, 0, 1],
[1, 1, 1],
[0, 1, 1],
[1, 1, 0],
[1, 0, 0],
])
y_test = np.array([0, 1, 0, 1, 1])
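# Note: in both splits the label equals the first feature column, so this toy
# perceptron only needs a positive weight on input 0 to reach 100% accuracy.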
def accuracy(y, y_pred):
accuracy = np.sum(y == y_pred) / len(y)
return accuracy
p = Perceptron(learning_rate=0.01, epochs=10000)
p.train(X_train, y_train)
print("Trained weights")
print(p.weights)
predictions = p.predict(X_test)
print('Predictions')
print(predictions)
print("Accuracy -> ", accuracy(y_test, predictions)) |
the-stack_0_22741 | tagihan = [50000, 75000, -150000, 125000, 300000, -50000, 200000]
i = 0
jumlah_tagihan = len(tagihan)
total_tagihan = 0
while i < jumlah_tagihan:
# jika terdapat tagihan ke-i yang bernilai minus (di bawah nol),
# abaikan tagihan ke-i dan lanjutkan ke tagihan berikutnya
if tagihan[i] < 0:
i += 1
continue
total_tagihan += tagihan[i]
i += 1
print(total_tagihan)
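# With the bills above, the printed total is 750000: the two negative entries
# (-150000 and -50000) are skipped by the `continue` branch.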
|
the-stack_0_22742 | # This code is built from Kensho Hara's repository: https://github.com/kenshohara/3D-ResNets-PyTorch.
# Copyright (c) 2017 Kensho Hara.
# The repository is available under the MIT License.
#
# ==========================================================================================
# This implementation includes Same-Conv and Full-Conv versions all together.
# For more details see the paper:
# O. S. Kayhan and J. van Gemert,
# "On Translation Invariance in CNNs: Convolutional Layers can Exploit Absolute Spatial Location"
# In CVPR, 2020.
# https://arxiv.org/abs/2003.07064
# ==========================================================================================
import os
import sys
import json
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.optim import lr_scheduler
from opts import parse_opts
from model import generate_model
from mean import get_mean, get_std
from spatial_transforms import (
Compose, Normalize, Scale, CenterCrop, CornerCrop, MultiScaleCornerCrop,
MultiScaleRandomCrop, RandomHorizontalFlip, ToTensor)
from temporal_transforms import LoopPadding, TemporalRandomCrop
from target_transforms import ClassLabel, VideoID
from target_transforms import Compose as TargetCompose
from dataset import get_training_set, get_validation_set, get_test_set
from utils import Logger
from train import train_epoch
from validation import val_epoch
import test
if __name__ == '__main__':
opt = parse_opts()
if opt.root_path != '':
opt.video_path = os.path.join(opt.root_path, opt.video_path)
opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
opt.result_path = os.path.join(opt.root_path, opt.result_path)
if opt.resume_path:
opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
opt.scales.append(opt.scales[-1] * opt.scale_step)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
opt.std = get_std(opt.norm_value)
print(opt)
#with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
#json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
model, parameters = generate_model(opt)
print(model)
criterion = nn.CrossEntropyLoss()
if not opt.no_cuda:
criterion = criterion.cuda()
if opt.no_mean_norm and not opt.std_norm:
norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
norm_method = Normalize(opt.mean, [1, 1, 1])
else:
norm_method = Normalize(opt.mean, opt.std)
if not opt.no_train:
assert opt.train_crop in ['random', 'corner', 'center']
if opt.train_crop == 'random':
crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
elif opt.train_crop == 'corner':
crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)
elif opt.train_crop == 'center':
crop_method = MultiScaleCornerCrop(
opt.scales, opt.sample_size, crop_positions=['c'])
spatial_transform = Compose([
crop_method,
RandomHorizontalFlip(),
ToTensor(opt.norm_value), norm_method
])
temporal_transform = TemporalRandomCrop(opt.sample_duration)
target_transform = ClassLabel()
training_data = get_training_set(opt, spatial_transform,
temporal_transform, target_transform)
train_loader = torch.utils.data.DataLoader(
training_data,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_threads,
pin_memory=True)
train_logger = Logger(
os.path.join(opt.result_path, 'train.log'),
['epoch', 'loss', 'acc', 'lr'])
train_batch_logger = Logger(
os.path.join(opt.result_path, 'train_batch.log'),
['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])
if opt.nesterov:
dampening = 0
else:
dampening = opt.dampening
optimizer = optim.SGD(
parameters,
lr=opt.learning_rate,
momentum=opt.momentum,
dampening=dampening,
weight_decay=opt.weight_decay,
nesterov=opt.nesterov)
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, 'min', patience=opt.lr_patience)
if not opt.no_val:
spatial_transform = Compose([
Scale(opt.sample_size),
CenterCrop(opt.sample_size),
ToTensor(opt.norm_value), norm_method
])
temporal_transform = LoopPadding(opt.sample_duration)
target_transform = ClassLabel()
validation_data = get_validation_set(
opt, spatial_transform, temporal_transform, target_transform)
val_loader = torch.utils.data.DataLoader(
validation_data,
batch_size=16,
shuffle=False,
num_workers=opt.n_threads,
pin_memory=True)
val_logger = Logger(
os.path.join(opt.result_path, 'val.log'), ['epoch', 'loss', 'acc'])
if opt.resume_path:
print('loading checkpoint {}'.format(opt.resume_path))
checkpoint = torch.load(opt.resume_path)
assert opt.arch == checkpoint['arch']
opt.begin_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
if not opt.no_train:
optimizer.load_state_dict(checkpoint['optimizer'])
print('run')
for i in range(opt.begin_epoch, opt.n_epochs + 1):
if not opt.no_train:
train_epoch(i, train_loader, model, criterion, optimizer, opt,
train_logger, train_batch_logger)
if not opt.no_val:
validation_loss = val_epoch(i, val_loader, model, criterion, opt,
val_logger)
if not opt.no_train and not opt.no_val:
scheduler.step(validation_loss)
if opt.test:
spatial_transform = Compose([
Scale(int(opt.sample_size / opt.scale_in_test)),
CornerCrop(opt.sample_size, opt.crop_position_in_test),
ToTensor(opt.norm_value), norm_method
])
temporal_transform = LoopPadding(opt.sample_duration)
target_transform = VideoID()
test_data = get_test_set(opt, spatial_transform, temporal_transform,
target_transform)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.n_threads,
pin_memory=True)
test.test(test_loader, model, opt, test_data.class_names)
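# Example invocation sketch (flag spellings are defined in opts.py, which is
# not shown here, and are assumed to mirror the option names used above;
# paths and values are placeholders):
# python main.py --root_path ~/data --video_path kinetics_jpg \
#     --annotation_path kinetics.json --result_path results \
#     --dataset kinetics --model resnet --model_depth 34 \
#     --batch_size 128 --n_threads 4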
|
the-stack_0_22743 | # @Time : 2020/12/16
# @Author : Yuanhang Zhou
# @Email : [email protected]
# UPDATE
# @Time : 2020/12/29, 2021/1/4
# @Author : Xiaolei Wang, Yuanhang Zhou
# @email : [email protected], [email protected]
r"""
TextCNN
=======
References:
Kim, Yoon. `"Convolutional Neural Networks for Sentence Classification."`_ in EMNLP 2014.
.. _`"Convolutional Neural Networks for Sentence Classification."`:
https://www.aclweb.org/anthology/D14-1181/
"""
import torch
import torch.nn.functional as F
from loguru import logger
from torch import nn
from crslab.model.base import BaseModel
class TextCNNModel(BaseModel):
"""
Attributes:
movie_num: An integer indicating the number of items.
num_filters: An integer indicating the number of filters in the CNN.
embed: An integer indicating the size of the embedding layer.
filter_sizes: A string indicating the filter sizes in the CNN (evaluated to a tuple).
dropout: A float indicating the dropout rate.
"""
def __init__(self, opt, device, vocab, side_data):
"""
Args:
opt (dict): A dictionary record the hyper parameters.
device (torch.device): A variable indicating which device to place the data and model.
vocab (dict): A dictionary record the vocabulary information.
side_data (dict): A dictionary record the side data.
"""
self.movie_num = vocab['n_entity']
self.num_filters = opt['num_filters']
self.embed = opt['embed']
self.filter_sizes = eval(opt['filter_sizes'])
self.dropout = opt['dropout']
super(TextCNNModel, self).__init__(opt, device)
def conv_and_pool(self, x, conv):
x = F.relu(conv(x)).squeeze(3)
x = F.max_pool1d(x, x.size(2)).squeeze(2)
return x
def build_model(self):
self.embedding = nn.Embedding(self.movie_num, self.embed)
self.convs = nn.ModuleList(
[nn.Conv2d(1, self.num_filters, (k, self.embed)) for k in self.filter_sizes])
self.dropout = nn.Dropout(self.dropout)
self.fc = nn.Linear(self.num_filters * len(self.filter_sizes), self.movie_num)
# this loss may conduct to some weakness
self.rec_loss = nn.CrossEntropyLoss()
logger.debug('[Finish build rec layer]')
def recommend(self, batch, mode):
context, mask, input_ids, target_pos, input_mask, sample_negs, y = batch
out = self.embedding(context)
out = out.unsqueeze(1)
out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
out = self.dropout(out)
out = self.fc(out)
rec_scores = out
rec_loss = self.rec_loss(out, y)
return rec_loss, rec_scores
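# Illustrative configuration sketch (keys inferred from the constructor above;
# values are placeholders): opt = {'num_filters': 100, 'embed': 128,
# 'filter_sizes': '(2, 3, 4)', 'dropout': 0.5} with vocab = {'n_entity': 30000}
# builds three parallel Conv2d branches whose max-pooled outputs concatenate
# into a 300-dim vector before the final linear recommendation layer.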
|
the-stack_0_22744 | # -*- coding: utf-8 -*-
from markdown import Markdown
from django.forms import Textarea
from django.template import Context
from django.template.loader import get_template
from pybb.markup.base import smile_it, BaseParser
class MarkdownWidget(Textarea):
class Media:
css = {
"all": (
"markitup/skins/simple/style.css",
"markitup/sets/markdown/style.css",
),
}
js = (
"markitup/ajax_csrf.js",
"markitup/jquery.markitup.js",
"markitup/sets/markdown/set.js",
"pybb/js/markitup.js",
)
def render(self, *args, **kwargs):
tpl = get_template("pybb/markup/markdown_widget.html")
ctx = {"widget_output": super(MarkdownWidget, self).render(*args, **kwargs)}
return tpl.render(ctx)
class MarkdownParser(BaseParser):
widget_class = MarkdownWidget
def __init__(self):
self._parser = Markdown(safe_mode="escape")
def format(self, text, instance=None):
if instance and instance.pk:
text = self.format_attachments(text, attachments=instance.attachments.all())
return smile_it(self._parser.convert(text))
def quote(self, text, username=""):
return ">" + text.replace("\n", "\n>").replace("\r", "\n>") + "\n"
|
the-stack_0_22745 | #!/usr/bin/env python
from setuptools import setup, find_packages
install_requires = [
'boto3>=1.9.240',
'click>=6.7',
'configparser>=3.5.0',
'docker>=2.4.2',
'dockerpty>=0.4.1',
'jsonpath>=0.75',
'tabulate>=0.7.7',
'humanize>=0.5.1',
'pytz>=2017.2',
'stringcase>=1.2.0',
'pyyaml>=5.1.2',
'oyaml>=0.9',
'pygments>=2.4.2',
'jsonpath-ng==1.4.3',
'Jinja2>=2.10.3',
'MarkupSafe>=1.1.1'
]
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Topic :: System :: Clustering',
]
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name='ecsctl',
version='20200310',
description='kubectl-style command line client for AWS ECS.',
license="MIT license",
long_description=long_description,
author='Witold Gren',
author_email='[email protected]',
url='https://github.com/witold-gren/ecsctl',
packages=find_packages(include=['ecsctl', 'ecsctl.commands']),
entry_points={
'console_scripts': [
'ecsctl = ecsctl.__main__:main'
]
},
install_requires=install_requires,
keywords=['ECS', 'ecsctl', 'kubectl', 'AWS', 'docker'],
classifiers=classifiers,
include_package_data=True,
)
|
the-stack_0_22746 | #!/usr/bin/env python3
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
import unittest
from unittest.mock import MagicMock, Mock, PropertyMock, call, mock_open, patch
from magma_access_gateway_installer.agw_installer import AGWInstaller
class TestAGWInstaller(unittest.TestCase):
TEST_MAGMA_VERSION = "bla-bla-123"
APT_LIST_WITH_MAGMA = b"""lua-cjson/focal,now 2.1.0+dfsg-2.1 amd64 [installed,automatic]\n
lvm2/focal,now 2.03.07-1ubuntu1 amd64 [installed,automatic]\n
lxd-agent-loader/focal,now 0.4 all [installed,automatic]\n
lz4/focal-updates,focal-security,now 1.9.2-2ubuntu0.20.04.1 amd64 [installed,automatic]\n
magma-cpp-redis/focal-1.6.1,now 4.3.1.1-2 amd64 [installed,automatic]\n
magma-libfluid/focal-1.6.1,now 0.1.0.6-1 amd64 [installed,automatic]\n
magma-libtacopie/focal-1.6.1,now 3.2.0.1-1 amd64 [installed,automatic]\n
magma-sctpd/focal-1.6.1,now 1.6.1-1636529012-5d886707 amd64 [installed,automatic]\n
magma/focal-1.6.1,now 1.6.1-1636529012-5d886707 amd64 [installed]\n
make/focal,now 4.2.1-1.2 amd64 [installed,automatic]\n
man-db/focal,now 2.9.1-1 amd64 [installed,automatic]\n
"""
ETC_CA_CERTIFICATES_CONF_WITH_DST_ROOT_CA_X3_FORBIDDEN = """mozilla/Cybertrust_Global_Root.crt
mozilla/D-TRUST_Root_Class_3_CA_2_2009.crt
mozilla/D-TRUST_Root_Class_3_CA_2_EV_2009.crt
!mozilla/DST_Root_CA_X3.crt
!mozilla/Deutsche_Telekom_Root_CA_2.crt
mozilla/DigiCert_Assured_ID_Root_CA.crt
mozilla/DigiCert_Assured_ID_Root_G2.crt
"""
ETC_CA_CERTIFICATES_CONF_WITH_DST_ROOT_CA_X3_ALLOWED = """mozilla/Cybertrust_Global_Root.crt
mozilla/D-TRUST_Root_Class_3_CA_2_2009.crt
mozilla/D-TRUST_Root_Class_3_CA_2_EV_2009.crt
mozilla/DST_Root_CA_X3.crt
!mozilla/Deutsche_Telekom_Root_CA_2.crt
mozilla/DigiCert_Assured_ID_Root_CA.crt
mozilla/DigiCert_Assured_ID_Root_G2.crt
"""
def setUp(self) -> None:
self.agw_installer = AGWInstaller()
@patch(
"magma_access_gateway_installer.agw_installer.check_output",
return_value=APT_LIST_WITH_MAGMA,
)
def test_given_magma_agw_installed_when_agw_installer_then_installer_exits_without_executing_any_commands( # noqa: E501
self, _
):
self.assertEqual(self.agw_installer.install(), None)
@patch("magma_access_gateway_installer.agw_installer.check_call")
def test_given_magma_agw_not_installed_when_update_apt_cache_then_apt_update_is_called(
self, mock_check_call
):
self.agw_installer.update_apt_cache()
mock_check_call.assert_called_once_with(["apt", "-qq", "update"])
@patch("magma_access_gateway_installer.agw_installer.check_call")
def test_given_magma_agw_not_installed_when_update_ca_certificates_package_then_relevant_apt_command_is_called( # noqa: E501
self, mock_check_call
):
self.agw_installer.update_ca_certificates_package()
mock_check_call.assert_called_once_with(["apt", "-qq", "install", "ca-certificates"])
@patch(
"magma_access_gateway_installer.agw_installer.open",
new_callable=mock_open,
read_data=ETC_CA_CERTIFICATES_CONF_WITH_DST_ROOT_CA_X3_FORBIDDEN,
)
@patch("magma_access_gateway_installer.agw_installer.check_call")
def test_given_dst_root_ca_x3_certificate_forbidden_when_forbid_usage_of_expired_dst_root_ca_x3_certificate_then_no_changes_are_done_to_etc_ca_certificates_conf( # noqa: E501
self, mock_check_call, mock_open_file
):
self.agw_installer.forbid_usage_of_expired_dst_root_ca_x3_certificate()
mock_open_file.assert_called_once_with("/etc/ca-certificates.conf", "r")
mock_check_call.assert_not_called()
@patch(
"magma_access_gateway_installer.agw_installer.open",
new_callable=mock_open,
read_data=ETC_CA_CERTIFICATES_CONF_WITH_DST_ROOT_CA_X3_ALLOWED,
)
@patch("magma_access_gateway_installer.agw_installer.check_call", Mock())
def test_given_dst_root_ca_x3_certificate_allowed_when_forbid_usage_of_expired_dst_root_ca_x3_certificate_then_certificate_is_marked_as_forbidden_in_etc_ca_certificates_conf( # noqa: E501
self, mock_open_file
):
expected_etc_ca_certificates_conf = [
"mozilla/Cybertrust_Global_Root.crt\n",
"mozilla/D-TRUST_Root_Class_3_CA_2_2009.crt\n",
"mozilla/D-TRUST_Root_Class_3_CA_2_EV_2009.crt\n",
"!mozilla/DST_Root_CA_X3.crt\n",
"!mozilla/Deutsche_Telekom_Root_CA_2.crt\n",
"mozilla/DigiCert_Assured_ID_Root_CA.crt\n",
"mozilla/DigiCert_Assured_ID_Root_G2.crt\n",
]
self.agw_installer.forbid_usage_of_expired_dst_root_ca_x3_certificate()
mock_open_file().writelines.assert_called_once_with(expected_etc_ca_certificates_conf)
@patch("magma_access_gateway_installer.agw_installer.check_call")
@patch(
"magma_access_gateway_installer.agw_installer.open",
new_callable=mock_open,
read_data=ETC_CA_CERTIFICATES_CONF_WITH_DST_ROOT_CA_X3_ALLOWED,
)
def test_given_dst_root_ca_x3_certificate_allowed_when_forbid_usage_of_expired_dst_root_ca_x3_certificate_then_ca_certificates_are_updated( # noqa: E501
self, _, mock_check_call
):
self.agw_installer.forbid_usage_of_expired_dst_root_ca_x3_certificate()
mock_check_call.assert_called_once_with(["update-ca-certificates"])
@patch("magma_access_gateway_installer.agw_installer.check_call")
@patch("magma_access_gateway_installer.agw_installer.open")
@patch("magma_access_gateway_installer.agw_installer.os.path.exists", return_value=True)
def test_given_magma_apt_repo_configured_when_configure_apt_for_magma_agw_deb_package_installation_then_new_apt_repo_is_not_created( # noqa: E501
self, _, mock_open_file, mock_check_call
):
self.agw_installer.configure_apt_for_magma_agw_deb_package_installation()
mock_open_file.assert_not_called()
mock_check_call.assert_not_called()
@patch("magma_access_gateway_installer.agw_installer.open", new_callable=mock_open)
@patch(
"magma_access_gateway_installer.agw_installer.AGWInstaller.MAGMA_VERSION",
new_callable=PropertyMock,
)
@patch("magma_access_gateway_installer.agw_installer.check_call", Mock())
@patch("magma_access_gateway_installer.agw_installer.os.path.exists", return_value=False)
def test_given_magma_apt_repo_not_configured_when_configure_apt_for_magma_agw_deb_package_installation_then_new_apt_repo_config_file_is_created( # noqa: E501
self, _, mock_magma_version, mock_open_file
):
mock_magma_version.return_value = self.TEST_MAGMA_VERSION
expected_apt_repo_config_file_content = (
"deb https://artifactory.magmacore.org/artifactory/debian "
f"{self.TEST_MAGMA_VERSION} main"
)
self.agw_installer.configure_apt_for_magma_agw_deb_package_installation()
self.assertTrue(
call("/etc/apt/sources.list.d/magma.list", "w") in mock_open_file.mock_calls
)
self.assertTrue(
call(expected_apt_repo_config_file_content) in mock_open_file().write.mock_calls
)
@patch("magma_access_gateway_installer.agw_installer.open", new_callable=mock_open)
@patch("magma_access_gateway_installer.agw_installer.check_call")
@patch("magma_access_gateway_installer.agw_installer.os.path.exists", return_value=False)
def test_given_magma_apt_repo_not_configured_when_configure_apt_for_magma_agw_deb_package_installation_then_unvalidated_apt_signing_key_is_added( # noqa: E501
self, _, mock_check_call, mock_open_file
):
expected_99insecurehttpsrepo_content = """Acquire::https::artifactory.magmacore.org/artifactory/debian {
Verify-Peer "false";
Verify-Host "false";
};
""" # noqa: E501
self.agw_installer.configure_apt_for_magma_agw_deb_package_installation()
self.assertTrue(
call(
[
"apt-key",
"adv",
"--fetch-keys",
"https://artifactory.magmacore.org/artifactory/api/gpg/key/public",
]
)
in mock_check_call.mock_calls
)
self.assertTrue(
call("/etc/apt/apt.conf.d/99insecurehttpsrepo", "w") in mock_open_file.mock_calls
)
self.assertTrue(
call(expected_99insecurehttpsrepo_content) in mock_open_file().write.mock_calls
)
@patch("magma_access_gateway_installer.agw_installer.check_call")
@patch("magma_access_gateway_installer.agw_installer.open", new_callable=mock_open)
@patch("magma_access_gateway_installer.agw_installer.os.path.exists", return_value=False)
def test_given_magma_apt_repo_not_configured_when_configure_apt_for_magma_agw_deb_package_installation_then_apt_cache_is_updated( # noqa: E501
self, _, __, mock_check_call
):
self.agw_installer.configure_apt_for_magma_agw_deb_package_installation()
self.assertTrue(call(["apt", "-qq", "update"]) in mock_check_call.mock_calls)
@patch("magma_access_gateway_installer.agw_installer.check_call")
def test_given_magma_agw_not_installed_when_install_runtime_dependencies_then_apt_installs_required_packages( # noqa: E501
self, mock_check_call
):
expected_apt_calls = [
call(f"apt -qq install -y --no-install-recommends {package_name}", shell=True)
for package_name in self.agw_installer.MAGMA_AGW_RUNTIME_DEPENDENCIES
]
self.agw_installer.install_runtime_dependencies()
mock_check_call.assert_has_calls(expected_apt_calls)
@patch("magma_access_gateway_installer.agw_installer.check_call")
def test_given_magma_agw_not_installed_when_preconfigure_wireshark_suid_property_then_correct_configuration_is_sent_to_debconf_database( # noqa: E501
self, mock_check_call
):
self.agw_installer.preconfigure_wireshark_suid_property()
mock_check_call.assert_called_once_with(
'echo "wireshark-common wireshark-common/install-setuid boolean true" | debconf-set-selections', # noqa: E501
shell=True,
)
@patch("magma_access_gateway_installer.agw_installer.check_call")
def test_given_magma_agw_not_installed_when_install_magma_agw_then_correct_apt_command_is_called( # noqa: E501
self, mock_check_call
):
self.agw_installer.install_magma_agw()
mock_check_call.assert_called_once_with(
'apt -o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold" '
'-o "Dpkg::Options::=--force-overwrite" -qq install -y --no-install-recommends magma',
shell=True,
)
@patch("magma_access_gateway_installer.agw_installer.check_call")
def test_given_magma_installation_process_when_start_open_vswitch_then_correct_service_is_started( # noqa: E501
self, mock_check_call
):
self.agw_installer.start_open_vswitch()
mock_check_call.assert_called_once_with(["service", "openvswitch-switch", "start"])
@patch("magma_access_gateway_installer.agw_installer.check_call")
def test_given_magma_installation_process_when_start_magma_then_magma_services_are_stopped_interfaces_are_brought_up_and_magma_services_are_started( # noqa: E501
self, mock_check_call
):
expected_calls = [call(["service", "magma@*", "stop"])]
expected_calls.extend(
[
call(["ifup", magma_interface])
for magma_interface in self.agw_installer.MAGMA_INTERFACES
]
)
expected_calls.append(call(["service", "magma@magma", "start"]))
self.agw_installer.start_magma()
mock_check_call.assert_has_calls(expected_calls)
@patch("magma_access_gateway_installer.agw_installer.os.system")
@patch("magma_access_gateway_installer.agw_installer.check_call", Mock())
@patch("magma_access_gateway_installer.agw_installer.check_output", MagicMock())
@patch("magma_access_gateway_installer.agw_installer.open", mock_open())
@patch("magma_access_gateway_installer.agw_installer.time.sleep", Mock())
def test_given_magma_not_installed_when_install_then_system_goes_for_reboot_once_installation_is_done( # noqa: E501
self, mock_os_system
):
self.agw_installer.install()
mock_os_system.assert_called_once_with("reboot")
|
the-stack_0_22747 | class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root == None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
# Enter your code here. Read input from STDIN. Print output to STDOUT
'''
class Node:
def __init__(self,info):
self.info = info
self.left = None
self.right = None
      // this is a node of the tree, which contains info as data, left, right
'''
def height(root):
    # height counts edges on the longest downward path; a single leaf has height 0
    if root.left is None and root.right is None:
        return 0
    left_height = right_height = 0
    if root.left:
        left_height += height(root.left) + 1
    if root.right:
        right_height += height(root.right) + 1
    return max(left_height, right_height)
tree = BinarySearchTree()
t = int(input())
arr = list(map(int, input().split()))
for i in range(t):
tree.create(arr[i])
print(height(tree.root))
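# Worked example (an added illustration, not part of the original solution):
# for the HackerRank-style input
#     7
#     3 5 2 1 4 6 7
# the BST built above has root 3, and its longest root-to-leaf path is
# 3 -> 5 -> 6 -> 7, so height(tree.root) prints 3.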
|
the-stack_0_22748 | import logging
import cv2
import networkx as nx
import numpy as np
from six import iteritems
from opensfm import dataset
from opensfm import features
from opensfm import log
from opensfm import transformations as tf
from opensfm import types
from opensfm import pysfm
from opensfm.context import parallel_map
logger = logging.getLogger(__name__)
class Command:
name = 'undistort'
help = "Save radially undistorted images"
def add_arguments(self, parser):
parser.add_argument(
'dataset',
help='dataset to process',
)
parser.add_argument(
'--reconstruction',
help='reconstruction to undistort',
)
parser.add_argument(
'--reconstruction-index',
help='index of the reconstruction component to undistort',
type=int,
default=0,
)
parser.add_argument(
'--tracks',
help='tracks graph of the reconstruction',
)
parser.add_argument(
'--output',
help='output folder',
default='undistorted',
)
def run(self, args):
data = dataset.DataSet(args.dataset)
udata = dataset.UndistortedDataSet(data, args.output)
reconstructions = data.load_reconstruction(args.reconstruction)
if data.tracks_exists(args.tracks):
tracks_manager = data.load_tracks_manager(args.tracks)
else:
tracks_manager = None
if reconstructions:
r = reconstructions[args.reconstruction_index]
self.undistort_reconstruction(tracks_manager, r, data, udata)
def undistort_reconstruction(self, tracks_manager, reconstruction, data, udata):
urec = types.Reconstruction()
urec.points = reconstruction.points
utracks_manager = pysfm.TracksManager()
logger.debug('Undistorting the reconstruction')
undistorted_shots = {}
for shot in reconstruction.shots.values():
if shot.camera.projection_type == 'perspective':
camera = perspective_camera_from_perspective(shot.camera)
subshots = [get_shot_with_different_camera(shot, camera)]
elif shot.camera.projection_type == 'brown':
camera = perspective_camera_from_brown(shot.camera)
subshots = [get_shot_with_different_camera(shot, camera)]
elif shot.camera.projection_type == 'fisheye':
camera = perspective_camera_from_fisheye(shot.camera)
subshots = [get_shot_with_different_camera(shot, camera)]
elif shot.camera.projection_type in ['equirectangular', 'spherical']:
subshot_width = int(data.config['depthmap_resolution'])
subshots = perspective_views_of_a_panorama(shot, subshot_width)
for subshot in subshots:
urec.add_camera(subshot.camera)
urec.add_shot(subshot)
if tracks_manager:
add_subshot_tracks(tracks_manager, utracks_manager, shot, subshot)
undistorted_shots[shot.id] = subshots
udata.save_undistorted_reconstruction([urec])
if tracks_manager:
udata.save_undistorted_tracks_manager(utracks_manager)
arguments = []
for shot in reconstruction.shots.values():
arguments.append((shot, undistorted_shots[shot.id], data, udata))
processes = data.config['processes']
parallel_map(undistort_image_and_masks, arguments, processes)
def undistort_image_and_masks(arguments):
shot, undistorted_shots, data, udata = arguments
log.setup()
logger.debug('Undistorting image {}'.format(shot.id))
# Undistort image
image = data.load_image(shot.id, unchanged=True, anydepth=True)
if image is not None:
max_size = data.config['undistorted_image_max_size']
undistorted = undistort_image(shot, undistorted_shots, image,
cv2.INTER_AREA, max_size)
for k, v in undistorted.items():
udata.save_undistorted_image(k, v)
# Undistort mask
mask = data.load_mask(shot.id)
if mask is not None:
undistorted = undistort_image(shot, undistorted_shots, mask,
cv2.INTER_NEAREST, 1e9)
for k, v in undistorted.items():
udata.save_undistorted_mask(k, v)
# Undistort segmentation
segmentation = data.load_segmentation(shot.id)
if segmentation is not None:
undistorted = undistort_image(shot, undistorted_shots, segmentation,
cv2.INTER_NEAREST, 1e9)
for k, v in undistorted.items():
udata.save_undistorted_segmentation(k, v)
# Undistort detections
detection = data.load_detection(shot.id)
if detection is not None:
undistorted = undistort_image(shot, undistorted_shots, detection,
cv2.INTER_NEAREST, 1e9)
for k, v in undistorted.items():
udata.save_undistorted_detection(k, v)
def undistort_image(shot, undistorted_shots, original, interpolation,
max_size):
"""Undistort an image into a set of undistorted ones.
Args:
shot: the distorted shot
undistorted_shots: the set of undistorted shots covering the
distorted shot field of view. That is 1 for most camera
types and 6 for equirectangular cameras.
original: the original distorted image array.
interpolation: the opencv interpolation flag to use.
max_size: maximum size of the undistorted image.
"""
if original is None:
return
projection_type = shot.camera.projection_type
if projection_type in ['perspective', 'brown', 'fisheye']:
undistort_function = {
'perspective': undistort_perspective_image,
'brown': undistort_brown_image,
'fisheye': undistort_fisheye_image,
}
new_camera = undistorted_shots[0].camera
uf = undistort_function[projection_type]
undistorted = uf(original, shot.camera, new_camera, interpolation)
return {shot.id: scale_image(undistorted, max_size)}
elif projection_type in ['equirectangular', 'spherical']:
subshot_width = undistorted_shots[0].camera.width
width = 4 * subshot_width
height = width // 2
image = cv2.resize(original, (width, height), interpolation=interpolation)
mint = cv2.INTER_LINEAR if interpolation == cv2.INTER_AREA else interpolation
res = {}
for subshot in undistorted_shots:
undistorted = render_perspective_view_of_a_panorama(
image, shot, subshot, mint)
res[subshot.id] = scale_image(undistorted, max_size)
return res
else:
raise NotImplementedError(
'Undistort not implemented for projection type: {}'.format(
shot.camera.projection_type))
def scale_image(image, max_size):
"""Scale an image not to exceed max_size."""
height, width = image.shape[:2]
factor = max_size / float(max(height, width))
if factor >= 1:
return image
width = int(round(width * factor))
height = int(round(height * factor))
return cv2.resize(image, (width, height), interpolation=cv2.INTER_NEAREST)
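# Quick sanity check for scale_image (an added sketch, not part of the original
# module): an image whose longest side exceeds max_size is shrunk by the ratio
# max_size / longest_side, e.g.
#     scale_image(np.zeros((100, 200, 3), dtype=np.uint8), 50).shape == (25, 50, 3)
# while an image already within max_size is returned unchanged.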
def undistort_perspective_image(image, camera, new_camera, interpolation):
"""Remove radial distortion from a perspective image."""
height, width = image.shape[:2]
K = camera.get_K_in_pixel_coordinates(width, height)
distortion = np.array([camera.k1, camera.k2, 0, 0])
new_K = new_camera.get_K_in_pixel_coordinates(width, height)
map1, map2 = cv2.initUndistortRectifyMap(
K, distortion, None, new_K, (width, height), cv2.CV_32FC1)
return cv2.remap(image, map1, map2, interpolation)
def undistort_brown_image(image, camera, new_camera, interpolation):
"""Remove radial distortion from a brown image."""
height, width = image.shape[:2]
K = camera.get_K_in_pixel_coordinates(width, height)
distortion = np.array([camera.k1, camera.k2, camera.p1, camera.p2, camera.k3])
new_K = new_camera.get_K_in_pixel_coordinates(width, height)
map1, map2 = cv2.initUndistortRectifyMap(
K, distortion, None, new_K, (width, height), cv2.CV_32FC1)
return cv2.remap(image, map1, map2, interpolation)
def undistort_fisheye_image(image, camera, new_camera, interpolation):
"""Remove radial distortion from a fisheye image."""
height, width = image.shape[:2]
K = camera.get_K_in_pixel_coordinates(width, height)
distortion = np.array([camera.k1, camera.k2, 0, 0])
new_K = new_camera.get_K_in_pixel_coordinates(width, height)
map1, map2 = cv2.fisheye.initUndistortRectifyMap(
K, distortion, None, new_K, (width, height), cv2.CV_32FC1)
return cv2.remap(image, map1, map2, interpolation)
def get_shot_with_different_camera(shot, camera):
"""Copy shot and replace camera."""
ushot = types.Shot()
ushot.id = shot.id
ushot.camera = camera
ushot.pose = shot.pose
ushot.metadata = shot.metadata
return ushot
def perspective_camera_from_perspective(distorted):
"""Create an undistorted camera from a distorted."""
camera = types.PerspectiveCamera()
camera.id = distorted.id
camera.width = distorted.width
camera.height = distorted.height
camera.focal = distorted.focal
camera.k1 = camera.k2 = 0.0
return camera
def perspective_camera_from_brown(brown):
"""Create a perspective camera froma a Brown camera."""
camera = types.PerspectiveCamera()
camera.id = brown.id
camera.width = brown.width
camera.height = brown.height
camera.focal = (brown.focal_x + brown.focal_y) / 2.0
camera.k1 = camera.k2 = 0.0
return camera
def perspective_camera_from_fisheye(fisheye):
"""Create a perspective camera from a fisheye."""
camera = types.PerspectiveCamera()
camera.id = fisheye.id
camera.width = fisheye.width
camera.height = fisheye.height
camera.focal = fisheye.focal
camera.k1 = camera.k2 = 0.0
return camera
def perspective_views_of_a_panorama(spherical_shot, width):
"""Create 6 perspective views of a panorama."""
camera = types.PerspectiveCamera()
camera.id = 'perspective_panorama_camera'
camera.width = width
camera.height = width
camera.focal = 0.5
camera.k1 = camera.k2 = 0.0
names = ['front', 'left', 'back', 'right', 'top', 'bottom']
rotations = [
tf.rotation_matrix(-0 * np.pi / 2, (0, 1, 0)),
tf.rotation_matrix(-1 * np.pi / 2, (0, 1, 0)),
tf.rotation_matrix(-2 * np.pi / 2, (0, 1, 0)),
tf.rotation_matrix(-3 * np.pi / 2, (0, 1, 0)),
tf.rotation_matrix(-np.pi / 2, (1, 0, 0)),
tf.rotation_matrix(+np.pi / 2, (1, 0, 0)),
]
shots = []
for name, rotation in zip(names, rotations):
shot = types.Shot()
shot.id = '{}_perspective_view_{}'.format(spherical_shot.id, name)
shot.camera = camera
R = np.dot(rotation[:3, :3], spherical_shot.pose.get_rotation_matrix())
o = spherical_shot.pose.get_origin()
shot.pose = types.Pose()
shot.pose.set_rotation_matrix(R)
shot.pose.set_origin(o)
shots.append(shot)
return shots
def render_perspective_view_of_a_panorama(image, panoshot, perspectiveshot,
interpolation=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_WRAP):
"""Render a perspective view of a panorama."""
# Get destination pixel coordinates
dst_shape = (perspectiveshot.camera.height, perspectiveshot.camera.width)
dst_y, dst_x = np.indices(dst_shape).astype(np.float32)
dst_pixels_denormalized = np.column_stack([dst_x.ravel(), dst_y.ravel()])
dst_pixels = features.normalized_image_coordinates(
dst_pixels_denormalized,
perspectiveshot.camera.width,
perspectiveshot.camera.height)
# Convert to bearing
dst_bearings = perspectiveshot.camera.pixel_bearing_many(dst_pixels)
# Rotate to panorama reference frame
rotation = np.dot(panoshot.pose.get_rotation_matrix(),
perspectiveshot.pose.get_rotation_matrix().T)
rotated_bearings = np.dot(dst_bearings, rotation.T)
# Project to panorama pixels
src_x, src_y = panoshot.camera.project((rotated_bearings[:, 0],
rotated_bearings[:, 1],
rotated_bearings[:, 2]))
src_pixels = np.column_stack([src_x.ravel(), src_y.ravel()])
src_pixels_denormalized = features.denormalized_image_coordinates(
src_pixels, image.shape[1], image.shape[0])
src_pixels_denormalized.shape = dst_shape + (2,)
# Sample color
x = src_pixels_denormalized[..., 0].astype(np.float32)
y = src_pixels_denormalized[..., 1].astype(np.float32)
colors = cv2.remap(image, x, y, interpolation, borderMode=borderMode)
return colors
def add_subshot_tracks(tracks_manager, utracks_manager, shot, subshot):
"""Add shot tracks to the undistorted tracks_manager."""
if shot.id not in tracks_manager.get_shot_ids():
return
if shot.camera.projection_type in ['equirectangular', 'spherical']:
add_pano_subshot_tracks(tracks_manager, utracks_manager, shot, subshot)
else:
for track_id, obs in tracks_manager.get_shot_observations(shot.id).items():
utracks_manager.add_observation(subshot.id, track_id, obs)
def add_pano_subshot_tracks(tracks_manager, utracks_manager, panoshot, perspectiveshot):
"""Add edges between subshots and visible tracks."""
for track_id, obs in tracks_manager.get_shot_observations(panoshot.id).items():
bearing = panoshot.camera.pixel_bearing(obs.point)
rotation = np.dot(perspectiveshot.pose.get_rotation_matrix(),
panoshot.pose.get_rotation_matrix().T)
rotated_bearing = np.dot(bearing, rotation.T)
if rotated_bearing[2] <= 0:
continue
perspective_feature = perspectiveshot.camera.project(rotated_bearing)
if (perspective_feature[0] < -0.5 or
perspective_feature[0] > 0.5 or
perspective_feature[1] < -0.5 or
perspective_feature[1] > 0.5):
continue
obs.point = perspective_feature
utracks_manager.add_observation(perspectiveshot.id, track_id, obs)
|
the-stack_0_22749 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
public database operation
"""
import db_session, db_user
from lib import common, config
# get username, expire_time and permissions
def get_info_by_token(access_token):
info = db_session.get(access_token)
info['permissions'] = db_user.get(info['username'])['permissions']
return info
# update user action_time and expire_time
def update_expire_time(access_token):
action_time = common.cur_timestamp()
data = {'access_token': access_token, 'action_time': action_time, 'expire_time': action_time + config.expire_second}
db_session.update(data)
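# Usage sketch (an added illustration; the surrounding request-handling code is
# assumed and not part of this module): a handler validating a session token
# could combine the two helpers roughly as
#     info = get_info_by_token(access_token)   # username, expire_time, permissions
#     if info['expire_time'] > common.cur_timestamp():
#         update_expire_time(access_token)     # slide the expiry window forward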
|
the-stack_0_22751 | #!/usr/bin/env python
"""
===============
Pygraphviz Draw
===============
An example showing how to use the interface to the pygraphviz
AGraph class to draw a graph.
Also see the pygraphviz documentation and examples at
http://pygraphviz.github.io/
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2006-2017 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
# plain graph
G = nx.complete_graph(5) # start with K5 in networkx
A = nx.nx_agraph.to_agraph(G) # convert to a graphviz graph
A.layout() # neato layout
A.draw("k5.ps") # write postscript in k5.ps with neato layout
|
the-stack_0_22752 | import pdb
import sys
# From https://stackoverflow.com/questions/4716533/how-to-attach-debugger-to-a-python-subproccess
class ForkedPdb(pdb.Pdb):
"""A pdb subclass that may be used from a forked multiprocessing child
**Examples**:
.. code-block:: python
from dagster.utils.forked_pdb import ForkedPdb
@solid
def complex_solid(_):
# some complicated stuff
ForkedPdb().set_trace()
# some other complicated stuff
You can initiate pipeline execution via dagit and use the pdb debugger to examine/step through
execution at the breakpoint.
"""
def interaction(self, frame, traceback):
_stdin = sys.stdin
try:
sys.stdin = open("/dev/stdin")
pdb.Pdb.interaction(self, frame, traceback)
finally:
sys.stdin = _stdin
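# Minimal usage sketch outside dagster (an added example; the worker function and
# multiprocessing wiring below are illustrative assumptions, and /dev/stdin must be
# available, i.e. a Unix terminal attached to the parent process):
#
#     from multiprocessing import Process
#
#     def worker():
#         answer = 41 + 1          # value to inspect at the (Pdb) prompt
#         ForkedPdb().set_trace()  # attaches the debugger inside the forked child
#
#     Process(target=worker).start()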
|
the-stack_0_22754 | # -*- coding:utf8 -*-
# Source:https://github.com/Show-Me-the-Code/show-me-the-code
# Author:renzongxian
# Date:2014-12-06
# Python 2.7, MySQL-python does not currently support Python 3
"""
将 0001 题生成的 200 个激活码(或者优惠券)保存到 MySQL 关系型数据库中。
"""
import uuid
import MySQLdb
def generate_key():
key_list = []
for i in range(200):
uuid_key = uuid.uuid3(uuid.NAMESPACE_DNS, str(uuid.uuid1()))
key_list.append(str(uuid_key).replace('-', ''))
return key_list
def write_to_mysql(key_list):
# Connect to database
db = MySQLdb.connect("localhost", "test", "test1234", "testDB")
# Use function cursor() to open the cursor operation
cursor = db.cursor()
# If the table exists, delete it
cursor.execute("drop table if exists ukey")
# Create table
sql = """create table ukey (
key_value char(40) not null
)"""
cursor.execute(sql)
# Insert data
try:
for i in range(200):
            # parameterized query lets MySQLdb handle quoting/escaping of the key
            cursor.execute("insert into ukey values (%s)", (key_list[i],))
# Commit to database
db.commit()
except:
# Rollback when errors occur
db.rollback()
# Close database
db.close()
if __name__ == '__main__':
write_to_mysql(generate_key())
|
the-stack_0_22759 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 13 13:41:18 2018
First experiment for joint learning of HMM model with POMDP policy.
The goal of the experiment is to test the effect of varying the number of dimensions
in our toy environments (for now, tiger and a chain-world).
Includes lots of tweaks and modifications for trying out different options during learning...
@author: josephfutoma
"""
import os
import sys
import pickle
import copy
import argparse
from time import time
import autograd
import autograd.numpy as np
from autograd import grad
from autograd import value_and_grad as vg
from autograd import make_vjp
from autograd.scipy.misc import logsumexp
from autograd.misc.flatten import flatten_func,flatten
import autograd.scipy.stats as stat
from sklearn.cluster import KMeans
#import matplotlib.pyplot as plt
from util import *
from util_hypotension import *
from pbvi_cts import *
from action_hmm_cts import *
from OPE_funcs import *
#####
##### helper funcs
#####
def update_and_write_savedict(save_dict):
#### save out to a dict
save_dict['objs'] = objs
save_dict['RL_objs'] = RL_objs
save_dict['HMM_objs'] = HMM_objs
save_dict['grad_norms'] = grad_norms
save_dict['RL_grad_norm'] = RL_grad_norm
save_dict['HMM_grad_norm'] = HMM_grad_norm
save_dict['HMM_te_objs'] = HMM_te_objs
save_dict['grad_norms_HMM_te'] = grad_norms_HMM_te
save_dict['te_policy_val'] = te_policy_val
# save_dict['te_policy_val_noprune'] = te_policy_val_noprune
save_dict['tr_ESS'] = tr_ESS
# save_dict['tr_ESS_noprune'] = tr_ESS_noprune
save_dict['tr_CWPDIS'] = tr_CWPDIS
save_dict['tr_CWPDIS_obj'] = tr_CWPDIS_obj
# save_dict['tr_CWPDIS_obj_noprune'] = tr_CWPDIS_obj_noprune
save_dict['te_ESS'] = te_ESS
# save_dict['te_ESS_noprune'] = te_ESS_noprune
save_dict['te_CWPDIS'] = te_CWPDIS
save_dict['tracked_params'] = tracked_params
save_dict['tracked_Vs'] = tracked_Vs
save_dict['tracked_Bs'] = tracked_Bs
save_dict['params'] = params
try:
print("saving!")
with open(RESULTS_PATH+model_string+'.p','wb') as f:
pickle.dump(save_dict, f)
except:
print("save failed!!")
return save_dict
#####
##### funcs to get param inits
#####
def params_init_random(alpha_pi=25,alpha_T=25):
"""
random initialization
"""
T = np.zeros((n_S,n_S,n_A))
for s in range(n_S):
for a in range(n_A):
T[:,s,a] = np.random.dirichlet(alpha_T*np.ones(n_S))
pi = np.random.dirichlet(alpha_pi*np.ones(n_S))
#random obs model; assumes data already standardized
O_means = np.random.normal(0,1,(n_dim,n_S,n_A))
O_sds = np.random.normal(1,.25,(n_dim,n_S,n_A))
n_inds = np.sum(O_sds<=0.1)
while n_inds>0:
O_sds[O_sds<=0.1] = np.random.normal(1,.25,n_inds)
n_inds = np.sum(O_sds<=0.1)
O = (O_means,O_sds)
#for now, assume R is normal with unknown means and known, small variances (eg .1)
nz_rew = rewards_tr[np.logical_not(np.isinf(rewards_tr))]
R = np.random.normal(np.mean(nz_rew),np.std(nz_rew),(n_S,n_A))
n_inds = np.sum(R>1) #truncate R to be < 1
while n_inds>0:
R[R>1] = np.random.normal(np.mean(nz_rew),np.std(nz_rew),n_inds)
n_inds = np.sum(R>1)
params = (pi,T,O,R)
return params
def params_init_MAP_sep(map_ind=0,M_step_with_missing=False):
#helper stuff
map_log1p_mean = 4.28748298
map_log1p_sd = 0.1783257
def back_transform(maps):
return np.exp(maps*map_log1p_sd+map_log1p_mean)-1
def transform(raw_maps):
return (np.log(1+raw_maps)-map_log1p_mean)/map_log1p_sd
## bin MAPs...
all_maps = np.reshape(observs_tr[:,:,map_ind],-1)
all_maps = all_maps[np.logical_not(np.isinf(all_maps))]
all_raw_maps = np.exp(all_maps*map_log1p_sd + map_log1p_mean) - 1
# TODO: way to automate this???
#for now just manually define the n_S-1 bin edges
if n_S == 5:
qs = [5,10,15,30]
elif n_S == 10:
qs = [1,2.5,5,7.5,10,15,20,30,60]
elif n_S == 15:
qs = [0.5,2,4,6,8,10,12,14,16,18,20,25,30,65]
map_bins = np.percentile(all_maps,qs)
#OLD
# qs = np.linspace(0,100,n_S+1)[1:-1]
#now use these states defined by MAP separation to filter
max_T = rewards_tr.shape[1]
gam = np.zeros((max_T+1,n_S,N)) #sstats for states
E_Njka = .01*np.ones((n_S,n_S,n_A)) #sstats for trans
#edge case at beginning when separating on MAP / obs
this_map = init_observs_tr[:,map_ind]
this_state = np.searchsorted(map_bins,this_map)
gam[0,this_state,np.arange(N)] = 1
for t in range(max_T):
mask = np.logical_not(np.isinf(observs_tr[:,t,map_ind])) #only get obs actually go this long
this_map = observs_tr[mask,t,map_ind]
this_state = np.searchsorted(map_bins,this_map)
gam[t+1,this_state,mask] = 1
for n in range(N):
this_state = np.where(gam[:,:,n])[1]
for t in range(int(seq_lens_tr[n])):
E_Njka[this_state[t],this_state[t+1],actions_tr[n,t]] += 1
#run M step... decide if want to use missing masks or not...
params = params_init_random()
nat_params = to_natural_params(params)
if M_step_with_missing:
pi,T,O,R = M_step(nat_params,observs_tr,actions_tr,rewards_tr,gam,E_Njka,
init_observs = init_observs_tr,
init_actions = init_actions_tr,
observs_missing_mask = observs_mask_tr,
init_observs_missing_mask = init_observs_mask_tr)
else:
pi,T,O,R = M_step(nat_params,observs_tr,actions_tr,rewards_tr,gam,E_Njka)
params = (pi,T,O,R)
return params
#####
##### funcs for the joint learning
#####
def init_B_and_V(params):
V_min = 0
B = initialize_B(params,V_min,gamma,n_expandB_iters=min(int(n_S*2),50)) #max out at...S*4 previously...
## TODO: put a check to see if B is too big, else drop the last few rows??
n_B = B.shape[0]
V = [V_min*np.ones((n_B,n_S)),-1*np.ones(n_B)]
return V,B
def params_init(param_init,**kwargs):
#### get initialization...
if param_init == 'random':
params = params_init_random(**kwargs)
if param_init == 'MAP-sep':
params = params_init_MAP_sep(**kwargs)
return params
def get_param_inits(param_init,n_PBVI_iters=20):
"""
helper func, to get a bunch of inits by different means and then
test how well they do, and choose the best to run with
"""
# inits = np.array(['random','kmeans','EM-random','EM-kmeans','reward-sep','BP-sep'])
restarts_per_init = {
'random': 10,
'MAP-sep': 10
}
n_restarts = restarts_per_init[param_init]
lls_tr = []
polvals_tr = []
ESS_tr = []
objs = []
best_obj = np.inf #will select the best init based on PC objective: HMM_obj + lambda*RL_obj
best_EM_obj = np.inf
for restart in range(n_restarts):
if param_init=='MAP-sep':
#for MAP-sep init, try M step with & without missing data half the time...
params = params_init(param_init,M_step_with_missing=restart>=n_restarts/2)
else:
params = params_init(param_init)
nat_params = to_natural_params(params)
pi,T,O,R = params
print("learning policy for restart %d" %restart,flush=True)
V,B = init_B_and_V(params)
for ii in range(n_PBVI_iters):
V = update_V_softmax(V,B,T,O,R,gamma,max_iter=1,verbose=False,eps=.001,PBVI_temps=[.01,.01,.01],n_samps=100)
#check value of init:
# - log-lik of HMM on test data
# - val of learned policy
ll = MAP_objective(nat_params,observs_tr,actions_tr,
init_observs_tr,init_actions_tr,
observs_mask_tr,init_observs_mask_tr)
lls_tr.append(-ll)
all_beliefs_tr = get_beliefs(params,seq_lens_tr,actions_tr,observs_tr,
init_observs_tr,init_actions_tr,observs_mask_tr,init_observs_mask_tr)
RL_obj,(_,CWPDIS_obj,ESS,_,_,_,_) = softmax_policy_value_objective_term(
nat_params,R,V,B,
action_probs_tr,beh_probs_tr,actions_tr,init_actions_tr,observs_tr,
init_observs_tr,observs_mask_tr,init_observs_mask_tr,
rewards_tr,seq_lens_tr,gamma,
cached_beliefs=all_beliefs_tr,update_V = False,
gr_safety_thresh=gr_safety_thresh,prune_num=0,
ESS_penalty=ESS_penalty)
polvals_tr.append(CWPDIS_obj)
ESS_tr.append(ESS)
###
### based on current lambda, select the best overall objective
###
if lambd == np.inf:
obj = log_prior(nat_params) + 1e8*RL_obj
else:
obj = ll + lambd*RL_obj
objs.append(obj)
if obj < best_obj:
best_obj = obj
best_nat_params = nat_params
best_params = params
best_V = V
best_B = B
best_te_ll = -MAP_objective(best_nat_params,observs_te,actions_te,
init_observs_te,init_actions_te,observs_mask_te,init_observs_mask_te)
all_beliefs_te = get_beliefs(params,seq_lens_te,actions_te,observs_te,
init_observs_te,init_actions_te,observs_mask_te,init_observs_mask_te)
_,(_,CWPDIS_obj,ESS,_,_,_,_) = softmax_policy_value_objective_term(
nat_params,R,V,B,
action_probs_te,beh_probs_te,actions_te,init_actions_te,observs_te,
init_observs_te,observs_mask_te,init_observs_mask_te,
rewards_te,seq_lens_te,gamma,
cached_beliefs=all_beliefs_te,update_V = False,
gr_safety_thresh=gr_safety_thresh,prune_num=0)
save_dict['best_init_params'] = best_params
save_dict['best_init_natparams'] = best_nat_params
save_dict['best_restart_ind'] = restart
save_dict['best_obj'] = best_obj
save_dict['best_V_init'] = best_V
save_dict['best_B_init'] = best_B
save_dict['best_init_te_ESS'] = ESS
save_dict['best_init_te_ll'] = best_te_ll
save_dict['best_init_te_polval'] = CWPDIS_obj
#init stuff in case we want to check them later
save_dict['init_objs'] = objs
save_dict['init_lls_tr'] = lls_tr
save_dict['init_polvals_tr'] = polvals_tr
save_dict['init_ESS_tr'] = ESS_tr
return best_params,best_V,best_B
if __name__ == "__main__":
##########
########## parse args & setup
##########
parser = argparse.ArgumentParser()
#paths default to local if not specified
parser.add_argument('--data_path', default='/Users/josephfutoma/Dropbox/research/mimic_data/hypotension_management/model_data/POPCORN_9obs-logstd-inds_alldata.p')
parser.add_argument('--results_path')
parser.add_argument('--logs_path')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--var_set', default='all')
parser.add_argument('--log_lambd', type=float, default=0)
parser.add_argument('--param_init', default='random')
parser.add_argument('--num_states', type=int, default=5)
parser.add_argument('--fold', type=int, default=0)
parser.add_argument('--prune_num', type=int, default=0)
parser.add_argument('--ESS_penalty', type=float, default=0)
parser.add_argument('--gr_safety_thresh', type=float, default=0)
#now unpack args
arg_dict = vars(parser.parse_args())
DATA_PATH = arg_dict['data_path']
LOGS_PATH = arg_dict['logs_path']
RESULTS_PATH = arg_dict['results_path']
seed = arg_dict['seed']
var_set = arg_dict['var_set']
lambd = np.power(10,arg_dict['log_lambd'])
param_init = arg_dict['param_init']
n_S = arg_dict['num_states']
fold = arg_dict['fold']
prune_num = arg_dict['prune_num']
ESS_penalty = arg_dict['ESS_penalty']
gr_safety_thresh = arg_dict['gr_safety_thresh']
env_name = 'hypotension'
model_string = '%s_nS%d_lambd%.8f_vars-%s_init-%s_prune%d_ESSpenalty%.2f_gr-safety-thresh-%.2f_seed%d_fold%d' %(
env_name,n_S,lambd,var_set,param_init,prune_num,ESS_penalty,gr_safety_thresh,seed,fold)
#redirect stdout/stderr when remote
if LOGS_PATH is not None:
sys.stdout = open(LOGS_PATH+model_string+"_out.txt","w")
sys.stderr = open(LOGS_PATH+model_string+"_err.txt","w")
print("starting! "+model_string,flush=True)
##########
########## Load in data, setup job params
##########
#setup params
n_A = 20
gamma = 0.999
n_folds = 5
np.set_printoptions(threshold=10000)
np.random.seed(seed)
#####
#load data and setup train/test split
#####
all_obs_dim = 9 #9 labs/vitals at most
all_dat = pickle.load(open(DATA_PATH,'rb'))
all_ids = np.array(list(all_dat.keys()))
var_names = list(all_dat.values())[0].columns
var_names = np.array(var_names[:all_obs_dim])
N_tot = len(all_ids)
fold_ids = np.arange(N_tot) % n_folds
tr_inds = fold_ids != fold
te_inds = fold_ids == fold
N = int(np.sum(tr_inds))
Nte = int(np.sum(te_inds))
rng = np.random.RandomState(711) #fixed train/test sets across folds!
perm = rng.permutation(N_tot)
ids_tr = all_ids[perm[tr_inds]]
ids_te = all_ids[perm[te_inds]]
(init_observs_te,observs_te,init_observs_mask_te,observs_mask_te,
rewards_te,init_actions_te,actions_te,action_probs_te,
beh_probs_te,seq_lens_te) = get_padded_databatch_from_IDs(
ids_te,all_dat,var_names,n_A)
(init_observs_tr,observs_tr,init_observs_mask_tr,observs_mask_tr,
rewards_tr,init_actions_tr,actions_tr,action_probs_tr,
beh_probs_tr,seq_lens_tr) = get_padded_databatch_from_IDs(
ids_tr,all_dat,var_names,n_A)
##### do a bit of clipping on MAP values, limit outlier upper values...
MAP_THRESH = 2
init_observs_te[:,0] = np.clip(init_observs_te[:,0],a_min=None,a_max=MAP_THRESH)
observs_te[:,:,0] = np.clip(observs_te[:,:,0],a_min=None,a_max=MAP_THRESH)
init_observs_tr[:,0] = np.clip(init_observs_tr[:,0],a_min=None,a_max=MAP_THRESH)
observs_tr[:,:,0] = np.clip(observs_tr[:,:,0],a_min=None,a_max=MAP_THRESH)
#####
#subset vars if only using a subset...
if var_set == 'map':
n_dim = 1
init_observs_te = init_observs_te[:,[0]]
observs_te = observs_te[:,:,[0]]
init_observs_mask_te = init_observs_mask_te[:,[0]]
observs_mask_te = observs_mask_te[:,:,[0]]
init_observs_tr = init_observs_tr[:,[0]]
observs_tr = observs_tr[:,:,[0]]
init_observs_mask_tr = init_observs_mask_tr[:,[0]]
observs_mask_tr = observs_mask_tr[:,:,[0]]
if var_set == 'map-urine-lactate':
n_dim = 3
init_observs_te = init_observs_te[:,:3]
observs_te = observs_te[:,:,:3]
init_observs_mask_te = init_observs_mask_te[:,:3]
observs_mask_te = observs_mask_te[:,:,:3]
init_observs_tr = init_observs_tr[:,:3]
observs_tr = observs_tr[:,:,:3]
init_observs_mask_tr = init_observs_mask_tr[:,:3]
observs_mask_tr = observs_mask_tr[:,:,:3]
if var_set == 'all':
n_dim = 9
var_names = np.array(var_names[:n_dim])
#up the 0-NN cases probs by a little bit (pretty rare anyways...)
action_probs_tr[action_probs_tr==.001] = .01
action_probs_te[action_probs_te==.001] = .01
te_returns = []
for i in range(Nte):
te_returns.append(np.sum(np.power(gamma,np.arange(seq_lens_te[i]))*rewards_te[i,:seq_lens_te[i]]))
te_returns = np.array(te_returns)
print('fold %d, est test set beh policy value: %.5f' %(fold,np.mean(te_returns)),flush=True)
# test set avg returns: 48.86755, 48.44530, 48.42580, 47.65438, 48.23278: 48.33 overall
### learning params
n_epochs = 20000
batchsize = N
lr = 1e-3
optim = 'rprop'
PBVI_train_update_iters = 1
save_dict = {}
##########
########## Given an init, start PC learning
##########
params,V,B = get_param_inits(param_init)
nat_params = to_natural_params(params)
pi,T,O,R = params
##### setup gradient functions
## explicitly split our objective into HMM term and RL term so we can
## track each value and gradients separately
RLobj_V_g = value_and_output_and_grad(softmax_policy_value_objective_term)
Prior_obj_g = vg(log_prior)
HMMobj_g = vg(MAP_objective)
flat_nat_params,unflatten = flatten(nat_params)
#store progress
#TODO: func to hide this setup...but still keep these in global namespace?? class to cache all this & just save object??
objs = []
RL_objs = []
HMM_objs = []
grad_norms = []
RL_grad_norm = []
HMM_grad_norm = []
HMM_te_objs = []
grad_norms_HMM_te = []
te_policy_val = []
te_policy_val_noprune = []
tr_ESS = []
tr_ESS_noprune = []
tr_CWPDIS = []
tr_CWPDIS_obj = []
tr_CWPDIS_obj_noprune = []
te_ESS = []
te_ESS_noprune = []
te_CWPDIS = []
tracked_params = []
tracked_Vs = []
tracked_Bs = []
tracked_Bs.append(B)
#init rprop stuff
tot_iter = 0
last_v = np.inf #last objective value
last_g = np.zeros(len(flat_nat_params))
step_sizes = lr*np.ones(len(flat_nat_params))
last_steps = np.zeros(len(flat_nat_params))
for epoch in range(n_epochs):
print("starting epoch %d" %epoch,flush=True)
for n_iter in range(N//batchsize):
t = time()
(this_init_observs,this_observ,this_init_observs_mask,this_observs_mask,
this_rew,this_init_act,this_act,this_act_probs,this_beh_probs,
this_seq_lens) = (init_observs_tr,observs_tr,init_observs_mask_tr,observs_mask_tr,
rewards_tr,init_actions_tr,actions_tr,action_probs_tr,beh_probs_tr,seq_lens_tr)
this_nat_params = nat_params
#####
##### HMM objective
#####
if lambd == np.inf:
HMM_obj,HMM_grad = Prior_obj_g(this_nat_params)
else:
HMM_obj,HMM_grad = HMMobj_g(this_nat_params,this_observ,
this_act,this_init_observs,this_init_act,
this_observs_mask,this_init_observs_mask)
HMM_grad = flatten(HMM_grad)[0]
#####
##### RL objective
#####
if lambd > 0:
RL_obj,(V,CWPDIS_obj,ESS,CWPDIS_nums,
CWPDIS_denoms,ESS_noprune,
CWPDIS_obj_noprune),RL_grad = RLobj_V_g(this_nat_params,R,V,B,
this_act_probs,this_beh_probs,this_act,this_init_act,this_observ,
this_init_observs,this_observs_mask,this_init_observs_mask,
this_rew,this_seq_lens,gamma,
gr_safety_thresh=gr_safety_thresh,
PBVI_update_iters=PBVI_train_update_iters,
update_V=True,V_penalty=1e-6,
prune_num=prune_num,ESS_penalty=ESS_penalty)
V = [V[0]._value,V[1]._value]
if lambd == np.inf:
RL_obj *= 1e8
RL_grad = flatten(RL_grad)[0]*1e8
else:
RL_obj *= lambd
RL_grad = flatten(RL_grad)[0]*lambd
#save RL stuff if computing during opt anyways
RL_objs.append(RL_obj)
RL_grad_norm.append(np.sum(np.abs(RL_grad)))
tr_ESS.append(ESS._value)
# tr_ESS_noprune.append(ESS_noprune._value)
# tr_CWPDIS.append((CWPDIS_nums._value, CWPDIS_denoms._value))
# tr_CWPDIS_obj_noprune.append(CWPDIS_obj_noprune._value)
tr_CWPDIS_obj.append(CWPDIS_obj._value)
else:
RL_obj = 0
RL_grad = np.zeros(HMM_grad.shape)
g = RL_grad + HMM_grad
v = RL_obj + HMM_obj
# g = np.clip(g,-1e4,1e4)
#save stuff
objs.append(v)
grad_norms.append(np.sum(np.abs(g)))
HMM_objs.append(HMM_obj)
HMM_grad_norm.append(np.sum(np.abs(HMM_grad)))
#apply gradient!
flat_nat_params,last_g,step_sizes,last_steps = rprop(flat_nat_params,g,last_g,step_sizes,last_steps,v,last_v)
last_v = v
pi,T,O,R = to_params(unflatten(flat_nat_params))
params = (pi,T,O,R)
nat_params = to_natural_params(params)
#update R separately via E step
_,gam = forward_backward_Estep(nat_params,observs_tr,actions_tr,rewards_tr,
get_xi=False,init_observs=init_observs_tr,
init_actions=init_actions_tr,
observs_missing_mask=observs_mask_tr,
init_observs_missing_mask=init_observs_mask_tr)
R = M_step_just_reward(nat_params,observs_tr,actions_tr,rewards_tr,gam)
params = (pi,T,O,R)
nat_params = to_natural_params(params)
flat_nat_params,unflatten = flatten(nat_params)
#####
##### End of learning iteration, now do some checks every so often...
#####
tot_iter += 1
if tot_iter%1==0:
print("epoch %d, iter %d, RL obj %.4f, HMM obj %.4f, total obj %.4f grad L1-norm %.4f, took %.2f"
%(epoch,tot_iter,RL_obj,HMM_obj,v,np.sum(np.abs(g)),time()-t),flush=True)
#every so often, check test set
if tot_iter % 100 == 1 or tot_iter==n_epochs:
##### check HMM performance on held-out test set
HMM_obj,HMM_grad = HMMobj_g(nat_params,observs_te,actions_te,
init_observs_te,init_actions_te,
observs_mask_te,init_observs_mask_te)
print("HMM objective on test data %.4f, grad norm %.4f"
%(-HMM_obj,np.sum(np.abs(flatten(HMM_grad)[0]))),flush=True)
HMM_te_objs.append(-HMM_obj)
grad_norms_HMM_te.append(np.sum(np.abs(flatten(HMM_grad)[0])))
### and check the policy...
all_beliefs_te = get_beliefs(params,seq_lens_te,actions_te,observs_te,
init_observs_te,init_actions_te,observs_mask_te,init_observs_mask_te)
if lambd > 0:
_,(V,CWPDIS_obj,ESS,CWPDIS_nums,CWPDIS_denoms,
ESS_noprune,CWPDIS_obj_noprune) = softmax_policy_value_objective_term(
nat_params,R,V,B,
action_probs_te,beh_probs_te,actions_te,init_actions_te,observs_te,
init_observs_te,observs_mask_te,init_observs_mask_te,
rewards_te,seq_lens_te,gamma,
cached_beliefs=all_beliefs_te,
update_V = False,
gr_safety_thresh=gr_safety_thresh,
prune_num=prune_num,
ESS_penalty=ESS_penalty)
print('iter %d, est value of policy on test data: %.5f' %(tot_iter,CWPDIS_obj),flush=True)
te_policy_val.append(CWPDIS_obj)
# te_policy_val_noprune.append(CWPDIS_obj_noprune)
te_ESS.append(ESS)
# te_ESS_noprune.append(ESS_noprune)
te_CWPDIS.append((CWPDIS_nums,CWPDIS_denoms))
tracked_params.append(params)
tracked_Vs.append(V)
#treat the 2 stage case separately...
if lambd==0:
pi,T,O,R = params
V,B = init_B_and_V(params)
tracked_Bs.append(B)
tracked_params.append(params)
this_V = []
this_te_policy_val = []
this_te_ESS = []
for _ in range(3):
#first, update V
for _ in range(10):
V = update_V_softmax(V,B,T,O,R,gamma,max_iter=1,verbose=False,eps=.001,
PBVI_temps=[.01,.01,.01])
this_V.append(V)
#then, check policy stuff
_,(V,CWPDIS_obj,ESS,CWPDIS_nums,CWPDIS_denoms,
ESS_noprune,CWPDIS_obj_noprune) = softmax_policy_value_objective_term(
nat_params,R,V,B,
action_probs_te,beh_probs_te,actions_te,init_actions_te,observs_te,
init_observs_te,observs_mask_te,init_observs_mask_te,
rewards_te,seq_lens_te,gamma,
cached_beliefs=all_beliefs_te,
update_V = False,
gr_safety_thresh=gr_safety_thresh,
prune_num=0,
ESS_penalty=0)
this_te_policy_val.append(CWPDIS_obj_noprune)
this_te_ESS.append(ESS_noprune)
te_policy_val.append(this_te_policy_val)
te_ESS.append(this_te_ESS)
tracked_Vs.append(this_V)
### save
save_dict = update_and_write_savedict(save_dict)
#refresh belief set & refresh V every so often...
if lambd > 0 and tot_iter == 250: #only refresh for not 2 stage
print("getting new belief set...",flush=True)
pi,T,O,R = params
V,B = init_B_and_V(params)
for _ in range(10):
V = update_V_softmax(V,B,T,O,R,gamma,max_iter=1,verbose=False,eps=.001,
PBVI_temps=[.01,.01,.01])
print("setup beliefs and V...",flush=True)
tracked_Bs.append(B)
|
the-stack_0_22760 | '''define the config file for ade20k and resnet101os8'''
import os
from .base_cfg import *
# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG.update({
'type': 'ade20k',
'rootdir': os.path.join(os.getcwd(), 'ADE20k'),
})
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
{
'max_epochs': 130
}
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify segmentor config
SEGMENTOR_CFG = SEGMENTOR_CFG.copy()
SEGMENTOR_CFG.update(
{
'num_classes': 150,
}
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
{
'backupdir': 'fcn_resnet101os8_ade20k_train',
'logfilepath': 'fcn_resnet101os8_ade20k_train/train.log',
}
)
COMMON_CFG['test'].update(
{
'backupdir': 'fcn_resnet101os8_ade20k_test',
'logfilepath': 'fcn_resnet101os8_ade20k_test/test.log',
'resultsavepath': 'fcn_resnet101os8_ade20k_test/fcn_resnet101os8_ade20k_results.pkl'
}
) |
the-stack_0_22763 | # Copyright 2014-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import netaddr
from neutron.common.exceptions import NeutronException
from neutron.plugins.common import constants as plugin_const
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip.l2_service import \
L2ServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.network_helper import \
NetworkHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.selfips import BigipSelfIpManager
from f5_openstack_agent.lbaasv2.drivers.bigip.snats import BigipSnatManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import strip_domain_address
LOG = logging.getLogger(__name__)
class NetworkServiceBuilder(object):
def __init__(self, f5_global_routed_mode, conf, driver, l3_binding=None):
self.f5_global_routed_mode = f5_global_routed_mode
self.conf = conf
self.driver = driver
self.l3_binding = l3_binding
self.l2_service = L2ServiceBuilder(conf, f5_global_routed_mode)
self.bigip_selfip_manager = BigipSelfIpManager(
self.driver, self.l2_service, self.driver.l3_binding)
self.bigip_snat_manager = BigipSnatManager(
self.driver, self.l2_service, self.driver.l3_binding)
self.rds_cache = {}
self.interface_mapping = self.l2_service.interface_mapping
self.network_helper = NetworkHelper()
self.service_adapter = self.driver.service_adapter
def post_init(self):
        # Run any post-initialization tasks, now that the agent
        # is fully connected
self.l2_service.post_init()
def tunnel_sync(self, tunnel_ips):
self.l2_service.tunnel_sync(tunnel_ips)
def set_tunnel_rpc(self, tunnel_rpc):
        # Provide FDB Connector with ML2 RPC access
self.l2_service.set_tunnel_rpc(tunnel_rpc)
def set_l2pop_rpc(self, l2pop_rpc):
        # Provide FDB Connector with ML2 RPC access
self.l2_service.set_l2pop_rpc(l2pop_rpc)
def initialize_tunneling(self):
# setup tunneling
vtep_folder = self.conf.f5_vtep_folder
vtep_selfip_name = self.conf.f5_vtep_selfip_name
local_ips = []
for bigip in self.driver.get_all_bigips():
if not vtep_folder or vtep_folder.lower() == 'none':
vtep_folder = 'Common'
if vtep_selfip_name and \
not vtep_selfip_name.lower() == 'none':
# profiles may already exist
# create vxlan_multipoint_profile`
self.network_helper.create_vxlan_multipoint_profile(
bigip,
'vxlan_ovs',
partition='Common')
# create l2gre_multipoint_profile
self.network_helper.create_l2gre_multipoint_profile(
bigip,
'gre_ovs',
partition='Common')
# find the IP address for the selfip for each box
local_ip = self.bigip_selfip_manager.get_selfip_addr(
bigip,
vtep_selfip_name,
partition=vtep_folder
)
if local_ip:
bigip.local_ip = local_ip
local_ips.append(local_ip)
else:
raise f5_ex.MissingVTEPAddress(
'device %s missing vtep selfip %s'
% (bigip.device_name,
'/' + vtep_folder + '/' +
vtep_selfip_name))
return local_ips
def prep_service_networking(self, service, traffic_group):
# Assure network connectivity is established on all bigips
if self.conf.f5_global_routed_mode or not service['loadbalancer']:
return
if self.conf.use_namespaces:
try:
LOG.debug("Annotating the service definition networks "
"with route domain ID.")
self._annotate_service_route_domains(service)
except Exception as err:
LOG.exception(err)
raise f5_ex.RouteDomainCreationException(
"Route domain annotation error")
# Per Device Network Connectivity (VLANs or Tunnels)
subnetsinfo = self._get_subnets_to_assure(service)
for (assure_bigip, subnetinfo) in (
itertools.product(self.driver.get_all_bigips(), subnetsinfo)):
LOG.debug("Assuring per device network connectivity "
"for %s on subnet %s." % (assure_bigip.hostname,
subnetinfo['subnet']))
# Make sure the L2 network is established
self.l2_service.assure_bigip_network(
assure_bigip, subnetinfo['network'])
# Connect the BigIP device to network, by getting
# a self-ip address on the subnet.
self.bigip_selfip_manager.assure_bigip_selfip(
assure_bigip, service, subnetinfo)
# L3 Shared Config
assure_bigips = self.driver.get_config_bigips()
LOG.debug("Getting subnetinfo for ...")
LOG.debug(assure_bigips)
for subnetinfo in subnetsinfo:
if self.conf.f5_snat_addresses_per_subnet > 0:
self._assure_subnet_snats(assure_bigips, service, subnetinfo)
if subnetinfo['is_for_member'] and not self.conf.f5_snat_mode:
try:
self._allocate_gw_addr(subnetinfo)
except KeyError as err:
raise f5_ex.VirtualServerCreationException(err.message)
for assure_bigip in assure_bigips:
# If we are not using SNATS, attempt to become
# the subnet's default gateway.
self.bigip_selfip_manager.assure_gateway_on_subnet(
assure_bigip, subnetinfo, traffic_group)
def _annotate_service_route_domains(self, service):
# Add route domain notation to pool member and vip addresses.
LOG.debug("Service before route domains: %s" % service)
tenant_id = service['loadbalancer']['tenant_id']
self.update_rds_cache(tenant_id)
if 'members' in service:
for member in service['members']:
if 'address' in member:
LOG.debug("processing member %s" % member['address'])
if 'network_id' in member and member['network_id']:
member_network = (
self.service_adapter.get_network_from_service(
service,
member['network_id']
))
member_subnet = (
self.service_adapter.get_subnet_from_service(
service,
member['subnet_id']
))
if member_network:
self.assign_route_domain(
tenant_id, member_network, member_subnet)
rd_id = (
'%' + str(member_network['route_domain_id'])
)
member['address'] += rd_id
else:
member['address'] += '%0'
if 'vip_address' in service['loadbalancer']:
loadbalancer = service['loadbalancer']
if 'network_id' in loadbalancer:
lb_network = self.service_adapter.get_network_from_service(
service, loadbalancer['network_id'])
vip_subnet = self.service_adapter.get_subnet_from_service(
service, loadbalancer['vip_subnet_id'])
self.assign_route_domain(
tenant_id, lb_network, vip_subnet)
rd_id = '%' + str(lb_network['route_domain_id'])
service['loadbalancer']['vip_address'] += rd_id
else:
service['loadbalancer']['vip_address'] += '%0'
LOG.debug("Service after route domains: %s" % service)
def assign_route_domain(self, tenant_id, network, subnet):
# Assign route domain for a network
if self.l2_service.is_common_network(network):
network['route_domain_id'] = 0
return
LOG.debug("assign route domain get from cache %s" % network)
route_domain_id = self.get_route_domain_from_cache(network)
if route_domain_id is not None:
network['route_domain_id'] = route_domain_id
return
LOG.debug("max namespaces: %s" % self.conf.max_namespaces_per_tenant)
LOG.debug("max namespaces == 1: %s" %
(self.conf.max_namespaces_per_tenant == 1))
if self.conf.max_namespaces_per_tenant == 1:
bigip = self.driver.get_bigip()
LOG.debug("bigip before get_domain: %s" % bigip)
partition_id = self.service_adapter.get_folder_name(
tenant_id)
tenant_rd = self.network_helper.get_route_domain(
bigip, partition=partition_id)
network['route_domain_id'] = tenant_rd.id
return
LOG.debug("assign route domain checking for available route domain")
# need new route domain ?
check_cidr = netaddr.IPNetwork(subnet['cidr'])
placed_route_domain_id = None
for route_domain_id in self.rds_cache[tenant_id]:
LOG.debug("checking rd %s" % route_domain_id)
rd_entry = self.rds_cache[tenant_id][route_domain_id]
overlapping_subnet = None
for net_shortname in rd_entry:
LOG.debug("checking net %s" % net_shortname)
net_entry = rd_entry[net_shortname]
for exist_subnet_id in net_entry['subnets']:
if exist_subnet_id == subnet['id']:
continue
exist_subnet = net_entry['subnets'][exist_subnet_id]
exist_cidr = exist_subnet['cidr']
if check_cidr in exist_cidr or exist_cidr in check_cidr:
overlapping_subnet = exist_subnet
LOG.debug('rd %s: overlaps with subnet %s id: %s' % (
(route_domain_id, exist_subnet, exist_subnet_id)))
break
if overlapping_subnet:
# no need to keep looking
break
if not overlapping_subnet:
placed_route_domain_id = route_domain_id
break
if placed_route_domain_id is None:
if (len(self.rds_cache[tenant_id]) <
self.conf.max_namespaces_per_tenant):
placed_route_domain_id = self._create_aux_rd(tenant_id)
self.rds_cache[tenant_id][placed_route_domain_id] = {}
LOG.debug("Tenant %s now has %d route domains" %
(tenant_id, len(self.rds_cache[tenant_id])))
else:
raise Exception("Cannot allocate route domain")
LOG.debug("Placed in route domain %s" % placed_route_domain_id)
rd_entry = self.rds_cache[tenant_id][placed_route_domain_id]
net_short_name = self.get_neutron_net_short_name(network)
if net_short_name not in rd_entry:
rd_entry[net_short_name] = {'subnets': {}}
net_subnets = rd_entry[net_short_name]['subnets']
net_subnets[subnet['id']] = {'cidr': check_cidr}
network['route_domain_id'] = placed_route_domain_id
def _create_aux_rd(self, tenant_id):
# Create a new route domain
route_domain_id = None
for bigip in self.driver.get_all_bigips():
partition_id = self.service_adapter.get_folder_name(tenant_id)
bigip_route_domain_id = self.network_helper.create_route_domain(
bigip,
partition=partition_id,
strictness=self.conf.f5_route_domain_strictness,
is_aux=True)
if route_domain_id is None:
route_domain_id = bigip_route_domain_id.id
elif bigip_route_domain_id.id != route_domain_id:
# FixME error
LOG.debug(
"Bigips allocated two different route domains!: %s %s"
% (bigip_route_domain_id, route_domain_id))
LOG.debug("Allocated route domain %s for tenant %s"
% (route_domain_id, tenant_id))
return route_domain_id
# The purpose of the route domain subnet cache is to
# determine whether there is an existing bigip
# subnet that conflicts with a new one being
# assigned to the route domain.
"""
# route domain subnet cache
rds_cache =
{'<tenant_id>': {
{'0': {
'<network type>-<segmentation id>': [
'subnets': [
'<subnet id>': {
'cidr': '<cidr>'
}
],
'1': {}}}}
"""
def update_rds_cache(self, tenant_id):
# Update the route domain cache from bigips
if tenant_id not in self.rds_cache:
LOG.debug("rds_cache: adding tenant %s" % tenant_id)
self.rds_cache[tenant_id] = {}
for bigip in self.driver.get_all_bigips():
self.update_rds_cache_bigip(tenant_id, bigip)
LOG.debug("rds_cache updated: " + str(self.rds_cache))
def update_rds_cache_bigip(self, tenant_id, bigip):
# Update the route domain cache for this tenant
# with information from bigip's vlan and tunnels
LOG.debug("rds_cache: processing bigip %s" % bigip.device_name)
route_domain_ids = self.network_helper.get_route_domain_ids(
bigip,
partition=self.service_adapter.get_folder_name(tenant_id))
# LOG.debug("rds_cache: got bigip route domains: %s" % route_domains)
for route_domain_id in route_domain_ids:
self.update_rds_cache_bigip_rd_vlans(
tenant_id, bigip, route_domain_id)
def update_rds_cache_bigip_rd_vlans(
self, tenant_id, bigip, route_domain_id):
# Update the route domain cache with information
# from the bigip vlans and tunnels from
# this route domain
LOG.debug("rds_cache: processing bigip %s rd %s"
% (bigip.device_name, route_domain_id))
# this gets tunnels too
partition_id = self.service_adapter.get_folder_name(tenant_id)
rd_vlans = self.network_helper.get_vlans_in_route_domain_by_id(
bigip,
partition=partition_id,
id=route_domain_id
)
LOG.debug("rds_cache: bigip %s rd %s vlans: %s"
% (bigip.device_name, route_domain_id, rd_vlans))
if len(rd_vlans) == 0:
LOG.debug("No vlans found for route domain: %d" %
(route_domain_id))
return
# make sure this rd has a cache entry
tenant_entry = self.rds_cache[tenant_id]
if route_domain_id not in tenant_entry:
tenant_entry[route_domain_id] = {}
# for every VLAN or TUNNEL on this bigip...
for rd_vlan in rd_vlans:
self.update_rds_cache_bigip_vlan(
tenant_id, bigip, route_domain_id, rd_vlan)
def update_rds_cache_bigip_vlan(
self, tenant_id, bigip, route_domain_id, rd_vlan):
# Update the route domain cache with information
# from the bigip vlan or tunnel
LOG.debug("rds_cache: processing bigip %s rd %d vlan %s"
% (bigip.device_name, route_domain_id, rd_vlan))
net_short_name = self.get_bigip_net_short_name(
bigip, tenant_id, rd_vlan)
# make sure this net has a cache entry
tenant_entry = self.rds_cache[tenant_id]
rd_entry = tenant_entry[route_domain_id]
if net_short_name not in rd_entry:
rd_entry[net_short_name] = {'subnets': {}}
net_subnets = rd_entry[net_short_name]['subnets']
partition_id = self.service_adapter.get_folder_name(tenant_id)
LOG.debug("Calling get_selfips with: partition %s and vlan_name %s",
partition_id, rd_vlan)
selfips = self.bigip_selfip_manager.get_selfips(
bigip,
partition=partition_id,
vlan_name=rd_vlan
)
LOG.debug("rds_cache: got selfips")
for selfip in selfips:
LOG.debug("rds_cache: processing bigip %s rd %s vlan %s self %s" %
(bigip.device_name, route_domain_id, rd_vlan,
selfip.name))
if bigip.device_name not in selfip.name:
LOG.error("rds_cache: Found unexpected selfip %s for tenant %s"
% (selfip.name, tenant_id))
continue
subnet_id = selfip.name.split(bigip.device_name + '-')[1]
# convert 10.1.1.1%1/24 to 10.1.1.1/24
(addr, netbits) = selfip.address.split('/')
addr = addr.split('%')[0]
selfip.address = addr + '/' + netbits
# selfip addresses will have slash notation: 10.1.1.1/24
netip = netaddr.IPNetwork(selfip.address)
LOG.debug("rds_cache: updating subnet %s with %s"
% (subnet_id, str(netip.cidr)))
net_subnets[subnet_id] = {'cidr': netip.cidr}
LOG.debug("rds_cache: now %s" % self.rds_cache)
def get_route_domain_from_cache(self, network):
# Get route domain from cache by network
net_short_name = self.get_neutron_net_short_name(network)
for tenant_id in self.rds_cache:
tenant_cache = self.rds_cache[tenant_id]
for route_domain_id in tenant_cache:
if net_short_name in tenant_cache[route_domain_id]:
return route_domain_id
def remove_from_rds_cache(self, network, subnet):
# Get route domain from cache by network
LOG.debug("remove_from_rds_cache")
net_short_name = self.get_neutron_net_short_name(network)
for tenant_id in self.rds_cache:
LOG.debug("rds_cache: processing remove for %s" % tenant_id)
deleted_rds = []
tenant_cache = self.rds_cache[tenant_id]
for route_domain_id in tenant_cache:
if net_short_name in tenant_cache[route_domain_id]:
net_entry = tenant_cache[route_domain_id][net_short_name]
if subnet['id'] in net_entry['subnets']:
del net_entry['subnets'][subnet['id']]
if len(net_entry['subnets']) == 0:
del net_entry['subnets']
if len(tenant_cache[route_domain_id][net_short_name]) == 0:
del tenant_cache[route_domain_id][net_short_name]
if len(self.rds_cache[tenant_id][route_domain_id]) == 0:
deleted_rds.append(route_domain_id)
for rd in deleted_rds:
LOG.debug("removing route domain %d from tenant %s" %
(rd, tenant_id))
del self.rds_cache[tenant_id][rd]
def get_bigip_net_short_name(self, bigip, tenant_id, network_name):
# Return <network_type>-<seg_id> for bigip network
LOG.debug("get_bigip_net_short_name: %s:%s" % (
tenant_id, network_name))
partition_id = self.service_adapter.get_folder_name(tenant_id)
LOG.debug("network_name %s", network_name.split('/'))
network_name = network_name.split("/")[-1]
if 'tunnel-gre-' in network_name:
tunnel_key = self.network_helper.get_tunnel_key(
bigip,
network_name,
partition=partition_id
)
return 'gre-%s' % tunnel_key
elif 'tunnel-vxlan-' in network_name:
LOG.debug("Getting tunnel key for VXLAN: %s", network_name)
tunnel_key = self.network_helper.get_tunnel_key(
bigip,
network_name,
partition=partition_id
)
return 'vxlan-%s' % tunnel_key
else:
LOG.debug("Getting tunnel key for VLAN: %s", network_name)
vlan_id = self.network_helper.get_vlan_id(bigip,
name=network_name,
partition=partition_id)
return 'vlan-%s' % vlan_id
@staticmethod
def get_neutron_net_short_name(network):
# Return <network_type>-<seg_id> for neutron network
net_type = network['provider:network_type']
net_seg_key = network['provider:segmentation_id']
return net_type + '-' + str(net_seg_key)
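    # Example (an added illustration): a neutron network dict containing
    # {'provider:network_type': 'vxlan', 'provider:segmentation_id': 1234}
    # yields the short name 'vxlan-1234', matching the keys produced by
    # get_bigip_net_short_name above so both sides of the rds_cache line up.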
def _assure_subnet_snats(self, assure_bigips, service, subnetinfo):
# Ensure snat for subnet exists on bigips
tenant_id = service['loadbalancer']['tenant_id']
subnet = subnetinfo['subnet']
snats_per_subnet = self.conf.f5_snat_addresses_per_subnet
assure_bigips = \
[bigip for bigip in assure_bigips
if tenant_id not in bigip.assured_tenant_snat_subnets or
subnet['id'] not in
bigip.assured_tenant_snat_subnets[tenant_id]]
LOG.debug("_assure_subnet_snats: getting snat addrs for: %s" %
subnet['id'])
if len(assure_bigips):
snat_addrs = self.bigip_snat_manager.get_snat_addrs(
subnetinfo, tenant_id, snats_per_subnet)
if len(snat_addrs) != snats_per_subnet:
raise f5_ex.SNAT_CreationException(
"Unable to satisfy request to allocate %d "
"snats. Actual SNAT count: %d SNATs" %
(snats_per_subnet, len(snat_addrs)))
for assure_bigip in assure_bigips:
self.bigip_snat_manager.assure_bigip_snats(
assure_bigip, subnetinfo, snat_addrs, tenant_id)
def _allocate_gw_addr(self, subnetinfo):
# Create a name for the port and for the IP Forwarding
# Virtual Server as well as the floating Self IP which
# will answer ARP for the members
need_port_for_gateway = False
network = subnetinfo['network']
subnet = subnetinfo['subnet']
if not network or not subnet:
LOG.error('Attempted to create default gateway'
' for network with no id...skipping.')
return
if not subnet['gateway_ip']:
raise KeyError("attempting to create gateway on subnet without "
"gateway ip address specified.")
gw_name = "gw-" + subnet['id']
ports = self.driver.plugin_rpc.get_port_by_name(port_name=gw_name)
if len(ports) < 1:
need_port_for_gateway = True
# There was no port on this agent's host, so get one from Neutron
if need_port_for_gateway:
try:
rpc = self.driver.plugin_rpc
new_port = rpc.create_port_on_subnet_with_specific_ip(
subnet_id=subnet['id'], mac_address=None,
name=gw_name, ip_address=subnet['gateway_ip'])
LOG.info('gateway IP for subnet %s will be port %s'
% (subnet['id'], new_port['id']))
except Exception as exc:
ermsg = 'Invalid default gateway for subnet %s:%s - %s.' \
% (subnet['id'],
subnet['gateway_ip'],
exc.message)
ermsg += " SNAT will not function and load balancing"
ermsg += " support will likely fail. Enable f5_snat_mode."
LOG.exception(ermsg)
return True
def post_service_networking(self, service, all_subnet_hints):
# Assure networks are deleted from big-ips
if self.conf.f5_global_routed_mode:
return
# L2toL3 networking layer
# Non Shared Config - Local Per BIG-IP
self.update_bigip_l2(service)
# Delete shared config objects
deleted_names = set()
for bigip in self.driver.get_config_bigips():
LOG.debug('post_service_networking: calling '
'_assure_delete_networks del nets sh for bigip %s %s'
% (bigip.device_name, all_subnet_hints))
subnet_hints = all_subnet_hints[bigip.device_name]
deleted_names = deleted_names.union(
self._assure_delete_nets_shared(bigip, service,
subnet_hints))
# Delete non shared config objects
for bigip in self.driver.get_all_bigips():
LOG.debug(' post_service_networking: calling '
' _assure_delete_networks del nets ns for bigip %s'
% bigip.device_name)
subnet_hints = all_subnet_hints[bigip.device_name]
deleted_names = deleted_names.union(
self._assure_delete_nets_nonshared(
bigip, service, subnet_hints)
)
for port_name in deleted_names:
LOG.debug(' post_service_networking: calling '
' del port %s'
% port_name)
self.driver.plugin_rpc.delete_port_by_name(
port_name=port_name)
def update_bigip_l2(self, service):
# Update fdb entries on bigip
loadbalancer = service['loadbalancer']
service_adapter = self.service_adapter
for bigip in self.driver.get_all_bigips():
for member in service['members']:
LOG.debug("update_bigip_l2 update service members")
member['network'] = service_adapter.get_network_from_service(
service,
member['network_id']
)
member_status = member['provisioning_status']
if member_status == plugin_const.PENDING_DELETE:
self.delete_bigip_member_l2(bigip, loadbalancer, member)
else:
self.update_bigip_member_l2(bigip, loadbalancer, member)
if "network_id" not in loadbalancer:
LOG.error("update_bigip_l2, expected network ID")
return
LOG.debug("update_bigip_l2 get network for ID %s" %
loadbalancer["network_id"])
loadbalancer['network'] = service_adapter.get_network_from_service(
service,
loadbalancer['network_id']
)
lb_status = loadbalancer['provisioning_status']
if lb_status == plugin_const.PENDING_DELETE:
self.delete_bigip_vip_l2(bigip, loadbalancer)
else:
LOG.debug("update_bigip_l2 calling update_bigip_vip_l2")
self.update_bigip_vip_l2(bigip, loadbalancer)
LOG.debug("update_bigip_l2 complete")
def update_bigip_member_l2(self, bigip, loadbalancer, member):
# update pool member l2 records
network = member['network']
if network:
if self.l2_service.is_common_network(network):
net_folder = 'Common'
else:
net_folder = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
fdb_info = {'network': network,
'ip_address': member['address'],
'mac_address': member['port']['mac_address']}
self.l2_service.add_bigip_fdbs(
bigip, net_folder, fdb_info, member)
def delete_bigip_member_l2(self, bigip, loadbalancer, member):
# Delete pool member l2 records
network = member['network']
if network:
if 'port' in member:
if self.l2_service.is_common_network(network):
net_folder = 'Common'
else:
net_folder = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
fdb_info = {'network': network,
'ip_address': member['address'],
'mac_address': member['port']['mac_address']}
self.l2_service.delete_bigip_fdbs(
bigip, net_folder, fdb_info, member)
else:
LOG.error('Member on SDN has no port. Manual '
'removal on the BIG-IP will be '
'required. Was the vm instance '
'deleted before the pool member '
'was deleted?')
def update_bigip_vip_l2(self, bigip, loadbalancer):
# Update vip l2 records
network = loadbalancer['network']
if network:
if self.l2_service.is_common_network(network):
net_folder = 'Common'
else:
net_folder = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
fdb_info = {'network': network,
'ip_address': None,
'mac_address': None}
self.l2_service.add_bigip_fdbs(
bigip, net_folder, fdb_info, loadbalancer)
def delete_bigip_vip_l2(self, bigip, loadbalancer):
# Delete loadbalancer l2 records
network = loadbalancer['network']
if network:
if self.l2_service.is_common_network(network):
net_folder = 'Common'
else:
net_folder = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
fdb_info = {'network': network,
'ip_address': None,
'mac_address': None}
self.l2_service.delete_bigip_fdbs(
bigip, net_folder, fdb_info, loadbalancer)
def _assure_delete_nets_shared(self, bigip, service, subnet_hints):
# Assure shared configuration (which syncs) is deleted
deleted_names = set()
tenant_id = service['loadbalancer']['tenant_id']
delete_gateway = self.bigip_selfip_manager.delete_gateway_on_subnet
for subnetinfo in self._get_subnets_to_delete(bigip,
service,
subnet_hints):
try:
if not self.conf.f5_snat_mode:
gw_name = delete_gateway(bigip, subnetinfo)
deleted_names.add(gw_name)
my_deleted_names, my_in_use_subnets = \
self.bigip_snat_manager.delete_bigip_snats(
bigip, subnetinfo, tenant_id)
deleted_names = deleted_names.union(my_deleted_names)
for in_use_subnetid in my_in_use_subnets:
subnet_hints['check_for_delete_subnets'].pop(
in_use_subnetid, None)
except NeutronException as exc:
LOG.error("assure_delete_nets_shared: exception: %s"
% str(exc.msg))
except Exception as exc:
LOG.error("assure_delete_nets_shared: exception: %s"
% str(exc.message))
return deleted_names
def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints):
# Delete non shared base objects for networks
deleted_names = set()
for subnetinfo in self._get_subnets_to_delete(bigip,
service,
subnet_hints):
try:
network = subnetinfo['network']
if self.l2_service.is_common_network(network):
network_folder = 'Common'
else:
network_folder = self.service_adapter.get_folder_name(
service['loadbalancer']['tenant_id'])
subnet = subnetinfo['subnet']
if self.conf.f5_populate_static_arp:
self.network_helper.arp_delete_by_subnet(
bigip,
subnet=subnet['cidr'],
mask=None,
partition=network_folder
)
local_selfip_name = "local-" + bigip.device_name + \
"-" + subnet['id']
selfip_address = self.bigip_selfip_manager.get_selfip_addr(
bigip,
local_selfip_name,
partition=network_folder
)
if not selfip_address:
LOG.error("Failed to get self IP address %s in cleanup.",
local_selfip_name)
self.bigip_selfip_manager.delete_selfip(
bigip,
local_selfip_name,
partition=network_folder
)
if self.l3_binding and selfip_address:
self.l3_binding.unbind_address(subnet_id=subnet['id'],
ip_address=selfip_address)
deleted_names.add(local_selfip_name)
self.l2_service.delete_bigip_network(bigip, network)
if subnet['id'] not in subnet_hints['do_not_delete_subnets']:
subnet_hints['do_not_delete_subnets'].append(subnet['id'])
self.remove_from_rds_cache(network, subnet)
tenant_id = service['loadbalancer']['tenant_id']
if tenant_id in bigip.assured_tenant_snat_subnets:
tenant_snat_subnets = \
bigip.assured_tenant_snat_subnets[tenant_id]
if subnet['id'] in tenant_snat_subnets:
tenant_snat_subnets.remove(subnet['id'])
except NeutronException as exc:
LOG.error("assure_delete_nets_nonshared: exception: %s"
% str(exc.msg))
except Exception as exc:
LOG.error("assure_delete_nets_nonshared: exception: %s"
% str(exc.message))
return deleted_names
def _get_subnets_to_delete(self, bigip, service, subnet_hints):
        # Clean up any Self IP, SNATs, networks, and folders for
        # service items that we deleted.
subnets_to_delete = []
for subnetinfo in subnet_hints['check_for_delete_subnets'].values():
subnet = self.service_adapter.get_subnet_from_service(
service, subnetinfo['subnet_id'])
subnetinfo['subnet'] = subnet
network = self.service_adapter.get_network_from_service(
service, subnetinfo['network_id'])
subnetinfo['network'] = network
route_domain = network['route_domain_id']
if not subnet:
continue
if not self._ips_exist_on_subnet(
bigip,
service,
subnet,
route_domain):
subnets_to_delete.append(subnetinfo)
return subnets_to_delete
def _ips_exist_on_subnet(self, bigip, service, subnet, route_domain):
# Does the big-ip have any IP addresses on this subnet?
LOG.debug("_ips_exist_on_subnet entry %s rd %s"
% (str(subnet['cidr']), route_domain))
route_domain = str(route_domain)
ipsubnet = netaddr.IPNetwork(subnet['cidr'])
# Are there any virtual addresses on this subnet?
folder = self.service_adapter.get_folder_name(
service['loadbalancer']['tenant_id']
)
virtual_services = self.network_helper.get_virtual_service_insertion(
bigip,
partition=folder
)
for virt_serv in virtual_services:
(_, dest) = virt_serv.items()[0]
LOG.debug(" _ips_exist_on_subnet: checking vip %s"
% str(dest['address']))
if len(dest['address'].split('%')) > 1:
vip_route_domain = dest['address'].split('%')[1]
else:
vip_route_domain = '0'
if vip_route_domain != route_domain:
continue
vip_addr = strip_domain_address(dest['address'])
if netaddr.IPAddress(vip_addr) in ipsubnet:
LOG.debug(" _ips_exist_on_subnet: found")
return True
# If there aren't any virtual addresses, are there
# node addresses on this subnet?
nodes = self.network_helper.get_node_addresses(
bigip,
partition=folder
)
for node in nodes:
LOG.debug(" _ips_exist_on_subnet: checking node %s"
% str(node))
if len(node.split('%')) > 1:
node_route_domain = node.split('%')[1]
else:
node_route_domain = '0'
if node_route_domain != route_domain:
continue
node_addr = strip_domain_address(node)
if netaddr.IPAddress(node_addr) in ipsubnet:
LOG.debug(" _ips_exist_on_subnet: found")
return True
LOG.debug(" _ips_exist_on_subnet exit %s"
% str(subnet['cidr']))
# nothing found
return False
def add_bigip_fdb(self, bigip, fdb):
self.l2_service.add_bigip_fdb(bigip, fdb)
def remove_bigip_fdb(self, bigip, fdb):
self.l2_service.remove_bigip_fdb(bigip, fdb)
def update_bigip_fdb(self, bigip, fdb):
self.l2_service.update_bigip_fdb(bigip, fdb)
def set_context(self, context):
self.l2_service.set_context(context)
def vlan_exists(self, network, folder='Common'):
return False
def _get_subnets_to_assure(self, service):
# Examine service and return active networks
networks = dict()
loadbalancer = service['loadbalancer']
service_adapter = self.service_adapter
lb_status = loadbalancer['provisioning_status']
if lb_status != plugin_const.PENDING_DELETE:
if 'network_id' in loadbalancer:
network = service_adapter.get_network_from_service(
service,
loadbalancer['network_id']
)
subnet = service_adapter.get_subnet_from_service(
service,
loadbalancer['vip_subnet_id']
)
networks[network['id']] = {'network': network,
'subnet': subnet,
'is_for_member': False}
for member in service['members']:
if member['provisioning_status'] != plugin_const.PENDING_DELETE:
if 'network_id' in member:
network = service_adapter.get_network_from_service(
service,
member['network_id']
)
subnet = service_adapter.get_subnet_from_service(
service,
member['subnet_id']
)
networks[network['id']] = {'network': network,
'subnet': subnet,
'is_for_member': True}
return networks.values()
|
the-stack_0_22765 | from rubicon.objc import SEL, Block, objc_method
from rubicon.objc.runtime import objc_id
from toga_iOS.libs import (
NSDate,
NSRunLoop,
UIAlertAction,
UIAlertActionStyle,
UIAlertController,
UIAlertControllerStyle,
UIBarButtonItem,
UIBarButtonSystemItem,
UIScreen,
UIViewController
)
class TogaDialog(UIViewController):
@objc_method
def loadView(self) -> None:
self.title = self.interface.title
self.cancelButton = UIBarButtonItem.alloc().initWithBarButtonSystemItem_target_action_(
UIBarButtonSystemItem.Cancel,
self,
SEL('cancelClicked')
)
self.navigationController.navigationBar.topItem.leftBarButtonItem = self.cancelButton
        self.doneButton = UIBarButtonItem.alloc().initWithBarButtonSystemItem_target_action_(
            UIBarButtonSystemItem.Done,
            self,
            SEL('doneClicked')
        )
self.navigationController.navigationBar.topItem.rightBarButtonItem = self.doneButton
self.interface.content._update_layout(
width=UIScreen.mainScreen().bounds.size.width,
height=UIScreen.mainScreen().bounds.size.height,
padding_top=12
)
self.view = self.interface.content._impl
@objc_method
def cancelClicked(self):
self.dismissModalViewControllerAnimated_(True)
if self.interface.on_cancel:
self.interface.on_cancel(self.interface)
@objc_method
def doneClicked(self):
self.dismissModalViewControllerAnimated_(True)
if self.interface.on_accept:
self.interface.on_accept(self.interface)
class Dialog:
def __init__(self, title, content, on_accept=None, on_cancel=None):
self.title = title
self.content = content
self.on_accept = on_accept
self.on_cancel = on_cancel
self._create()
def _create(self):
self.content.startup()
self._native = TogaDialog.alloc().init()
self._native.interface = self
class TogaModalDialog:
def __init__(self, title, message):
self.native = UIAlertController.alertControllerWithTitle(
title,
message=message,
preferredStyle=UIAlertControllerStyle.Alert
)
self.response = None
def true_response(self, action: objc_id) -> None:
self.response = True
def false_response(self, action: objc_id) -> None:
self.response = False
def add_ok_button(self):
self.native.addAction(
UIAlertAction.actionWithTitle(
"OK",
style=UIAlertActionStyle.Default,
handler=Block(self.true_response, None, objc_id)
)
)
def add_cancel_button(self):
self.native.addAction(
UIAlertAction.actionWithTitle(
"Cancel",
style=UIAlertActionStyle.Cancel,
handler=Block(self.false_response, None, objc_id)
)
)
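    # question_dialog() below calls add_yes_button()/add_no_button(), which are
    # not defined in the original class; these minimal sketches mirror
    # add_ok_button/add_cancel_button (the "Yes"/"No" titles and the chosen
    # action styles are assumptions).
    def add_yes_button(self):
        self.native.addAction(
            UIAlertAction.actionWithTitle(
                "Yes",
                style=UIAlertActionStyle.Default,
                handler=Block(self.true_response, None, objc_id)
            )
        )
    def add_no_button(self):
        self.native.addAction(
            UIAlertAction.actionWithTitle(
                "No",
                style=UIAlertActionStyle.Cancel,
                handler=Block(self.false_response, None, objc_id)
            )
        )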
def runModal(self, window):
window._impl.controller.presentViewController(
self.native,
animated=False,
completion=None,
)
while self.response is None:
NSRunLoop.currentRunLoop.runUntilDate(NSDate.alloc().init())
return self.response
def info_dialog(window, title, message):
dialog = TogaModalDialog(title=title, message=message)
dialog.add_ok_button()
return dialog.runModal(window)
def question_dialog(window, title, message):
dialog = TogaModalDialog(title=title, message=message)
dialog.add_yes_button()
dialog.add_no_button()
return dialog.runModal(window)
def confirm_dialog(window, title, message):
dialog = TogaModalDialog(title=title, message=message)
dialog.add_ok_button()
dialog.add_cancel_button()
return dialog.runModal(window)
def error_dialog(window, title, message):
dialog = TogaModalDialog(title=title, message=message)
dialog.add_ok_button()
return dialog.runModal(window)
|
the-stack_0_22766 | import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as F
from .. import BaseModel, register_model
from cogdl.models.nn import GATLayer
from cogdl.utils import add_remaining_self_loops
from cogdl.trainers.daegc_trainer import DAEGCTrainer
@register_model("daegc")
class DAEGC(BaseModel):
r"""The DAEGC model from the `"Attributed Graph Clustering: A Deep Attentional Embedding Approach"
<https://arxiv.org/abs/1906.06532>`_ paper
Args:
num_clusters (int) : Number of clusters.
T (int) : Number of iterations to recalculate P and Q
gamma (float) : Hyperparameter that controls two parts of the loss.
"""
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument("--num-features", type=int)
parser.add_argument("--hidden-size", type=int, default=256)
parser.add_argument("--embedding-size", type=int, default=16)
parser.add_argument("--num-heads", type=int, default=1)
parser.add_argument("--dropout", type=float, default=0)
parser.add_argument("--max-epoch", type=int, default=100)
parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--T", type=int, default=5)
parser.add_argument("--gamma", type=float, default=10)
# fmt: on
@classmethod
def build_model_from_args(cls, args):
return cls(
args.num_features, args.hidden_size, args.embedding_size, args.num_heads, args.dropout, args.num_clusters
)
def __init__(self, num_features, hidden_size, embedding_size, num_heads, dropout, num_clusters):
super(DAEGC, self).__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.embedding_size = embedding_size
self.dropout = dropout
self.num_clusters = num_clusters
self.att1 = GATLayer(
num_features, hidden_size, dropout=dropout, alpha=0.2, nhead=num_heads, concat=True, fast_mode=False
)
self.att2 = GATLayer(
hidden_size * num_heads, embedding_size, dropout=dropout, alpha=0.2, nhead=1, concat=False, fast_mode=False
)
self.cluster_center = None
def get_trainer(self, task, args):
return DAEGCTrainer
def forward(self, graph):
x = graph.x
x = F.dropout(x, p=self.dropout, training=self.training)
x = F.elu(self.att1(graph, x))
x = F.dropout(x, p=self.dropout, training=self.training)
x = F.elu(self.att2(graph, x))
return F.normalize(x, p=2, dim=1)
def get_2hop(self, edge_index):
r"""add 2-hop neighbors as new edges"""
G = nx.Graph()
G.add_edges_from(edge_index.t().tolist())
H = nx.Graph()
for i in range(G.number_of_nodes()):
layers = dict(nx.bfs_successors(G, source=i, depth_limit=2))
for succ in layers:
for idx in layers[succ]:
H.add_edge(i, idx)
return torch.tensor(list(H.edges())).t()
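    # Illustrative example: for edge_index [[0, 1], [1, 2]] (a path 0-1-2),
    # get_2hop returns the original edges plus the 2-hop pair (0, 2), as a
    # 2 x num_edges tensor of undirected edges.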
def get_features(self, data):
return self.forward(data).detach()
def recon_loss(self, z, adj):
# print(torch.mm(z, z.t()), adj)
return F.binary_cross_entropy(F.softmax(torch.mm(z, z.t())), adj, reduction="sum")
|
the-stack_0_22769 | import os
from utils import ods_dataset_api as DatasetAPI
from utils import ods_catalog_api as CatalogAPI
DATASET_ID_TEST = 'arbresremarquablesparis2011@public'
os.environ['DJANGO_SETTINGS_MODULE'] = 'chatbot_app.settings'
class TestODSApiAvailability(object):
def test_api_catalog_availability(self):
response = CatalogAPI.dataset_meta_request(DATASET_ID_TEST)
assert response
response = CatalogAPI.datasets_meta_request(start=0, rows=5)
assert response
def test_apiV2_record_availability(self):
response = DatasetAPI.dataset_records_request(DATASET_ID_TEST, rows=5)
assert response
|
the-stack_0_22770 | import pytest
from ray.util.client.ray_client_helpers import ray_start_client_server
from ray._raylet import NodeID
from ray.runtime_context import RuntimeContext
def test_get_ray_metadata(ray_start_regular_shared):
"""Test the ClusterInfo client data pathway and API surface"""
with ray_start_client_server() as ray:
ip_address = ray_start_regular_shared["node_ip_address"]
initialized = ray.is_initialized()
assert initialized
nodes = ray.nodes()
assert len(nodes) == 1, nodes
assert nodes[0]["NodeManagerAddress"] == ip_address
current_node_id = "node:" + ip_address
cluster_resources = ray.cluster_resources()
available_resources = ray.available_resources()
assert cluster_resources["CPU"] == 1.0
assert current_node_id in cluster_resources
assert current_node_id in available_resources
def test_get_runtime_context(ray_start_regular_shared):
"""Test the get_runtime_context data through the metadata API"""
with ray_start_client_server() as ray:
rtc = ray.get_runtime_context()
assert isinstance(rtc, RuntimeContext)
assert isinstance(rtc.node_id, NodeID)
assert len(rtc.node_id.hex()) == 56
with pytest.raises(Exception):
_ = rtc.task_id
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_0_22773 | # -*- coding: utf-8 -*-
"""Main module."""
import functools
import logging
import multiprocessing as mp
import multiprocessing.queues as mpq
import signal
import sys
import time
from queue import Empty, Full
DEFAULT_POLLING_TIMEOUT = 0.02
MAX_SLEEP_SECS = 0.02
start_time = time.monotonic()
def _logger(name, level, msg, exc_info=None):
elapsed = time.monotonic() - start_time
hours = int(elapsed // 60)
seconds = elapsed - (hours * 60)
logging.log(level, f'{hours:3}:{seconds:06.3f} {name:20} {msg}', exc_info=exc_info)
# -- Queue handling support
class MPQueue(mpq.Queue):
# -- See StackOverflow Article :
# https://stackoverflow.com/questions/39496554/cannot-subclass-multiprocessing-queue-in-python-3-5
#
# -- tldr; mp.Queue is a _method_ that returns an mpq.Queue object. That object
# requires a context for proper operation, so this __init__ does that work as well.
def __init__(self, *args, **kwargs):
ctx = mp.get_context()
super().__init__(*args, **kwargs, ctx=ctx)
def safe_get(self, timeout=DEFAULT_POLLING_TIMEOUT):
try:
if timeout is None:
return self.get(block=False)
else:
return self.get(block=True, timeout=timeout)
except Empty:
return None
def safe_put(self, item, timeout=DEFAULT_POLLING_TIMEOUT):
try:
self.put(item, block=False, timeout=timeout)
return True
except Full:
return False
def drain(self):
item = self.safe_get()
while item:
yield item
item = self.safe_get()
def safe_close(self):
num_left = sum(1 for __ in self.drain())
self.close()
self.join_thread()
return num_left
# -- useful function
def _sleep_secs(max_sleep, end_time=999999999999999.9):
# Calculate time left to sleep, no less than 0
return max(0.0, min(end_time - time.time(), max_sleep))
# -- Standard Event Queue manager
class EventMessage:
def __init__(self, msg_src, msg_type, msg):
self.id = time.time()
self.msg_src = msg_src
self.msg_type = msg_type
self.msg = msg
def __str__(self):
return f"{self.msg_src:10} - {self.msg_type:10} : {self.msg}"
# -- Signal Handling
class TerminateInterrupt(BaseException):
pass
class SignalObject:
MAX_TERMINATE_CALLED = 3
def __init__(self, shutdown_event):
self.terminate_called = 0
self.shutdown_event = shutdown_event
def default_signal_handler(signal_object, exception_class, signal_num, current_stack_frame):
signal_object.terminate_called += 1
signal_object.shutdown_event.set()
if signal_object.terminate_called >= signal_object.MAX_TERMINATE_CALLED:
raise exception_class()
def init_signal(signal_num, signal_object, exception_class, handler):
handler = functools.partial(handler, signal_object, exception_class)
signal.signal(signal_num, handler)
signal.siginterrupt(signal_num, False)
def init_signals(shutdown_event, int_handler, term_handler):
signal_object = SignalObject(shutdown_event)
init_signal(signal.SIGINT, signal_object, KeyboardInterrupt, int_handler)
init_signal(signal.SIGTERM, signal_object, TerminateInterrupt, term_handler)
return signal_object
# -- Worker Process classes
class ProcWorker:
MAX_TERMINATE_CALLED = 3
int_handler = staticmethod(default_signal_handler)
term_handler = staticmethod(default_signal_handler)
def __init__(self, name, startup_event, shutdown_event, event_q, *args):
self.name = name
self.log = functools.partial(_logger, f'{self.name} Worker')
self.startup_event = startup_event
self.shutdown_event = shutdown_event
self.event_q = event_q
self.terminate_called = 0
self.init_args(args)
def init_args(self, args):
if args:
raise ValueError(f"Unexpected arguments to ProcWorker.init_args: {args}")
def init_signals(self):
self.log(logging.DEBUG, "Entering init_signals")
signal_object = init_signals(self.shutdown_event, self.int_handler, self.term_handler)
return signal_object
def main_loop(self):
self.log(logging.DEBUG, "Entering main_loop")
while not self.shutdown_event.is_set():
self.main_func()
def startup(self):
self.log(logging.DEBUG, "Entering startup")
pass
def shutdown(self):
self.log(logging.DEBUG, "Entering shutdown")
pass
def main_func(self, *args):
self.log(logging.DEBUG, "Entering main_func")
raise NotImplementedError(f"{self.__class__.__name__}.main_func is not implemented")
def run(self):
self.init_signals()
try:
self.startup()
self.startup_event.set()
self.main_loop()
self.log(logging.INFO, "Normal Shutdown")
self.event_q.safe_put(EventMessage(self.name, "SHUTDOWN", "Normal"))
return 0
except BaseException as exc:
# -- Catch ALL exceptions, even Terminate and Keyboard interrupt
self.log(logging.ERROR, f"Exception Shutdown: {exc}", exc_info=True)
self.event_q.safe_put(EventMessage(self.name, "FATAL", f"{exc}"))
# -- TODO: call raise if in some sort of interactive mode
if type(exc) in (TerminateInterrupt, KeyboardInterrupt):
sys.exit(1)
else:
sys.exit(2)
finally:
self.shutdown()
class TimerProcWorker(ProcWorker):
INTERVAL_SECS = 10
MAX_SLEEP_SECS = 0.02
def main_loop(self):
self.log(logging.DEBUG, "Entering TimerProcWorker.main_loop")
next_time = time.time() + self.INTERVAL_SECS
while not self.shutdown_event.is_set():
sleep_secs = _sleep_secs(self.MAX_SLEEP_SECS, next_time)
time.sleep(sleep_secs)
if time.time() > next_time:
self.log(logging.DEBUG, f"TimerProcWorker.main_loop : calling main_func")
self.main_func()
next_time = time.time() + self.INTERVAL_SECS
class QueueProcWorker(ProcWorker):
def init_args(self, args):
self.log(logging.DEBUG, f"Entering QueueProcWorker.init_args : {args}")
self.work_q, = args
def main_loop(self):
self.log(logging.DEBUG, "Entering QueueProcWorker.main_loop")
while not self.shutdown_event.is_set():
item = self.work_q.safe_get()
if not item:
continue
self.log(logging.DEBUG, f"QueueProcWorker.main_loop received '{item}' message")
if item == "END":
break
else:
self.main_func(item)
# -- Process Wrapper
def proc_worker_wrapper(proc_worker_class, name, startup_evt, shutdown_evt, event_q, *args):
proc_worker = proc_worker_class(name, startup_evt, shutdown_evt, event_q, *args)
return proc_worker.run()
class Proc:
STARTUP_WAIT_SECS = 3.0
SHUTDOWN_WAIT_SECS = 3.0
def __init__(self, name, worker_class, shutdown_event, event_q, *args):
self.log = functools.partial(_logger, f'{name} Worker')
self.name = name
self.shutdown_event = shutdown_event
self.startup_event = mp.Event()
self.proc = mp.Process(target=proc_worker_wrapper,
args=(worker_class, name, self.startup_event, shutdown_event, event_q, *args))
self.log(logging.DEBUG, f"Proc.__init__ starting : {name}")
self.proc.start()
started = self.startup_event.wait(timeout=Proc.STARTUP_WAIT_SECS)
self.log(logging.DEBUG, f"Proc.__init__ starting : {name} got {started}")
if not started:
self.terminate()
raise RuntimeError(f"Process {name} failed to startup after {Proc.STARTUP_WAIT_SECS} seconds")
def full_stop(self, wait_time=SHUTDOWN_WAIT_SECS):
self.log(logging.DEBUG, f"Proc.full_stop stoping : {self.name}")
self.shutdown_event.set()
self.proc.join(wait_time)
if self.proc.is_alive():
self.terminate()
def terminate(self):
self.log(logging.DEBUG, f"Proc.terminate terminating : {self.name}")
NUM_TRIES = 3
tries = NUM_TRIES
while tries and self.proc.is_alive():
self.proc.terminate()
time.sleep(0.01)
tries -= 1
if self.proc.is_alive():
self.log(logging.ERROR, f"Proc.terminate failed to terminate {self.name} after {NUM_TRIES} attempts")
return False
else:
self.log(logging.INFO, f"Proc.terminate terminated {self.name} after {NUM_TRIES - tries} attempt(s)")
return True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.full_stop()
return not exc_type
# -- Main Wrappers
class MainContext:
STOP_WAIT_SECS = 3.0
def __init__(self):
self.procs = []
self.queues = []
self.log = functools.partial(_logger, "MAIN")
self.shutdown_event = mp.Event()
self.event_queue = self.MPQueue()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
self.log(logging.ERROR, f"Exception: {exc_val}", exc_info=(exc_type, exc_val, exc_tb))
self._stopped_procs_result = self.stop_procs()
self._stopped_queues_result = self.stop_queues()
# -- Don't eat exceptions that reach here.
return not exc_type
def Proc(self, name, worker_class, *args):
proc = Proc(name, worker_class, self.shutdown_event, self.event_queue, *args)
self.procs.append(proc)
return proc
def MPQueue(self, *args, **kwargs):
q = MPQueue(*args, **kwargs)
self.queues.append(q)
return q
def stop_procs(self):
self.event_queue.safe_put(EventMessage("stop_procs", "END", "END"))
self.shutdown_event.set()
end_time = time.time() + self.STOP_WAIT_SECS
num_terminated = 0
num_failed = 0
# -- Wait up to STOP_WAIT_SECS for all processes to complete
for proc in self.procs:
join_secs = _sleep_secs(self.STOP_WAIT_SECS, end_time)
proc.proc.join(join_secs)
# -- Clear the procs list and _terminate_ any procs that
# have not yet exited
still_running = []
while self.procs:
proc = self.procs.pop()
if proc.proc.is_alive():
if proc.terminate():
num_terminated += 1
else:
still_running.append(proc)
else:
exitcode = proc.proc.exitcode
if exitcode:
self.log(logging.ERROR, f"Process {proc.name} ended with exitcode {exitcode}")
num_failed += 1
else:
self.log(logging.DEBUG, f"Process {proc.name} stopped successfully")
self.procs = still_running
return num_failed, num_terminated
def stop_queues(self):
num_items_left = 0
# -- Clear the queues list and close all associated queues
for q in self.queues:
num_items_left += sum(1 for __ in q.drain())
q.close()
# -- Wait for all queue threads to stop
while self.queues:
q = self.queues.pop(0)
q.join_thread()
return num_items_left
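# -- Minimal usage sketch (illustrative, not part of the original module).
# It wires a QueueProcWorker subclass into MainContext; the names DemoWorker
# and "WORK", the items pushed, and the one-second grace sleep are assumptions
# made for this example only.
class DemoWorker(QueueProcWorker):
    def main_func(self, item):
        # Called by QueueProcWorker.main_loop for every item taken off work_q.
        self.log(logging.INFO, f"handling item: {item}")
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    with MainContext() as main_ctx:
        work_q = main_ctx.MPQueue()
        main_ctx.Proc("WORK", DemoWorker, work_q)
        for item in ("alpha", "beta", "gamma"):
            work_q.safe_put(item)
        time.sleep(1.0)
        # Leaving the with-block sets the shutdown event, joins the worker
        # process and drains/closes all queues created via main_ctx.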
|
the-stack_0_22775 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test hardcoded decomposition rules and matrix definitions for standard gates."""
import inspect
import numpy as np
from ddt import ddt, data, unpack
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import Operator
from qiskit.test import QiskitTestCase
from qiskit.circuit import ParameterVector, Gate, ControlledGate
from qiskit.circuit.library import standard_gates
from qiskit.circuit.library import (
HGate,
CHGate,
IGate,
RGate,
RXGate,
CRXGate,
RYGate,
CRYGate,
RZGate,
CRZGate,
SGate,
SdgGate,
CSwapGate,
TGate,
TdgGate,
U1Gate,
CU1Gate,
U2Gate,
U3Gate,
CU3Gate,
XGate,
CXGate,
ECRGate,
CCXGate,
YGate,
CYGate,
ZGate,
CZGate,
RYYGate,
PhaseGate,
CPhaseGate,
UGate,
CUGate,
SXGate,
SXdgGate,
CSXGate,
RVGate,
)
from qiskit.circuit.library.standard_gates.equivalence_library import (
StandardEquivalenceLibrary as std_eqlib,
)
from .gate_utils import _get_free_params
class TestGateDefinitions(QiskitTestCase):
"""Test the decomposition of a gate in terms of other gates
yields the equivalent matrix as the hardcoded matrix definition
up to a global phase."""
def test_ch_definition(self): # TODO: expand this to all gates
"""Test ch gate matrix and definition."""
circ = QuantumCircuit(2)
circ.ch(0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_ccx_definition(self):
"""Test ccx gate matrix and definition."""
circ = QuantumCircuit(3)
circ.ccx(0, 1, 2)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_crz_definition(self):
"""Test crz gate matrix and definition."""
circ = QuantumCircuit(2)
circ.crz(1, 0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cry_definition(self):
"""Test cry gate matrix and definition."""
circ = QuantumCircuit(2)
circ.cry(1, 0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_crx_definition(self):
"""Test crx gate matrix and definition."""
circ = QuantumCircuit(2)
circ.crx(1, 0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cswap_definition(self):
"""Test cswap gate matrix and definition."""
circ = QuantumCircuit(3)
circ.cswap(0, 1, 2)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cu1_definition(self):
"""Test cu1 gate matrix and definition."""
circ = QuantumCircuit(2)
circ.append(CU1Gate(1), [0, 1])
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cu3_definition(self):
"""Test cu3 gate matrix and definition."""
circ = QuantumCircuit(2)
circ.append(CU3Gate(1, 1, 1), [0, 1])
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_cx_definition(self):
"""Test cx gate matrix and definition."""
circ = QuantumCircuit(2)
circ.cx(0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_ecr_definition(self):
"""Test ecr gate matrix and definition."""
circ = QuantumCircuit(2)
circ.ecr(0, 1)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_rv_definition(self):
"""Test R(v) gate to_matrix and definition."""
qreg = QuantumRegister(1)
circ = QuantumCircuit(qreg)
vec = np.array([0.1, 0.2, 0.3], dtype=float)
circ.rv(*vec, 0)
decomposed_circ = circ.decompose()
self.assertTrue(Operator(circ).equiv(Operator(decomposed_circ)))
def test_rv_r_equiv(self):
"""Test R(v) gate is equivalent to R gate."""
theta = np.pi / 5
phi = np.pi / 3
rgate = RGate(theta, phi)
axis = np.array([np.cos(phi), np.sin(phi), 0]) # RGate axis
rotvec = theta * axis
rv = RVGate(*rotvec)
self.assertTrue(np.array_equal(rgate.to_matrix(), rv.to_matrix()))
def test_rv_zero(self):
"""Test R(v) gate with zero vector returns identity"""
rv = RVGate(0, 0, 0)
self.assertTrue(np.array_equal(rv.to_matrix(), np.array([[1, 0], [0, 1]])))
@ddt
class TestStandardGates(QiskitTestCase):
"""Standard Extension Test."""
@unpack
@data(
*inspect.getmembers(
standard_gates,
predicate=lambda value: (inspect.isclass(value) and issubclass(value, Gate)),
)
)
def test_definition_parameters(self, class_name, gate_class):
"""Verify definitions from standard library include correct parameters."""
free_params = _get_free_params(gate_class)
n_params = len(free_params)
param_vector = ParameterVector("th", n_params)
if class_name in ("MCPhaseGate", "MCU1Gate"):
param_vector = param_vector[:-1]
gate = gate_class(*param_vector, num_ctrl_qubits=2)
elif class_name in ("MCXGate", "MCXGrayCode", "MCXRecursive", "MCXVChain"):
num_ctrl_qubits = 2
param_vector = param_vector[:-1]
gate = gate_class(num_ctrl_qubits, *param_vector)
elif class_name == "MSGate":
num_qubits = 2
param_vector = param_vector[:-1]
gate = gate_class(num_qubits, *param_vector)
else:
gate = gate_class(*param_vector)
if gate.definition is not None:
self.assertEqual(gate.definition.parameters, set(param_vector))
@unpack
@data(
*inspect.getmembers(
standard_gates,
predicate=lambda value: (inspect.isclass(value) and issubclass(value, Gate)),
)
)
def test_inverse(self, class_name, gate_class):
"""Verify self-inverse pair yield identity for all standard gates."""
free_params = _get_free_params(gate_class)
n_params = len(free_params)
float_vector = [0.1 + 0.1 * i for i in range(n_params)]
if class_name in ("MCPhaseGate", "MCU1Gate"):
float_vector = float_vector[:-1]
gate = gate_class(*float_vector, num_ctrl_qubits=2)
elif class_name in ("MCXGate", "MCXGrayCode", "MCXRecursive", "MCXVChain"):
num_ctrl_qubits = 3
float_vector = float_vector[:-1]
gate = gate_class(num_ctrl_qubits, *float_vector)
elif class_name == "MSGate":
num_qubits = 3
float_vector = float_vector[:-1]
gate = gate_class(num_qubits, *float_vector)
elif class_name == "PauliGate":
pauli_string = "IXYZ"
gate = gate_class(pauli_string)
else:
gate = gate_class(*float_vector)
from qiskit.quantum_info.operators.predicates import is_identity_matrix
self.assertTrue(is_identity_matrix(Operator(gate).dot(gate.inverse()).data))
if gate.definition is not None:
self.assertTrue(is_identity_matrix(Operator(gate).dot(gate.definition.inverse()).data))
self.assertTrue(is_identity_matrix(Operator(gate).dot(gate.inverse().definition).data))
class TestGateEquivalenceEqual(QiskitTestCase):
"""Test the decomposition of a gate in terms of other gates
yields the same matrix as the hardcoded matrix definition."""
@classmethod
def setUpClass(cls):
super().setUpClass()
class_list = Gate.__subclasses__() + ControlledGate.__subclasses__()
exclude = {
"ControlledGate",
"DiagonalGate",
"UCGate",
"MCGupDiag",
"MCU1Gate",
"UnitaryGate",
"HamiltonianGate",
"MCPhaseGate",
"UCPauliRotGate",
"SingleQubitUnitary",
"MCXGate",
"VariadicZeroParamGate",
"ClassicalFunction",
"ClassicalElement",
}
cls._gate_classes = []
for aclass in class_list:
if aclass.__name__ not in exclude:
cls._gate_classes.append(aclass)
def test_equivalence_phase(self):
"""Test that the equivalent circuits from the equivalency_library
have equal matrix representations"""
for gate_class in self._gate_classes:
with self.subTest(i=gate_class):
n_params = len(_get_free_params(gate_class))
params = [0.1 * i for i in range(1, n_params + 1)]
if gate_class.__name__ == "RXXGate":
params = [np.pi / 2]
if gate_class.__name__ in ["MSGate"]:
params[0] = 2
if gate_class.__name__ in ["PauliGate"]:
params = ["IXYZ"]
if gate_class.__name__ in ["BooleanExpression"]:
params = ["x | y"]
                if gate_class.__name__ in ["PauliEvolutionGate"]:
continue
gate = gate_class(*params)
equiv_lib_list = std_eqlib.get_entry(gate)
for ieq, equivalency in enumerate(equiv_lib_list):
with self.subTest(msg=gate.name + "_" + str(ieq)):
op1 = Operator(gate)
op2 = Operator(equivalency)
self.assertEqual(op1, op2)
@ddt
class TestStandardEquivalenceLibrary(QiskitTestCase):
"""Standard Extension Test."""
@data(
HGate,
CHGate,
IGate,
RGate,
RXGate,
CRXGate,
RYGate,
CRYGate,
RZGate,
CRZGate,
SGate,
SdgGate,
CSwapGate,
TGate,
TdgGate,
U1Gate,
CU1Gate,
U2Gate,
U3Gate,
CU3Gate,
XGate,
CXGate,
ECRGate,
CCXGate,
YGate,
CYGate,
ZGate,
CZGate,
RYYGate,
PhaseGate,
CPhaseGate,
UGate,
CUGate,
SXGate,
SXdgGate,
CSXGate,
)
def test_definition_parameters(self, gate_class):
"""Verify decompositions from standard equivalence library match definitions."""
n_params = len(_get_free_params(gate_class))
param_vector = ParameterVector("th", n_params)
float_vector = [0.1 * i for i in range(n_params)]
param_gate = gate_class(*param_vector)
float_gate = gate_class(*float_vector)
param_entry = std_eqlib.get_entry(param_gate)
float_entry = std_eqlib.get_entry(float_gate)
if not param_gate.definition or not param_gate.definition.data:
return
self.assertGreaterEqual(len(param_entry), 1)
self.assertGreaterEqual(len(float_entry), 1)
param_qc = QuantumCircuit(param_gate.num_qubits)
float_qc = QuantumCircuit(float_gate.num_qubits)
param_qc.append(param_gate, param_qc.qregs[0])
float_qc.append(float_gate, float_qc.qregs[0])
self.assertTrue(any(equiv == param_qc.decompose() for equiv in param_entry))
self.assertTrue(any(equiv == float_qc.decompose() for equiv in float_entry))
|
the-stack_0_22778 | import torch
from zhusuan.distributions import Distribution
class Gamma(Distribution):
"""
The class of univariate Gamma distribution
See :class:`~zhusuan.distributions.base.Distribution` for details.
:param alpha: A 'float' Var. Shape parameter of the Gamma distribution.
:param beta: A 'float' Var. Rate parameter of the Gamma distribution.
"""
def __init__(self,
dtype=torch.float32,
is_continues=True,
group_ndims=0,
device=torch.device('cpu'),
**kwargs):
super(Gamma, self).__init__(dtype,
is_continues,
is_reparameterized=False, # reparameterization trick is not applied for Gamma distribution
group_ndims=group_ndims,
device=device,
**kwargs)
self._alpha = torch.as_tensor(kwargs['alpha'], dtype = self._dtype).to(device) if type(kwargs['alpha']) in [int, float] else kwargs['alpha'].to(device)
self._beta = torch.as_tensor(kwargs['beta'], dtype = self._dtype).to(device) if type(kwargs['beta']) in [int, float] else kwargs['beta'].to(device)
@property
def alpha(self):
"""Shape parameter of the Gamma distribution."""
return self._alpha
@property
def beta(self):
"""Rate parameter of the Gamma distribution."""
return self._beta
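    # Illustrative shapes for the samplers below (an observation, not from the
    # original source): with alpha and beta of shape [2], _sample(n_samples=4)
    # tiles both parameters to [4, 2] and returns a [4, 2] sample, and
    # _log_prob on that sample returns elementwise log-densities of shape [4, 2].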
def _sample(self, n_samples=1):
if n_samples > 1:
_shape = self._alpha.shape
_shape = torch.Size([n_samples]) + _shape
_len = len(self._alpha.shape)
_alpha = self._alpha.repeat([n_samples, *_len * [1]])
_beta = self._beta.repeat([n_samples, *_len * [1]])
else:
_shape = self._alpha.shape
_alpha = torch.as_tensor(self._alpha, dtype=self._dtype)
_beta = torch.as_tensor(self._beta, dtype=self._dtype)
_sample = torch.distributions.gamma.Gamma(_alpha, _beta).sample()
self.sample_cache = _sample
return _sample
def _log_prob(self, sample=None):
if sample is None:
sample = self.sample_cache
if len(sample.shape) > len(self._alpha.shape):
n_samples = sample.shape[0]
_len = len(self._alpha.shape)
_alpha = self._alpha.repeat([n_samples, *_len * [1]])
_beta = self._beta.repeat([n_samples, *_len * [1]])
else:
_alpha = self._alpha
_beta = self._beta
        return torch.distributions.gamma.Gamma(_alpha, _beta).log_prob(sample)
|
the-stack_0_22781 | #!/usr/bin/env python2
# coding: utf-8
import mmap
import multiprocessing
import os
import random
import time
import unittest
from pykit import fsutil
from pykit import ututil
from pykit.cgrouparch import cgroup_manager
from pykit.cgrouparch import cgroup_util
dd = ututil.dd
random.seed(time.time())
base_dir = os.path.dirname(__file__)
class TestBlkio(unittest.TestCase):
def worker(self, index, duration, result_dict):
# wait for the cgroup directory tree to be setup.
time.sleep(0.2)
m = mmap.mmap(-1, 1024 * 1024 * 2)
data = ' ' * 1024 * 1024 * 2
m.write(data)
file_path = os.path.join(base_dir, 'test_file_%d' % index)
f = os.open(file_path, os.O_CREAT |
os.O_DIRECT | os.O_TRUNC | os.O_RDWR)
start_time = time.time()
dd('worker %d %d started at: %f' %
(index, os.getpid(), start_time))
count = 0
while True:
os.write(f, m)
count += 1
dd('worker %d %d wrote %d times' %
(index, os.getpid(), count))
if time.time() - start_time > duration:
break
dd('worker %d %d stoped at: %f' %
(index, os.getpid(), time.time()))
result_dict[index] = count
os.close(f)
return
def test_blkio_weight(self):
manager = multiprocessing.Manager()
result_dict = manager.dict()
p1 = multiprocessing.Process(target=self.worker,
args=(1, 10, result_dict))
p1.daemon = True
p1.start()
p2 = multiprocessing.Process(target=self.worker,
args=(2, 10, result_dict))
p2.daemon = True
p2.start()
p3 = multiprocessing.Process(target=self.worker,
args=(3, 10, result_dict))
p3.daemon = True
p3.start()
p4 = multiprocessing.Process(target=self.worker,
args=(4, 10, result_dict))
p4.daemon = True
p4.start()
arch_conf = {
'blkio': {
'sub_cgroup': {
'test_cgroup_a': {
'conf': {
'weight': int(500 * 0.95),
},
'sub_cgroup': {
'test_cgroup_a_sub1': {
'conf': {
'weight': 500,
'pids': [p1.pid],
},
},
'test_cgroup_a_sub2': {
'conf': {
'weight': 500,
'pids': [p2.pid],
},
},
},
},
'test_cgroup_b': {
'conf': {
'weight': int(500 * 0.05),
},
'sub_cgroup': {
'test_cgroup_b_sub1': {
'conf': {
'weight': 500,
'pids': [p3.pid],
},
},
'test_cgroup_b_sub2': {
'conf': {
'weight': 500,
'pids': [p4.pid],
},
},
},
},
},
},
}
context = {
'cgroup_dir': '/sys/fs/cgroup',
'arch_conf': {
'value': arch_conf
},
}
cgroup_manager.build_all_subsystem_cgroup_arch(context)
cgroup_manager.set_cgroup(context)
p1.join()
p2.join()
p3.join()
p4.join()
for cgrou_name in arch_conf['blkio']['sub_cgroup'].keys():
cgroup_util.remove_cgroup(
os.path.join(context['cgroup_dir'], 'blkio'),
os.path.join(context['cgroup_dir'], 'blkio', cgrou_name))
for i in range(1, 5):
fsutil.remove(os.path.join(base_dir, 'test_file_%d' % i))
dd(result_dict)
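        # The workers in test_cgroup_a share 95% of the blkio weight and those
        # in test_cgroup_b only 5%, so group a is expected to complete more
        # direct-I/O writes within the same 10-second window.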
self.assertGreater(result_dict[1] + result_dict[2],
result_dict[3] + result_dict[4])
|
the-stack_0_22782 | import os
import sys
from lettuce import *
try:
import pkgtools.pkg as pkg
except ImportError:
sys.path.insert(0, os.path.abspath(os.path.pardir))
import pkgtools.pkg as pkg
DISTS = os.path.abspath('dist')
ENV = {}
@step(r'I set (?P<var>[\w][\w\d]*) to "(?P<arg>.*?)" as (?P<obj>\w+)$')
def i_set_to_as(step, var, arg, obj):
dist = getattr(pkg, obj)(os.path.join(DISTS, arg))
ENV[var] = dist
setattr(world, var, dist)
@step(r'I get (?P<var>[\w][\w\d]*)\.(?P<attr>.*)$')
def i_get_attr(step, var, attr):
world.res = getattr(ENV[var], attr)
@step(r'I see (.*)$')
def i_see(step, result):
if result.startswith('*'):
result = os.path.abspath(result[1:])
if str(world.res) != result:
        raise AssertionError('Wrong result. Expected %s, got %s' % (result, world.res))
|
the-stack_0_22786 | #!/usr/bin/env python
# Simple script to convert molecular structures into Faunus AAM
# format. Uses OpenBabel's python interface to read a multitude
# of different input formats. Openbabel can be installed using conda:
# conda install --channel https://conda.anaconda.org/openbabel openbabel
# python 2/3 compatibility
from __future__ import print_function, division
import openbabel as ob
from math import pi
import sys, os, datetime
# see http://openbabel.org/docs/2.3.0/UseTheLibrary/PythonDoc.html
def MolecularWeight(residue):
Mw = 0
for atom in ob.OBResidueAtomIter(residue):
Mw += atom.GetAtomicMass()
return Mw
def Radius(residue):
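    # Radius of a sphere whose volume is Mw / rho, i.e. (3/(4*pi) * Mw/rho)**(1/3).
    # rho = 1.0 is an implicit unit-density assumption, so the absolute scale of
    # the returned radius depends on the mass/length units of the input.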
rho = 1.0
Mw = MolecularWeight(residue)
return (3. / (4 * pi) * Mw / rho) ** (1 / 3.)
def MassCenter(residue):
wsum = 0
v = [0, 0, 0]
for atom in ob.OBResidueAtomIter(residue):
w = atom.GetAtomicMass()
wsum += w
v[0] += w * atom.x()
v[1] += w * atom.y()
v[2] += w * atom.z()
v[0] /= wsum
v[1] /= wsum
v[2] /= wsum
return v
if len(sys.argv) == 1:
print("First argument must be a structure file. Supported formats:")
for s in ob.OBConversion().GetSupportedInputFormat():
print(s)
sys.exit()
mol = ob.OBMol()
obconv = ob.OBConversion()
infile = sys.argv[1]
informat = obconv.FormatFromExt(infile)
obconv.SetInFormat(informat)
obconv.ReadFile(mol, infile)
assert mol.NumResidues() > 0, infile + " not found or it is empty."
print("# Infile:", infile, "on", datetime.datetime.now(), os.uname()[1])
print(mol.NumResidues())
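# One AAM record per residue follows: name, residue number, x, y, z, charge,
# molecular weight and radius (charge is written as 0 since it is not derived
# from the input structure).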
for res in ob.OBResidueIter(mol):
cm = MassCenter(res)
resname = res.GetName()
resnum = res.GetNum()
charge = 0
radius = Radius(res)
weight = MolecularWeight(res)
print('{0:4} {1:5} {2:8.3f} {3:8.3f} {4:8.3f} {5:6.3f} {6:6.2f} {7:6.2f}'.format(
resname, resnum, cm[0], cm[1], cm[2], charge, weight, radius))
|
the-stack_0_22789 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import uuid
import numpy as np
import tensorflow as tf
from niftynet.engine.application_driver import ApplicationDriver
from niftynet.engine.application_variables import global_vars_init_or_restore
from niftynet.engine.handler_model import ModelRestorer
from niftynet.io.misc_io import set_logger
from niftynet.utilities.util_common import ParserNamespace
from niftynet.engine.signal import SESS_FINISHED, SESS_STARTED
from tests.niftynet_testcase import NiftyNetTestCase
# def _run_test_application():
# test_driver = get_initialised_driver()
# test_driver.run_application()
# return
def get_initialised_driver(starting_iter=0,
model_dir_rand=True,
vars_to_restore='',
application='tests.toy_application.ToyApplication'):
if model_dir_rand:
model_dir = os.path.join('.', 'testing_data', 'tmp', str(uuid.uuid4()))
os.makedirs(model_dir)
else:
model_dir = os.path.join('.', 'testing_data')
system_param = {
'SYSTEM': ParserNamespace(
action='train',
num_threads=2,
num_gpus=4,
cuda_devices='6',
model_dir=model_dir,
dataset_split_file=os.path.join(
'.', 'testing_data', 'testtoyapp.csv'),
event_handler=[
'niftynet.engine.handler_model.ModelRestorer',
'niftynet.engine.handler_sampler.SamplerThreading',
'niftynet.engine.handler_gradient.ApplyGradients'],
iteration_generator=None),
'NETWORK': ParserNamespace(
batch_size=20,
name='tests.toy_application.TinyNet'),
'TRAINING': ParserNamespace(
starting_iter=starting_iter,
max_iter=500,
save_every_n=20,
tensorboard_every_n=1,
max_checkpoints=20,
optimiser='niftynet.engine.application_optimiser.Adagrad',
validation_every_n=-1,
exclude_fraction_for_validation=0.1,
exclude_fraction_for_inference=0.1,
vars_to_restore=vars_to_restore,
patience=100,
lr=0.01),
'CUSTOM': ParserNamespace(
vector_size=100,
mean=10.0,
stddev=2.0,
name=application)
}
app_driver = ApplicationDriver()
app_driver.initialise_application(system_param, {})
# set parameters without __init__
app_driver.app.action_param = system_param['TRAINING']
app_driver.app.net_param = system_param['NETWORK']
app_driver.app.action = 'train'
return app_driver
class ApplicationDriverTest(NiftyNetTestCase):
def test_wrong_init(self):
app_driver = ApplicationDriver()
with self.assertRaisesRegexp(AttributeError, ''):
app_driver.initialise_application([], [])
# def test_create_app(self):
# test_driver = get_initialised_driver(499, True)
# with self.assertRaisesRegexp(ValueError, 'Could not import'):
# test_driver._create_app('test.test')
# with self.assertRaisesRegexp(ValueError, 'Could not import'):
# test_driver._create_app('testtest')
# with self.assertRaisesRegexp(ValueError, 'Could not import'):
# test_driver._create_app(1)
# test_driver._create_app('tests.toy_application.ToyApplication')
# def test_stop_app(self):
# test_driver = get_initialised_driver()
# graph = test_driver.create_graph(
# test_driver.app, test_driver.num_gpus, True)
# with self.cached_session(graph=graph) as sess:
# sess.run(global_vars_init_or_restore())
# GRAPH_CREATED.send(test_driver.app, iter_msg=None)
# SESS_STARTED.send(test_driver.app, iter_msg=None)
# train_op = test_driver.app.gradient_op
# SESS_FINISHED.send(test_driver.app, itermsg=None)
# test_driver.app.stop()
# try:
# while True:
# sess.run(train_op)
# except tf.errors.OutOfRangeError:
# for thread in test_driver.app.sampler[0][0]._threads:
# self.assertFalse(thread.isAlive(), "threads not closed")
def test_training_update(self):
test_driver = get_initialised_driver()
graph = test_driver.create_graph(test_driver.app, 1, True)
with self.cached_session(graph=graph) as sess:
SESS_STARTED.send(test_driver.app, iter_msg=None)
train_op = test_driver.app.gradient_op
test_tensor = graph.get_tensor_by_name(
'G/conv_bn_selu/conv_/w:0')
var_0 = sess.run(test_tensor)
sess.run(train_op)
var_1 = sess.run(test_tensor)
square_diff = np.sum(np.abs(var_0 - var_1))
self.assertGreater(
square_diff, 0.0, 'train_op does not change model')
SESS_FINISHED.send(test_driver.app, itermsg=None)
test_driver.app.stop()
def test_multi_device_inputs(self):
test_driver = get_initialised_driver()
graph = test_driver.create_graph(
test_driver.app, test_driver.num_gpus, True)
with self.cached_session(graph=graph) as sess:
SESS_STARTED.send(test_driver.app, iter_msg=None)
for i in range(2):
sess.run(test_driver.app.gradient_op)
s_0, s_1, s_2, s_3 = sess.run([
graph.get_tensor_by_name(
'worker_0/feature_input:0'),
graph.get_tensor_by_name(
'worker_1/feature_input:0'),
graph.get_tensor_by_name(
'worker_2/feature_input:0'),
graph.get_tensor_by_name(
'worker_3/feature_input:0')
])
msg = 'same input data for different devices'
self.assertGreater(np.sum(np.abs(s_0 - s_1)), 0.0, msg)
self.assertGreater(np.sum(np.abs(s_0 - s_2)), 0.0, msg)
self.assertGreater(np.sum(np.abs(s_0 - s_3)), 0.0, msg)
self.assertGreater(np.sum(np.abs(s_1 - s_2)), 0.0, msg)
self.assertGreater(np.sum(np.abs(s_1 - s_3)), 0.0, msg)
self.assertGreater(np.sum(np.abs(s_2 - s_3)), 0.0, msg)
SESS_FINISHED.send(test_driver.app, itermsg=None)
test_driver.app.stop()
def test_multi_device_gradients(self):
test_driver = get_initialised_driver()
graph = test_driver.create_graph(
test_driver.app, test_driver.num_gpus, True)
with self.cached_session(graph=graph) as sess:
SESS_STARTED.send(test_driver.app, iter_msg=None)
for i in range(2):
sess.run(test_driver.app.gradient_op)
g_0, g_1, g_2, g_3, g_ave = sess.run([
graph.get_tensor_by_name(
'worker_0/ComputeGradients/gradients/AddN_5:0'),
graph.get_tensor_by_name(
'worker_1/ComputeGradients/gradients/AddN_5:0'),
graph.get_tensor_by_name(
'worker_2/ComputeGradients/gradients/AddN_5:0'),
graph.get_tensor_by_name(
'worker_3/ComputeGradients/gradients/AddN_5:0'),
graph.get_tensor_by_name(
'ApplyGradients/AveOverDevices:0')
])
self.check_gradients(g_0, g_1, g_2, g_3, g_ave)
SESS_FINISHED.send(test_driver.app, itermsg=None)
test_driver.app.stop()
def test_multi_device_multi_optimiser_gradients(self):
test_driver = get_initialised_driver(
application='tests.toy_application.ToyApplicationMultOpti')
graph = test_driver.create_graph(
test_driver.app, test_driver.num_gpus, True)
with self.cached_session(graph=graph) as sess:
SESS_STARTED.send(test_driver.app, iter_msg=None)
for i in range(2):
sess.run(test_driver.app.gradient_op)
# query generator gradient sample to check
dis_0, dis_1, dis_2, dis_3, dis_ave = sess.run([
graph.get_tensor_by_name(
'worker_0/ComputeGradientsD/gradients/AddN_5:0'),
graph.get_tensor_by_name(
'worker_1/ComputeGradientsD/gradients/AddN_5:0'),
graph.get_tensor_by_name(
'worker_2/ComputeGradientsD/gradients/AddN_5:0'),
graph.get_tensor_by_name(
'worker_3/ComputeGradientsD/gradients/AddN_5:0'),
graph.get_tensor_by_name(
'ApplyGradients/AveOverDevices:0')
])
# query discriminator gradient sample to check
gen_0, gen_1, gen_2, gen_3, gen_ave = sess.run([
graph.get_tensor_by_name(
'worker_0/ComputeGradientsG/gradients/worker_0/tinynet/G/conv/conv_/conv/ExpandDims_1_grad/Reshape:0'),
graph.get_tensor_by_name(
'worker_1/ComputeGradientsG/gradients/worker_1/tinynet/G/conv/conv_/conv/ExpandDims_1_grad/Reshape:0'),
graph.get_tensor_by_name(
'worker_2/ComputeGradientsG/gradients/worker_2/tinynet/G/conv/conv_/conv/ExpandDims_1_grad/Reshape:0'),
graph.get_tensor_by_name(
'worker_3/ComputeGradientsG/gradients/worker_3/tinynet/G/conv/conv_/conv/ExpandDims_1_grad/Reshape:0'),
graph.get_tensor_by_name(
'ApplyGradients/AveOverDevices_14:0')
])
self.check_gradients(gen_0, gen_1, gen_2, gen_3, gen_ave)
self.check_gradients(dis_0, dis_1, dis_2, dis_3, dis_ave)
SESS_FINISHED.send(test_driver.app, itermsg=None)
test_driver.app.stop()
def check_gradients(self, g_0, g_1, g_2, g_3, g_ave):
msg = 'same gradients for different devices'
self.assertGreater(np.sum(np.abs(g_0 - g_1)), 0.0, msg)
self.assertGreater(np.sum(np.abs(g_0 - g_2)), 0.0, msg)
self.assertGreater(np.sum(np.abs(g_0 - g_3)), 0.0, msg)
self.assertGreater(np.sum(np.abs(g_1 - g_2)), 0.0, msg)
self.assertGreater(np.sum(np.abs(g_1 - g_3)), 0.0, msg)
self.assertGreater(np.sum(np.abs(g_2 - g_3)), 0.0, msg)
g_array = np.concatenate([g_0.reshape((1, -1)),
g_1.reshape((1, -1)),
g_2.reshape((1, -1)),
g_3.reshape((1, -1))], axis=0)
g_ave = g_ave.reshape(-1)
g_np_ave = np.mean(g_array, axis=0)
self.assertAllClose(g_np_ave, g_ave)
def test_rand_initialisation(self):
test_driver = get_initialised_driver(0, True)
graph = test_driver.create_graph(test_driver.app, 1, True)
with self.cached_session(graph=graph) as sess:
test_tensor = graph.get_tensor_by_name(
"G/conv_bn_selu/conv_/w:0")
with self.assertRaisesRegexp(
tf.errors.FailedPreconditionError,
'uninitialized value'):
sess.run(test_tensor)
ModelRestorer(**vars(test_driver)).rand_init_model(None)
sess.run(test_tensor)
_ = sess.run(tf.global_variables())
def test_from_latest_file_initialisation(self):
test_driver = get_initialised_driver(-1, False)
expected_init = np.array(
[[-0.03544217, 0.0228963, -0.04585603, 0.16923568, -0.51635778,
0.60694504, 0.01968583, -0.6252712, 0.28622296, -0.29527491,
0.61191976, 0.27878678, -0.07661559, -0.41357407, 0.70488983,
-0.10836645, 0.06488426, 0.0746650, -0.188567, -0.64652514]],
dtype=np.float32)
graph = test_driver.create_graph(test_driver.app, 1, True)
with self.cached_session(graph=graph) as sess:
test_tensor = graph.get_tensor_by_name(
"G/conv_bn_selu/conv_/w:0")
with self.assertRaisesRegexp(
tf.errors.FailedPreconditionError,
'uninitialized value'):
_ = sess.run(test_tensor)
ModelRestorer(**vars(test_driver)).restore_model(None)
after_init = sess.run(test_tensor)
self.assertAllClose(after_init[0], expected_init)
_ = sess.run(tf.global_variables())
# def test_not_found_file_initialisation(self):
# test_driver = get_initialised_driver(42, False)
# graph = test_driver.create_graph(test_driver.app, 1, True)
# with self.cached_session(graph=graph) as sess:
# with self.assertRaisesRegexp(
# ValueError, ''):
# ModelRestorer(**vars(test_driver)).restore_model(None)
# with self.assertRaisesRegexp(
# tf.errors.NotFoundError, 'Failed to find'):
# ModelRestorer(**vars(test_driver)).restore_model(None)
def test_from_file_initialisation(self):
test_driver = get_initialised_driver(40, False)
expected_init = np.array(
[[-0.23192197, 0.60880029, -0.24921742, -0.00186354, -0.3345384,
0.16067748, -0.2210995, -0.19460233, -0.3035436, -0.42839912,
-0.0489039, -0.90753943, -0.12664583, -0.23129687, 0.01584663,
-0.43854219, 0.40412974, 0.0396539, -0.1590578, -0.53759819]],
dtype=np.float32)
graph = test_driver.create_graph(test_driver.app, 1, True)
with self.cached_session(graph=graph) as sess:
test_tensor = graph.get_tensor_by_name(
"G/conv_bn_selu/conv_/w:0")
with self.assertRaisesRegexp(
tf.errors.FailedPreconditionError,
'uninitialized value'):
_ = sess.run(test_tensor)
ModelRestorer(**vars(test_driver)).restore_model(None)
after_init = sess.run(test_tensor)
self.assertAllClose(after_init[0], expected_init)
_ = sess.run(tf.global_variables())
def test_from_file_finetuning(self):
test_driver = get_initialised_driver(
40, False, '.*conv_bn_selu/.*conv_.*')
expected_init = np.array(
[[-0.23192197, 0.60880029, -0.24921742, -0.00186354, -0.3345384,
0.16067748, -0.2210995, -0.19460233, -0.3035436, -0.42839912,
-0.0489039, -0.90753943, -0.12664583, -0.23129687, 0.01584663,
-0.43854219, 0.40412974, 0.0396539, -0.1590578, -0.53759819]],
dtype=np.float32)
graph = test_driver.create_graph(test_driver.app, 1, True)
with self.cached_session(graph=graph) as sess:
test_tensor = graph.get_tensor_by_name(
"G/conv_bn_selu/conv_/w:0")
test_negative_tensor = graph.get_tensor_by_name(
"D/conv_relu/conv_/b:0")
with self.assertRaisesRegexp(
tf.errors.FailedPreconditionError,
'uninitialized value'):
_ = sess.run(test_tensor)
ModelRestorer(**vars(test_driver)).restore_model(None)
after_init = sess.run(test_tensor)
after_init_negative = sess.run(test_negative_tensor)
self.assertAllClose(after_init[0], expected_init)
            # variables not matched by the regex should keep their default initialisation
self.assertEqual(np.any(after_init_negative), False)
_ = sess.run(tf.global_variables())
bad_init = sess.run(tf.report_uninitialized_variables())
self.assertEqual(bad_init.size, 0)
if __name__ == "__main__":
set_logger()
# _run_test_application()
tf.test.main()
|
the-stack_0_22791 | import urllib2
import logging
from django.conf import settings
from django.http import HttpResponse
from django.template import RequestContext, loader
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.db.models import Count
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from djmp.views import get_mapproxy
from models import Service, Layer, Catalog
from tasks import (check_all_services, check_service, check_layer, remove_service_checks, unindex_layers_with_issues,
index_service, index_all_layers, index_layer, index_cached_layers, clear_index,
SEARCH_TYPE, SEARCH_URL)
from enums import SERVICE_TYPES
LOGGER = logging.getLogger(__name__)
class BootstrapPaginator(Paginator):
def __init__(self, *args, **kwargs):
"""
        :param wing_pages: How many pages will be shown before and after the current page.
"""
self.wing_pages = kwargs.pop('wing_pages', 3)
super(BootstrapPaginator, self).__init__(*args, **kwargs)
def _get_page(self, *args, **kwargs):
self.page = super(BootstrapPaginator, self)._get_page(*args, **kwargs)
return self.page
@property
def page_range(self):
return range(max(self.page.number - self.wing_pages, 1),
min(self.page.number + self.wing_pages + 1, self.num_pages + 1))
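# Illustrative example: with the default wing_pages=3, a paginator sitting on
# page 10 of 20 exposes page_range == [7, 8, 9, 10, 11, 12, 13], i.e. up to
# three page links on either side of the current page, clamped to the valid
# page numbers.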
def serialize_checks(check_set):
"""
Serialize a check_set for raphael
"""
check_set_list = []
for check in check_set.all()[:25]:
check_set_list.append(
{
'datetime': check.checked_datetime.isoformat(),
'value': check.response_time,
'success': 1 if check.success else 0
}
)
return check_set_list
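# Each serialized entry is a plain dict, e.g.
# {'datetime': '<ISO timestamp>', 'value': <response time>, 'success': 0 or 1},
# limited to the first 25 checks of the queryset.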
@login_required
def domains(request):
"""
    A page showing the number of services and layers, faceted by domain.
"""
url = ''
query = '*:*&facet=true&facet.limit=-1&facet.pivot=domain_name,service_id&wt=json&indent=true&rows=0'
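    # facet.pivot=domain_name,service_id requests a per-domain breakdown of
    # service counts; rows=0 means only facet data is returned, no documents.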
if settings.SEARCH_TYPE == 'elasticsearch':
url = '%s/select?q=%s' % (settings.SEARCH_URL, query)
if settings.SEARCH_TYPE == 'solr':
url = '%s/solr/hypermap/select?q=%s' % (settings.SEARCH_URL, query)
LOGGER.debug(url)
response = urllib2.urlopen(url)
data = response.read().replace('\n', '')
# stats
layers_count = Layer.objects.all().count()
services_count = Service.objects.all().count()
template = loader.get_template('aggregator/index.html')
context = RequestContext(request, {
'data': data,
'layers_count': layers_count,
'services_count': services_count,
})
return HttpResponse(template.render(context))
def index(request, catalog_slug=None):
order_by = request.GET.get('order_by', '-last_updated')
filter_by = request.GET.get('filter_by', None)
query = request.GET.get('q', None)
services = Service.objects.prefetch_related('check_set').all()
if catalog_slug:
services = Service.objects.filter(catalog__slug=catalog_slug)
# order_by
if 'total_checks' in order_by:
services = services.annotate(total_checks=Count('resource_ptr__check')).order_by(order_by)
elif 'layers_count' in order_by:
services = services.annotate(layers_count=Count('layer')).order_by(order_by)
else:
services = services.order_by(order_by)
# filter_by
if filter_by:
services = services.filter(type__exact=filter_by)
# query
if query:
services = services.filter(url__icontains=query)
# types filter
types_list = []
for service_type in SERVICE_TYPES:
type_item = []
service_type_code = service_type[0]
type_item.append(service_type_code)
type_item.append(service_type[1])
type_item.append(Service.objects.filter(type__exact=service_type_code).count())
types_list.append(type_item)
page = request.GET.get('page', 1)
paginator = BootstrapPaginator(services, settings.PAGINATION_DEFAULT_PAGINATION)
try:
services = paginator.page(page)
except PageNotAnInteger:
services = paginator.page(1)
except EmptyPage:
services = paginator.page(paginator.num_pages)
# stats
layers_count = Layer.objects.all().count()
services_count = Service.objects.all().count()
template = loader.get_template('aggregator/search.html')
context = RequestContext(request, {
'services': services,
'types_list': types_list,
'layers_count': layers_count,
'services_count': services_count,
'catalogs': Catalog.objects.filter(url__isnull=False),
'filter_by': filter_by,
})
return HttpResponse(template.render(context))
def service_detail(request, catalog_slug, service_uuid=None, service_id=None):
if service_uuid is not None:
service = get_object_or_404(Service, uuid=service_uuid)
else:
service = get_object_or_404(Service, pk=service_id)
if request.method == 'POST':
if 'check' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
check_service(service.id)
else:
check_service.delay(service.id)
if 'remove' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
remove_service_checks(service.id)
else:
remove_service_checks.delay(service.id)
if 'index' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
index_service(service.id)
else:
index_service.delay(service.id)
page = request.GET.get('page', 1)
layers = service.layer_set.select_related('catalog').prefetch_related('check_set').all()
paginator = BootstrapPaginator(layers, settings.PAGINATION_DEFAULT_PAGINATION)
try:
layers = paginator.page(page)
except PageNotAnInteger:
layers = paginator.page(1)
except EmptyPage:
layers = paginator.page(paginator.num_pages)
return render(request, 'aggregator/service_detail.html', {'service': service,
'layers': layers,
'SEARCH_TYPE': SEARCH_TYPE,
'SEARCH_URL': SEARCH_URL.rstrip('/'),
'catalog_slug': catalog_slug})
def service_checks(request, catalog_slug, service_uuid):
service = get_object_or_404(Service, uuid=service_uuid)
resource = serialize_checks(service.check_set)
page = request.GET.get('page', 1)
checks = service.check_set.all()
paginator = BootstrapPaginator(checks, settings.PAGINATION_DEFAULT_PAGINATION)
try:
checks = paginator.page(page)
except PageNotAnInteger:
checks = paginator.page(1)
except EmptyPage:
checks = paginator.page(paginator.num_pages)
return render(request, 'aggregator/service_checks.html', {'service': service,
'checks': checks,
'resource': resource})
def layer_detail(request, catalog_slug, layer_uuid=None, layer_id=None):
if layer_uuid is not None:
layer = get_object_or_404(Layer, uuid=layer_uuid)
else:
layer = get_object_or_404(Layer, pk=layer_id)
if request.method == 'POST':
if 'check' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
check_layer(layer.id)
else:
check_layer.delay(layer.id)
if 'remove' in request.POST:
layer.check_set.all().delete()
if 'index' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
index_layer(layer.id)
else:
index_layer.delay(layer.id)
return render(request, 'aggregator/layer_detail.html', {'layer': layer,
'SEARCH_TYPE': SEARCH_TYPE,
'SEARCH_URL': SEARCH_URL.rstrip('/'),
'catalog_slug': catalog_slug})
def layer_checks(request, catalog_slug, layer_uuid):
layer = get_object_or_404(Layer, uuid=layer_uuid)
resource = serialize_checks(layer.check_set)
page = request.GET.get('page', 1)
checks = layer.check_set.all()
paginator = BootstrapPaginator(checks, settings.PAGINATION_DEFAULT_PAGINATION)
try:
checks = paginator.page(page)
except PageNotAnInteger:
checks = paginator.page(1)
except EmptyPage:
checks = paginator.page(paginator.num_pages)
return render(request, 'aggregator/layer_checks.html', {'layer': layer,
'checks': checks,
'resource': resource})
@login_required
def tasks_runner(request):
"""
    A page that lets the admin run global tasks.
"""
# server info
cached_layers_number = 0
cached_layers = cache.get('layers')
if cached_layers:
cached_layers_number = len(cached_layers)
cached_deleted_layers_number = 0
cached_deleted_layers = cache.get('deleted_layers')
if cached_deleted_layers:
cached_deleted_layers_number = len(cached_deleted_layers)
# task actions
if request.method == 'POST':
if 'check_all' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
check_all_services()
else:
check_all_services.delay()
if 'index_all' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
index_all_layers()
else:
index_all_layers.delay()
if 'index_cached' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
index_cached_layers()
else:
index_cached_layers.delay()
if 'drop_cached' in request.POST:
cache.set('layers', None)
cache.set('deleted_layers', None)
if 'clear_index' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
clear_index()
else:
clear_index.delay()
if 'remove_index' in request.POST:
if settings.REGISTRY_SKIP_CELERY:
unindex_layers_with_issues()
else:
unindex_layers_with_issues.delay()
return render(
request,
'aggregator/tasks_runner.html', {
'cached_layers_number': cached_layers_number,
'cached_deleted_layers_number': cached_deleted_layers_number,
}
)
def layer_mapproxy(request, catalog_slug, layer_uuid, path_info):
"""
Get Layer with matching catalog and uuid
"""
layer = get_object_or_404(Layer,
uuid=layer_uuid,
catalog__slug=catalog_slug)
# for WorldMap layers we need to use the url of the layer
    if layer.service.type in ('Hypermap:WorldMap', 'Hypermap:WorldMap2',):
layer.service.url = layer.url
# Set up a mapproxy app for this particular layer
mp, yaml_config = get_mapproxy(layer)
query = request.META['QUERY_STRING']
if len(query) > 0:
path_info = path_info + '?' + query
params = {}
headers = {
'X-Script-Name': '/registry/{0}/layer/{1}/map/'.format(catalog_slug, layer.id),
'X-Forwarded-Host': request.META['HTTP_HOST'],
'HTTP_HOST': request.META['HTTP_HOST'],
'SERVER_NAME': request.META['SERVER_NAME'],
}
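    # These headers describe how the embedded MapProxy app is mounted under
    # Django, so URLs it generates (e.g. in capabilities documents) point back
    # through the /registry/<catalog>/layer/<id>/map/ endpoint rather than at
    # MapProxy directly.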
if path_info == '/config':
response = HttpResponse(yaml_config, content_type='text/plain')
return response
# Get a response from MapProxy as if it was running standalone.
mp_response = mp.get(path_info, params, headers)
# Create a Django response from the MapProxy WSGI response.
response = HttpResponse(mp_response.body, status=mp_response.status_int)
for header, value in mp_response.headers.iteritems():
response[header] = value
return response
|
the-stack_0_22792 | # -*- coding: utf-8 -*-
from keras.layers import Embedding, GlobalAveragePooling1D, Dense, Masking, Flatten, Dropout, Activation
from models.BasicModel import BasicModel
from keras.models import Model, Input, model_from_json, load_model
from keras.constraints import unit_norm
import sys
from .QDNN import QDNN
from layers import *
import math
import numpy as np
from keras import regularizers
from keras.initializers import Constant
from keras.models import Sequential
projector_to_dense = 1
projector_without_training = 2
amplitude_embedding_without_training = 3
word_weight_without_training = 4
word_weigth_with_idf = 5
class QDNNAblation(QDNN):
def initialize(self):
super(QDNNAblation, self).initialize()
self.ablation()
def __init__(self,opt):
super(QDNNAblation, self).__init__(opt)
def ablation(self):
if self.opt.ablation== projector_to_dense:
print("projector_to_dense")
self.projection = ComplexDense(units = self.opt.nb_classes, activation= "sigmoid", bias_initializer=Constant(value=-1), init_criterion = self.opt.init_mode)
elif self.opt.ablation == projector_without_training:
print("projector_without_training")
self.projection = ComplexMeasurement(units = self.opt.measurement_size,trainable = False)
elif self.opt.ablation == amplitude_embedding_without_training:
print("amplitude_embedding_without_training")
self.amplitude_embedding = amplitude_embedding_layer(np.transpose(self.opt.lookup_table), self.opt.max_sequence_length, trainable = False, random_init = self.opt.random_init,l2_reg=self.opt.amplitude_l2)
elif self.opt.ablation == word_weight_without_training:
print("word_weight_without_training")
self.weight_embedding = Embedding(self.opt.lookup_table.shape[0], 1, trainable = False)
elif self.opt.ablation == word_weigth_with_idf:
            weights = np.array([[num] for num in self.opt.idfs])
print(weights.shape)
# print(self.opt.lookup_table.shape[0], 1)
self.weight_embedding = Embedding(self.opt.lookup_table.shape[0], 1, trainable = False,weights=[weights])
else:
pass
|
the-stack_0_22793 | # -*- coding: utf-8 -*-
import json
import os
import random
import click
import neptune
import numpy as np
import regex
import torch
from loguru import logger
from neptune.exceptions import NoExperimentContext
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from bert.optimization import BertAdam
from bert.tokenization import BertTokenizer
from eval import evalb
from label_encoder import LabelEncoder
from model import ChartParser
from trees import InternalParseNode, load_trees
try:
from apex import amp
except ImportError:
pass
MODEL_FILENAME = "model.bin"
BERT_TOKEN_MAPPING = {
"-LRB-": "(",
"-RRB-": ")",
"-LCB-": "{",
"-RCB-": "}",
"-LSB-": "[",
"-RSB-": "]",
}
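# Penn Treebank escapes brackets and braces as -LRB-, -RCB-, etc.; this mapping
# restores the literal characters so the BERT tokenizer sees the same surface
# forms it was pretrained on.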
def create_dataloader(sentences, batch_size, tag_encoder, tokenizer, is_eval):
features = []
for sentence in sentences:
tokens = []
tags = []
sections = []
for tag, phrase in sentence:
subtokens = []
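            # Split tokens joined by single underscores (e.g. "New_York" ->
            # ["New", "York"]) before WordPiece tokenization; the lookaround
            # pattern leaves leading, trailing, and doubled underscores intact.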
for token in regex.split(
r"(?<=[^\W_])_(?=[^\W_])", phrase, flags=regex.FULLCASE
):
for subtoken in tokenizer.tokenize(
BERT_TOKEN_MAPPING.get(token, token)
):
subtokens.append(subtoken)
tokens.extend(subtokens)
tags.append(tag_encoder.transform(tag, unknown_label="[UNK]"))
sections.append(len(subtokens))
ids = tokenizer.convert_tokens_to_ids(["[CLS]"] + tokens + ["[SEP]"])
attention_mask = [1] * len(ids)
features.append(
{
"ids": ids,
"attention_mask": attention_mask,
"tags": tags,
"sections": sections,
}
)
dataset = TensorDataset(torch.arange(len(features), dtype=torch.long))
sampler = SequentialSampler(dataset) if is_eval else RandomSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=batch_size)
return dataloader, features
def prepare_batch_input(indices, features, trees, sentences, tag_encoder, device):
_ids = []
_attention_masks = []
_tags = []
_sections = []
_trees = []
_sentences = []
ids_padding_size = 0
tags_padding_size = 0
for _id in indices:
_ids.append(features[_id]["ids"])
_attention_masks.append(features[_id]["attention_mask"])
_tags.append(features[_id]["tags"])
_sections.append(features[_id]["sections"])
_trees.append(trees[_id])
_sentences.append(sentences[_id])
ids_padding_size = max(ids_padding_size, len(features[_id]["ids"]))
tags_padding_size = max(tags_padding_size, len(features[_id]["tags"]))
# Zero-pad
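    # Pad each sequence in place to the longest example in the batch: ids and
    # attention masks are extended with zeros up to ids_padding_size, tag
    # sequences with the [PAD] tag id up to tags_padding_size.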
for _id, _attention_mask, _tag in zip(_ids, _attention_masks, _tags):
padding_size = ids_padding_size - len(_id)
_id += [0] * padding_size
_attention_mask += [0] * padding_size
_tag += [tag_encoder.transform("[PAD]")] * (tags_padding_size - len(_tag))
_ids = torch.tensor(_ids, dtype=torch.long, device=device)
_attention_masks = torch.tensor(_attention_masks, dtype=torch.long, device=device)
_tags = torch.tensor(_tags, dtype=torch.long, device=device)
return _ids, _attention_masks, _tags, _sections, _trees, _sentences
def eval(
model,
eval_dataloader,
eval_features,
eval_trees,
eval_sentences,
tag_encoder,
device,
):
# Evaluation phase
model.eval()
all_predicted_trees = []
for indices, *_ in tqdm(eval_dataloader, desc="Iteration"):
ids, attention_masks, tags, sections, _, sentences = prepare_batch_input(
indices=indices,
features=eval_features,
trees=eval_trees,
sentences=eval_sentences,
tag_encoder=tag_encoder,
device=device,
)
with torch.no_grad():
predicted_trees = model(
ids=ids,
attention_masks=attention_masks,
tags=tags,
sections=sections,
sentences=sentences,
gold_trees=None,
)
for predicted_tree in predicted_trees:
all_predicted_trees.append(predicted_tree.convert())
return evalb(eval_trees, all_predicted_trees)
@click.command()
@click.option("--train_file", required=True, type=click.Path())
@click.option("--dev_file", required=True, type=click.Path())
@click.option("--test_file", required=True, type=click.Path())
@click.option("--output_dir", required=True, type=click.Path())
@click.option("--bert_model", required=True, type=click.Path())
@click.option("--lstm_layers", default=2, show_default=True, type=click.INT)
@click.option("--lstm_dim", default=250, show_default=True, type=click.INT)
@click.option("--tag_embedding_dim", default=50, show_default=True, type=click.INT)
@click.option("--label_hidden_dim", default=250, show_default=True, type=click.INT)
@click.option("--dropout_prob", default=0.4, show_default=True, type=click.FLOAT)
@click.option("--batch_size", default=32, show_default=True, type=click.INT)
@click.option("--num_epochs", default=20, show_default=True, type=click.INT)
@click.option("--learning_rate", default=5e-5, show_default=True, type=click.FLOAT)
@click.option("--warmup_proportion", default=0.1, show_default=True, type=click.FLOAT)
@click.option(
"--gradient_accumulation_steps", default=1, show_default=True, type=click.INT
)
@click.option("--seed", default=42, show_default=True, type=click.INT)
@click.option("--device", default=0, show_default=True, type=click.INT)
@click.option("--fp16", is_flag=True)
@click.option("--do_eval", is_flag=True)
@click.option("--resume", is_flag=True)
@click.option("--preload", is_flag=True)
@click.option("--freeze_bert", is_flag=True)
def main(*_, **kwargs):
use_cuda = torch.cuda.is_available() and kwargs["device"] >= 0
device = torch.device("cuda:" + str(kwargs["device"]) if use_cuda else "cpu")
if use_cuda:
torch.cuda.set_device(device)
kwargs["use_cuda"] = use_cuda
neptune.create_experiment(
name="bert-span-parser",
upload_source_files=[],
params={k: str(v) if isinstance(v, bool) else v for k, v in kwargs.items()},
)
logger.info("Settings: {}", json.dumps(kwargs, indent=2, ensure_ascii=False))
# For reproducibility
os.environ["PYTHONHASHSEED"] = str(kwargs["seed"])
random.seed(kwargs["seed"])
np.random.seed(kwargs["seed"])
torch.manual_seed(kwargs["seed"])
torch.cuda.manual_seed_all(kwargs["seed"])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Prepare and load data
tokenizer = BertTokenizer.from_pretrained(kwargs["bert_model"], do_lower_case=False)
logger.info("Loading data...")
train_treebank = load_trees(kwargs["train_file"])
dev_treebank = load_trees(kwargs["dev_file"])
test_treebank = load_trees(kwargs["test_file"])
logger.info(
"Loaded {:,} train, {:,} dev, and {:,} test examples!",
len(train_treebank),
len(dev_treebank),
len(test_treebank),
)
logger.info("Preprocessing data...")
train_parse = [tree.convert() for tree in train_treebank]
train_sentences = [
[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in train_parse
]
dev_sentences = [
[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in dev_treebank
]
test_sentences = [
[(leaf.tag, leaf.word) for leaf in tree.leaves()] for tree in test_treebank
]
logger.info("Data preprocessed!")
logger.info("Preparing data for training...")
tags = []
labels = []
for tree in train_parse:
nodes = [tree]
while nodes:
node = nodes.pop()
if isinstance(node, InternalParseNode):
labels.append(node.label)
nodes.extend(reversed(node.children))
else:
tags.append(node.tag)
tag_encoder = LabelEncoder()
tag_encoder.fit(tags, reserved_labels=["[PAD]", "[UNK]"])
label_encoder = LabelEncoder()
label_encoder.fit(labels, reserved_labels=[()])
logger.info("Data prepared!")
# Settings
num_train_optimization_steps = kwargs["num_epochs"] * (
(len(train_parse) - 1) // kwargs["batch_size"] + 1
)
kwargs["batch_size"] //= kwargs["gradient_accumulation_steps"]
logger.info("Creating dataloaders for training...")
train_dataloader, train_features = create_dataloader(
sentences=train_sentences,
batch_size=kwargs["batch_size"],
tag_encoder=tag_encoder,
tokenizer=tokenizer,
is_eval=False,
)
dev_dataloader, dev_features = create_dataloader(
sentences=dev_sentences,
batch_size=kwargs["batch_size"],
tag_encoder=tag_encoder,
tokenizer=tokenizer,
is_eval=True,
)
test_dataloader, test_features = create_dataloader(
sentences=test_sentences,
batch_size=kwargs["batch_size"],
tag_encoder=tag_encoder,
tokenizer=tokenizer,
is_eval=True,
)
logger.info("Dataloaders created!")
# Initialize model
model = ChartParser.from_pretrained(
kwargs["bert_model"],
tag_encoder=tag_encoder,
label_encoder=label_encoder,
lstm_layers=kwargs["lstm_layers"],
lstm_dim=kwargs["lstm_dim"],
tag_embedding_dim=kwargs["tag_embedding_dim"],
label_hidden_dim=kwargs["label_hidden_dim"],
dropout_prob=kwargs["dropout_prob"],
)
model.to(device)
# Prepare optimizer
param_optimizers = list(model.named_parameters())
if kwargs["freeze_bert"]:
for p in model.bert.parameters():
p.requires_grad = False
param_optimizers = [(n, p) for n, p in param_optimizers if p.requires_grad]
    # Hack to remove the pooler, which is unused and thus produces a None grad that breaks apex
param_optimizers = [n for n in param_optimizers if "pooler" not in n[0]]
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizers if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizers if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = BertAdam(
optimizer_grouped_parameters,
lr=kwargs["learning_rate"],
warmup=kwargs["warmup_proportion"],
t_total=num_train_optimization_steps,
)
if kwargs["fp16"]:
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
pretrained_model_file = os.path.join(kwargs["output_dir"], MODEL_FILENAME)
if kwargs["do_eval"]:
assert os.path.isfile(
pretrained_model_file
), "Pretrained model file does not exist!"
logger.info("Loading pretrained model from {}", pretrained_model_file)
# Load model from file
params = torch.load(pretrained_model_file, map_location=device)
model.load_state_dict(params["model"])
logger.info(
"Loaded pretrained model (Epoch: {:,}, Fscore: {:.2f})",
params["epoch"],
params["fscore"],
)
eval_score = eval(
model=model,
eval_dataloader=test_dataloader,
eval_features=test_features,
eval_trees=test_treebank,
eval_sentences=test_sentences,
tag_encoder=tag_encoder,
device=device,
)
neptune.send_metric("test_eval_precision", eval_score.precision())
neptune.send_metric("test_eval_recall", eval_score.recall())
neptune.send_metric("test_eval_fscore", eval_score.fscore())
tqdm.write("Evaluation score: {}".format(str(eval_score)))
else:
# Training phase
global_steps = 0
start_epoch = 0
best_dev_fscore = 0
if kwargs["preload"] or kwargs["resume"]:
assert os.path.isfile(
pretrained_model_file
), "Pretrained model file does not exist!"
logger.info("Resuming model from {}", pretrained_model_file)
# Load model from file
params = torch.load(pretrained_model_file, map_location=device)
model.load_state_dict(params["model"])
if kwargs["resume"]:
optimizer.load_state_dict(params["optimizer"])
torch.cuda.set_rng_state_all(
[state.cpu() for state in params["torch_cuda_random_state_all"]]
)
torch.set_rng_state(params["torch_random_state"].cpu())
np.random.set_state(params["np_random_state"])
random.setstate(params["random_state"])
global_steps = params["global_steps"]
start_epoch = params["epoch"] + 1
best_dev_fscore = params["fscore"]
else:
assert not os.path.isfile(
pretrained_model_file
), "Please remove or move the pretrained model file to another place!"
for epoch in trange(start_epoch, kwargs["num_epochs"], desc="Epoch"):
model.train()
train_loss = 0
num_train_steps = 0
for step, (indices, *_) in enumerate(
tqdm(train_dataloader, desc="Iteration")
):
ids, attention_masks, tags, sections, trees, sentences = prepare_batch_input(
indices=indices,
features=train_features,
trees=train_parse,
sentences=train_sentences,
tag_encoder=tag_encoder,
device=device,
)
loss = model(
ids=ids,
attention_masks=attention_masks,
tags=tags,
sections=sections,
sentences=sentences,
gold_trees=trees,
)
if kwargs["gradient_accumulation_steps"] > 1:
loss /= kwargs["gradient_accumulation_steps"]
if kwargs["fp16"]:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
train_loss += loss.item()
num_train_steps += 1
if (step + 1) % kwargs["gradient_accumulation_steps"] == 0:
optimizer.step()
optimizer.zero_grad()
global_steps += 1
# Write logs
neptune.send_metric("train_loss", epoch, train_loss / num_train_steps)
neptune.send_metric("global_steps", epoch, global_steps)
tqdm.write(
"Epoch: {:,} - Train loss: {:.4f} - Global steps: {:,}".format(
epoch, train_loss / num_train_steps, global_steps
)
)
# Evaluate
eval_score = eval(
model=model,
eval_dataloader=dev_dataloader,
eval_features=dev_features,
eval_trees=dev_treebank,
eval_sentences=dev_sentences,
tag_encoder=tag_encoder,
device=device,
)
neptune.send_metric("eval_precision", epoch, eval_score.precision())
neptune.send_metric("eval_recall", epoch, eval_score.recall())
neptune.send_metric("eval_fscore", epoch, eval_score.fscore())
tqdm.write(
"Epoch: {:,} - Evaluation score: {}".format(epoch, str(eval_score))
)
# Save best model
if eval_score.fscore() > best_dev_fscore:
best_dev_fscore = eval_score.fscore()
tqdm.write("** Saving model...")
os.makedirs(kwargs["output_dir"], exist_ok=True)
torch.save(
{
"epoch": epoch,
"global_steps": global_steps,
"fscore": best_dev_fscore,
"random_state": random.getstate(),
"np_random_state": np.random.get_state(),
"torch_random_state": torch.get_rng_state(),
"torch_cuda_random_state_all": torch.cuda.get_rng_state_all(),
"optimizer": optimizer.state_dict(),
"model": (
model.module if hasattr(model, "module") else model
).state_dict(),
},
pretrained_model_file,
)
tqdm.write("** Best evaluation fscore: {:.2f}".format(best_dev_fscore))
if __name__ == "__main__":
neptune.init(project_qualified_name=os.getenv("NEPTUNE_PROJECT_NAME"))
try:
# main(
# [
# "--train_file=corpora/WSJ-PTB/02-21.10way.clean.train",
# "--dev_file=corpora/WSJ-PTB/22.auto.clean.dev",
# "--test_file=corpora/WSJ-PTB/23.auto.clean.test",
# "--output_dir=outputs",
# "--bert_model=models/bert-base-multilingual-cased",
# "--batch_size=32",
# "--num_epochs=20",
# "--learning_rate=3e-5",
# # "--fp16",
# # "--do_eval",
# ]
# )
main()
finally:
try:
neptune.stop()
except NoExperimentContext:
pass
|
the-stack_0_22794 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import gym
import logging
import pickle
import ray
from ray.rllib.env.atari_wrappers import wrap_deepmind, is_atari
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.env.env_context import EnvContext
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.env.external_multi_agent_env import ExternalMultiAgentEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.interface import EvaluatorInterface
from ray.rllib.evaluation.sampler import AsyncSampler, SyncSampler
from ray.rllib.policy.sample_batch import MultiAgentBatch, DEFAULT_POLICY_ID
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.offline import NoopOutput, IOContext, OutputWriter, InputReader
from ray.rllib.offline.is_estimator import ImportanceSamplingEstimator
from ray.rllib.offline.wis_estimator import WeightedImportanceSamplingEstimator
from ray.rllib.models import ModelCatalog
from ray.rllib.models.preprocessors import NoPreprocessor
from ray.rllib.utils import merge_dicts
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.debug import disable_log_once_globally, log_once, \
summarize, enable_periodic_logging
from ray.rllib.utils.filter import get_filter
from ray.rllib.utils.tf_run_builder import TFRunBuilder
from ray.rllib.utils import try_import_tf, try_import_torch
tf = try_import_tf()
torch, _ = try_import_torch()
logger = logging.getLogger(__name__)
# Handle to the current rollout worker, which will be set to the most recently
# created RolloutWorker in this process. This can be helpful to access in
# custom env or policy classes for debugging or advanced use cases.
_global_worker = None
@DeveloperAPI
def get_global_worker():
"""Returns a handle to the active rollout worker in this process."""
global _global_worker
return _global_worker
@DeveloperAPI
class RolloutWorker(EvaluatorInterface):
"""Common experience collection class.
This class wraps a policy instance and an environment class to
collect experiences from the environment. You can create many replicas of
this class as Ray actors to scale RL training.
This class supports vectorized and multi-agent policy evaluation (e.g.,
VectorEnv, MultiAgentEnv, etc.)
Examples:
>>> # Create a rollout worker and using it to collect experiences.
>>> worker = RolloutWorker(
... env_creator=lambda _: gym.make("CartPole-v0"),
... policy=PGTFPolicy)
>>> print(worker.sample())
SampleBatch({
"obs": [[...]], "actions": [[...]], "rewards": [[...]],
"dones": [[...]], "new_obs": [[...]]})
>>> # Creating a multi-agent rollout worker
>>> worker = RolloutWorker(
... env_creator=lambda _: MultiAgentTrafficGrid(num_cars=25),
... policies={
... # Use an ensemble of two policies for car agents
... "car_policy1":
... (PGTFPolicy, Box(...), Discrete(...), {"gamma": 0.99}),
... "car_policy2":
... (PGTFPolicy, Box(...), Discrete(...), {"gamma": 0.95}),
... # Use a single shared policy for all traffic lights
... "traffic_light_policy":
... (PGTFPolicy, Box(...), Discrete(...), {}),
... },
... policy_mapping_fn=lambda agent_id:
... random.choice(["car_policy1", "car_policy2"])
... if agent_id.startswith("car_") else "traffic_light_policy")
>>> print(worker.sample())
MultiAgentBatch({
"car_policy1": SampleBatch(...),
"car_policy2": SampleBatch(...),
"traffic_light_policy": SampleBatch(...)})
"""
@DeveloperAPI
@classmethod
def as_remote(cls,
num_cpus=None,
num_gpus=None,
memory=None,
object_store_memory=None,
resources=None):
return ray.remote(
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources)(cls)
@DeveloperAPI
def __init__(self,
env_creator,
policy,
policy_mapping_fn=None,
policies_to_train=None,
tf_session_creator=None,
batch_steps=100,
batch_mode="truncate_episodes",
episode_horizon=None,
preprocessor_pref="deepmind",
sample_async=False,
compress_observations=False,
num_envs=1,
observation_filter="NoFilter",
clip_rewards=None,
clip_actions=True,
env_config=None,
model_config=None,
policy_config=None,
worker_index=0,
monitor_path=None,
log_dir=None,
log_level=None,
callbacks=None,
input_creator=lambda ioctx: ioctx.default_sampler_input(),
input_evaluation=frozenset([]),
output_creator=lambda ioctx: NoopOutput(),
remote_worker_envs=False,
remote_env_batch_wait_ms=0,
soft_horizon=False,
no_done_at_end=False,
seed=None,
_fake_sampler=False):
"""Initialize a rollout worker.
Arguments:
env_creator (func): Function that returns a gym.Env given an
EnvContext wrapped configuration.
policy (class|dict): Either a class implementing
Policy, or a dictionary of policy id strings to
(Policy, obs_space, action_space, config) tuples. If a
dict is specified, then we are in multi-agent mode and a
policy_mapping_fn should also be set.
policy_mapping_fn (func): A function that maps agent ids to
policy ids in multi-agent mode. This function will be called
each time a new agent appears in an episode, to bind that agent
to a policy for the duration of the episode.
policies_to_train (list): Optional whitelist of policies to train,
or None for all policies.
tf_session_creator (func): A function that returns a TF session.
This is optional and only useful with TFPolicy.
batch_steps (int): The target number of env transitions to include
in each sample batch returned from this worker.
batch_mode (str): One of the following batch modes:
"truncate_episodes": Each call to sample() will return a batch
of at most `batch_steps * num_envs` in size. The batch will
be exactly `batch_steps * num_envs` in size if
postprocessing does not change batch sizes. Episodes may be
truncated in order to meet this size requirement.
"complete_episodes": Each call to sample() will return a batch
of at least `batch_steps * num_envs` in size. Episodes will
not be truncated, but multiple episodes may be packed
within one batch to meet the batch size. Note that when
`num_envs > 1`, episode steps will be buffered until the
episode completes, and hence batches may contain
significant amounts of off-policy data.
episode_horizon (int): Whether to stop episodes at this horizon.
preprocessor_pref (str): Whether to prefer RLlib preprocessors
("rllib") or deepmind ("deepmind") when applicable.
sample_async (bool): Whether to compute samples asynchronously in
the background, which improves throughput but can cause samples
to be slightly off-policy.
compress_observations (bool): If true, compress the observations.
They can be decompressed with rllib/utils/compression.
num_envs (int): If more than one, will create multiple envs
                and vectorize the computation of actions. This has no effect
                if the env already implements VectorEnv.
observation_filter (str): Name of observation filter to use.
clip_rewards (bool): Whether to clip rewards to [-1, 1] prior to
experience postprocessing. Setting to None means clip for Atari
only.
clip_actions (bool): Whether to clip action values to the range
specified by the policy action space.
env_config (dict): Config to pass to the env creator.
model_config (dict): Config to use when creating the policy model.
policy_config (dict): Config to pass to the policy. In the
multi-agent case, this config will be merged with the
per-policy configs specified by `policy`.
worker_index (int): For remote workers, this should be set to a
non-zero and unique value. This index is passed to created envs
through EnvContext so that envs can be configured per worker.
monitor_path (str): Write out episode stats and videos to this
directory if specified.
log_dir (str): Directory where logs can be placed.
log_level (str): Set the root log level on creation.
callbacks (dict): Dict of custom debug callbacks.
input_creator (func): Function that returns an InputReader object
for loading previous generated experiences.
input_evaluation (list): How to evaluate the policy performance.
This only makes sense to set when the input is reading offline
data. The possible values include:
- "is": the step-wise importance sampling estimator.
- "wis": the weighted step-wise is estimator.
- "simulation": run the environment in the background, but
use this data for evaluation only and never for learning.
output_creator (func): Function that returns an OutputWriter object
for saving generated experiences.
remote_worker_envs (bool): If using num_envs > 1, whether to create
those new envs in remote processes instead of in the current
                process. This adds overheads, but can make sense if your envs
                are expensive to step / reset.
remote_env_batch_wait_ms (float): Timeout that remote workers
are waiting when polling environments. 0 (continue when at
least one env is ready) is a reasonable default, but optimal
value could be obtained by measuring your environment
step / reset and model inference perf.
soft_horizon (bool): Calculate rewards but don't reset the
environment when the horizon is hit.
no_done_at_end (bool): Ignore the done=True at the end of the
episode and instead record done=False.
            seed (int): Set the seed of both np and tf to this value
                to ensure each remote worker has unique exploration behavior.
_fake_sampler (bool): Use a fake (inf speed) sampler for testing.
"""
global _global_worker
_global_worker = self
policy_config = policy_config or {}
if (tf and policy_config.get("eager")
and not policy_config.get("no_eager_on_workers")):
tf.enable_eager_execution()
if log_level:
logging.getLogger("ray.rllib").setLevel(log_level)
if worker_index > 1:
disable_log_once_globally() # only need 1 worker to log
elif log_level == "DEBUG":
enable_periodic_logging()
env_context = EnvContext(env_config or {}, worker_index)
self.policy_config = policy_config
self.callbacks = callbacks or {}
self.worker_index = worker_index
model_config = model_config or {}
policy_mapping_fn = (policy_mapping_fn
or (lambda agent_id: DEFAULT_POLICY_ID))
if not callable(policy_mapping_fn):
raise ValueError("Policy mapping function not callable?")
self.env_creator = env_creator
self.sample_batch_size = batch_steps * num_envs
self.batch_mode = batch_mode
self.compress_observations = compress_observations
self.preprocessing_enabled = True
self.last_batch = None
self._fake_sampler = _fake_sampler
self.env = _validate_env(env_creator(env_context))
if is_atari(self.env) and \
not model_config.get("custom_preprocessor") and \
preprocessor_pref == "deepmind":
# Deepmind wrappers already handle all preprocessing
self.preprocessing_enabled = False
if clip_rewards is None:
clip_rewards = True
def wrap(env):
env = wrap_deepmind(
env,
dim=model_config.get("dim"),
framestack=model_config.get("framestack"))
if monitor_path:
from gym import wrappers
env = wrappers.Monitor(env, monitor_path, resume=True)
return env
else:
def wrap(env):
if monitor_path:
from gym import wrappers
env = wrappers.Monitor(env, monitor_path, resume=True)
return env
self.env = wrap(self.env)
def make_env(vector_index):
return wrap(
env_creator(
env_context.copy_with_overrides(
vector_index=vector_index, remote=remote_worker_envs)))
self.tf_sess = None
policy_dict = _validate_and_canonicalize(policy, self.env)
self.policies_to_train = policies_to_train or list(policy_dict.keys())
# set numpy and python seed
if seed is not None:
np.random.seed(seed)
random.seed(seed)
if not hasattr(self.env, "seed"):
raise ValueError("Env doesn't support env.seed(): {}".format(
self.env))
self.env.seed(seed)
try:
assert torch is not None
torch.manual_seed(seed)
except AssertionError:
logger.info("Could not seed torch")
if _has_tensorflow_graph(policy_dict) and not (tf and
tf.executing_eagerly()):
if not tf:
raise ImportError("Could not import tensorflow")
with tf.Graph().as_default():
if tf_session_creator:
self.tf_sess = tf_session_creator()
else:
self.tf_sess = tf.Session(
config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True)))
with self.tf_sess.as_default():
# set graph-level seed
if seed is not None:
tf.set_random_seed(seed)
self.policy_map, self.preprocessors = \
self._build_policy_map(policy_dict, policy_config)
if (ray.is_initialized()
and ray.worker._mode() != ray.worker.LOCAL_MODE):
if not ray.get_gpu_ids():
logger.debug(
"Creating policy evaluation worker {}".format(
worker_index) +
" on CPU (please ignore any CUDA init errors)")
elif not tf.test.is_gpu_available():
raise RuntimeError(
"GPUs were assigned to this worker by Ray, but "
"TensorFlow reports GPU acceleration is disabled. "
"This could be due to a bad CUDA or TF installation.")
else:
self.policy_map, self.preprocessors = self._build_policy_map(
policy_dict, policy_config)
self.multiagent = set(self.policy_map.keys()) != {DEFAULT_POLICY_ID}
if self.multiagent:
if not ((isinstance(self.env, MultiAgentEnv)
or isinstance(self.env, ExternalMultiAgentEnv))
or isinstance(self.env, BaseEnv)):
raise ValueError(
"Have multiple policies {}, but the env ".format(
self.policy_map) +
"{} is not a subclass of BaseEnv, MultiAgentEnv or "
"ExternalMultiAgentEnv?".format(self.env))
self.filters = {
policy_id: get_filter(observation_filter,
policy.observation_space.shape)
for (policy_id, policy) in self.policy_map.items()
}
if self.worker_index == 0:
logger.info("Built filter map: {}".format(self.filters))
# Always use vector env for consistency even if num_envs = 1
self.async_env = BaseEnv.to_base_env(
self.env,
make_env=make_env,
num_envs=num_envs,
remote_envs=remote_worker_envs,
remote_env_batch_wait_ms=remote_env_batch_wait_ms)
self.num_envs = num_envs
if self.batch_mode == "truncate_episodes":
unroll_length = batch_steps
pack_episodes = True
elif self.batch_mode == "complete_episodes":
unroll_length = float("inf") # never cut episodes
pack_episodes = False # sampler will return 1 episode per poll
else:
raise ValueError("Unsupported batch mode: {}".format(
self.batch_mode))
self.io_context = IOContext(log_dir, policy_config, worker_index, self)
self.reward_estimators = []
for method in input_evaluation:
if method == "simulation":
logger.warning(
"Requested 'simulation' input evaluation method: "
"will discard all sampler outputs and keep only metrics.")
sample_async = True
elif method == "is":
ise = ImportanceSamplingEstimator.create(self.io_context)
self.reward_estimators.append(ise)
elif method == "wis":
wise = WeightedImportanceSamplingEstimator.create(
self.io_context)
self.reward_estimators.append(wise)
else:
raise ValueError(
"Unknown evaluation method: {}".format(method))
if sample_async:
self.sampler = AsyncSampler(
self.async_env,
self.policy_map,
policy_mapping_fn,
self.preprocessors,
self.filters,
clip_rewards,
unroll_length,
self.callbacks,
horizon=episode_horizon,
pack=pack_episodes,
tf_sess=self.tf_sess,
clip_actions=clip_actions,
blackhole_outputs="simulation" in input_evaluation,
soft_horizon=soft_horizon,
no_done_at_end=no_done_at_end)
self.sampler.start()
else:
self.sampler = SyncSampler(
self.async_env,
self.policy_map,
policy_mapping_fn,
self.preprocessors,
self.filters,
clip_rewards,
unroll_length,
self.callbacks,
horizon=episode_horizon,
pack=pack_episodes,
tf_sess=self.tf_sess,
clip_actions=clip_actions,
soft_horizon=soft_horizon,
no_done_at_end=no_done_at_end)
self.input_reader = input_creator(self.io_context)
assert isinstance(self.input_reader, InputReader), self.input_reader
self.output_writer = output_creator(self.io_context)
assert isinstance(self.output_writer, OutputWriter), self.output_writer
logger.debug(
"Created rollout worker with env {} ({}), policies {}".format(
self.async_env, self.env, self.policy_map))
@override(EvaluatorInterface)
def sample(self):
"""Evaluate the current policies and return a batch of experiences.
Return:
SampleBatch|MultiAgentBatch from evaluating the current policies.
"""
if self._fake_sampler and self.last_batch is not None:
return self.last_batch
if log_once("sample_start"):
logger.info("Generating sample batch of size {}".format(
self.sample_batch_size))
batches = [self.input_reader.next()]
steps_so_far = batches[0].count
# In truncate_episodes mode, never pull more than 1 batch per env.
# This avoids over-running the target batch size.
if self.batch_mode == "truncate_episodes":
max_batches = self.num_envs
else:
max_batches = float("inf")
while steps_so_far < self.sample_batch_size and len(
batches) < max_batches:
batch = self.input_reader.next()
steps_so_far += batch.count
batches.append(batch)
batch = batches[0].concat_samples(batches)
if self.callbacks.get("on_sample_end"):
self.callbacks["on_sample_end"]({"worker": self, "samples": batch})
# Always do writes prior to compression for consistency and to allow
# for better compression inside the writer.
self.output_writer.write(batch)
# Do off-policy estimation if needed
if self.reward_estimators:
for sub_batch in batch.split_by_episode():
for estimator in self.reward_estimators:
estimator.process(sub_batch)
if log_once("sample_end"):
logger.info("Completed sample batch:\n\n{}\n".format(
summarize(batch)))
if self.compress_observations == "bulk":
batch.compress(bulk=True)
elif self.compress_observations:
batch.compress()
if self._fake_sampler:
self.last_batch = batch
return batch
@DeveloperAPI
@ray.method(num_return_vals=2)
def sample_with_count(self):
"""Same as sample() but returns the count as a separate future."""
batch = self.sample()
return batch, batch.count
@override(EvaluatorInterface)
def get_weights(self, policies=None):
if policies is None:
policies = self.policy_map.keys()
return {
pid: policy.get_weights()
for pid, policy in self.policy_map.items() if pid in policies
}
@override(EvaluatorInterface)
def set_weights(self, weights):
for pid, w in weights.items():
self.policy_map[pid].set_weights(w)
@override(EvaluatorInterface)
def compute_gradients(self, samples):
if log_once("compute_gradients"):
logger.info("Compute gradients on:\n\n{}\n".format(
summarize(samples)))
if isinstance(samples, MultiAgentBatch):
grad_out, info_out = {}, {}
if self.tf_sess is not None:
builder = TFRunBuilder(self.tf_sess, "compute_gradients")
for pid, batch in samples.policy_batches.items():
if pid not in self.policies_to_train:
continue
grad_out[pid], info_out[pid] = (
self.policy_map[pid]._build_compute_gradients(
builder, batch))
grad_out = {k: builder.get(v) for k, v in grad_out.items()}
info_out = {k: builder.get(v) for k, v in info_out.items()}
else:
for pid, batch in samples.policy_batches.items():
if pid not in self.policies_to_train:
continue
grad_out[pid], info_out[pid] = (
self.policy_map[pid].compute_gradients(batch))
else:
grad_out, info_out = (
self.policy_map[DEFAULT_POLICY_ID].compute_gradients(samples))
info_out["batch_count"] = samples.count
if log_once("grad_out"):
logger.info("Compute grad info:\n\n{}\n".format(
summarize(info_out)))
return grad_out, info_out
@override(EvaluatorInterface)
def apply_gradients(self, grads):
if log_once("apply_gradients"):
logger.info("Apply gradients:\n\n{}\n".format(summarize(grads)))
if isinstance(grads, dict):
if self.tf_sess is not None:
builder = TFRunBuilder(self.tf_sess, "apply_gradients")
outputs = {
pid: self.policy_map[pid]._build_apply_gradients(
builder, grad)
for pid, grad in grads.items()
}
return {k: builder.get(v) for k, v in outputs.items()}
else:
return {
pid: self.policy_map[pid].apply_gradients(g)
for pid, g in grads.items()
}
else:
return self.policy_map[DEFAULT_POLICY_ID].apply_gradients(grads)
@override(EvaluatorInterface)
def learn_on_batch(self, samples):
if log_once("learn_on_batch"):
logger.info(
"Training on concatenated sample batches:\n\n{}\n".format(
summarize(samples)))
if isinstance(samples, MultiAgentBatch):
info_out = {}
to_fetch = {}
if self.tf_sess is not None:
builder = TFRunBuilder(self.tf_sess, "learn_on_batch")
else:
builder = None
for pid, batch in samples.policy_batches.items():
if pid not in self.policies_to_train:
continue
policy = self.policy_map[pid]
if builder and hasattr(policy, "_build_learn_on_batch"):
to_fetch[pid] = policy._build_learn_on_batch(
builder, batch)
else:
info_out[pid] = policy.learn_on_batch(batch)
info_out.update({k: builder.get(v) for k, v in to_fetch.items()})
else:
info_out = self.policy_map[DEFAULT_POLICY_ID].learn_on_batch(
samples)
if log_once("learn_out"):
logger.debug("Training out:\n\n{}\n".format(summarize(info_out)))
return info_out
@DeveloperAPI
def get_metrics(self):
"""Returns a list of new RolloutMetric objects from evaluation."""
out = self.sampler.get_metrics()
for m in self.reward_estimators:
out.extend(m.get_metrics())
return out
@DeveloperAPI
def foreach_env(self, func):
"""Apply the given function to each underlying env instance."""
envs = self.async_env.get_unwrapped()
if not envs:
return [func(self.async_env)]
else:
return [func(e) for e in envs]
@DeveloperAPI
def get_policy(self, policy_id=DEFAULT_POLICY_ID):
"""Return policy for the specified id, or None.
Arguments:
policy_id (str): id of policy to return.
"""
return self.policy_map.get(policy_id)
@DeveloperAPI
def for_policy(self, func, policy_id=DEFAULT_POLICY_ID):
"""Apply the given function to the specified policy."""
return func(self.policy_map[policy_id])
@DeveloperAPI
def foreach_policy(self, func):
"""Apply the given function to each (policy, policy_id) tuple."""
return [func(policy, pid) for pid, policy in self.policy_map.items()]
@DeveloperAPI
def foreach_trainable_policy(self, func):
"""
Applies the given function to each (policy, policy_id) tuple, which
can be found in `self.policies_to_train`.
Args:
func (callable): A function - taking a Policy and its ID - that is
called on all Policies within `self.policies_to_train`.
Returns:
List[any]: The list of n return values of all
`func([policy], [ID])`-calls.
"""
return [
func(policy, pid) for pid, policy in self.policy_map.items()
if pid in self.policies_to_train
]
@DeveloperAPI
def sync_filters(self, new_filters):
"""Changes self's filter to given and rebases any accumulated delta.
Args:
new_filters (dict): Filters with new state to update local copy.
"""
assert all(k in new_filters for k in self.filters)
for k in self.filters:
self.filters[k].sync(new_filters[k])
@DeveloperAPI
def get_filters(self, flush_after=False):
"""Returns a snapshot of filters.
Args:
flush_after (bool): Clears the filter buffer state.
Returns:
return_filters (dict): Dict for serializable filters
"""
return_filters = {}
for k, f in self.filters.items():
return_filters[k] = f.as_serializable()
if flush_after:
f.clear_buffer()
return return_filters
@DeveloperAPI
def save(self):
filters = self.get_filters(flush_after=True)
state = {
pid: self.policy_map[pid].get_state()
for pid in self.policy_map
}
return pickle.dumps({"filters": filters, "state": state})
@DeveloperAPI
def restore(self, objs):
objs = pickle.loads(objs)
self.sync_filters(objs["filters"])
for pid, state in objs["state"].items():
self.policy_map[pid].set_state(state)
@DeveloperAPI
def set_global_vars(self, global_vars):
self.foreach_policy(lambda p, _: p.on_global_var_update(global_vars))
@DeveloperAPI
def export_policy_model(self, export_dir, policy_id=DEFAULT_POLICY_ID):
self.policy_map[policy_id].export_model(export_dir)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir,
filename_prefix="model",
policy_id=DEFAULT_POLICY_ID):
self.policy_map[policy_id].export_checkpoint(export_dir,
filename_prefix)
@DeveloperAPI
def stop(self):
self.async_env.stop()
def _build_policy_map(self, policy_dict, policy_config):
policy_map = {}
preprocessors = {}
for name, (cls, obs_space, act_space,
conf) in sorted(policy_dict.items()):
logger.debug("Creating policy for {}".format(name))
merged_conf = merge_dicts(policy_config, conf)
if self.preprocessing_enabled:
preprocessor = ModelCatalog.get_preprocessor_for_space(
obs_space, merged_conf.get("model"))
preprocessors[name] = preprocessor
obs_space = preprocessor.observation_space
else:
preprocessors[name] = NoPreprocessor(obs_space)
if isinstance(obs_space, gym.spaces.Dict) or \
isinstance(obs_space, gym.spaces.Tuple):
raise ValueError(
"Found raw Tuple|Dict space as input to policy. "
"Please preprocess these observations with a "
"Tuple|DictFlatteningPreprocessor.")
if tf and tf.executing_eagerly():
if hasattr(cls, "as_eager"):
cls = cls.as_eager()
if policy_config["eager_tracing"]:
cls = cls.with_tracing()
elif not issubclass(cls, TFPolicy):
pass # could be some other type of policy
else:
raise ValueError("This policy does not support eager "
"execution: {}".format(cls))
if tf:
with tf.variable_scope(name):
policy_map[name] = cls(obs_space, act_space, merged_conf)
else:
policy_map[name] = cls(obs_space, act_space, merged_conf)
if self.worker_index == 0:
logger.info("Built policy map: {}".format(policy_map))
logger.info("Built preprocessor map: {}".format(preprocessors))
return policy_map, preprocessors
def __del__(self):
if hasattr(self, "sampler") and isinstance(self.sampler, AsyncSampler):
self.sampler.shutdown = True
def _validate_and_canonicalize(policy, env):
if isinstance(policy, dict):
_validate_multiagent_config(policy)
return policy
elif not issubclass(policy, Policy):
raise ValueError("policy must be a rllib.Policy class")
else:
if (isinstance(env, MultiAgentEnv)
and not hasattr(env, "observation_space")):
raise ValueError(
"MultiAgentEnv must have observation_space defined if run "
"in a single-agent configuration.")
return {
DEFAULT_POLICY_ID: (policy, env.observation_space,
env.action_space, {})
}
def _validate_multiagent_config(policy, allow_none_graph=False):
for k, v in policy.items():
if not isinstance(k, str):
raise ValueError("policy keys must be strs, got {}".format(
type(k)))
if not isinstance(v, (tuple, list)) or len(v) != 4:
raise ValueError(
"policy values must be tuples/lists of "
"(cls or None, obs_space, action_space, config), got {}".
format(v))
if allow_none_graph and v[0] is None:
pass
elif not issubclass(v[0], Policy):
raise ValueError("policy tuple value 0 must be a rllib.Policy "
"class or None, got {}".format(v[0]))
if not isinstance(v[1], gym.Space):
raise ValueError(
"policy tuple value 1 (observation_space) must be a "
"gym.Space, got {}".format(type(v[1])))
if not isinstance(v[2], gym.Space):
raise ValueError("policy tuple value 2 (action_space) must be a "
"gym.Space, got {}".format(type(v[2])))
if not isinstance(v[3], dict):
raise ValueError("policy tuple value 3 (config) must be a dict, "
"got {}".format(type(v[3])))
def _validate_env(env):
# allow this as a special case (assumed gym.Env)
if hasattr(env, "observation_space") and hasattr(env, "action_space"):
return env
allowed_types = [gym.Env, MultiAgentEnv, ExternalEnv, VectorEnv, BaseEnv]
if not any(isinstance(env, tpe) for tpe in allowed_types):
raise ValueError(
"Returned env should be an instance of gym.Env, MultiAgentEnv, "
"ExternalEnv, VectorEnv, or BaseEnv. The provided env creator "
"function returned {} ({}).".format(env, type(env)))
return env
def _has_tensorflow_graph(policy_dict):
for policy, _, _, _ in policy_dict.values():
if issubclass(policy, TFPolicy):
return True
return False
|
the-stack_0_22795 | import random # FOR RANDOM BEGINNINGS
from Tkinter import * # ALL VISUAL EQUIPMENT
WIDTH = 1000 # OF SCREEN IN PIXELS
HEIGHT = 500 # OF SCREEN IN PIXELS
BOIDS = 1 + 6 + 12 # IN SIMULATION
WALL = 100 # FROM SIDE IN PIXELS
WALL_FORCE = 30 # ACCELERATION PER MOVE
SPEED_LIMIT = 800 # FOR BOID VELOCITY
BOID_RADIUS = 3 # FOR BOIDS IN PIXELS
OFFSET_START = 20 # FROM WALL IN PIXELS
FRAMES_PER_SEC = 40 # SCREEN UPDATE RATE
WINDOWED = False # MOVABLE PROGRAM
################################################################################
def main():
# Start the program.
initialise()
mainloop()
def initialise():
# Setup simulation variables.
build_boids()
build_graph()
def build_graph():
# Build GUI environment.
global graph
root = Tk()
if WINDOWED:
root.resizable(False, False)
root.title('Boids')
else:
root.overrideredirect(True)
x = (root.winfo_screenwidth() - WIDTH) / 2
y = (root.winfo_screenheight() - HEIGHT) / 2
root.geometry('%dx%d+%d+%d' % (WIDTH, HEIGHT, x, y))
root.bind_all('<Escape>', lambda event: event.widget.quit())
graph = Canvas(root, width=WIDTH, height=HEIGHT, background='white')
graph.after(1000 / FRAMES_PER_SEC, update)
graph.pack()
def update():
# Main simulation loop.
graph.after(1000 / FRAMES_PER_SEC, update)
draw()
move()
def draw():
# Draw all boids.
graph.delete(ALL)
for boid in boids:
x1 = boid.position.x - BOID_RADIUS
y1 = boid.position.y - BOID_RADIUS
x2 = boid.position.x + BOID_RADIUS
y2 = boid.position.y + BOID_RADIUS
graph.create_oval((x1, y1, x2, y2), fill='red')
graph.update()
def move():
# Move all boids.
for boid in boids:
simulate_wall(boid)
boid.update_velocity(boids)
boid.move()
def simulate_wall(boid):
# Create viewing boundaries.
if boid.position.x < WALL:
boid.velocity.x += WALL_FORCE
elif boid.position.x > WIDTH - WALL:
boid.velocity.x -= WALL_FORCE
if boid.position.y < WALL:
boid.velocity.y += WALL_FORCE
elif boid.position.y > HEIGHT - WALL:
boid.velocity.y -= WALL_FORCE
def limit_speed(boid):
# Limit boid speed.
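    # Dividing by mag()/SPEED_LIMIT rescales the velocity so its magnitude
    # becomes exactly SPEED_LIMIT while its direction is preserved.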
if boid.velocity.mag() > SPEED_LIMIT:
boid.velocity /= boid.velocity.mag() / SPEED_LIMIT
def build_boids():
# Create boids variable.
global boids
boids = tuple(Boid(WIDTH, HEIGHT, OFFSET_START, FRAMES_PER_SEC) for boid in xrange(BOIDS))
################################################################################
# TWO DIMENSIONAL VECTOR CLASS
class TwoD:
def __init__(self, x, y):
self.x = float(x)
self.y = float(y)
def __repr__(self):
return 'TwoD(%s, %s)' % (self.x, self.y)
def __add__(self, other):
return TwoD(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return TwoD(self.x - other.x, self.y - other.y)
def __mul__(self, other):
return TwoD(self.x * other, self.y * other)
def __div__(self, other):
return TwoD(self.x / other, self.y / other)
def __iadd__(self, other):
self.x += other.x
self.y += other.y
return self
def __isub__(self, other):
self.x -= other.x
self.y -= other.y
return self
def __idiv__(self, other):
self.x /= other
self.y /= other
return self
def mag(self):
return ((self.x ** 2) + (self.y ** 2)) ** 0.5
################################################################################
# BOID RULE IMPLEMENTATION CLASS
class Boid:
def __init__(self, width, height, offset, move_divider):
self.velocity = TwoD(0, 0)
self.position = TwoD(*self.random_start(width, height, offset))
self.move_divider = move_divider * 5
def random_start(self, width, height, offset):
if random.randint(0, 1):
# along left and right
y = random.randint(1, height)
if random.randint(0, 1):
# along left
x = -offset
else:
# along right
x = width + offset
else:
# along top and bottom
x = random.randint(1, width)
if random.randint(0, 1):
# along top
y = -offset
else:
# along bottom
y = height + offset
return x, y
def update_velocity(self, boids):
v1 = self.rule1(boids)
v2 = self.rule2(boids)
v3 = self.rule3(boids)
self.__temp = v1 + v2 + v3
def move(self):
self.velocity += self.__temp
limit_speed(self)
self.position += self.velocity / self.move_divider
def rule1(self, boids):
        # clumping: steer towards the centre of mass of the other boids
vector = TwoD(0, 0)
for boid in boids:
if boid is not self:
vector += boid.position
vector /= len(boids) - 1
return (vector - self.position) / 7.5
def rule2(self, boids):
        # avoidance: steer away from any boid closer than 30 pixels
vector = TwoD(0, 0)
for boid in boids:
if boid is not self:
if (self.position - boid.position).mag() < 30:
vector -= (boid.position - self.position)
return vector * 1.5
def rule3(self, boids):
        # schooling: steer towards the average velocity of the other boids
vector = TwoD(0, 0)
for boid in boids:
if boid is not self:
vector += boid.velocity
vector /= len(boids) - 1
return (vector - self.velocity) / 8
################################################################################
# Execute the simulation.
if __name__ == '__main__':
main()
|
the-stack_0_22796 | import logging
import pickle
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from ray.tune.result import DEFAULT_METRIC
from ray.tune.sample import Categorical, Domain, Float, Integer, LogUniform, \
Quantized, Uniform
from ray.tune.suggest.suggestion import UNRESOLVED_SEARCH_SPACE, \
UNDEFINED_METRIC_MODE, UNDEFINED_SEARCH_SPACE
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils.util import is_nan_or_inf, unflatten_dict, \
validate_warmstart
try: # Python 3 only -- needed for lint test.
import hebo
import torch # hebo has torch as a dependency
except ImportError:
hebo = None
from ray.tune.suggest import Searcher
logger = logging.getLogger(__name__)
SPACE_ERROR_MESSAGE = ("Space must be either a HEBO DesignSpace object"
"or a dictionary with ONLY tune search spaces.")
class HEBOSearch(Searcher):
"""Uses HEBO (Heteroscedastic Evolutionary Bayesian Optimization)
to optimize hyperparameters.
    HEBO is a cutting-edge black-box optimization framework created
    by Huawei's Noah's Ark Lab. More info can be found here:
https://github.com/huawei-noah/noah-research/tree/master/HEBO.
You will need to install HEBO via the following:
.. code-block:: bash
pip install HEBO
`space` can either be a HEBO's `DesignSpace` object or a dict of Tune
search spaces.
Please note that the first few trials will be random and used
to kickstart the search process. In order to achieve good results,
we recommend setting the number of trials to at least 16.
Maximum number of concurrent trials is determined by `max_concurrent`
argument. Trials will be done in batches of `max_concurrent` trials.
It is not recommended to use this Searcher in a `ConcurrencyLimiter`.
Args:
space (dict|hebo.design_space.design_space.DesignSpace):
A dict mapping parameter names to Tune search spaces or a
HEBO DesignSpace object.
metric (str): The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
per default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
points_to_evaluate (list): Initial parameter suggestions to be run
first. This is for when you already have some good parameters
you want to run first to help the algorithm make better suggestions
for future parameters. Needs to be a list of dicts containing the
configurations.
evaluated_rewards (list): If you have previously evaluated the
parameters passed in as points_to_evaluate you can avoid
re-running those trials by passing in the reward attributes
as a list so the optimiser can be told the results without
needing to re-compute the trial. Must be the same length as
points_to_evaluate. (See tune/examples/hebo_example.py)
random_state_seed (int, None): Seed for reproducible
results. Defaults to None. Please note that setting this to a value
will change global random states for `numpy` and `torch`
            on initialization and loading from checkpoint.
max_concurrent (int, 8): Number of maximum concurrent trials.
**kwargs: The keyword arguments will be passed to `HEBO()``.
Tune automatically converts search spaces to HEBO's format:
.. code-block:: python
from ray import tune
from ray.tune.suggest.hebo import HEBOSearch
config = {
"width": tune.uniform(0, 20),
"height": tune.uniform(-100, 100)
}
hebo = HEBOSearch(metric="mean_loss", mode="min")
tune.run(my_func, config=config, search_alg=hebo)
Alternatively, you can pass a HEBO `DesignSpace` object manually to the
Searcher:
.. code-block:: python
from ray import tune
from ray.tune.suggest.hebo import HEBOSearch
from hebo.design_space.design_space import DesignSpace
space_config = [
{'name' : 'width', 'type' : 'num', 'lb' : 0, 'ub' : 20},
{'name' : 'height', 'type' : 'num', 'lb' : -100, 'ub' : 100},
]
space = DesignSpace().parse(space_config)
hebo = HEBOSearch(space, metric="mean_loss", mode="min")
tune.run(my_func, search_alg=hebo)
"""
def __init__(
self,
space: Optional[Union[
Dict, "hebo.design_space.design_space.DesignSpace"]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[Dict]] = None,
evaluated_rewards: Optional[List] = None,
random_state_seed: Optional[int] = None,
max_concurrent: int = 8,
**kwargs):
assert hebo is not None, (
"HEBO must be installed!. You can install HEBO with"
" the command: `pip install HEBO`.")
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
assert isinstance(max_concurrent, int) and max_concurrent >= 1, (
"`max_concurrent` must be an integer and at least 1.")
if random_state_seed is not None:
assert isinstance(
random_state_seed, int
), "random_state_seed must be None or int, got '{}'.".format(
type(random_state_seed))
super(HEBOSearch, self).__init__(metric=metric, mode=mode)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if resolved_vars:
raise TypeError(SPACE_ERROR_MESSAGE)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space)
elif space is not None and not isinstance(
space, hebo.design_space.design_space.DesignSpace):
raise TypeError(SPACE_ERROR_MESSAGE +
" Got {}.".format(type(space)))
self._hebo_config = kwargs
self._random_state_seed = random_state_seed
self._space = space
self._points_to_evaluate = points_to_evaluate
self._evaluated_rewards = evaluated_rewards
self._initial_points = []
self._live_trial_mapping = {}
self._max_concurrent = max_concurrent
self._suggestions_cache = []
self._batch_filled = False
self._opt = None
if space:
self._setup_optimizer()
def _setup_optimizer(self):
# HEBO internally minimizes, so "max" => -1
if self._mode == "max":
self._metric_op = -1.
elif self._mode == "min":
self._metric_op = 1.
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
if not isinstance(self._space,
hebo.design_space.design_space.DesignSpace):
raise ValueError(
f"Invalid search space: {type(self._space)}. Either pass a "
f"valid search space to the `HEBOSearch` class or pass "
f"a `config` parameter to `tune.run()`")
if self._space.num_paras <= 0:
raise ValueError(
"Got empty search space. Please make sure to pass "
"a valid search space with at least one parameter to "
"`HEBOSearch`")
if self._random_state_seed is not None:
np.random.seed(self._random_state_seed)
torch.random.manual_seed(self._random_state_seed)
self._opt = hebo.optimizers.hebo.HEBO(
space=self._space, **self._hebo_config)
if self._points_to_evaluate:
validate_warmstart(self._space.para_names,
self._points_to_evaluate,
self._evaluated_rewards)
if self._evaluated_rewards:
self._opt.observe(
pd.DataFrame(self._points_to_evaluate),
np.array(self._evaluated_rewards) * self._metric_op)
else:
self._initial_points = self._points_to_evaluate
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict, **spec) -> bool:
if self._opt:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
self._setup_optimizer()
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
if not self._opt:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"))
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__,
metric=self._metric,
mode=self._mode))
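        # When no trials are live, the previous batch has fully completed and a
        # new batch of suggestions may be requested from HEBO.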
if not self._live_trial_mapping:
self._batch_filled = False
if self._initial_points:
params = self._initial_points.pop(0)
suggestion = pd.DataFrame(params, index=[0])
else:
if self._batch_filled or len(
self._live_trial_mapping) >= self._max_concurrent:
return None
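            # Ask HEBO for a whole batch of suggestions at once and hand them
            # out one trial at a time from the cache.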
if not self._suggestions_cache:
suggestion = self._opt.suggest(
n_suggestions=self._max_concurrent)
self._suggestions_cache = suggestion.to_dict("records")
params = self._suggestions_cache.pop(0)
suggestion = pd.DataFrame(params, index=[0])
self._live_trial_mapping[trial_id] = suggestion
if len(self._live_trial_mapping) >= self._max_concurrent:
self._batch_filled = True
return unflatten_dict(params)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
HEBO always minimizes."""
if result:
self._process_result(trial_id, result)
self._live_trial_mapping.pop(trial_id)
def _process_result(self, trial_id: str, result: Dict):
trial_info = self._live_trial_mapping[trial_id]
if result and not is_nan_or_inf(result[self._metric]):
self._opt.observe(
trial_info, np.array([self._metric_op * result[self._metric]]))
def add_evaluated_point(self,
parameters: Dict,
value: float,
error: bool = False,
pruned: bool = False,
intermediate_values: Optional[List[float]] = None):
if intermediate_values:
logger.warning("HEBO doesn't use intermediate_values. Ignoring.")
if not error and not pruned:
self._opt.observe(
pd.DataFrame([parameters]),
np.array([value]) * self._metric_op)
else:
logger.warning("Only non errored and non pruned points"
" can be added to HEBO.")
def save(self, checkpoint_path: str):
"""Storing current optimizer state."""
if self._random_state_seed is not None:
numpy_random_state = np.random.get_state()
torch_random_state = torch.get_rng_state()
else:
numpy_random_state = None
torch_random_state = None
with open(checkpoint_path, "wb") as f:
pickle.dump((self._opt, self._initial_points, numpy_random_state,
torch_random_state, self._live_trial_mapping,
self._max_concurrent, self._suggestions_cache,
self._space, self._hebo_config, self._batch_filled),
f)
def restore(self, checkpoint_path: str):
"""Restoring current optimizer state."""
with open(checkpoint_path, "rb") as f:
(self._opt, self._initial_points, numpy_random_state,
torch_random_state, self._live_trial_mapping,
self._max_concurrent, self._suggestions_cache, self._space,
self._hebo_config, self._batch_filled) = pickle.load(f)
if numpy_random_state is not None:
np.random.set_state(numpy_random_state)
if torch_random_state is not None:
torch.random.set_rng_state(torch_random_state)
@staticmethod
def convert_search_space(spec: Dict, prefix: str = "") -> Dict:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
params = []
if not domain_vars and not grid_vars:
return {}
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a HEBO search space.")
def resolve_value(par: str, domain: Domain):
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning("HEBO search does not support quantization. "
"Dropped quantization.")
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if isinstance(sampler, LogUniform):
return {
"name": par,
"type": "pow",
"lb": domain.lower,
"ub": domain.upper,
"base": sampler.base
}
elif isinstance(sampler, Uniform):
return {
"name": par,
"type": "num",
"lb": domain.lower,
"ub": domain.upper
}
elif isinstance(domain, Integer):
if isinstance(sampler, LogUniform):
return {
"name": par,
"type": "pow_int",
"lb": domain.lower,
"ub": domain.upper - 1, # Upper bound exclusive
"base": sampler.base
}
elif isinstance(sampler, Uniform):
return {
"name": par,
"type": "int",
"lb": domain.lower,
"ub": domain.upper - 1, # Upper bound exclusive
}
elif isinstance(domain, Categorical):
return {
"name": par,
"type": "cat",
"categories": list(domain.categories)
}
raise ValueError("HEBO does not support parameters of type "
"`{}` with samplers of type `{}`".format(
type(domain).__name__,
type(domain.sampler).__name__))
for path, domain in domain_vars:
par = "/".join(
[str(p) for p in ((prefix, ) + path if prefix else path)])
value = resolve_value(par, domain)
params.append(value)
return hebo.design_space.design_space.DesignSpace().parse(params)
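# Illustrative sketch (not from the original source): for a Tune config such as
#   {"lr": tune.loguniform(1e-4, 1e-1), "layers": tune.randint(1, 4)}
# convert_search_space would build the HEBO DesignSpace from parameter dicts like
#   {"name": "lr", "type": "pow", "lb": 1e-4, "ub": 1e-1, "base": 10}
#   {"name": "layers", "type": "int", "lb": 1, "ub": 3}  # Tune's upper bound is exclusive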
|
the-stack_0_22798 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from logging.config import dictConfig
import datetime
from ._compat import bytes, str
def setup_logger(level=None):
dictConfig({
'version': 1,
'disable_existing_loggers': False,
'root': {
'handlers': ['console'],
'level': 'INFO',
},
'loggers': {
'meepo': {
'handlers': ['console'],
'propagate': False,
'level': level or 'INFO',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console'
},
},
'formatters': {
'console': {
'format': '%(asctime)s [%(levelname)s] [%(name)s][%(process)d]'
': %(message)s',
},
}
})
def cast_bytes(s, encoding='utf8', errors='strict'):
"""cast str or bytes to bytes"""
if isinstance(s, bytes):
return s
elif isinstance(s, str):
return s.encode(encoding, errors)
else:
raise TypeError("Expected unicode or bytes, got %r" % s)
b = cast_bytes
def cast_str(s, encoding='utf8', errors='strict'):
"""cast bytes or str to str"""
if isinstance(s, bytes):
return s.decode(encoding, errors)
elif isinstance(s, str):
return s
else:
raise TypeError("Expected unicode or bytes, got %r" % s)
s = cast_str
def cast_datetime(ts, fmt=None):
"""cast timestamp to datetime or date str"""
dt = datetime.datetime.fromtimestamp(ts)
if fmt:
return dt.strftime(fmt)
return dt
d = cast_datetime
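# Illustrative usage (not from the original source):
#   b(u"meepo")       -> b'meepo'
#   s(b"meepo")       -> 'meepo'
#   d(0, "%Y-%m-%d")  -> the Unix epoch formatted as a date string in local time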
|
the-stack_0_22799 | # Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, Iterable, List, Set
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.types import GroupID, JsonDict, get_domain_from_id
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
def _create_rerouter(func_name):
"""Returns an async function that looks at the group id and calls the function
on federation or the local group server if the group is local
"""
async def f(self, group_id, *args, **kwargs):
if not GroupID.is_valid(group_id):
raise SynapseError(400, "%s is not a legal group ID" % (group_id,))
if self.is_mine_id(group_id):
return await getattr(self.groups_server_handler, func_name)(
group_id, *args, **kwargs
)
else:
destination = get_domain_from_id(group_id)
try:
return await getattr(self.transport_client, func_name)(
destination, group_id, *args, **kwargs
)
except HttpResponseException as e:
# Capture errors returned by the remote homeserver and
# re-throw specific errors as SynapseErrors. This is so
# when the remote end responds with things like 403 Not
# In Group, we can communicate that to the client instead
# of a 500.
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
return f
class GroupsLocalWorkerHandler:
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastore()
self.room_list_handler = hs.get_room_list_handler()
self.groups_server_handler = hs.get_groups_server_handler()
self.transport_client = hs.get_federation_transport_client()
self.auth = hs.get_auth()
self.clock = hs.get_clock()
self.keyring = hs.get_keyring()
self.is_mine_id = hs.is_mine_id
self.signing_key = hs.signing_key
self.server_name = hs.hostname
self.notifier = hs.get_notifier()
self.attestations = hs.get_groups_attestation_signing()
self.profile_handler = hs.get_profile_handler()
# The following functions merely route the query to the local groups server
# or federation depending on if the group is local or remote
get_group_profile = _create_rerouter("get_group_profile")
get_rooms_in_group = _create_rerouter("get_rooms_in_group")
get_invited_users_in_group = _create_rerouter("get_invited_users_in_group")
get_group_category = _create_rerouter("get_group_category")
get_group_categories = _create_rerouter("get_group_categories")
get_group_role = _create_rerouter("get_group_role")
get_group_roles = _create_rerouter("get_group_roles")
async def get_group_summary(
self, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get the group summary for a group.
If the group is remote we check that the users have valid attestations.
"""
if self.is_mine_id(group_id):
res = await self.groups_server_handler.get_group_summary(
group_id, requester_user_id
)
else:
try:
res = await self.transport_client.get_group_summary(
get_domain_from_id(group_id), group_id, requester_user_id
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
group_server_name = get_domain_from_id(group_id)
# Loop through the users and validate the attestations.
chunk = res["users_section"]["users"]
valid_users = []
for entry in chunk:
g_user_id = entry["user_id"]
attestation = entry.pop("attestation", {})
try:
if get_domain_from_id(g_user_id) != group_server_name:
await self.attestations.verify_attestation(
attestation,
group_id=group_id,
user_id=g_user_id,
server_name=get_domain_from_id(g_user_id),
)
valid_users.append(entry)
except Exception as e:
logger.info("Failed to verify user is in group: %s", e)
res["users_section"]["users"] = valid_users
res["users_section"]["users"].sort(key=lambda e: e.get("order", 0))
res["rooms_section"]["rooms"].sort(key=lambda e: e.get("order", 0))
# Add `is_publicised` flag to indicate whether the user has publicised their
# membership of the group on their profile
result = await self.store.get_publicised_groups_for_user(requester_user_id)
is_publicised = group_id in result
res.setdefault("user", {})["is_publicised"] = is_publicised
return res
async def get_users_in_group(
self, group_id: str, requester_user_id: str
) -> JsonDict:
"""Get users in a group"""
if self.is_mine_id(group_id):
return await self.groups_server_handler.get_users_in_group(
group_id, requester_user_id
)
group_server_name = get_domain_from_id(group_id)
try:
res = await self.transport_client.get_users_in_group(
get_domain_from_id(group_id), group_id, requester_user_id
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
chunk = res["chunk"]
valid_entries = []
for entry in chunk:
g_user_id = entry["user_id"]
attestation = entry.pop("attestation", {})
try:
if get_domain_from_id(g_user_id) != group_server_name:
await self.attestations.verify_attestation(
attestation,
group_id=group_id,
user_id=g_user_id,
server_name=get_domain_from_id(g_user_id),
)
valid_entries.append(entry)
except Exception as e:
logger.info("Failed to verify user is in group: %s", e)
res["chunk"] = valid_entries
return res
async def get_joined_groups(self, user_id: str) -> JsonDict:
group_ids = await self.store.get_joined_groups(user_id)
return {"groups": group_ids}
async def get_publicised_groups_for_user(self, user_id: str) -> JsonDict:
if self.hs.is_mine_id(user_id):
result = await self.store.get_publicised_groups_for_user(user_id)
# Check AS associated groups for this user - this depends on the
# RegExps in the AS registration file (under `users`)
for app_service in self.store.get_app_services():
result.extend(app_service.get_groups_for_user(user_id))
return {"groups": result}
else:
try:
bulk_result = await self.transport_client.bulk_get_publicised_groups(
get_domain_from_id(user_id), [user_id]
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
result = bulk_result.get("users", {}).get(user_id)
# TODO: Verify attestations
return {"groups": result}
async def bulk_get_publicised_groups(
self, user_ids: Iterable[str], proxy: bool = True
) -> JsonDict:
destinations: Dict[str, Set[str]] = {}
local_users = set()
for user_id in user_ids:
if self.hs.is_mine_id(user_id):
local_users.add(user_id)
else:
destinations.setdefault(get_domain_from_id(user_id), set()).add(user_id)
if not proxy and destinations:
raise SynapseError(400, "Some user_ids are not local")
results = {}
failed_results: List[str] = []
for destination, dest_user_ids in destinations.items():
try:
r = await self.transport_client.bulk_get_publicised_groups(
destination, list(dest_user_ids)
)
results.update(r["users"])
except Exception:
failed_results.extend(dest_user_ids)
for uid in local_users:
results[uid] = await self.store.get_publicised_groups_for_user(uid)
# Check AS associated groups for this user - this depends on the
# RegExps in the AS registration file (under `users`)
for app_service in self.store.get_app_services():
results[uid].extend(app_service.get_groups_for_user(uid))
return {"users": results}
class GroupsLocalHandler(GroupsLocalWorkerHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
# Ensure attestations get renewed
hs.get_groups_attestation_renewer()
# The following functions merely route the query to the local groups server
# or federation depending on if the group is local or remote
update_group_profile = _create_rerouter("update_group_profile")
add_room_to_group = _create_rerouter("add_room_to_group")
update_room_in_group = _create_rerouter("update_room_in_group")
remove_room_from_group = _create_rerouter("remove_room_from_group")
update_group_summary_room = _create_rerouter("update_group_summary_room")
delete_group_summary_room = _create_rerouter("delete_group_summary_room")
update_group_category = _create_rerouter("update_group_category")
delete_group_category = _create_rerouter("delete_group_category")
update_group_summary_user = _create_rerouter("update_group_summary_user")
delete_group_summary_user = _create_rerouter("delete_group_summary_user")
update_group_role = _create_rerouter("update_group_role")
delete_group_role = _create_rerouter("delete_group_role")
set_group_join_policy = _create_rerouter("set_group_join_policy")
async def create_group(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Create a group"""
logger.info("Asking to create group with ID: %r", group_id)
if self.is_mine_id(group_id):
res = await self.groups_server_handler.create_group(
group_id, user_id, content
)
local_attestation = None
remote_attestation = None
else:
raise SynapseError(400, "Unable to create remote groups")
is_publicised = content.get("publicise", False)
token = await self.store.register_user_group_membership(
group_id,
user_id,
membership="join",
is_admin=True,
local_attestation=local_attestation,
remote_attestation=remote_attestation,
is_publicised=is_publicised,
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
return res
async def join_group(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Request to join a group"""
if self.is_mine_id(group_id):
await self.groups_server_handler.join_group(group_id, user_id, content)
local_attestation = None
remote_attestation = None
else:
local_attestation = self.attestations.create_attestation(group_id, user_id)
content["attestation"] = local_attestation
try:
res = await self.transport_client.join_group(
get_domain_from_id(group_id), group_id, user_id, content
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
remote_attestation = res["attestation"]
await self.attestations.verify_attestation(
remote_attestation,
group_id=group_id,
user_id=user_id,
server_name=get_domain_from_id(group_id),
)
# TODO: Check that the group is public and we're being added publicly
is_publicised = content.get("publicise", False)
token = await self.store.register_user_group_membership(
group_id,
user_id,
membership="join",
is_admin=False,
local_attestation=local_attestation,
remote_attestation=remote_attestation,
is_publicised=is_publicised,
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
return {}
async def accept_invite(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""Accept an invite to a group"""
if self.is_mine_id(group_id):
await self.groups_server_handler.accept_invite(group_id, user_id, content)
local_attestation = None
remote_attestation = None
else:
local_attestation = self.attestations.create_attestation(group_id, user_id)
content["attestation"] = local_attestation
try:
res = await self.transport_client.accept_group_invite(
get_domain_from_id(group_id), group_id, user_id, content
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
remote_attestation = res["attestation"]
await self.attestations.verify_attestation(
remote_attestation,
group_id=group_id,
user_id=user_id,
server_name=get_domain_from_id(group_id),
)
# TODO: Check that the group is public and we're being added publicly
is_publicised = content.get("publicise", False)
token = await self.store.register_user_group_membership(
group_id,
user_id,
membership="join",
is_admin=False,
local_attestation=local_attestation,
remote_attestation=remote_attestation,
is_publicised=is_publicised,
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
return {}
async def invite(
self, group_id: str, user_id: str, requester_user_id: str, config: JsonDict
) -> JsonDict:
"""Invite a user to a group"""
content = {"requester_user_id": requester_user_id, "config": config}
if self.is_mine_id(group_id):
res = await self.groups_server_handler.invite_to_group(
group_id, user_id, requester_user_id, content
)
else:
try:
res = await self.transport_client.invite_to_group(
get_domain_from_id(group_id),
group_id,
user_id,
requester_user_id,
content,
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
return res
async def on_invite(
self, group_id: str, user_id: str, content: JsonDict
) -> JsonDict:
"""One of our users were invited to a group"""
# TODO: Support auto join and rejection
if not self.is_mine_id(user_id):
raise SynapseError(400, "User not on this server")
local_profile = {}
if "profile" in content:
if "name" in content["profile"]:
local_profile["name"] = content["profile"]["name"]
if "avatar_url" in content["profile"]:
local_profile["avatar_url"] = content["profile"]["avatar_url"]
token = await self.store.register_user_group_membership(
group_id,
user_id,
membership="invite",
content={"profile": local_profile, "inviter": content["inviter"]},
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
try:
user_profile = await self.profile_handler.get_profile(user_id)
except Exception as e:
logger.warning("No profile for user %s: %s", user_id, e)
user_profile = {}
return {"state": "invite", "user_profile": user_profile}
async def remove_user_from_group(
self, group_id: str, user_id: str, requester_user_id: str, content: JsonDict
) -> JsonDict:
"""Remove a user from a group"""
if user_id == requester_user_id:
token = await self.store.register_user_group_membership(
group_id, user_id, membership="leave"
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
# TODO: Should probably remember that we tried to leave so that we can
# retry if the group server is currently down.
if self.is_mine_id(group_id):
res = await self.groups_server_handler.remove_user_from_group(
group_id, user_id, requester_user_id, content
)
else:
content["requester_user_id"] = requester_user_id
try:
res = await self.transport_client.remove_user_from_group(
get_domain_from_id(group_id),
group_id,
requester_user_id,
user_id,
content,
)
except HttpResponseException as e:
raise e.to_synapse_error()
except RequestSendFailed:
raise SynapseError(502, "Failed to contact group server")
return res
async def user_removed_from_group(
self, group_id: str, user_id: str, content: JsonDict
) -> None:
"""One of our users was removed/kicked from a group"""
# TODO: Check if user in group
token = await self.store.register_user_group_membership(
group_id, user_id, membership="leave"
)
self.notifier.on_new_event("groups_key", token, users=[user_id])
|
the-stack_0_22800 | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.ops.gather import Gather
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph
class TestGatherPartialInfer(unittest.TestCase):
@staticmethod
def _create_graph():
nodes_attributes = {'gather_input': {'shape': None, 'value': None, 'kind': 'data'},
'gather_input2': {'shape': None, 'value': None, 'kind': 'data'},
'gather_node': {'op': 'Gather', 'kind': 'op'},
'gather_output': {'shape': None, 'value': None, 'kind': 'data'}
}
return build_graph(nodes_attributes,
[
('gather_input', 'gather_node'), ('gather_node', 'gather_output'), ('gather_input2', 'gather_node')
],
{
'gather_input': {'shape': int64_array([10, 15]), 'value': np.ones((3, 15))},
'gather_input2': {'shape': int64_array([2]), 'value': np.array([0, 2])},
'gather_node': {'axis': 0},
})
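    # The graph gathers indices [0, 2] along axis 0, so the inferred output
    # shape should be (2, 15) and the inferred value an array of ones.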
def test_gather_infer(self):
graph = self._create_graph()
gather_node = Node(graph, 'gather_node')
Gather.infer(gather_node)
exp_shape = int64_array([2, 15])
res_shape = graph.node['gather_output']['shape']
res_value = graph.node['gather_output']['value']
self.assertTrue(np.array_equal(exp_shape, res_shape),
'shapes do not match expected: {} and given: {}'.format(exp_shape, res_shape))
        self.assertTrue(np.array_equal(res_value, np.ones(exp_shape)),
                        'values do not match the expected array of ones with shape {}'.format(exp_shape))
|
the-stack_0_22801 | #
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
"""
This module contains declarations and classes for XML Schema constraint facets.
"""
import re
import operator
from collections.abc import MutableSequence
from elementpath import XPath2Parser, ElementPathError
from elementpath.datatypes import XSD_BUILTIN_TYPES
from ..qnames import XSD_LENGTH, XSD_MIN_LENGTH, XSD_MAX_LENGTH, XSD_ENUMERATION, \
XSD_INTEGER, XSD_WHITE_SPACE, XSD_PATTERN, XSD_MAX_INCLUSIVE, XSD_MAX_EXCLUSIVE, \
XSD_MIN_INCLUSIVE, XSD_MIN_EXCLUSIVE, XSD_TOTAL_DIGITS, XSD_FRACTION_DIGITS, \
XSD_ASSERTION, XSD_DECIMAL, XSD_EXPLICIT_TIMEZONE, XSD_NOTATION_TYPE, \
XSD_BASE64_BINARY, XSD_HEX_BINARY, XSD_QNAME
from ..helpers import count_digits
from ..regex import get_python_regex
from .exceptions import XMLSchemaValidationError, XMLSchemaDecodeError
from .xsdbase import XsdComponent
class XsdFacet(XsdComponent):
"""
XML Schema constraining facets base class.
"""
fixed = False
def __init__(self, elem, schema, parent, base_type):
self.base_type = base_type
super(XsdFacet, self).__init__(elem, schema, parent)
def __repr__(self):
return '%s(value=%r, fixed=%r)' % (self.__class__.__name__, self.value, self.fixed)
def __call__(self, value):
try:
yield from self.validator(value)
except (TypeError, ValueError) as err:
yield XMLSchemaValidationError(self, value, str(err))
def _parse(self):
super(XsdFacet, self)._parse()
if 'fixed' in self.elem.attrib and self.elem.attrib['fixed'] in ('true', '1'):
self.fixed = True
base_facet = self.base_facet
self.base_value = None if base_facet is None else base_facet.value
try:
self._parse_value(self.elem)
except (KeyError, ValueError, XMLSchemaDecodeError) as err:
self.value = None
self.parse_error(str(err))
else:
if base_facet is not None and base_facet.fixed and \
base_facet.value is not None and self.value != base_facet.value:
self.parse_error(
"%r facet value is fixed to %r" % (self.elem.tag, base_facet.value)
)
def _parse_value(self, elem):
self.value = elem.attrib['value']
@property
def built(self):
return True
@property
def base_facet(self):
"""
An object of the same type if the instance has a base facet, `None` otherwise.
"""
base_type = self.base_type
tag = self.elem.tag
while True:
try:
return base_type.facets[tag]
except (AttributeError, KeyError):
if hasattr(base_type, 'base_type'):
base_type = base_type.base_type
else:
return None
@staticmethod
def validator(_):
return ()
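    # Subclasses replace ``validator`` with a bound checker (usually assigned in
    # _parse_value); checkers yield XMLSchemaValidationError instances instead of
    # raising, so callers can collect every violation for a value.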
class XsdWhiteSpaceFacet(XsdFacet):
"""
XSD *whiteSpace* facet.
.. <whiteSpace
fixed = boolean : false
id = ID
value = (collapse | preserve | replace)
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</whiteSpace>
"""
_ADMITTED_TAGS = XSD_WHITE_SPACE,
def _parse_value(self, elem):
self.value = value = elem.attrib['value']
if self.base_value == 'collapse' and value in ('preserve', 'replace'):
self.parse_error("facet value can be only 'collapse'")
elif self.base_value == 'replace' and value == 'preserve':
self.parse_error("facet value can be only 'replace' or 'collapse'")
elif value == 'replace':
self.validator = self.replace_white_space_validator
elif value == 'collapse':
self.validator = self.collapse_white_space_validator
elif value != 'preserve':
self.parse_error("attribute 'value' must be one of "
"('preserve', 'replace', 'collapse').")
def replace_white_space_validator(self, x):
if '\t' in x or '\n' in x:
yield XMLSchemaValidationError(self, x)
def collapse_white_space_validator(self, x):
if '\t' in x or '\n' in x or ' ' in x:
yield XMLSchemaValidationError(self, x)
class XsdLengthFacet(XsdFacet):
"""
XSD *length* facet.
.. <length
fixed = boolean : false
id = ID
value = nonNegativeInteger
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</length>
"""
_ADMITTED_TAGS = XSD_LENGTH,
def _parse_value(self, elem):
self.value = int(elem.attrib['value'])
if self.base_value is not None and self.value != self.base_value:
self.parse_error("base type has a different 'length': %r" % self.base_value)
primitive_type = getattr(self.base_type, 'primitive_type', None)
if primitive_type is None:
self.validator = self.length_validator
elif primitive_type.name == XSD_HEX_BINARY:
self.validator = self.hex_length_validator
elif primitive_type.name == XSD_BASE64_BINARY:
self.validator = self.base64_length_validator
elif primitive_type.name in (XSD_QNAME, XSD_NOTATION_TYPE):
pass # See: https://www.w3.org/Bugs/Public/show_bug.cgi?id=4009 and id=4049
else:
self.validator = self.length_validator
def length_validator(self, x):
if len(x) != self.value:
yield XMLSchemaValidationError(self, x, "length has to be %r." % self.value)
def hex_length_validator(self, x):
if len(x) != self.value * 2:
yield XMLSchemaValidationError(self, x, "binary length has to be %r." % self.value)
def base64_length_validator(self, x):
x = x.replace(' ', '')
if (len(x) // 4 * 3 - (x[-1] == '=') - (x[-2] == '=')) != self.value:
yield XMLSchemaValidationError(self, x, "binary length has to be %r." % self.value)
class XsdMinLengthFacet(XsdFacet):
"""
XSD *minLength* facet.
.. <minLength
fixed = boolean : false
id = ID
value = nonNegativeInteger
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</minLength>
"""
_ADMITTED_TAGS = XSD_MIN_LENGTH,
def _parse_value(self, elem):
self.value = int(elem.attrib['value'])
if self.base_value is not None and self.value < self.base_value:
self.parse_error("base type has a greater 'minLength': %r" % self.base_value)
primitive_type = getattr(self.base_type, 'primitive_type', None)
if primitive_type is None:
self.validator = self.min_length_validator
elif primitive_type.name == XSD_HEX_BINARY:
self.validator = self.hex_min_length_validator
elif primitive_type.name == XSD_BASE64_BINARY:
self.validator = self.base64_min_length_validator
elif primitive_type.name not in (XSD_QNAME, XSD_NOTATION_TYPE):
self.validator = self.min_length_validator
def min_length_validator(self, x):
if len(x) < self.value:
yield XMLSchemaValidationError(
self, x, "length cannot be lesser than %r." % self.value
)
def hex_min_length_validator(self, x):
if len(x) < self.value * 2:
yield XMLSchemaValidationError(
self, x, "binary length cannot be lesser than %r." % self.value
)
def base64_min_length_validator(self, x):
x = x.replace(' ', '')
if (len(x) // 4 * 3 - (x[-1] in ('=', 61)) - (x[-2] in ('=', 61))) < self.value:
yield XMLSchemaValidationError(
self, x, "binary length cannot be lesser than %r." % self.value
)
class XsdMaxLengthFacet(XsdFacet):
"""
XSD *maxLength* facet.
.. <maxLength
fixed = boolean : false
id = ID
value = nonNegativeInteger
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</maxLength>
"""
_ADMITTED_TAGS = XSD_MAX_LENGTH,
def _parse_value(self, elem):
self.value = int(elem.attrib['value'])
if self.base_value is not None and self.value > self.base_value:
self.parse_error("base type has a lesser 'maxLength': %r" % self.base_value)
primitive_type = getattr(self.base_type, 'primitive_type', None)
if primitive_type is None:
self.validator = self.max_length_validator
elif primitive_type.name == XSD_HEX_BINARY:
self.validator = self.hex_max_length_validator
elif primitive_type.name == XSD_BASE64_BINARY:
self.validator = self.base64_max_length_validator
elif primitive_type.name not in (XSD_QNAME, XSD_NOTATION_TYPE):
self.validator = self.max_length_validator
def max_length_validator(self, x):
if len(x) > self.value:
yield XMLSchemaValidationError(
self, x, "length cannot be greater than %r." % self.value
)
def hex_max_length_validator(self, x):
if len(x) > self.value * 2:
yield XMLSchemaValidationError(
self, x, "binary length cannot be greater than %r." % self.value
)
def base64_max_length_validator(self, x):
x = x.replace(' ', '')
if (len(x) // 4 * 3 - (x[-1] == '=') - (x[-2] == '=')) > self.value:
yield XMLSchemaValidationError(
self, x, "binary length cannot be greater than %r." % self.value
)
class XsdMinInclusiveFacet(XsdFacet):
"""
XSD *minInclusive* facet.
.. <minInclusive
fixed = boolean : false
id = ID
value = anySimpleType
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</minInclusive>
"""
_ADMITTED_TAGS = XSD_MIN_INCLUSIVE,
def _parse_value(self, elem):
self.value, errors = self.base_type.decode(elem.attrib['value'], validation='lax')
for e in errors:
if not isinstance(e.validator, self.__class__) or e.validator.value != self.value:
raise e
facet = self.base_type.get_facet(XSD_MIN_EXCLUSIVE)
if facet is not None and facet.value >= self.value:
self.parse_error("minimum value of base_type is greater")
facet = self.base_type.get_facet(XSD_MIN_INCLUSIVE)
if facet is not None and facet.value > self.value:
self.parse_error("minimum value of base_type is greater")
facet = self.base_type.get_facet(XSD_MAX_EXCLUSIVE)
if facet is not None and facet.value <= self.value:
self.parse_error("maximum value of base_type is lesser")
facet = self.base_type.get_facet(XSD_MAX_INCLUSIVE)
if facet is not None and facet.value < self.value:
self.parse_error("maximum value of base_type is lesser")
def __call__(self, value):
try:
if value < self.value:
reason = "value has to be greater or equal than %r." % self.value
yield XMLSchemaValidationError(self, value, reason)
except (TypeError, ValueError) as err:
yield XMLSchemaValidationError(self, value, str(err))
class XsdMinExclusiveFacet(XsdFacet):
"""
XSD *minExclusive* facet.
.. <minExclusive
fixed = boolean : false
id = ID
value = anySimpleType
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</minExclusive>
"""
_ADMITTED_TAGS = XSD_MIN_EXCLUSIVE,
def _parse_value(self, elem):
self.value, errors = self.base_type.decode(elem.attrib['value'], validation='lax')
for e in errors:
if not isinstance(e.validator, self.__class__) or e.validator.value != self.value:
raise e
facet = self.base_type.get_facet(XSD_MIN_EXCLUSIVE)
if facet is not None and facet.value > self.value:
self.parse_error("minimum value of base_type is greater")
facet = self.base_type.get_facet(XSD_MIN_INCLUSIVE)
if facet is not None and facet.value > self.value:
self.parse_error("minimum value of base_type is greater")
facet = self.base_type.get_facet(XSD_MAX_EXCLUSIVE)
if facet is not None and facet.value <= self.value:
self.parse_error("maximum value of base_type is lesser")
facet = self.base_type.get_facet(XSD_MAX_INCLUSIVE)
if facet is not None and facet.value <= self.value:
self.parse_error("maximum value of base_type is lesser")
def __call__(self, value):
try:
if value <= self.value:
reason = "value has to be greater than %r." % self.value
yield XMLSchemaValidationError(self, value, reason)
except (TypeError, ValueError) as err:
yield XMLSchemaValidationError(self, value, str(err))
class XsdMaxInclusiveFacet(XsdFacet):
"""
XSD *maxInclusive* facet.
.. <maxInclusive
fixed = boolean : false
id = ID
value = anySimpleType
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</maxInclusive>
"""
_ADMITTED_TAGS = XSD_MAX_INCLUSIVE,
def _parse_value(self, elem):
self.value, errors = self.base_type.decode(elem.attrib['value'], validation='lax')
for e in errors:
if not isinstance(e.validator, self.__class__) or e.validator.value != self.value:
raise e
facet = self.base_type.get_facet(XSD_MIN_EXCLUSIVE)
if facet is not None and facet.value >= self.value:
self.parse_error("minimum value of base_type is greater")
facet = self.base_type.get_facet(XSD_MIN_INCLUSIVE)
if facet is not None and facet.value > self.value:
self.parse_error("minimum value of base_type is greater")
facet = self.base_type.get_facet(XSD_MAX_EXCLUSIVE)
if facet is not None and facet.value <= self.value:
self.parse_error("maximum value of base_type is lesser")
facet = self.base_type.get_facet(XSD_MAX_INCLUSIVE)
if facet is not None and facet.value < self.value:
self.parse_error("maximum value of base_type is lesser")
def __call__(self, value):
try:
if value > self.value:
reason = "value has to be lesser or equal than %r." % self.value
yield XMLSchemaValidationError(self, value, reason)
except (TypeError, ValueError) as err:
yield XMLSchemaValidationError(self, value, str(err))
class XsdMaxExclusiveFacet(XsdFacet):
"""
XSD *maxExclusive* facet.
.. <maxExclusive
fixed = boolean : false
id = ID
value = anySimpleType
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</maxExclusive>
"""
_ADMITTED_TAGS = XSD_MAX_EXCLUSIVE,
def _parse_value(self, elem):
self.value, errors = self.base_type.decode(elem.attrib['value'], validation='lax')
for e in errors:
if not isinstance(e.validator, self.__class__) or e.validator.value != self.value:
raise e
facet = self.base_type.get_facet(XSD_MIN_EXCLUSIVE)
if facet is not None and facet.value >= self.value:
self.parse_error("minimum value of base_type is greater")
facet = self.base_type.get_facet(XSD_MIN_INCLUSIVE)
if facet is not None and facet.value >= self.value:
self.parse_error("minimum value of base_type is greater")
facet = self.base_type.get_facet(XSD_MAX_EXCLUSIVE)
if facet is not None and facet.value < self.value:
self.parse_error("maximum value of base_type is lesser")
facet = self.base_type.get_facet(XSD_MAX_INCLUSIVE)
if facet is not None and facet.value < self.value:
self.parse_error("maximum value of base_type is lesser")
def __call__(self, value):
try:
if value >= self.value:
reason = "value has to be lesser than %r" % self.value
yield XMLSchemaValidationError(self, value, reason)
except (TypeError, ValueError) as err:
yield XMLSchemaValidationError(self, value, str(err))
class XsdTotalDigitsFacet(XsdFacet):
"""
XSD *totalDigits* facet.
.. <totalDigits
fixed = boolean : false
id = ID
value = positiveInteger
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</totalDigits>
"""
_ADMITTED_TAGS = XSD_TOTAL_DIGITS,
def _parse_value(self, elem):
self.value = int(elem.attrib['value'])
if self.value < 1:
raise ValueError("'value' must be greater or equal than 1")
self.validator = self.total_digits_validator
def total_digits_validator(self, x):
try:
if operator.add(*count_digits(x)) > self.value:
reason = "the number of digits is greater than %r." % self.value
yield XMLSchemaValidationError(self, x, reason)
except (TypeError, ValueError, ArithmeticError) as err:
yield XMLSchemaValidationError(self, x, str(err))
class XsdFractionDigitsFacet(XsdFacet):
"""
XSD *fractionDigits* facet.
.. <fractionDigits
fixed = boolean : false
id = ID
value = nonNegativeInteger
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</fractionDigits>
"""
_ADMITTED_TAGS = XSD_FRACTION_DIGITS,
def __init__(self, elem, schema, parent, base_type):
super(XsdFractionDigitsFacet, self).__init__(elem, schema, parent, base_type)
if not base_type.is_derived(self.maps.types[XSD_DECIMAL]):
self.parse_error(
"fractionDigits facet can be applied only to types derived from xs:decimal"
)
def _parse_value(self, elem):
self.value = int(elem.attrib['value'])
if self.value < 0:
raise ValueError("'value' must be greater or equal than 0")
elif self.value > 0 and self.base_type.is_derived(self.maps.types[XSD_INTEGER]):
raise ValueError("fractionDigits facet value has to be 0 "
"for types derived from xs:integer.")
self.validator = self.fraction_digits_validator
def fraction_digits_validator(self, x):
try:
if count_digits(x)[1] > self.value:
reason = "the number of fraction digits is greater than %r." % self.value
yield XMLSchemaValidationError(self, x, reason)
except (TypeError, ValueError, ArithmeticError) as err:
yield XMLSchemaValidationError(self, x, str(err))
class XsdExplicitTimezoneFacet(XsdFacet):
"""
XSD 1.1 *explicitTimezone* facet.
.. <explicitTimezone
fixed = boolean : false
id = ID
value = NCName
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</explicitTimezone>
"""
_ADMITTED_TAGS = XSD_EXPLICIT_TIMEZONE,
def _parse_value(self, elem):
self.value = value = elem.attrib['value']
if value == 'prohibited':
self.validator = self.prohibited_timezone_validator
elif value == 'required':
self.validator = self.required_timezone_validator
elif value != 'optional':
self.parse_error(
"attribute 'value' must be one of ('required', 'prohibited', 'optional')."
)
def required_timezone_validator(self, x):
if x.tzinfo is None:
yield XMLSchemaValidationError(
self, x, "time zone required for value %r." % self.value
)
def prohibited_timezone_validator(self, x):
if x.tzinfo is not None:
yield XMLSchemaValidationError(
self, x, "time zone prohibited for value %r." % self.value
)
class XsdEnumerationFacets(MutableSequence, XsdFacet):
"""
    Sequence of XSD *enumeration* facets. Values are validated if they match any of the enumeration values.
.. <enumeration
id = ID
value = anySimpleType
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</enumeration>
"""
_ADMITTED_TAGS = {XSD_ENUMERATION}
def __init__(self, elem, schema, parent, base_type):
XsdFacet.__init__(self, elem, schema, parent, base_type)
def _parse(self):
super(XsdFacet, self)._parse()
self._elements = [self.elem]
self.enumeration = [self._parse_value(self.elem)]
def _parse_value(self, elem):
try:
value = self.base_type.decode(elem.attrib['value'], namespaces=self.schema.namespaces)
except KeyError:
self.parse_error("missing 'value' attribute", elem)
except XMLSchemaDecodeError as err:
self.parse_error(err, elem)
else:
if self.base_type.name == XSD_NOTATION_TYPE:
try:
notation_qname = self.schema.resolve_qname(value)
except (KeyError, ValueError, RuntimeError) as err:
self.parse_error(err, elem)
else:
if notation_qname not in self.maps.notations:
msg = "value {!r} must match a notation declaration"
self.parse_error(msg.format(value), elem)
return value
# Implements the abstract methods of MutableSequence
def __getitem__(self, i):
return self._elements[i]
def __setitem__(self, i, elem):
self._elements[i] = elem
self.enumeration[i] = self._parse_value(elem)
def __delitem__(self, i):
del self._elements[i]
del self.enumeration[i]
def __len__(self):
return len(self._elements)
def insert(self, i, elem):
self._elements.insert(i, elem)
self.enumeration.insert(i, self._parse_value(elem))
def __repr__(self):
if len(self.enumeration) > 5:
return '%s(%r)' % (
self.__class__.__name__, '[%s, ...]' % ', '.join(map(repr, self.enumeration[:5]))
)
else:
return '%s(%r)' % (self.__class__.__name__, self.enumeration)
def __call__(self, value):
if value not in self.enumeration:
reason = "invalid value %r, it must be one of %r" % (value, self.enumeration)
yield XMLSchemaValidationError(self, value, reason=reason)
class XsdPatternFacets(MutableSequence, XsdFacet):
"""
    Sequence of XSD *pattern* facets. Values are validated if they match any of the patterns.
.. <pattern
id = ID
value = string
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</pattern>
"""
_ADMITTED_TAGS = {XSD_PATTERN}
def __init__(self, elem, schema, parent, base_type):
XsdFacet.__init__(self, elem, schema, parent, base_type)
def _parse(self):
super(XsdFacet, self)._parse()
self._elements = [self.elem]
self.patterns = [self._parse_value(self.elem)]
def _parse_value(self, elem):
try:
return re.compile(get_python_regex(elem.attrib['value'], self.xsd_version))
except KeyError:
self.parse_error("missing 'value' attribute", elem)
return re.compile(r'^$')
except (re.error, XMLSchemaDecodeError) as err:
self.parse_error(err, elem)
return re.compile(r'^$')
# Implements the abstract methods of MutableSequence
def __getitem__(self, i):
return self._elements[i]
def __setitem__(self, i, elem):
self._elements[i] = elem
self.patterns[i] = self._parse_value(elem)
def __delitem__(self, i):
del self._elements[i]
del self.patterns[i]
def __len__(self):
return len(self._elements)
def insert(self, i, elem):
self._elements.insert(i, elem)
self.patterns.insert(i, self._parse_value(elem))
def __repr__(self):
s = repr(self.regexps)
if len(s) < 70:
return '%s(%s)' % (self.__class__.__name__, s)
else:
return '%s(%s...\'])' % (self.__class__.__name__, s[:70])
def __call__(self, text):
try:
if all(pattern.match(text) is None for pattern in self.patterns):
msg = "value doesn't match any pattern of %r."
yield XMLSchemaValidationError(self, text, reason=msg % self.regexps)
except TypeError as err:
yield XMLSchemaValidationError(self, text, str(err))
@property
def regexps(self):
return [e.get('value', '') for e in self._elements]
class XsdAssertionXPathParser(XPath2Parser):
"""Parser for XSD 1.1 assertion facets."""
XsdAssertionXPathParser.unregister('last')
XsdAssertionXPathParser.unregister('position')
# noinspection PyUnusedLocal
@XsdAssertionXPathParser.method(XsdAssertionXPathParser.function('last', nargs=0))
def evaluate(self, context=None):
self.missing_context("Context item size is undefined")
# noinspection PyUnusedLocal
@XsdAssertionXPathParser.method(XsdAssertionXPathParser.function('position', nargs=0))
def evaluate(self, context=None):
self.missing_context("Context item position is undefined")
XsdAssertionXPathParser.build_tokenizer()
class XsdAssertionFacet(XsdFacet):
"""
XSD 1.1 *assertion* facet for simpleType definitions.
.. <assertion
id = ID
test = an XPath expression
xpathDefaultNamespace = (anyURI | (##defaultNamespace | ##targetNamespace | ##local))
{any attributes with non-schema namespace . . .}>
Content: (annotation?)
</assertion>
"""
_ADMITTED_TAGS = {XSD_ASSERTION}
def __repr__(self):
return '%s(test=%r)' % (self.__class__.__name__, self.path)
def _parse(self):
super(XsdFacet, self)._parse()
try:
self.path = self.elem.attrib['test']
except KeyError as err:
self.parse_error(str(err), elem=self.elem)
self.path = 'true()'
try:
builtin_type_name = self.base_type.primitive_type.local_name
variables = {'value': XSD_BUILTIN_TYPES[builtin_type_name].value}
except AttributeError:
variables = {'value': XSD_BUILTIN_TYPES['anySimpleType'].value}
if 'xpathDefaultNamespace' in self.elem.attrib:
self.xpath_default_namespace = self._parse_xpath_default_namespace(self.elem)
else:
self.xpath_default_namespace = self.schema.xpath_default_namespace
self.parser = XsdAssertionXPathParser(self.namespaces, strict=False, variables=variables,
default_namespace=self.xpath_default_namespace)
try:
self.token = self.parser.parse(self.path)
except ElementPathError as err:
self.parse_error(err, elem=self.elem)
self.token = self.parser.parse('true()')
def __call__(self, value):
self.parser.variables['value'] = value
try:
if not self.token.evaluate():
msg = "value is not true with test path %r."
yield XMLSchemaValidationError(self, value, reason=msg % self.path)
except ElementPathError as err:
yield XMLSchemaValidationError(self, value, reason=str(err))
XSD_10_FACETS_BUILDERS = {
XSD_WHITE_SPACE: XsdWhiteSpaceFacet,
XSD_LENGTH: XsdLengthFacet,
XSD_MIN_LENGTH: XsdMinLengthFacet,
XSD_MAX_LENGTH: XsdMaxLengthFacet,
XSD_MIN_INCLUSIVE: XsdMinInclusiveFacet,
XSD_MIN_EXCLUSIVE: XsdMinExclusiveFacet,
XSD_MAX_INCLUSIVE: XsdMaxInclusiveFacet,
XSD_MAX_EXCLUSIVE: XsdMaxExclusiveFacet,
XSD_TOTAL_DIGITS: XsdTotalDigitsFacet,
XSD_FRACTION_DIGITS: XsdFractionDigitsFacet,
XSD_ENUMERATION: XsdEnumerationFacets,
XSD_PATTERN: XsdPatternFacets
}
XSD_11_FACETS_BUILDERS = XSD_10_FACETS_BUILDERS.copy()
XSD_11_FACETS_BUILDERS.update({
XSD_ASSERTION: XsdAssertionFacet,
XSD_EXPLICIT_TIMEZONE: XsdExplicitTimezoneFacet
})
XSD_10_FACETS = set(XSD_10_FACETS_BUILDERS)
XSD_11_FACETS = set(XSD_11_FACETS_BUILDERS)
XSD_10_LIST_FACETS = {XSD_LENGTH, XSD_MIN_LENGTH, XSD_MAX_LENGTH, XSD_PATTERN,
XSD_ENUMERATION, XSD_WHITE_SPACE}
XSD_11_LIST_FACETS = XSD_10_LIST_FACETS | {XSD_ASSERTION}
XSD_10_UNION_FACETS = {XSD_PATTERN, XSD_ENUMERATION}
XSD_11_UNION_FACETS = MULTIPLE_FACETS = {XSD_PATTERN, XSD_ENUMERATION, XSD_ASSERTION}
|
the-stack_0_22802 | # Copyright (c) 2018, Arm Limited and affiliates.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import pkg_resources
from .host_test_plugins import HostTestPluginBase
class HostTestPluginResetMethod_Mbed(HostTestPluginBase):
# Plugin interface
name = 'HostTestPluginResetMethod_Mbed'
type = 'ResetMethod'
stable = True
capabilities = ['default']
required_parameters = ['serial']
def __init__(self):
"""! ctor
@details We can check module version by referring to version attribute
import pkg_resources
        print(pkg_resources.require("mbed-host-tests")[0].version)
'2.7'
"""
HostTestPluginBase.__init__(self)
        self.re_float = re.compile(r"^\d+\.\d+")
pyserial_version = pkg_resources.require("pyserial")[0].version
self.pyserial_version = self.get_pyserial_version(pyserial_version)
self.is_pyserial_v3 = float(self.pyserial_version) >= 3.0
def get_pyserial_version(self, pyserial_version):
"""! Retrieve pyserial module version
@return Returns float with pyserial module number
"""
version = 3.0
m = self.re_float.search(pyserial_version)
if m:
try:
version = float(m.group(0))
except ValueError:
version = 3.0 # We will assume you've got latest (3.0+)
return version
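    # Illustrative examples of get_pyserial_version(): "2.7" -> 2.7, "3.4b1" -> 3.4; any
    # string the regexp cannot parse falls back to the 3.0 default above.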
def safe_sendBreak(self, serial):
"""! Closure for pyserial version dependant API calls
"""
if self.is_pyserial_v3:
return self._safe_sendBreak_v3_0(serial)
return self._safe_sendBreak_v2_7(serial)
def _safe_sendBreak_v2_7(self, serial):
"""! pyserial 2.7 API implementation of sendBreak/setBreak
@details
Below API is deprecated for pyserial 3.x versions!
http://pyserial.readthedocs.org/en/latest/pyserial_api.html#serial.Serial.sendBreak
http://pyserial.readthedocs.org/en/latest/pyserial_api.html#serial.Serial.setBreak
"""
result = True
try:
serial.sendBreak()
except:
# In Linux a termios.error is raised in sendBreak and in setBreak.
# The following setBreak() is needed to release the reset signal on the target mcu.
try:
serial.setBreak(False)
except:
result = False
return result
def _safe_sendBreak_v3_0(self, serial):
"""! pyserial 3.x API implementation of send_brea / break_condition
@details
http://pyserial.readthedocs.org/en/latest/pyserial_api.html#serial.Serial.send_break
http://pyserial.readthedocs.org/en/latest/pyserial_api.html#serial.Serial.break_condition
"""
result = True
try:
serial.send_break()
except:
            # In Linux a termios.error is raised in send_break and in break_condition.
# The following break_condition = False is needed to release the reset signal on the target mcu.
try:
serial.break_condition = False
except Exception as e:
self.print_plugin_error("Error while doing 'serial.break_condition = False' : %s"% str(e))
result = False
return result
def setup(self, *args, **kwargs):
"""! Configure plugin, this function should be called before plugin execute() method is used.
"""
return True
def execute(self, capability, *args, **kwargs):
"""! Executes capability by name
@param capability Capability name
@param args Additional arguments
@param kwargs Additional arguments
        @details Each capability may, for example, call a command-line program directly or execute a built-in Python function
@return Capability call return value
"""
if not kwargs['serial']:
self.print_plugin_error("Error: serial port not set (not opened?)")
return False
result = False
if self.check_parameters(capability, *args, **kwargs) is True:
if kwargs['serial']:
if capability == 'default':
serial = kwargs['serial']
result = self.safe_sendBreak(serial)
return result
def load_plugin():
"""! Returns plugin available in this module
"""
return HostTestPluginResetMethod_Mbed()
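# Minimal usage sketch (illustrative only; mbed-host-tests normally discovers and drives the
# plugin itself, and `opened_serial_port` is an assumed, already-open pyserial handle):
#
#     plugin = load_plugin()
#     plugin.setup()
#     ok = plugin.execute('default', serial=opened_serial_port)  # pulses a serial break to reset the target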
|
the-stack_0_22804 | import time
from direct.interval.IntervalGlobal import Sequence, Func
from direct.showbase.ShowBaseGlobal import *
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
from direct.showbase import DirectObject
from direct.actor import Actor
from direct.task import Task
from pandac.PandaModules import *
from pandac.PandaModules import CardMaker
from pirates.piratesgui.GuiPanel import *
from pirates.piratesgui import GuiButton
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesbase import Freebooter
from pirates.quest.QuestTaskDNA import PotionsTaskDNA
import PotionGlobals
class PotionRecipePicker(DirectFrame):
def __init__(self, potionGame):
DirectFrame.__init__(self, parent=potionGame.background, relief=None)
self.potionGame = potionGame
self.enabled = True
self.setupScene()
return
def setupScene(self):
self.background = self.attachNewNode('background')
guiAssets = loader.loadModel('models/minigames/pir_m_gui_pot_textureCard')
parch = guiAssets.find('**/pir_t_gui_pot_scroll')
parch.setTransparency(1, 1)
parch.setScale(1.9, 1, 3.4)
parch.setPos(0.5, 0, 0.0)
parch.copyTo(self.background)
self.title = DirectLabel(parent=self.background, relief=None, text=PLocalizer.PotionGui['RecipeList'], text_scale=PiratesGuiGlobals.TextScaleTitleSmall, text_align=TextNode.ACenter, text_fg=PotionGlobals.TextColor, text_wordwrap=30, pos=(0.57, 0, 0.77), textMayChange=0)
self.recipeList = DirectScrolledList(parent=self, numItemsVisible=23, pos=(0.25, 0, -0.4), decButton_pos=(0.72, 0.0, 1.04), incButton_pos=(0.72, 0.0, -0.18), decButton_hpr=(0, 0.0, 0), incButton_hpr=(0, 0.0, 180), itemFrame_pos=(0, 0, 1.1), decButton_scale=0.07, decButton_image=(guiAssets.find('**/pir_t_gui_pot_scrollbutton'), guiAssets.find('**/pir_t_gui_pot_scrollbuttonOn'), guiAssets.find('**/pir_t_gui_pot_scrollbuttonOn'), guiAssets.find('**/pir_t_gui_pot_scrollbuttonDisable')), decButton_image_scale=(2.0, 2.0, 2.0), decButton_relief=None, incButton_scale=0.07, incButton_image=(guiAssets.find('**/pir_t_gui_pot_scrollbutton'), guiAssets.find('**/pir_t_gui_pot_scrollbuttonOn'), guiAssets.find('**/pir_t_gui_pot_scrollbuttonOn'), guiAssets.find('**/pir_t_gui_pot_scrollbuttonDisable')), incButton_image_scale=(2.0, 2.0, 2.0), incButton_relief=None, itemFrame_scale=1.0, forceHeight=0.0616)
self.buttons = []
self.inactiveButtons = []
self.updateList()
guiAssets.removeNode()
return
def updateList(self):
self.recipeList.removeAndDestroyAllItems()
self.buttons = []
self.inactiveButtons = []
playerLevel = self.potionGame.dist.getPlayerPotionLevel()
notNew_list = self.potionGame.dist.getPlayerNotNewFlags()
for recipe in self.potionGame.recipes:
if playerLevel >= recipe.level:
recipe.enabled = True
else:
recipe.enabled = False
if recipe.enabled and recipe.potionID not in notNew_list:
recipe.haveMade = False
else:
recipe.haveMade = True
self.potionGame.recipes.sort()
for recipe in self.potionGame.recipes:
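            # Quest-only recipes are filtered first: the local `brewable` exception below is
            # raised as soon as an unfinished quest task still needs this potion, which keeps
            # the recipe in the list; otherwise the recipe is skipped entirely.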
if recipe.questOnly and localAvatar.getInventory():
class brewable(Exception):
pass
try:
for currQuest in localAvatar.getInventory().getQuestList():
bonusComplete = currQuest.isComplete(bonus=True)
primaryComplete = currQuest.isComplete()
if not bonusComplete or not primaryComplete:
tasks = currQuest.getQuestDNA().getTaskDNAs()
for currTask in tasks:
if isinstance(currTask, PotionsTaskDNA) and (PotionGlobals.getPotionItemID(recipe.potionID) == currTask.potionType and not primaryComplete or PotionGlobals.getPotionItemID(recipe.potionID) == currTask.potionTypeBonus and not bonusComplete):
raise brewable
                except brewable:
                    pass
                else:
                    continue
if recipe.level - playerLevel > 3 and not recipe.questOnly:
continue
buttonImage = None
recipe.loadIngredients()
buttonImageScale = 0.0
text = recipe.name
helptext = recipe.desc
if not recipe.haveMade and len(recipe.ingredients) > 0:
if recipe.questOnly:
iconText = PLocalizer.PotionGui['QuestLabel']
iconTextColor = PiratesGuiGlobals.TextFG13
else:
iconText = PLocalizer.PotionGui['NewLabel']
iconTextColor = PiratesGuiGlobals.TextFG1
guiAssets = loader.loadModel('models/minigames/pir_m_gui_pot_textureCard')
buttonImage = guiAssets.find('**/pir_t_gui_pot_seal').copyTo(NodePath())
buttonImageScale = 0.08
                buttonText = DirectLabel(parent=buttonImage, relief=None, text=iconText, text_scale=PiratesGuiGlobals.TextScaleLarge / buttonImageScale, text_font=PiratesGlobals.getPirateOutlineFont(), text_align=TextNode.ACenter, text_fg=iconTextColor, text_shadow=PiratesGuiGlobals.TextFG14, hpr=(0, 0, 20), pos=(-0.25, 0, 0), textMayChange=0)
guiAssets.removeNode()
if Freebooter.getPaidStatus(localAvatar.doId) or recipe.isFree:
cmd = self.potionGame.selectRecipe
buttonGeom = None
buttonGeomScale = 1
buttonGeomPos = (0, 0, 0)
args = None
else:
gui = loader.loadModel('models/gui/toplevel_gui')
buttonGeom = gui.find('**/pir_t_gui_gen_key_subscriber')
buttonGeomScale = 0.16
buttonGeomPos = (-0.05, 0, 0.01)
cmd = base.localAvatar.guiMgr.showNonPayer
args = ['Restricted_Potion_Crafting_Recipe', 9]
gui.removeNode()
if recipe.enabled and recipe.available:
button = GuiButton.GuiButton(text=(text, text, text, text), canReposition=True, text_wordwrap=0, image_scale=buttonImageScale, image_pos=(-0.04, 0.0, 0.01), image=(buttonImage, buttonImage, buttonImage, buttonImage), geom=buttonGeom, geom_scale=buttonGeomScale, geom_pos=buttonGeomPos, text0_fg=PotionGlobals.TextColor, text1_fg=PiratesGuiGlobals.TextFG0, text2_fg=PiratesGuiGlobals.TextFG15, text3_fg=PotionGlobals.TextColorDisabled, text_align=TextNode.ALeft, text_shadow=None, text_scale=PiratesGuiGlobals.TextScaleExtraLarge, command=cmd, state=DGG.NORMAL, extraArgs=[recipe])
button.bind(DGG.ENTER, recipe.showDetails)
button.bind(DGG.EXIT, recipe.hideDetails)
if button['image'][0]:
                        button['image_pos'] = (button.getBounds()[1] + 0.075, 0, 0.01)
self.buttons.append(button)
else:
button = GuiButton.GuiButton(text=(text, text, text, text), canReposition=True, text_wordwrap=0, image_scale=buttonImageScale, image_pos=(-0.04, 0.0, 0.01), image=(buttonImage, buttonImage, buttonImage, buttonImage), geom=buttonGeom, geom_scale=buttonGeomScale, geom_pos=buttonGeomPos, text0_fg=PotionGlobals.TextColorDisabled, text1_fg=PotionGlobals.TextColorDisabled, text2_fg=PotionGlobals.TextColorDisabled, text3_fg=PotionGlobals.TextColorDisabled, text_shadow=None, text_scale=PiratesGuiGlobals.TextScaleExtraLarge, text_align=TextNode.ALeft, state=DGG.NORMAL, extraArgs=[recipe])
button.bind(DGG.ENTER, recipe.showDetails)
button.bind(DGG.EXIT, recipe.hideDetails)
if button['image'][0]:
                        button['image_pos'] = (button.getBounds()[1] + 0.075, 0, 0.01)
self.inactiveButtons.append(button)
self.recipeList.addItem(button)
self.recipeList.refresh()
self.lastIncButtonState = self.recipeList.incButton['state']
self.lastDecButtonState = self.recipeList.decButton['state']
self.recipeList.incButton['command'] = self.recipeList.scrollBy
self.recipeList.incButton['extraArgs'] = [1]
self.recipeList.decButton['command'] = self.recipeList.scrollBy
self.recipeList.decButton['extraArgs'] = [-1]
return
def hide(self):
for b in self.buttons:
b.unbind(DGG.ENTER)
b.unbind(DGG.EXIT)
for b in self.inactiveButtons:
b.unbind(DGG.ENTER)
b.unbind(DGG.EXIT)
self.stash()
def setEnabled(self, enabled):
if enabled != self.enabled:
self.enabled = enabled
if enabled:
self.recipeList.incButton['state'] = self.lastIncButtonState
self.recipeList.decButton['state'] = self.lastDecButtonState
for button in self.buttons:
button['state'] = DGG.NORMAL
for button in self.buttons + self.inactiveButtons:
button.bind(DGG.ENTER, button['extraArgs'][0].showDetails)
button.bind(DGG.EXIT, button['extraArgs'][0].hideDetails)
self.accept('wheel_up', self.recipeList.scrollBy, [-1])
self.accept('wheel_down', self.recipeList.scrollBy, [1])
else:
self.lastIncButtonState = self.recipeList.incButton['state']
self.lastDecButtonState = self.recipeList.decButton['state']
self.recipeList.incButton['state'] = DGG.DISABLED
self.recipeList.decButton['state'] = DGG.DISABLED
for button in self.buttons:
button['state'] = DGG.DISABLED
for button in self.buttons + self.inactiveButtons:
button.unbind(DGG.ENTER)
button.unbind(DGG.EXIT)
self.ignoreAll()
def destroy(self):
DirectFrame.destroy(self)
self.ignoreAll()
self.background.removeNode()
del self.background
for b in self.buttons:
b.unbind(DGG.ENTER)
b.unbind(DGG.EXIT)
b.destroy()
for b in self.inactiveButtons:
b.unbind(DGG.ENTER)
b.unbind(DGG.EXIT)
b.destroy()
del self.buttons |
the-stack_0_22805 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Detection Training Script.
This script reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that is specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project may only need a single "evaluator".
Therefore, we recommend you use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import logging
import os
from collections import OrderedDict
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
CityscapesEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains a number pre-defined logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesEvaluator(dataset_name)
if evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
if evaluator_type == "lvis":
return LVISEvaluator(dataset_name, cfg, True, output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
if len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger("detectron2.trainer")
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
logger.info("Running inference with test-time augmentation ...")
model = GeneralizedRCNNWithTTA(cfg, model)
evaluators = [
cls.build_evaluator(
cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
)
for name in cfg.DATASETS.TEST
]
res = cls.test(cfg, model, evaluators)
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
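# Sketch of the "create an evaluator manually" route mentioned in build_evaluator's docstring
# (the dataset name "my_dataset_val" is a placeholder registered elsewhere):
#
#     evaluator = COCOEvaluator("my_dataset_val", cfg, True, os.path.join(cfg.OUTPUT_DIR, "inference"))
#     Trainer.test(cfg, model, evaluators=[evaluator])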
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
model = Trainer.build_model(cfg)
DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume
)
res = Trainer.test(cfg, model)
if comm.is_main_process():
verify_results(cfg, res)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, model))
return res
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop or subclassing the trainer.
"""
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
if cfg.TEST.AUG.ENABLED:
trainer.register_hooks(
[hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
the-stack_0_22806 | import torch
import torch.utils.data as data
import numpy as np
from PIL import Image
class CIFAR_New(data.Dataset):
def __init__(self, root, transform=None, target_transform=None, version='v6'):
self.data = np.load('%s/cifar10.1_%s_data.npy' %(root, version))
self.targets = np.load('%s/cifar10.1_%s_labels.npy' %(root, version)).astype('long')
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img, target = self.data[index], self.targets[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.targets) |
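# Minimal usage sketch (the root directory and transform are assumptions; `root` must contain
# the CIFAR-10.1 release files, e.g. cifar10.1_v6_data.npy and cifar10.1_v6_labels.npy):
#
#     from torchvision import transforms
#     testset = CIFAR_New(root='./data/CIFAR-10.1', transform=transforms.ToTensor(), version='v6')
#     testloader = data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)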