ext | sha | content |
---|---|---|
py | 1a54798bd704630fe7d0cf297c01f35d9e9b9b13 | frase = str(input('Digite uma Frase: ')).strip().upper()
palavras = frase.split()
junto = ''.join(palavras)
inverso = ''
# Walk the indices from len(junto) - 1 down to 0 (step -1) so the string is read in reverse.
for letra in range(len(junto) - 1, -1, -1):
inverso += junto[letra]
print(f'O inverso de {junto} é {inverso}! ')
if inverso == junto:
print('Temos um palindromo! ')
else:
print('A frase digitada não é um palindromo ') |
py | 1a547b89d355e3e9f312b0bd9c97de4e62f29487 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Functions for working with arbitrarily nested sequences of elements.
This module can perform operations on nested structures. A nested structure is a
Python collection that can contain further collections as well as other objects
called atoms. Note that numpy arrays are considered atoms.
nest recognizes the following types of collections:
1. tuple
2. namedtuple
3. dict
4. OrderedDict
5. MutableMapping
6. attr.s
attr.s decorated classes (http://www.attrs.org) are also supported, in the
same way as `namedtuple`.
The utilities here assume (and do not check) that the nested structures form a
'tree', i.e., no references in the structure of the input of these functions
should be recursive.
Example structures: `((3, 4), 5, (6, 7, (9, 10), 8))`, `(np.array(0),
(np.array([3, 4]), tf.constant([3, 4])))`
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import six as _six
import wrapt as _wrapt
from tensorflow.python import _pywrap_utils
from tensorflow.python.util.compat import collections_abc as _collections_abc
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.platform import tf_logging
_SHALLOW_TREE_HAS_INVALID_KEYS = (
"The shallow_tree's keys are not a subset of the input_tree's keys. The "
"shallow_tree has the following keys that are not in the input_tree: {}.")
_STRUCTURES_HAVE_MISMATCHING_TYPES = (
"The two structures don't have the same sequence type. Input structure has "
"type {input_type}, while shallow structure has type {shallow_type}.")
_STRUCTURES_HAVE_MISMATCHING_LENGTHS = (
"The two structures don't have the same sequence length. Input "
"structure has length {input_length}, while shallow structure has length "
"{shallow_length}."
)
_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE = (
"The input_tree has fewer elements than the shallow_tree. Input structure "
"has length {input_size}, while shallow structure has length "
"{shallow_size}.")
_IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ = (
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: {}.")
def _get_attrs_items(obj):
"""Returns a list of (name, value) pairs from an attrs instance.
The list will be sorted by name.
Args:
obj: an object.
Returns:
A list of (attr_name, attr_value) pairs, sorted by attr_name.
"""
attrs = getattr(obj.__class__, "__attrs_attrs__")
attr_names = (a.name for a in attrs)
return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]
def _sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(dict_.keys())
except TypeError:
raise TypeError("nest only supports dicts with sortable keys.")
def _is_namedtuple(instance, strict=False):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
strict: If True, `instance` is considered to be a `namedtuple` only if
it is a "plain" namedtuple. For instance, a class inheriting
from a `namedtuple` will be considered to be a `namedtuple`
iff `strict=False`.
Returns:
True if `instance` is a `namedtuple`.
"""
return _pywrap_utils.IsNamedtuple(instance, strict)
# See the swig file (util.i) for documentation.
_is_mapping_view = _pywrap_utils.IsMappingView
_is_attrs = _pywrap_utils.IsAttrs
_is_composite_tensor = _pywrap_utils.IsCompositeTensor
_is_type_spec = _pywrap_utils.IsTypeSpec
_is_mutable_mapping = _pywrap_utils.IsMutableMapping
_is_mapping = _pywrap_utils.IsMapping
def _sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
`collections.OrderedDict`, or `composite_tensor.CompositeTensor`
or `type_spec.TypeSpec`.
args: elements to be converted to the `instance` type.
Returns:
`args` with the type of `instance`.
"""
if _is_mutable_mapping(instance):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_sorted(instance), args))
instance_type = type(instance)
if instance_type == _collections.defaultdict:
d = _collections.defaultdict(instance.default_factory)
else:
d = instance_type()
for key in instance:
d[key] = result[key]
return d
elif _is_mapping(instance):
result = dict(zip(_sorted(instance), args))
instance_type = type(instance)
tf_logging.log_first_n(
tf_logging.WARN, "Mapping types may not work well with tf.nest. Prefer"
" using MutableMapping for {}".format(instance_type), 1)
try:
return instance_type((key, result[key]) for key in instance)
except TypeError as err:
raise TypeError("Error creating an object of type {} like {}. Note that "
"it must accept a single positional argument "
"representing an iterable of key-value pairs, in "
"addition to self. Cause: {}".format(
type(instance), instance, err))
elif _is_mapping_view(instance):
# We can't directly construct mapping views, so we create a list instead
return list(args)
elif _is_namedtuple(instance) or _is_attrs(instance):
if isinstance(instance, _wrapt.ObjectProxy):
instance_type = type(instance.__wrapped__)
else:
instance_type = type(instance)
return instance_type(*args)
elif _is_composite_tensor(instance):
assert len(args) == 1
spec = instance._type_spec # pylint: disable=protected-access
return spec._from_components(args[0]) # pylint: disable=protected-access
elif _is_type_spec(instance):
# Pack a CompositeTensor's components according to a TypeSpec.
assert len(args) == 1
return instance._from_components(args[0]) # pylint: disable=protected-access
elif isinstance(instance, _six.moves.range):
return _sequence_like(list(instance), args)
elif isinstance(instance, _wrapt.ObjectProxy):
# For object proxies, first create the underlying type and then re-wrap it
# in the proxy type.
return type(instance)(_sequence_like(instance.__wrapped__, args))
else:
# Not a namedtuple
return type(instance)(args)
def _yield_value(iterable):
for _, v in _yield_sorted_items(iterable):
yield v
def _yield_sorted_items(iterable):
"""Yield (key, value) pairs for `iterable` in a deterministic order.
For Sequences, the key will be an int, the array index of a value.
For Mappings, the key will be the dictionary key.
For objects (e.g. namedtuples), the key will be the attribute name.
In all cases, the keys will be iterated in sorted order.
Args:
iterable: an iterable.
Yields:
The iterable's (key, value) pairs, in order of sorted keys.
"""
# Ordered to check common structure types (list, tuple, dict) first.
if isinstance(iterable, list):
for item in enumerate(iterable):
yield item
# namedtuples handled separately to avoid expensive namedtuple check.
elif type(iterable) == tuple: # pylint: disable=unidiomatic-typecheck
for item in enumerate(iterable):
yield item
elif isinstance(iterable, (dict, _collections_abc.Mapping)):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _sorted(iterable):
yield key, iterable[key]
elif _is_attrs(iterable):
for item in _get_attrs_items(iterable):
yield item
elif _is_namedtuple(iterable):
for field in iterable._fields:
yield field, getattr(iterable, field)
elif _is_composite_tensor(iterable):
type_spec = iterable._type_spec # pylint: disable=protected-access
yield type_spec.value_type.__name__, type_spec._to_components(iterable) # pylint: disable=protected-access
elif _is_type_spec(iterable):
# Note: to allow CompositeTensors and their TypeSpecs to have matching
# structures, we need to use the same key string here.
yield iterable.value_type.__name__, iterable._component_specs # pylint: disable=protected-access
else:
for item in enumerate(iterable):
yield item
# See the swig file (util.i) for documentation.
is_sequence = _pywrap_utils.IsSequence
# See the swig file (util.i) for documentation.
is_sequence_or_composite = _pywrap_utils.IsSequenceOrComposite
@tf_export("nest.is_nested")
def is_nested(seq):
"""Returns true if its input is a collections.abc.Sequence (except strings).
Args:
seq: an input sequence.
Returns:
True if the sequence is not a string and is a collections.abc.Sequence
or a dict.
"""
return is_sequence(seq)
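# For example, strings and numpy arrays are treated as atoms, while the
# collection types listed in the module docstring count as nested:
#
#   is_nested([1, 2, 3])   # True
#   is_nested({"a": 1})    # True
#   is_nested("hello")     # False
#   is_nested(1)           # False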
@tf_export("nest.flatten")
def flatten(structure, expand_composites=False):
"""Returns a flat list from a given nested structure.
If `structure` is not a sequence, tuple (or namedtuple), dict, or attrs class,
then this returns a single-element list: `[structure]`.
In the case of dict instances, the sequence consists of the values, sorted by
key to ensure deterministic behavior. This is true also for OrderedDict
instances: their sequence order is ignored, the sorting order of keys is used
instead. The same convention is followed in pack_sequence_as. This correctly
repacks dicts and OrderedDicts after they have been flattened, and also allows
flattening an OrderedDict and then repacking it back using a corresponding
plain dict, or vice-versa. Dictionaries with non-sortable keys cannot be
flattened.
Users must not modify any collections used in nest while this function is
running.
Examples:
1. Python dict (ordered by key):
>>> dict = { "key3": "value3", "key1": "value1", "key2": "value2" }
>>> tf.nest.flatten(dict)
['value1', 'value2', 'value3']
2. For a nested python tuple:
>>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), (6.0))
>>> tf.nest.flatten(tuple)
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
3. Numpy array (will not flatten):
>>> array = np.array([[1, 2], [3, 4]])
>>> tf.nest.flatten(array)
[array([[1, 2],
[3, 4]])]
4. `tf.Tensor` (will not flatten):
>>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> tf.nest.flatten(tensor)
[<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=float32)>]
Args:
structure: an arbitrarily nested structure. Note, numpy arrays are
considered atoms and are not flattened.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the flattened version of the input.
Raises:
TypeError: The nest is or contains a dict with non-sortable keys.
"""
if structure is None:
return [None]
expand_composites = bool(expand_composites)
return _pywrap_utils.Flatten(structure, expand_composites)
# See the swig file (util.i) for documentation.
_same_namedtuples = _pywrap_utils.SameNamedtuples
class _DotString(object):
__slots__ = []
def __str__(self):
return "."
def __repr__(self):
return "."
_DOT = _DotString()
@tf_export("nest.assert_same_structure")
def assert_same_structure(nest1, nest2, check_types=True,
expand_composites=False):
"""Asserts that two structures are nested in the same way.
Note that namedtuples with identical name and fields are always considered
to have the same shallow structure (even with `check_types=True`).
For instance, this code will not raise an exception:
```python
def nt(a, b):
return collections.namedtuple('foo', 'a b')(a, b)
assert_same_structure(nt(0, 1), nt(2, 3))
```
Args:
nest1: an arbitrarily nested structure.
nest2: an arbitrarily nested structure.
check_types: if `True` (default) types of sequences are checked as well,
including the keys of dictionaries. If set to `False`, for example a
list and a tuple of objects will look the same if they have the same
size. Note that namedtuples with identical name and fields are always
considered to have the same shallow structure. Two types will also be
considered the same if they are both list subtypes (which allows "list"
and "_ListWrapper" from trackable dependency tracking to compare
equal).
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Raises:
ValueError: If the two structures do not have the same number of elements or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
# Convert to bool explicitly as otherwise pybind will not be able to handle
# type mismatch message correctly. See GitHub issue 42329 for details.
check_types = bool(check_types)
expand_composites = bool(expand_composites)
try:
_pywrap_utils.AssertSameStructure(nest1, nest2, check_types,
expand_composites)
except (ValueError, TypeError) as e:
str1 = str(map_structure(lambda _: _DOT, nest1))
str2 = str(map_structure(lambda _: _DOT, nest2))
raise type(e)("%s\n"
"Entire first structure:\n%s\n"
"Entire second structure:\n%s"
% (str(e), str1, str2))
def flatten_dict_items(dictionary):
"""Returns a dictionary with flattened keys and values.
This function flattens the keys and values of a dictionary, which can be
arbitrarily nested structures, and returns the flattened version of such
structures:
```python
example_dictionary = {(4, 5, (6, 8)): ("a", "b", ("c", "d"))}
result = {4: "a", 5: "b", 6: "c", 8: "d"}
flatten_dict_items(example_dictionary) == result
```
The input dictionary must satisfy two properties:
1. Its keys and values should have the same exact nested structure.
2. The set of all flattened keys of the dictionary must not contain repeated
keys.
Args:
dictionary: the dictionary to zip
Returns:
The zipped dictionary.
Raises:
TypeError: If the input is not a dictionary.
ValueError: If any key and value do not have the same structure layout, or
if keys are not unique.
"""
if not isinstance(dictionary, (dict, _collections_abc.Mapping)):
raise TypeError("input must be a dictionary")
flat_dictionary = {}
for i, v in _six.iteritems(dictionary):
if not is_sequence(i):
if i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique." % i)
flat_dictionary[i] = v
else:
flat_i = flatten(i)
flat_v = flatten(v)
if len(flat_i) != len(flat_v):
raise ValueError(
"Could not flatten dictionary. Key had %d elements, but value had "
"%d elements. Key: %s, value: %s."
% (len(flat_i), len(flat_v), flat_i, flat_v))
for new_i, new_v in zip(flat_i, flat_v):
if new_i in flat_dictionary:
raise ValueError(
"Could not flatten dictionary: key %s is not unique."
% (new_i))
flat_dictionary[new_i] = new_v
return flat_dictionary
def _packed_nest_with_indices(structure, flat, index, is_seq, sequence_fn=None):
"""Helper function for pack_sequence_as.
Args:
structure: Substructure (list / tuple / dict) to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_seq: Function used to test if a value should be treated as a sequence.
sequence_fn: Function used to generate a new sequence instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
sequence_fn = sequence_fn or _sequence_like
for s in _yield_value(structure):
if is_seq(s):
new_index, child = _packed_nest_with_indices(s, flat, index, is_seq,
sequence_fn)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def _pack_sequence_as(structure, flat_sequence, expand_composites,
sequence_fn=None):
"""Implements sequence packing, with the option to alter the structure."""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
sequence_fn = sequence_fn or _sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_seq(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a sequence, but found "
"incompatible type `{}` instead."
.format(truncate(flat_sequence, 100), type(flat_sequence)))
if not is_seq(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"structure is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure), truncate(structure, 100), type(flat_sequence),
len(flat_sequence), truncate(flat_sequence, 100)))
return flat_sequence[0]
try:
final_index, packed = _packed_nest_with_indices(structure, flat_sequence,
0, is_seq, sequence_fn)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Structure had %d elements, but "
"flat_sequence had %d elements. Structure: %s, flat_sequence: %s." %
(len(flat_structure), len(flat_sequence), structure, flat_sequence))
return sequence_fn(structure, packed)
@tf_export("nest.pack_sequence_as")
def pack_sequence_as(structure, flat_sequence, expand_composites=False):
"""Returns a given flattened sequence packed into a given structure.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
If `structure` is or contains a dict instance, the keys will be sorted to
pack the flat sequence in deterministic order. This is true also for
`OrderedDict` instances: their sequence order is ignored, the sorting order of
keys is used instead. The same convention is followed in `flatten`.
This correctly repacks dicts and `OrderedDict`s after they have been
flattened, and also allows flattening an `OrderedDict` and then repacking it
back using a corresponding plain dict, or vice-versa.
Dictionaries with non-sortable keys cannot be flattened.
Args:
structure: Nested structure, whose structure is given by nested lists,
tuples, and dicts. Note: numpy arrays and strings are considered
scalars.
flat_sequence: flat sequence to pack.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If `flat_sequence` and `structure` have different
element counts.
TypeError: `structure` is or contains a dict with non-sortable keys.
"""
return _pack_sequence_as(structure, flat_sequence, expand_composites)
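# A small sketch of the packing order: the flat values are consumed in the
# same key-sorted order that `flatten` produces them.
#
#   structure = {"key1": None, "key2": [None, None]}
#   pack_sequence_as(structure, ["a", "b", "c"])
#   # -> {"key1": "a", "key2": ["b", "c"]}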
@tf_export("nest.map_structure")
def map_structure(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain results with the same structure layout.
Examples:
1. A single Python dict:
>>> a = {"hello": 24, "world": 76}
>>> tf.nest.map_structure(lambda p: p * 2, a)
{'hello': 48, 'world': 152}
2. Multiple Python dictionaries:
>>> d1 = {"hello": 24, "world": 76}
>>> d2 = {"hello": 36, "world": 14}
>>> tf.nest.map_structure(lambda p1, p2: p1 + p2, d1, d2)
{'hello': 60, 'world': 90}
Args:
func: A callable that accepts as many arguments as there are structures.
*structure: scalar, or tuple or dict or list of constructed scalars and/or
other tuples/lists, or scalars. Note: numpy arrays are considered as
scalars.
**kwargs: Valid keyword args are:
* `check_types`: If set to `True` (default) the types of
iterables within the structures have to be the same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`.
Note that namedtuples with identical name and fields are always
considered to have the same shallow structure.
* `expand_composites`: If set to `True`, then composite tensors such
as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into
their component tensors. If `False` (the default), then composite
tensors are not expanded.
Returns:
A new structure with the same arity as `structure`, whose values correspond
to `func(x[0], x[1], ...)` where `x[i]` is a value in the corresponding
location in `structure[i]`. If there are different sequence types and
`check_types` is `False` the sequence types of the first structure will be
used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
if kwargs:
raise ValueError(
"Only valid keyword arguments are `check_types` and "
"`expand_composites`, not: `%s`" % ("`, `".join(kwargs.keys())))
for other in structure[1:]:
assert_same_structure(structure[0], other, check_types=check_types,
expand_composites=expand_composites)
flat_structure = (flatten(s, expand_composites) for s in structure)
entries = zip(*flat_structure)
return pack_sequence_as(
structure[0], [func(*x) for x in entries],
expand_composites=expand_composites)
def map_structure_with_paths(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(path, x[0], x[1], ..., **kwargs)` where x[i] is an entry in
`structure[i]` and `path` is the common path to x[i] in the structures. All
structures in `structure` must have the same arity, and the return value will
contain the results with the same structure layout. Special kwarg
`check_types` determines whether the types of iterables within the structure
must be the same-- see **kwargs definition below.
Args:
func: A callable with the signature func(path, *values, **kwargs) that is
evaluated on the leaves of the structure.
*structure: A variable number of compatible structures to process.
**kwargs: Optional kwargs to be passed through to func. Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be the same (e.g.,
`map_structure(func, [1], (1,))` raises a `TypeError` exception). By
default, the types must match. To allow iteration over structures of
different types (but common arity), set this kwarg to `False`.
Returns:
A structure of the same form as the input structures whose leaves are the
result of evaluating func on corresponding leaves of the input structures.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
TypeError: If `check_types` is not `False` and the two structures differ in
the type of sequence in any of their substructures.
ValueError: If no structures are provided.
"""
def wrapper_func(tuple_path, *inputs, **kwargs):
string_path = "/".join(str(s) for s in tuple_path)
return func(string_path, *inputs, **kwargs)
return map_structure_with_tuple_paths_up_to(structure[0],
wrapper_func,
*structure,
**kwargs)
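# A sketch of the path format: each leaf's tuple path is joined with "/"
# before being handed to `func`.
#
#   structure = {"a": {"b": 2, "c": 3}}
#   map_structure_with_paths(lambda path, v: (path, v * 10), structure)
#   # -> {"a": {"b": ("a/b", 20), "c": ("a/c", 30)}}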
def map_structure_with_tuple_paths(func, *structure, **kwargs):
"""Applies `func` to each entry in `structure` and returns a new structure.
Applies `func(tuple_path, x[0], x[1], ..., **kwargs)` where `x[i]` is an entry
in `structure[i]` and `tuple_path` is a tuple of indices and/or dictionary
keys (as returned by `nest.yield_flat_paths`), which uniquely specifies the
common path to x[i] in the structures. All structures in `structure` must have
the same arity, and the return value will contain the results in the same
structure. Special kwarg `check_types` determines whether the types of
iterables within the structure must be the same-- see **kwargs definition
below.
Args:
func: A callable with the signature `func(tuple_path, *values, **kwargs)`
that is evaluated on the leaves of the structure.
*structure: A variable number of compatible structures to process.
**kwargs: Optional kwargs to be passed through to func. Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be the same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Returns:
A structure of the same form as the input structures whose leaves are the
result of evaluating func on corresponding leaves of the input structures.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
TypeError: If `check_types` is not `False` and the two structures differ in
the type of sequence in any of their substructures.
ValueError: If no structures are provided.
"""
return map_structure_with_tuple_paths_up_to(structure[0],
func,
*structure,
**kwargs)
def _yield_flat_up_to(shallow_tree, input_tree, is_seq, path=()):
"""Yields (path, value) pairs of input_tree flattened up to shallow_tree.
Args:
shallow_tree: Nested structure. Traverse no further than its leaf nodes.
input_tree: Nested structure. Return the paths and values from this tree.
Must have the same upper structure as shallow_tree.
is_seq: Function used to test if a value should be treated as a sequence.
path: Tuple. Optional argument, only used when recursing. The path from the
root of the original shallow_tree, down to the root of the shallow_tree
arg of this recursive call.
Yields:
Pairs of (path, value), where path the tuple path of a leaf node in
shallow_tree, and value is the value of the corresponding node in
input_tree.
"""
if not is_seq(shallow_tree):
yield (path, input_tree)
else:
input_tree = dict(_yield_sorted_items(input_tree))
for shallow_key, shallow_subtree in _yield_sorted_items(shallow_tree):
subpath = path + (shallow_key,)
input_subtree = input_tree[shallow_key]
for leaf_path, leaf_value in _yield_flat_up_to(shallow_subtree,
input_subtree, is_seq,
path=subpath):
yield (leaf_path, leaf_value)
def assert_shallow_structure(shallow_tree,
input_tree,
check_types=True,
expand_composites=False):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
That is, this function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = {"a": "A", "b": "B"}
input_tree = {"a": 1, "c": 2}
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same. Note that even with check_types==True,
this function will consider two different namedtuple classes with the same
name and _fields attribute to be the same class.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
if is_seq(shallow_tree):
if not is_seq(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s." % type(input_tree))
if isinstance(shallow_tree, _wrapt.ObjectProxy):
shallow_type = type(shallow_tree.__wrapped__)
else:
shallow_type = type(shallow_tree)
if check_types and not isinstance(input_tree, shallow_type):
# Duck-typing means that nest should be fine with two different
# namedtuples with identical name and fields.
shallow_is_namedtuple = _is_namedtuple(shallow_tree, False)
input_is_namedtuple = _is_namedtuple(input_tree, False)
if shallow_is_namedtuple and input_is_namedtuple:
if not _same_namedtuples(shallow_tree, input_tree):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
elif ((_is_composite_tensor(shallow_tree) or
_is_composite_tensor(input_tree)) and
(_is_type_spec(shallow_tree) or _is_type_spec(input_tree))):
pass # Compatibility will be checked below.
elif not (isinstance(shallow_tree, _collections_abc.Mapping) and
isinstance(input_tree, _collections_abc.Mapping)):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
if _is_composite_tensor(shallow_tree) or _is_composite_tensor(input_tree):
if not (
(_is_composite_tensor(input_tree) or _is_type_spec(input_tree)) and
(_is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree))):
raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree),
shallow_type=type(shallow_tree)))
type_spec_1 = (shallow_tree if _is_type_spec(shallow_tree) else
shallow_tree._type_spec) # pylint: disable=protected-access
type_spec_2 = (input_tree if _is_type_spec(input_tree) else
input_tree._type_spec) # pylint: disable=protected-access
try:
_ = type_spec_1.most_specific_compatible_type(type_spec_2)
except (TypeError, ValueError) as e:
raise ValueError(
"Incompatible CompositeTensor TypeSpecs: %s vs. %s -- %s" %
(type_spec_1, type_spec_2, e))
elif _is_type_spec(shallow_tree):
if not _is_type_spec(input_tree):
raise TypeError("If shallow structure is a TypeSpec, input must also "
"be a TypeSpec. Input has type: %s."
% type(input_tree))
else:
if len(input_tree) != len(shallow_tree):
raise ValueError(
_STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree)))
elif len(input_tree) < len(shallow_tree):
raise ValueError(
_INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format(
input_size=len(input_tree), shallow_size=len(shallow_tree)))
if isinstance(shallow_tree, _collections_abc.Mapping):
absent_keys = set(shallow_tree) - set(input_tree)
if absent_keys:
raise ValueError(_SHALLOW_TREE_HAS_INVALID_KEYS
.format(sorted(absent_keys)))
for shallow_branch, input_branch in zip(_yield_value(shallow_tree),
_yield_value(input_tree)):
assert_shallow_structure(shallow_branch, input_branch,
check_types=check_types,
expand_composites=expand_composites)
def flatten_up_to(shallow_tree, input_tree, check_types=True,
expand_composites=False):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flatten output.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
assert_shallow_structure(shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
# Discard paths returned by _yield_flat_up_to.
return [v for _, v in _yield_flat_up_to(shallow_tree, input_tree, is_seq)]
def flatten_with_tuple_paths_up_to(shallow_tree,
input_tree,
check_types=True,
expand_composites=False):
"""Flattens `input_tree` up to `shallow_tree`.
Any further depth in structure in `input_tree` is retained as elements in the
partially flattened output.
Returns a list of (path, value) pairs, where value is a leaf node in the
flattened tree, and path is the tuple path of that leaf in input_tree.
If `shallow_tree` and `input_tree` are not sequences, this returns a
single-element list: `[((), input_tree)]`.
Use Case:
Sometimes we may wish to partially flatten a nested sequence, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_with_tuple_paths_up_to(shallow_tree,
input_tree)
flattened_shallow_tree = flatten_with_tuple_paths_up_to(shallow_tree,
shallow_tree)
# Output is:
# [((0, 0), [2, 2]),
# ((0, 1), [3, 3]),
# ((1, 0), [4, 9]),
# ((1, 1), [5, 5])]
#
# [((0, 0), True),
# ((0, 1), True),
# ((1, 0), False),
# ((1, 1), True)]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [((0, 0), ('a', 1)),
# ((0, 1, 0), ('b', 2)),
# ((0, 1, 1, 0), ('c', 3)),
# ((0, 1, 1, 1), ('d', 4))]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Non-Sequence Edge Cases:
```python
flatten_with_tuple_paths_up_to(0, 0) # Output: [((), 0)]
flatten_with_tuple_paths_up_to(0, [0, 1, 2]) # Output: [((), [0, 1, 2])]
flatten_with_tuple_paths_up_to([0, 1, 2], 0) # Output: TypeError
flatten_with_tuple_paths_up_to([0, 1, 2], [0, 1, 2])
# Output: [((0,), 0), ((1,), 1), ((2,), 2)]
```
Args:
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an arbitrarily nested structure or a scalar object.
Note, numpy arrays are considered scalars.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
assert_shallow_structure(shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
return list(_yield_flat_up_to(shallow_tree, input_tree, is_seq))
def map_structure_up_to(shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
sequence (for example when the function itself takes sequence inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
The `inputs`, can be thought of as having the same structure layout as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
shallow_tree = [None, None]
inp_val = [1, 2, 3]
out = map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val)
# Output is: [2, 4]
```
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: arbitrarily nested combination of objects that are compatible with
shallow_tree. The function `func` is applied to corresponding
partially flattened elements of each input, so the function must support
arity of `len(inputs)`.
**kwargs: kwargs to feed to func(). Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be the same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with the same structure layout as
`shallow_tree`.
"""
return map_structure_with_tuple_paths_up_to(
shallow_tree,
lambda _, *values: func(*values), # Discards the path arg.
*inputs,
**kwargs)
def map_structure_with_tuple_paths_up_to(shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
Like map_structure_up_to(), except that the 'func' argument takes a path
tuple as its first argument, followed by the corresponding values from
*inputs.
Example:
```python
lowercase = {'a': 'a', 'b': ('b0', 'b1')}
uppercase = {'a': 'A', 'b': ('B0', 'B1')}
def print_path_and_values(path, *values):
print("path: {}, values: {}".format(path, values))
shallow_tree = {'a': None}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase)
path: ('a',), values: ('a', 'A')
path: ('b', 0), values: ('b0', 'B0')
path: ('b', 1), values: ('b1', 'B1')
shallow_tree = {'b': None}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase,
check_types=False)
path: ('b',), values: (('b0', 'b1'), ('B0', 'B1'))
shallow_tree = {'a': None, 'b': {1: None}}
map_structure_with_tuple_paths_up_to(shallow_tree,
print_path_and_values,
lowercase,
uppercase,
check_types=False)
path: ('a',), values: ('a', 'A')
path: ('b', 1), values: ('b1', 'B1')
```
Args:
shallow_tree: a shallow tree, common to all the inputs.
func: callable that takes args (path, inputs_0_value, ... , inputs_N_value),
where path is a tuple path to a leaf node in shallow_tree, and
inputs_i_value is the corresponding value from inputs[i].
*inputs: nested structures that are all structurally compatible with
shallow_tree.
**kwargs: kwargs to feed to func(). Special kwarg
`check_types` is not passed to func, but instead determines whether the
types of iterables within the structures have to be the same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a sequence but one of `*inputs` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
Returns:
Result of repeatedly applying `func`. Has the same structure layout as
`shallow_tree`.
"""
if not inputs:
raise ValueError("Cannot map over no sequences")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
is_seq = is_sequence_or_composite if expand_composites else is_sequence
for input_tree in inputs:
assert_shallow_structure(
shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
flat_value_gen = (
flatten_up_to( # pylint: disable=g-complex-comprehension
shallow_tree,
input_tree,
check_types,
expand_composites=expand_composites) for input_tree in inputs)
flat_path_gen = (
path for path, _ in _yield_flat_up_to(shallow_tree, inputs[0], is_seq))
results = [
func(*args, **kwargs) for args in zip(flat_path_gen, *flat_value_gen)
]
return pack_sequence_as(structure=shallow_tree, flat_sequence=results,
expand_composites=expand_composites)
def get_traverse_shallow_structure(traverse_fn, structure,
expand_composites=False):
"""Generates a shallow structure from a `traverse_fn` and `structure`.
`traverse_fn` must accept any possible subtree of `structure` and return
a depth=1 structure containing `True` or `False` values, describing which
of the top-level subtrees may be traversed. It may also
return a scalar `True` or `False`, meaning "traversal is OK / not OK for all subtrees."
Examples are available in the unit tests (nest_test.py).
Args:
traverse_fn: Function taking a substructure and returning either a scalar
`bool` (whether to traverse that substructure or not) or a depth=1
shallow structure of the same type, describing which parts of the
substructure to traverse.
structure: The structure to traverse.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A shallow structure containing python bools, which can be passed to
`map_structure_up_to` and `flatten_up_to`.
Raises:
TypeError: if `traverse_fn` returns a sequence for a non-sequence input,
or a structure with depth higher than 1 for a sequence input,
or if any leaf values in the returned structure or scalar are not type
`bool`.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
to_traverse = traverse_fn(structure)
if not is_seq(structure):
if not isinstance(to_traverse, bool):
raise TypeError("traverse_fn returned structure: %s for non-structure: %s"
% (to_traverse, structure))
return to_traverse
level_traverse = []
if isinstance(to_traverse, bool):
if not to_traverse:
# Do not traverse this substructure at all. Exit early.
return False
else:
# Traverse the entire substructure.
for branch in _yield_value(structure):
level_traverse.append(
get_traverse_shallow_structure(traverse_fn, branch,
expand_composites=expand_composites))
elif not is_seq(to_traverse):
raise TypeError("traverse_fn returned a non-bool scalar: %s for input: %s"
% (to_traverse, structure))
else:
# Traverse some subset of this substructure.
assert_shallow_structure(to_traverse, structure,
expand_composites=expand_composites)
for t, branch in zip(_yield_value(to_traverse),
_yield_value(structure)):
if not isinstance(t, bool):
raise TypeError(
"traverse_fn didn't return a depth=1 structure of bools. saw: %s "
" for structure: %s" % (to_traverse, structure))
if t:
level_traverse.append(
get_traverse_shallow_structure(traverse_fn, branch))
else:
level_traverse.append(False)
return _sequence_like(structure, level_traverse)
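# A sketch of a traverse_fn that descends into lists but stops at tuples
# (the resulting mask can be fed to `flatten_up_to` or `map_structure_up_to`):
#
#   structure = [(1, 2), [3, [4, 5]], 6]
#   get_traverse_shallow_structure(lambda s: not isinstance(s, tuple), structure)
#   # -> [False, [True, [True, True]], True]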
def yield_flat_paths(nest, expand_composites=False):
"""Yields paths for some nested structure.
Paths are lists of objects which can be str-converted, which may include
integers or other types which are used as indices in a dict.
The flat list will be in the corresponding order as if you called
`nest.flatten` on the structure. This is handy for naming Tensors such that
the TF scope structure matches the tuple structure.
E.g. if we have a tuple `value = Foo(a=3, b=Bar(c=23, d=42))`
```shell
nest.flatten(value)
[3, 23, 42]
list(nest.yield_flat_paths(value))
[('a',), ('b', 'c'), ('b', 'd')]
```
```shell
list(nest.yield_flat_paths({'a': [3]}))
[('a', 0)]
list(nest.yield_flat_paths({'a': 3}))
[('a',)]
```
Args:
nest: the value to produce a flattened paths list for.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Yields:
Tuples containing index or key values which form the path to a specific
leaf value in the nested structure.
"""
is_seq = is_sequence_or_composite if expand_composites else is_sequence
for k, _ in _yield_flat_up_to(nest, nest, is_seq):
yield k
def flatten_with_joined_string_paths(structure, separator="/",
expand_composites=False):
"""Returns a list of (string path, data element) tuples.
The order of tuples produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each data element was located. See `nest.yield_flat_paths`
for more information.
Args:
structure: the nested structure to flatten.
separator: string to separate levels of hierarchy in the results, defaults
to '/'.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A list of (string, data element) tuples.
"""
flat_paths = yield_flat_paths(structure, expand_composites=expand_composites)
def stringify_and_join(path_elements):
return separator.join(str(path_element) for path_element in path_elements)
flat_string_paths = (stringify_and_join(path) for path in flat_paths)
return list(zip(flat_string_paths,
flatten(structure, expand_composites=expand_composites)))
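# For example:
#
#   flatten_with_joined_string_paths({"a": [1, 2], "b": 3})
#   # -> [('a/0', 1), ('a/1', 2), ('b', 3)]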
def flatten_with_tuple_paths(structure, expand_composites=False):
"""Returns a list of `(tuple_path, leaf_element)` tuples.
The order of pairs produced matches that of `nest.flatten`. This allows you
to flatten a nested structure while keeping information about where in the
structure each data element was located. See `nest.yield_flat_paths`
for more information about tuple paths.
Args:
structure: the nested structure to flatten.
expand_composites: If true, then composite tensors such as
`tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their
component tensors.
Returns:
A list of `(tuple_path, leaf_element)` tuples. Each `tuple_path` is a tuple
of indices and/or dictionary keys that uniquely specify the path to
`leaf_element` within `structure`.
"""
return list(zip(yield_flat_paths(structure,
expand_composites=expand_composites),
flatten(structure, expand_composites=expand_composites)))
def list_to_tuple(structure):
"""Replace all lists with tuples.
The fork of nest that tf.data uses treats lists as single elements, while
tf.nest treats them as structures to recurse into. Keras has chosen to adopt
the latter convention, and must therefore deeply replace all lists with tuples
before passing structures to Dataset.from_generator.
Args:
structure: A nested structure to be remapped.
Returns:
structure mapped to replace all lists with tuples.
"""
def sequence_fn(instance, args):
if isinstance(instance, list):
return tuple(args)
return _sequence_like(instance, args)
return _pack_sequence_as(structure, flatten(structure), False,
sequence_fn=sequence_fn)
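# For example, only list containers are rewritten; dicts and tuples keep
# their original types:
#
#   list_to_tuple({"a": [1, [2, 3]], "b": (4, [5])})
#   # -> {"a": (1, (2, 3)), "b": (4, (5,))}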
_pywrap_utils.RegisterType("Mapping", _collections_abc.Mapping)
_pywrap_utils.RegisterType("MutableMapping", _collections_abc.MutableMapping)
_pywrap_utils.RegisterType("Sequence", _collections_abc.Sequence)
_pywrap_utils.RegisterType("MappingView", _collections_abc.MappingView)
_pywrap_utils.RegisterType("ObjectProxy", _wrapt.ObjectProxy)
|
py | 1a547c2cea76e8e33e387193a42ee4376f481ea0 | class Pedido:
def __init__(self, cliente, observacoes, data_pedido, valor, status, produtos):
self.__cliente = cliente
self.__observacoes = observacoes
self.__data_pedido = data_pedido
self.__valor = valor
self.__status = status
self.__produtos = produtos
@property
def cliente(self):
return self.__cliente
@cliente.setter
def cliente(self, cliente):
self.__cliente = cliente
@property
def observacoes(self):
return self.__observacoes
@observacoes.setter
def observacoes(self, observacoes):
self.__observacoes = observacoes
@property
def data_pedido(self):
return self.__data_pedido
@data_pedido.setter
def data_pedido(self, data_pedido):
self.__data_pedido = data_pedido
@property
def valor(self):
return self.__valor
@valor.setter
def valor(self, valor):
self.__valor = valor
@property
def status(self):
return self.__status
@status.setter
def status(self, status):
self.__status = status
@property
def produtos(self):
return self.__produtos
@produtos.setter
def produtos(self, produtos):
self.__produtos = produtos
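# Usage sketch (the attribute values below are purely illustrative):
#
#   pedido = Pedido(cliente="Maria", observacoes="no onions",
#                   data_pedido="2021-05-01", valor=59.90,
#                   status="open", produtos=["pizza", "soda"])
#   pedido.status = "delivered"   # the setter updates the private attribute
#   print(pedido.valor)           # 59.9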
|
py | 1a547dc3677e7cbaa930c0540ad6d3beb36b1735 | AP_LOCATION = None
'''constant value 3 '''
AP_ORIENTATION = None
'''constant value 4 '''
AP_PANNING = None
'''constant value 1 '''
AP_PITCH = None
'''constant value 2 '''
AP_VOLUME = None
'''constant value 0 '''
CHANNELS_INVALID = None
'''constant value 0 '''
CHANNELS_MONO = None
'''constant value 1 '''
CHANNELS_STEREO = None
'''constant value 2 '''
CHANNELS_STEREO_LFE = None
'''constant value 3 '''
CHANNELS_SURROUND4 = None
'''constant value 4 '''
CHANNELS_SURROUND5 = None
'''constant value 5 '''
CHANNELS_SURROUND51 = None
'''constant value 6 '''
CHANNELS_SURROUND61 = None
'''constant value 7 '''
CHANNELS_SURROUND71 = None
'''constant value 8 '''
CODEC_AAC = None
'''constant value 1 '''
CODEC_AC3 = None
'''constant value 2 '''
CODEC_FLAC = None
'''constant value 3 '''
CODEC_INVALID = None
'''constant value 0 '''
CODEC_MP2 = None
'''constant value 4 '''
CODEC_MP3 = None
'''constant value 5 '''
CODEC_OPUS = None
'''constant value 8 '''
CODEC_PCM = None
'''constant value 6 '''
CODEC_VORBIS = None
'''constant value 7 '''
CONTAINER_AC3 = None
'''constant value 1 '''
CONTAINER_FLAC = None
'''constant value 2 '''
CONTAINER_INVALID = None
'''constant value 0 '''
CONTAINER_MATROSKA = None
'''constant value 3 '''
CONTAINER_MP2 = None
'''constant value 4 '''
CONTAINER_MP3 = None
'''constant value 5 '''
CONTAINER_OGG = None
'''constant value 6 '''
CONTAINER_WAV = None
'''constant value 7 '''
DISTANCE_MODEL_EXPONENT = None
'''constant value 5 '''
DISTANCE_MODEL_EXPONENT_CLAMPED = None
'''constant value 6 '''
DISTANCE_MODEL_INVALID = None
'''constant value 0 '''
DISTANCE_MODEL_INVERSE = None
'''constant value 1 '''
DISTANCE_MODEL_INVERSE_CLAMPED = None
'''constant value 2 '''
DISTANCE_MODEL_LINEAR = None
'''constant value 3 '''
DISTANCE_MODEL_LINEAR_CLAMPED = None
'''constant value 4 '''
FORMAT_FLOAT32 = None
'''constant value 36 '''
FORMAT_FLOAT64 = None
'''constant value 40 '''
FORMAT_INVALID = None
'''constant value 0 '''
FORMAT_S16 = None
'''constant value 18 '''
FORMAT_S24 = None
'''constant value 19 '''
FORMAT_S32 = None
'''constant value 20 '''
FORMAT_U8 = None
'''constant value 1 '''
RATE_11025 = None
'''constant value 11025 '''
RATE_16000 = None
'''constant value 16000 '''
RATE_192000 = None
'''constant value 192000 '''
RATE_22050 = None
'''constant value 22050 '''
RATE_32000 = None
'''constant value 32000 '''
RATE_44100 = None
'''constant value 44100 '''
RATE_48000 = None
'''constant value 48000 '''
RATE_8000 = None
'''constant value 8000 '''
RATE_88200 = None
'''constant value 88200 '''
RATE_96000 = None
'''constant value 96000 '''
RATE_INVALID = None
'''constant value 0 '''
STATUS_INVALID = None
'''constant value 0 '''
STATUS_PAUSED = None
'''constant value 2 '''
STATUS_PLAYING = None
'''constant value 1 '''
STATUS_STOPPED = None
'''constant value 3 '''
class Device:
'''Device objects represent an audio output backend like OpenAL or SDL, but might also represent a file output or RAM buffer output. '''
channels = None
'''The channel count of the device. '''
distance_model = None
'''The distance model of the device. '''
doppler_factor = None
'''The doppler factor of the device. This factor is a scaling factor for the velocity vectors in doppler calculation. So a value bigger than 1 will exaggerate the effect as it raises the velocity. '''
format = None
'''The native sample format of the device. '''
listener_location = None
'''The listener’s location in 3D space, a 3D tuple of floats. '''
listener_orientation = None
'''The listener’s orientation in 3D space as quaternion, a 4 float tuple. '''
listener_velocity = None
'''The listener’s velocity in 3D space, a 3D tuple of floats. '''
rate = None
'''The sampling rate of the device in Hz. '''
speed_of_sound = None
'''The speed of sound of the device. The speed of sound in air is typically 343.3 m/s. '''
volume = None
'''The overall volume of the device. '''
class DynamicMusic:
'''The DynamicMusic object allows playback of music depending on a current scene; scene changes are managed by the class, with the possibility of custom transitions. '''
fadeTime = None
'''The length in seconds of the crossfade transition '''
position = None
'''The playback position of the scene in seconds. '''
scene = None
'''The current scene '''
status = None
'''Whether the scene is playing, paused or stopped (=invalid). '''
volume = None
'''The volume of the scene. '''
class Handle:
'''Handle objects are playback handles that can be used to control playback of a sound. If a sound is played back multiple times then there are as many handles. '''
attenuation = None
'''This factor is used for distance based attenuation of the source. '''
cone_angle_inner = None
'''The opening angle of the inner cone of the source. If the cone values of a source are set there are two (audible) cones with the apex at the location of the source and with infinite height, heading in the direction of the source’s orientation. In the inner cone the volume is normal. Outside the outer cone the volume will be cone_volume_outer and in the area between the volume will be interpolated linearly. '''
cone_angle_outer = None
'''The opening angle of the outer cone of the source. '''
cone_volume_outer = None
'''The volume outside the outer cone of the source. '''
distance_maximum = None
'''The maximum distance of the source. If the listener is further away the source volume will be 0. '''
distance_reference = None
'''The reference distance of the source. At this distance the volume will be exactly volume. '''
keep = None
'''Whether the sound should be kept paused in the device when its end is reached. This can be used to seek the sound to some position and start playback again. '''
location = None
'''The source’s location in 3D space, a 3D tuple of floats. '''
loop_count = None
'''The (remaining) loop count of the sound. A negative value indicates infinity. '''
orientation = None
'''The source’s orientation in 3D space as quaternion, a 4 float tuple. '''
pitch = None
'''The pitch of the sound. '''
position = None
'''The playback position of the sound in seconds. '''
relative = None
'''Whether the source’s location, velocity and orientation is relative or absolute to the listener. '''
status = None
'''Whether the sound is playing, paused or stopped (=invalid). '''
velocity = None
'''The source’s velocity in 3D space, a 3D tuple of floats. '''
volume = None
'''The volume of the sound. '''
volume_maximum = None
'''The maximum volume of the source. '''
volume_minimum = None
'''The minimum volume of the source. '''
class PlaybackManager:
'''A PlaybackManager object allows easy control of playback for sounds organized in categories. '''
pass
class Sequence:
'''This sound represents sequenced entries to play a sound scene. '''
channels = None
'''The channel count of the sequence. '''
distance_model = None
'''The distance model of the sequence. '''
doppler_factor = None
'''The doppler factor of the sequence. This factor is a scaling factor for the velocity vectors in doppler calculation. So a value bigger than 1 will exaggerate the effect as it raises the velocity. '''
fps = None
'''The frames per second of the sequence, used for reading and writing animation data. '''
muted = None
'''Whether the whole sequence is muted. '''
rate = None
'''The sampling rate of the sequence in Hz. '''
speed_of_sound = None
'''The speed of sound of the sequence. The speed of sound in air is typically 343.3 m/s. '''
class SequenceEntry:
'''SequenceEntry objects represent an entry of a sequenced sound. '''
attenuation = None
'''This factor is used for distance based attenuation of the source. '''
cone_angle_inner = None
'''The opening angle of the inner cone of the source. If the cone values of a source are set there are two (audible) cones with the apex at the location of the source and with infinite height, heading in the direction of the source’s orientation. In the inner cone the volume is normal. Outside the outer cone the volume will be cone_volume_outer and in the area between the volume will be interpolated linearly. '''
cone_angle_outer = None
'''The opening angle of the outer cone of the source. '''
cone_volume_outer = None
'''The volume outside the outer cone of the source. '''
distance_maximum = None
'''The maximum distance of the source. If the listener is further away the source volume will be 0. '''
distance_reference = None
'''The reference distance of the source. At this distance the volume will be exactly volume. '''
muted = None
'''Whether the entry is muted. '''
relative = None
'''Whether the source’s location, velocity and orientation is relative or absolute to the listener. '''
sound = None
'''The sound the entry is representing and will be played in the sequence. '''
volume_maximum = None
'''The maximum volume of the source. '''
volume_minimum = None
'''The minimum volume of the source. '''
class Sound:
'''Sound objects are immutable and represent a sound that can be played back simultaneously multiple times. '''
length = None
'''The length of the sound in samples. '''
specs = None
'''The sample specification of the sound as a tuple with rate and channel count. '''
class Source:
'''The source object represents the source position of a binaural sound. '''
azimuth = None
'''The azimuth angle. '''
distance = None
'''The distance value. 0 is min, 1 is max. '''
elevation = None
'''The elevation angle. '''
class ThreadPool:
'''A ThreadPool is used to parallelize convolution efficiently. '''
pass
class error:
pass
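# Illustrative usage sketch (hedged): given a Device instance `device` and a Sound
# instance `sound` obtained from the real audio module (this file is only a stub of
# constants and attribute docs), playback is typically controlled through a Handle.
# The play()/stop() method names are assumptions; only the attributes above are stubbed here.
#   handle = device.play(sound)   # assumed play() method returning a Handle
#   handle.volume = 0.5           # per-handle volume (see Handle.volume above)
#   handle.loop_count = -1        # a negative loop count means infinite looping
#   handle.stop()                 # assumed stop() method referenced by Handle's docstring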
|
py | 1a547e6ac6eb0a6bf0d2c786ebf2912bd10c0580 | import os
print("Starting Capsian Setup Tool...")
print("This script will install all the dependencies you need")
input("Press enter to continue or close to terminate ")
_pip_type = "pip"
if os.name == "posix":
_pip_type = "pip3"
os.system(_pip_type + " install pyglet==1.5.6")
os.system(_pip_type + " install PyOpenGL")
os.system(_pip_type + " install pyinstaller")
input("Installation complete!\nPress enter to exit ")
|
py | 1a547fc2d2ee953271a6491f6718bd3196777344 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import functools
import importlib
import json
import operator
import os
import queue
import sys
import tempfile
import time
import traceback
import unittest
import warnings
from contextlib import contextmanager
from functools import partial, reduce
from subprocess import PIPE, Popen
from typing import Callable, Optional, Tuple
from urllib.error import ContentTooShortError, HTTPError
import numpy as np
import torch
import torch.distributed as dist
from monai.apps.utils import download_url
from monai.config import NdarrayTensor
from monai.config.deviceconfig import USE_COMPILED
from monai.config.type_definitions import NdarrayOrTensor
from monai.data import create_test_image_2d, create_test_image_3d
from monai.networks import convert_to_torchscript
from monai.utils import optional_import
from monai.utils.module import pytorch_after, version_leq
from monai.utils.type_conversion import convert_data_type
nib, _ = optional_import("nibabel")
quick_test_var = "QUICKTEST"
_tf32_enabled = None
_test_data_config: dict = {}
def testing_data_config(*keys):
"""get _test_data_config[keys0][keys1]...[keysN]"""
if not _test_data_config:
with open(os.path.join(os.path.dirname(__file__), "testing_data", "data_config.json")) as c:
_config = json.load(c)
for k, v in _config.items():
_test_data_config[k] = v
return reduce(operator.getitem, keys, _test_data_config)
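# Illustrative usage (hedged): the key names below are hypothetical and depend on the
# entries actually present in testing_data/data_config.json:
#   url = testing_data_config("images", "some_dataset", "url")
#   md5 = testing_data_config("images", "some_dataset", "hash_val")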
def clone(data: NdarrayTensor) -> NdarrayTensor:
"""
Clone data independent of type.
Args:
data (NdarrayTensor): This can be a Pytorch Tensor or numpy array.
Returns:
Any: Cloned data object
"""
return copy.deepcopy(data)
def assert_allclose(
actual: NdarrayOrTensor,
desired: NdarrayOrTensor,
type_test: bool = True,
device_test: bool = False,
*args,
**kwargs,
):
"""
Assert that types and all values of two data objects are close.
Args:
actual: Pytorch Tensor or numpy array for comparison.
desired: Pytorch Tensor or numpy array to compare against.
type_test: whether to test that `actual` and `desired` are both numpy arrays or torch tensors.
device_test: whether to test the device property.
args: extra arguments to pass on to `np.testing.assert_allclose`.
kwargs: extra arguments to pass on to `np.testing.assert_allclose`.
"""
if type_test:
# check both actual and desired are of the same type
np.testing.assert_equal(isinstance(actual, np.ndarray), isinstance(desired, np.ndarray), "numpy type")
np.testing.assert_equal(isinstance(actual, torch.Tensor), isinstance(desired, torch.Tensor), "torch type")
if isinstance(desired, torch.Tensor) or isinstance(actual, torch.Tensor):
if device_test:
np.testing.assert_equal(str(actual.device), str(desired.device), "torch device check") # type: ignore
actual = actual.detach().cpu().numpy() if isinstance(actual, torch.Tensor) else actual
desired = desired.detach().cpu().numpy() if isinstance(desired, torch.Tensor) else desired
np.testing.assert_allclose(actual, desired, *args, **kwargs)
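# Illustrative usage (sketch): compare a torch tensor against a numpy array while skipping
# the type check; extra keyword arguments are forwarded to np.testing.assert_allclose.
#   assert_allclose(torch.tensor([1.0, 2.0]), np.array([1.0, 2.0]), type_test=False, rtol=1e-5)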
@contextmanager
def skip_if_downloading_fails():
try:
yield
except (ContentTooShortError, HTTPError, ConnectionError) as e:
raise unittest.SkipTest(f"error while downloading: {e}") from e
except RuntimeError as rt_e:
if "unexpected EOF" in str(rt_e):
raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e # incomplete download
if "network issue" in str(rt_e):
raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e
if "gdown dependency" in str(rt_e): # no gdown installed
raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e
if "md5 check" in str(rt_e):
raise unittest.SkipTest(f"error while downloading: {rt_e}") from rt_e
raise rt_e
def test_pretrained_networks(network, input_param, device):
with skip_if_downloading_fails():
return network(**input_param).to(device)
def test_is_quick():
return os.environ.get(quick_test_var, "").lower() == "true"
def is_tf32_env():
"""
The environment variable NVIDIA_TF32_OVERRIDE=0 will override any defaults
or programmatic configuration of NVIDIA libraries, and consequently,
cuBLAS will not accelerate FP32 computations with TF32 tensor cores.
"""
global _tf32_enabled
if _tf32_enabled is None:
_tf32_enabled = False
if (
torch.cuda.is_available()
and not version_leq(f"{torch.version.cuda}", "10.100")  # requires a CUDA version of at least 11.0
and os.environ.get("NVIDIA_TF32_OVERRIDE", "1") != "0"
and torch.cuda.device_count() > 0
):
try:
# with TF32 enabled, the speed is ~8x faster, but the precision has ~2 digits less in the result
g_gpu = torch.Generator(device="cuda")
g_gpu.manual_seed(2147483647)
a_full = torch.randn(1024, 1024, dtype=torch.double, device="cuda", generator=g_gpu)
b_full = torch.randn(1024, 1024, dtype=torch.double, device="cuda", generator=g_gpu)
_tf32_enabled = (a_full.float() @ b_full.float() - a_full @ b_full).abs().max().item() > 0.001 # 0.1713
except BaseException:
pass
print(f"tf32 enabled: {_tf32_enabled}")
return _tf32_enabled
def skip_if_quick(obj):
"""
Skip the unit tests if environment variable `quick_test_var=true`.
For example, the user can skip the relevant tests by setting ``export QUICKTEST=true``.
"""
is_quick = test_is_quick()
return unittest.skipIf(is_quick, "Skipping slow tests")(obj)
class SkipIfNoModule:
"""Decorator to be used if test should be skipped
when optional module is not present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_missing = not optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_missing, f"optional module not present: {self.module_name}")(obj)
class SkipIfModule:
"""Decorator to be used if test should be skipped
when optional module is present."""
def __init__(self, module_name):
self.module_name = module_name
self.module_avail = optional_import(self.module_name)[1]
def __call__(self, obj):
return unittest.skipIf(self.module_avail, f"Skipping because optional module present: {self.module_name}")(obj)
def skip_if_no_cpp_extension(obj):
"""
Skip the unit tests if the cpp extension is not available
"""
return unittest.skipUnless(USE_COMPILED, "Skipping cpp extension tests")(obj)
def skip_if_no_cuda(obj):
"""
Skip the unit tests if torch.cuda.is_available is False
"""
return unittest.skipUnless(torch.cuda.is_available(), "Skipping CUDA-based tests")(obj)
def skip_if_windows(obj):
"""
Skip the unit tests if platform is win32
"""
return unittest.skipIf(sys.platform == "win32", "Skipping tests on Windows")(obj)
class SkipIfBeforePyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions older than that given."""
def __init__(self, pytorch_version_tuple):
self.min_version = pytorch_version_tuple
self.version_too_old = not pytorch_after(*pytorch_version_tuple)
def __call__(self, obj):
return unittest.skipIf(
self.version_too_old, f"Skipping tests that fail on PyTorch versions before: {self.min_version}"
)(obj)
class SkipIfAtLeastPyTorchVersion:
"""Decorator to be used if test should be skipped
with PyTorch versions newer than or equal to that given."""
def __init__(self, pytorch_version_tuple):
self.max_version = pytorch_version_tuple
self.version_too_new = pytorch_after(*pytorch_version_tuple)
def __call__(self, obj):
return unittest.skipIf(
self.version_too_new, f"Skipping tests that fail on PyTorch versions at least: {self.max_version}"
)(obj)
def is_main_test_process():
ps = torch.multiprocessing.current_process()
if not ps or not hasattr(ps, "name"):
return False
return ps.name.startswith("Main")
def has_cupy():
"""
Returns True if the user has installed a version of cupy.
"""
cp, has_cp = optional_import("cupy")
if not is_main_test_process():
return has_cp # skip the check if we are running in subprocess
if not has_cp:
return False
try: # test cupy installation with a basic example
x = cp.arange(6, dtype="f").reshape(2, 3)
y = cp.arange(3, dtype="f")
kernel = cp.ElementwiseKernel(
"float32 x, float32 y", "float32 z", """ if (x - 2 > y) { z = x * y; } else { z = x + y; } """, "my_kernel"
)
flag = kernel(x, y)[0, 0] == 0
del x, y, kernel
cp.get_default_memory_pool().free_all_blocks()
return flag
except Exception:
return False
HAS_CUPY = has_cupy()
def make_nifti_image(array: NdarrayOrTensor, affine=None, dir=None, fname=None, suffix=".nii.gz", verbose=False):
"""
Create a temporary nifti image on the disk and return the image name.
User is responsible for deleting the temporary file when done with it.
"""
if isinstance(array, torch.Tensor):
array, *_ = convert_data_type(array, np.ndarray)
if isinstance(affine, torch.Tensor):
affine, *_ = convert_data_type(affine, np.ndarray)
if affine is None:
affine = np.eye(4)
test_image = nib.Nifti1Image(array, affine)
# if dir not given, create random. Else, make sure it exists.
if dir is None:
dir = tempfile.mkdtemp()
else:
os.makedirs(dir, exist_ok=True)
# If fname not given, get random one. Else, concat dir, fname and suffix.
if fname is None:
temp_f, fname = tempfile.mkstemp(suffix=suffix, dir=dir)
os.close(temp_f)
else:
fname = os.path.join(dir, fname + suffix)
nib.save(test_image, fname)
if verbose:
print(f"File written: {fname}.")
return fname
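# Illustrative usage (sketch): the caller owns the temporary file and must remove it.
#   fname = make_nifti_image(np.zeros((8, 8, 8), dtype=np.float32))
#   try:
#       ...  # use the image on disk
#   finally:
#       os.remove(fname)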
def make_rand_affine(ndim: int = 3, random_state: Optional[np.random.RandomState] = None):
"""Create random affine transformation (with values == -1, 0 or 1)."""
rs = np.random.random.__self__ if random_state is None else random_state # type: ignore
vals = rs.choice([-1, 1], size=ndim)
positions = rs.choice(range(ndim), size=ndim, replace=False)
af = np.zeros([ndim + 1, ndim + 1])
af[ndim, ndim] = 1
for i, (v, p) in enumerate(zip(vals, positions)):
af[i, p] = v
return af
class DistTestCase(unittest.TestCase):
"""
testcase without _outcome, so that it's picklable.
"""
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict["_outcome"]
return self_dict
def __setstate__(self, data_dict):
self.__dict__.update(data_dict)
class DistCall:
"""
Wrap a test case so that it will run in multiple processes on a single machine using `torch.distributed`.
It is designed to be used with `tests.utils.DistTestCase`.
Usage:
decorate a unittest testcase method with a `DistCall` instance::
class MyTests(unittest.TestCase):
@DistCall(nnodes=1, nproc_per_node=3, master_addr="localhost")
def test_compute(self):
...
the `test_compute` method should trigger different worker logic according to `dist.get_rank()`.
Multi-node tests require a fixed master_addr:master_port, with node_rank set manually in multiple scripts
or from environment variable "NODE_RANK".
"""
def __init__(
self,
nnodes: int = 1,
nproc_per_node: int = 1,
master_addr: str = "localhost",
master_port: Optional[int] = None,
node_rank: Optional[int] = None,
timeout=60,
init_method=None,
backend: Optional[str] = None,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
verbose: bool = False,
):
"""
Args:
nnodes: The number of nodes to use for distributed call.
nproc_per_node: The number of processes to call on each node.
master_addr: Master node (rank 0)'s address, should be either the IP address or the hostname of node 0.
master_port: Master node (rank 0)'s free port.
node_rank: The rank of the node, this could be set via environment variable "NODE_RANK".
timeout: Timeout for operations executed against the process group.
init_method: URL specifying how to initialize the process group.
Default is "env://" or "file:///d:/a_temp" (windows) if unspecified.
backend: The backend to use. Depending on build-time configurations,
valid values include ``mpi``, ``gloo``, and ``nccl``.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
verbose: whether to print NCCL debug info.
"""
self.nnodes = int(nnodes)
self.nproc_per_node = int(nproc_per_node)
if self.nnodes < 1 or self.nproc_per_node < 1:
raise ValueError(
f"number of nodes and processes per node must be >= 1, got {self.nnodes} and {self.nproc_per_node}"
)
self.node_rank = int(os.environ.get("NODE_RANK", "0")) if node_rank is None else int(node_rank)
self.master_addr = master_addr
self.master_port = np.random.randint(10000, 20000) if master_port is None else master_port
if backend is None:
self.backend = "nccl" if torch.distributed.is_nccl_available() and torch.cuda.is_available() else "gloo"
else:
self.backend = backend
self.init_method = init_method
if self.init_method is None and sys.platform == "win32":
self.init_method = "file:///d:/a_temp"
self.timeout = datetime.timedelta(0, timeout)
self.daemon = daemon
self.method = method
self.verbose = verbose
def run_process(self, func, local_rank, args, kwargs, results):
_env = os.environ.copy() # keep the original system env
try:
os.environ["MASTER_ADDR"] = self.master_addr
os.environ["MASTER_PORT"] = str(self.master_port)
os.environ["LOCAL_RANK"] = str(local_rank)
if self.verbose:
os.environ["NCCL_DEBUG"] = "INFO"
os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"
os.environ["NCCL_BLOCKING_WAIT"] = str(1)
os.environ["OMP_NUM_THREADS"] = str(1)
os.environ["WORLD_SIZE"] = str(self.nproc_per_node * self.nnodes)
os.environ["RANK"] = str(self.nproc_per_node * self.node_rank + local_rank)
if torch.cuda.is_available():
torch.cuda.set_device(int(local_rank))  # using device ids from CUDA_VISIBLE_DEVICES
dist.init_process_group(
backend=self.backend,
init_method=self.init_method,
timeout=self.timeout,
world_size=int(os.environ["WORLD_SIZE"]),
rank=int(os.environ["RANK"]),
)
func(*args, **kwargs)
# the primary node lives longer to
# avoid _store_based_barrier, RuntimeError: Broken pipe
# as the TCP store daemon is on the rank 0
if int(os.environ["RANK"]) == 0:
time.sleep(0.1)
results.put(True)
except Exception as e:
results.put(False)
raise e
finally:
os.environ.clear()
os.environ.update(_env)
try:
dist.destroy_process_group()
except RuntimeError as e:
warnings.warn(f"While closing process group: {e}.")
def __call__(self, obj):
if not torch.distributed.is_available():
return unittest.skipIf(True, "Skipping distributed tests because not torch.distributed.is_available()")(obj)
if torch.cuda.is_available() and torch.cuda.device_count() < self.nproc_per_node:
return unittest.skipIf(
True,
f"Skipping distributed tests because it requires {self.nnodes} devices "
f"but got {torch.cuda.device_count()}",
)(obj)
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
tmp = torch.multiprocessing.get_context(self.method)
processes = []
results = tmp.Queue()
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
for proc_rank in range(self.nproc_per_node):
p = tmp.Process(
target=self.run_process, args=(func, proc_rank, args, kwargs, results), daemon=self.daemon
)
p.start()
processes.append(p)
for p in processes:
p.join()
assert results.get(), "Distributed call failed."
_del_original_func(obj)
return _wrapper
class TimedCall:
"""
Wrap a test case so that it will run in a new process, raises a TimeoutError if the decorated method takes
more than `seconds` to finish. It is designed to be used with `tests.utils.DistTestCase`.
"""
def __init__(
self,
seconds: float = 60.0,
daemon: Optional[bool] = None,
method: Optional[str] = "spawn",
force_quit: bool = True,
skip_timing=False,
):
"""
Args:
seconds: timeout seconds.
daemon: the process’s daemon flag.
When daemon=None, the initial value is inherited from the creating process.
method: set the method which should be used to start a child process.
method can be 'fork', 'spawn' or 'forkserver'.
force_quit: whether to terminate the child process when `seconds` elapsed.
skip_timing: whether to skip the timing constraint.
this is useful to include some system conditions such as
`torch.cuda.is_available()`.
"""
self.timeout_seconds = seconds
self.daemon = daemon
self.force_quit = force_quit
self.skip_timing = skip_timing
self.method = method
@staticmethod
def run_process(func, args, kwargs, results):
try:
output = func(*args, **kwargs)
results.put(output)
except Exception as e:
e.traceback = traceback.format_exc()
results.put(e)
def __call__(self, obj):
if self.skip_timing:
return obj
_cache_original_func(obj)
@functools.wraps(obj)
def _wrapper(*args, **kwargs):
tmp = torch.multiprocessing.get_context(self.method)
func = _call_original_func
args = [obj.__name__, obj.__module__] + list(args)
results = tmp.Queue()
p = tmp.Process(target=TimedCall.run_process, args=(func, args, kwargs, results), daemon=self.daemon)
p.start()
p.join(timeout=self.timeout_seconds)
timeout_error = None
try:
if p.is_alive():
# create an Exception
timeout_error = torch.multiprocessing.TimeoutError(
f"'{obj.__name__}' in '{obj.__module__}' did not finish in {self.timeout_seconds}s."
)
if self.force_quit:
p.terminate()
else:
warnings.warn(
f"TimedCall: deadline ({self.timeout_seconds}s) "
f"reached but waiting for {obj.__name__} to finish."
)
finally:
p.join()
_del_original_func(obj)
res = None
try:
res = results.get(block=False)
except queue.Empty: # no result returned, took too long
pass
if isinstance(res, Exception): # other errors from obj
if hasattr(res, "traceback"):
raise RuntimeError(res.traceback) from res
raise res
if timeout_error: # no force_quit finished
raise timeout_error
return res
return _wrapper
_original_funcs = {}
def _cache_original_func(obj) -> None:
"""cache the original function by name, so that the decorator doesn't shadow it."""
_original_funcs[obj.__name__] = obj
def _del_original_func(obj):
"""pop the original function from cache."""
_original_funcs.pop(obj.__name__, None)
if torch.cuda.is_available(): # clean up the cached function
torch.cuda.synchronize()
torch.cuda.empty_cache()
def _call_original_func(name, module, *args, **kwargs):
if name not in _original_funcs:
_original_module = importlib.import_module(module) # reimport, refresh _original_funcs
if not hasattr(_original_module, name):
# refresh module doesn't work
raise RuntimeError(f"Could not recover the original {name} from {module}: {_original_funcs}.")
f = _original_funcs[name]
return f(*args, **kwargs)
class NumpyImageTestCase2D(unittest.TestCase):
im_shape = (128, 64)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_2d(
self.im_shape[0], self.im_shape[1], num_objs=4, rad_max=20, noise_max=0.0, num_seg_classes=self.num_classes
)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase2D(NumpyImageTestCase2D):
def setUp(self):
NumpyImageTestCase2D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
class NumpyImageTestCase3D(unittest.TestCase):
im_shape = (64, 48, 80)
input_channels = 1
output_channels = 4
num_classes = 3
def setUp(self):
im, msk = create_test_image_3d(
self.im_shape[0],
self.im_shape[1],
self.im_shape[2],
num_objs=4,
rad_max=20,
noise_max=0.0,
num_seg_classes=self.num_classes,
)
self.imt = im[None, None]
self.seg1 = (msk[None, None] > 0).astype(np.float32)
self.segn = msk[None, None]
class TorchImageTestCase3D(NumpyImageTestCase3D):
def setUp(self):
NumpyImageTestCase3D.setUp(self)
self.imt = torch.tensor(self.imt)
self.seg1 = torch.tensor(self.seg1)
self.segn = torch.tensor(self.segn)
def test_script_save(net, *inputs, device=None, rtol=1e-4, atol=0.0):
"""
Test the ability to save `net` as a Torchscript object, reload it, and apply inference. The value `inputs` is
forward-passed through the original and loaded copy of the network and their results returned.
The forward pass for both is done without gradient accumulation.
The test is currently performed on the CPU (see the TODO below about enabling CUDA in CI).
"""
# TODO: would be nice to use GPU if available, but it currently causes CI failures.
device = "cpu"
with tempfile.TemporaryDirectory() as tempdir:
convert_to_torchscript(
model=net,
filename_or_obj=os.path.join(tempdir, "model.ts"),
verify=True,
inputs=inputs,
device=device,
rtol=rtol,
atol=atol,
)
def download_url_or_skip_test(*args, **kwargs):
"""``download_url`` and skip the tests if any downloading error occurs."""
with skip_if_downloading_fails():
download_url(*args, **kwargs)
def query_memory(n=2):
"""
Find best n idle devices and return a string of device ids using the `nvidia-smi` command.
"""
bash_string = "nvidia-smi --query-gpu=power.draw,temperature.gpu,memory.used --format=csv,noheader,nounits"
try:
p1 = Popen(bash_string.split(), stdout=PIPE)
output, error = p1.communicate()
free_memory = [x.split(",") for x in output.decode("utf-8").split("\n")[:-1]]
free_memory = np.asarray(free_memory, dtype=float).T
free_memory[1] += free_memory[0] # combine 0/1 column measures
ids = np.lexsort(free_memory)[:n]
except (TypeError, IndexError, OSError):
ids = range(n) if isinstance(n, int) else []
return ",".join(f"{int(x)}" for x in ids)
TEST_NDARRAYS: Tuple[Callable] = (np.array, torch.as_tensor) # type: ignore
if torch.cuda.is_available():
gpu_tensor: Callable = partial(torch.as_tensor, device="cuda")
TEST_NDARRAYS = TEST_NDARRAYS + (gpu_tensor,) # type: ignore
TEST_DEVICES = [[torch.device("cpu")]]
if torch.cuda.is_available():
TEST_DEVICES.append([torch.device("cuda")])
if __name__ == "__main__":
print(query_memory())
|
py | 1a547febdf13d97a853cee399278c409568ee0c0 | '''
test_dnssec_nsecx - Tests NSECx support routines.
.. Copyright (c) 2015 Neustar, Inc. All rights reserved.
.. See COPYRIGHT.txt for full notice. See LICENSE.txt for terms and conditions.
'''
# pylint: skip-file
import dns.rdatatype
import dns_sprockets_lib.dnssec_nsecx as nsecx
def test_encode_salt():
tests = [
(None, None),
(1, None),
(b'', ''),
(b'1', '31'),
(b'a', '61'),
(b'Testing', '54657374696e67')]
for test in tests:
print(test)
assert nsecx.encode_salt(test[0]) == test[1]
def test_decode_salt():
tests = [
(None, None),
(1, None),
('', b''),
('1', None),
('31', b'1'),
('54657374696e67', b'Testing'),
('54657374696E67', b'Testing')]
for test in tests:
print(test)
assert nsecx.decode_salt(test[0]) == test[1]
def test_hash_nsec3_name():
tests = [
(None, '7f1962f2', 1, 15, None),
(1, '7f1962f2', 1, 15, None),
('', '7f1962f2', 1, 15, 'lsa969sfkmlb6c92ea510pohd54douqu'),
('.', '7f1962f2', 1, 15, 'lsa969sfkmlb6c92ea510pohd54douqu'),
('001.cst.net.', '7f1962f2', 1, 15, 'uqml1am96tftfmlkagtbs82isr050sh0'),
('001.cst.net.', '7F1962F2', 1, 15, 'uqml1am96tftfmlkagtbs82isr050sh0'),
('001.001.cst.net.', '7F1962F2', 1, 15, '06es9cggdrorfdd4ns9ahocaikldrrp8'),
('test.001.cst.net.', '7F1962F2', 1, 15, 'kqgpu8i0ai43nem212bd0079j5si5r3k'),
('test2.001.cst.net.', '7F1962F2', 1, 15, 'al016abkh6lvdig6503fs92kdmotqh4v'),
('example', 'aabbccdd', 1, 12, '0p9mhaveqvm6t7vbl5lop2u3t2rp3tom'),
('a.example', 'aabbccdd', 1, 12, '35mthgpgcu1qg68fab165klnsnk3dpvl'),
('ai.example', 'aabbccdd', 1, 12, 'gjeqe526plbf1g8mklp59enfd789njgi'),
('ns1.example', 'aabbccdd', 1, 12, '2t7b4g4vsa5smi47k61mv5bv1a22bojr'),
('ns2.example', 'aabbccdd', 1, 12, 'q04jkcevqvmu85r014c7dkba38o0ji5r'),
('w.example', 'aabbccdd', 1, 12, 'k8udemvp1j2f7eg6jebps17vp3n8i58h'),
('*.w.example', 'aabbccdd', 1, 12, 'r53bq7cc2uvmubfu5ocmm6pers9tk9en'),
('x.w.example', 'aabbccdd', 1, 12, 'b4um86eghhds6nea196smvmlo4ors995'),
('y.w.example', 'aabbccdd', 1, 12, 'ji6neoaepv8b5o6k4ev33abha8ht9fgc'),
('x.y.w.example', 'aabbccdd', 1, 12, '2vptu5timamqttgl4luu9kg21e0aor3s'),
('xx.example', 'aabbccdd', 1, 12, 't644ebqk9bibcna874givr6joj62mlhv'),
('2t7b4g4vsa5smi47k61mv5bv1a22bojr.example', 'aabbccdd', 1, 12,
'kohar7mbb8dc2ce8a9qvl8hon4k53uhi')]
for test in tests:
print(test)
assert nsecx.hash_nsec3_name(test[0], test[1], test[2], test[3], False) == test[4]
def test__windows_covers():
tests = [
([(0, None)], dns.rdatatype.A, False),
([(0, bytearray(b'\x00'))], dns.rdatatype.A, False),
([(0, bytearray(b'\x40'))], dns.rdatatype.A, True),
([(0, bytearray(b'\x40'))], dns.rdatatype.NS, False),
([(1, bytearray(b'\x40'))], dns.rdatatype.A, False),
([(1, bytearray(b'\x40'))], dns.rdatatype.CAA, True),
([(0, bytearray(b'\x00\x08'))], dns.rdatatype.PTR, True),
([(0, bytearray(
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08'))],
dns.rdatatype.AXFR, True)]
for test in tests:
print(test)
assert nsecx._windows_covers(test[0], test[1]) == test[2]
def test__windows_get_covered_types():
tests = [
([(0, None)], []),
([(0, bytearray(b'\x00'))], []),
([(0, bytearray(b'\x40'))], [dns.rdatatype.A]),
([(0, bytearray(b'\x60'))], [dns.rdatatype.A, dns.rdatatype.NS]),
([(0, bytearray(b'\x64'))], [
dns.rdatatype.A, dns.rdatatype.NS, dns.rdatatype.CNAME]),
([(1, bytearray(b'\x40'))], [dns.rdatatype.CAA]),
([(0, bytearray(b'\x40')),
(1, bytearray(b'\x40'))], [dns.rdatatype.A, dns.rdatatype.CAA]),
([(0, bytearray(b'\x40\x08')),
(1, bytearray(b'\x40'))], [
dns.rdatatype.A, dns.rdatatype.CAA, dns.rdatatype.PTR])]
for test in tests:
print(test)
assert sorted(nsecx._windows_get_covered_types(test[0])) == sorted(test[1])
# end of file
|
py | 1a548015272fb5757c43d12a37d9821ab77a2ce5 | from setuptools import setup
version = '0.0.0'
setup(
name = 'grid-plot',
version = version,
description = 'Plots data onto a grid.',
url = 'http://github.com/doggan/grid-plot',
license = 'MIT',
author='Shyam Guthikonda',
packages = ['grid_plot'],
install_requires = [
'Pillow == 2.7.0',
],
entry_points = """
[console_scripts]
grid-plot = grid_plot.command_line:main
"""
)
|
py | 1a5480bfa503bb3a4f3388d32e7d7f8385398b68 | from datetime import datetime
import numpy as np
import csv
from utils import total_gini
import tensorflow.compat.v1 as tf
import json
from pgd_attack import LinfPGDAttack
from utils_MLP_model import init_MLP_vars
with open('config.json') as config_file:
config = json.load(config_file)
w_vars, b_vars, stable_var, sparse_vars = init_MLP_vars()
def print_metrics(sess, model, train_dict, nat_dict, val_dict, val_dict_distil, test_dict, ii, args, summary_writer, dict_exp, experiment, global_step):
print('Step {}: ({})'.format(ii, datetime.now()))
try:
nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
print(' batch training nat accuracy {:.4}'.format(nat_acc * 100))
nat_xent = sess.run(model.xent, feed_dict=nat_dict)
print(' Nat Xent {:.4}'.format(nat_xent))
stable_xent = sess.run(model.stable_xent, feed_dict=nat_dict)
robust_xent = sess.run(model.robust_xent, feed_dict=nat_dict)
robust_stable_xent = sess.run(model.robust_stable_xent, feed_dict=nat_dict)
train_l2 = sess.run(model.l2_loss, feed_dict=nat_dict)
print(' Batch Training L2 Loss {:.4}'.format(train_l2))
except:
train_distil_loss = sess.run(model.distil_loss, feed_dict=nat_dict)
print(' Batch Training Distillation L2 Teacher Student Loss {:.4}'.format(train_distil_loss))
train_normal_acc = sess.run(model.accuracy, feed_dict=train_dict)
print(' Training accuracy {:.4}'.format(train_normal_acc * 100))
train_l2 = sess.run(model.l2_loss, feed_dict=train_dict)
print(' Training L2 Loss Ground Truth {:.4}'.format(train_l2))
summary3 = tf.Summary(value=[tf.Summary.Value(tag='TrainL2', simple_value=train_l2), ])
val_l2 = sess.run(model.l2_loss, feed_dict=val_dict)
print(' Val L2 Loss Ground Truth {:.4}'.format(val_l2))
test_l2 = sess.run(model.l2_loss, feed_dict=test_dict)
print(' Test L2 Loss Ground Truth {:.4}'.format(test_l2))
val_acc = sess.run(model.accuracy, feed_dict=val_dict)
print(' validation nat accuracy {:.4}'.format(val_acc * 100))
if args.n_distillations > 1:
train_l2 = sess.run(model.distil_loss, feed_dict=nat_dict)
print(' Training L2 Loss vs Teacher {:.4}'.format(train_l2))
val_distil_loss = sess.run(model.distil_loss, feed_dict=val_dict_distil)
print(' Validation L2 Loss Ground Truth {:.4}'.format(val_distil_loss))
summary7 = tf.Summary(value=[tf.Summary.Value(tag='ValTeacherL2', simple_value=val_distil_loss), ])
summary_writer.add_summary(summary7, global_step.eval(sess))
# summary1 = tf.Summary(value=[tf.Summary.Value(tag='TrainAcc', simple_value=train_normal_acc),])
summary2 = tf.Summary(value=[tf.Summary.Value(tag='ValAcc', simple_value=val_acc),])
summary4 = tf.Summary(value=[tf.Summary.Value(tag='ValL2', simple_value=val_l2), ])
summary6 = tf.Summary(value=[tf.Summary.Value(tag='TrainTeacherL2', simple_value=train_l2), ])
# summary_writer.add_summary(summary1, global_step.eval(sess))
summary_writer.add_summary(summary2, global_step.eval(sess))
# summary_writer.add_summary(summary3, global_step.eval(sess))
summary_writer.add_summary(summary4, global_step.eval(sess))
summary_writer.add_summary(summary6, global_step.eval(sess))
#summary_writer.add_text('args', str(args), global_step.eval(sess))
# summary5 = sess.run(model.summary, feed_dict=test_dict)
# summary_writer.add_summary(summary5, global_step.eval(sess))
test_acc = sess.run(model.accuracy, feed_dict=test_dict)
print(' Test accuracy {:.4}'.format(test_acc * 100))
# summary_writer.add_summary(test_acc, global_step.eval(sess))
if args.is_stable:
stable_var = sess.run(getattr(model, config['stability_variable']), feed_dict=nat_dict)
print(' Stability Variable {:.4}'.format(stable_var))
print(' Stable Xent {:.4}'.format(stable_xent))
if args.rho > 0 :
print(' Robust Xent {:.4}'.format(robust_xent))
if args.is_stable:
print(' Robust Stable Xent {:.4}'.format(robust_stable_xent))
for i in range(len(w_vars)):
if args.l0 > 0:
print(' Killed neurons - ' + w_vars[i], dict_exp[w_vars[i] + '_killed_neurons'][experiment])
print(' Killed input neurons - ' + w_vars[i], dict_exp[w_vars[i] + '_killed_input_features'][experiment])
print(' Non zero features percentage - ' + w_vars[i] , dict_exp[w_vars[i] + '_nonzero'][experiment])
regularizer = sess.run(model.regularizer, feed_dict=nat_dict)
print(' Regularizer', regularizer)
# try:
# summary = tf.Summary(value=[
# tf.Summary.Value(tag='Train Xent', simple_value= nat_xent),
# # tf.Summary.Value(tag='Val Acc', simple_value= val_acc),
# tf.Summary.Value(tag='Train Acc', simple_value= nat_acc),
# tf.Summary.Value(tag='Train Stable Xent', simple_value= stable_xent),
# tf.Summary.Value(tag='Train Robust Stable Xent', simple_value= robust_stable_xent),
# tf.Summary.Value(tag='Test Acc', simple_value= test_acc)])
# except:
# pass
for i in range(len(w_vars)):
if args.l0 > 0:
summary_sparse = tf.Summary(value=[
tf.Summary.Value(tag=w_vars[i] + '_killed_neurons', simple_value=dict_exp[w_vars[i] + '_killed_neurons'][experiment]),
tf.Summary.Value(tag=w_vars[i] + '_killed_inputs', simple_value=dict_exp[w_vars[i] + '_killed_input_features'][experiment]),
tf.Summary.Value(tag=w_vars[i] + '_nonzero', simple_value=dict_exp[w_vars[i] + '_nonzero'][experiment])])
summary_writer.add_summary(summary_sparse, global_step.eval(sess))
def update_dict_output(dict_exp, experiment, sess, test_acc, model, test_dict, num_iters):
dict_exp['test_accs'][experiment] = test_acc*100
dict_exp['iterations'][experiment] = num_iters
return dict_exp
def update_adv_acc(args, best_model, x_test, y_test, experiment, dict_exp):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
clip = True
if "uci" in args.data_set:
clip = False
for rho_test in args.robust_test:
attack = LinfPGDAttack(best_model, rho_test, config['k'], config['a'],
config['random_start'], config['loss_func'], clip)
x_test_adv = attack.perturb(x_test, y_test, sess)
adv_dict = {best_model.x_input: x_test_adv, best_model.y_input: y_test}
dict_exp['adv_test_accs'][rho_test][experiment] = sess.run(best_model.accuracy, feed_dict=adv_dict)
def print_stability_measures(dict_exp, args, num_experiments, batch_size, subset_ratio, tot_test_acc, tot_train_acc, max_train_steps, network_path):
avg_test_acc = tot_test_acc / num_experiments
avg_train_acc = tot_train_acc / num_experiments
std = np.array([float(k) for k in dict_exp['test_accs']]).std()
logit_stability = np.mean(np.std(dict_exp['logits_acc'], axis=0), axis=0)
gini_stability = total_gini(dict_exp['preds'].transpose())
print(' Average training accuracy {:.4}'.format(avg_train_acc * 100))
print(' Average testing accuracy {:.4}'.format(avg_test_acc * 100))
print(' Individual accuracies: \n', dict_exp['test_accs'])
print(' Adv testing accuracies', dict_exp['adv_test_accs'])
print(' Stability values', dict_exp[stable_var])
print(' Test Accuracy std {:.2}'.format(np.array([float(k) for k in dict_exp['test_accs']]).std()))
print(" Logits std", np.mean(np.mean(np.std(dict_exp['logits_acc'], axis=0), axis=0)))
print(" Gini stability", gini_stability)
weights_stability = print_layer_stability_ff(dict_exp, num_experiments)
weights_nonzero = [np.mean(dict_exp[w_vars[i]]) for i in range(len(w_vars))]
for i in range(len(w_vars)):
print(w_vars[i] + ' non zero percentage', weights_nonzero[i])
file = open(str('results_' + network_path + args.data_set + '.csv'), 'a+', newline='')
file_read = open(str('results_' + network_path + args.data_set + '.csv'), "r")
one_char = file_read.read(1)
writer = csv.writer(file)
if not len(one_char):
headers = []
headers += ['num_experiments', 'batch_size', 'subset_ratio', 'max_train_steps']
headers += ['test accuracy '+ str(i) for i in range(num_experiments)]
for key in dict_exp:
if key not in w_vars+ b_vars+ [stable_var]+ sparse_vars + ['adv_test_accs', 'preds']:
headers += ['Avg '+str(key)]
headers += ['Avg test adversarial acc for rho = '+ str(rho) for rho in args.robust_test]
headers += ['is_stable', 'rho', 'train_size', 'l2', 'l0', 'network_size', 'learning rate']
headers += [w_vars[i] + ' Nonzero weights' for i in range(len(w_vars))]
headers += [w_vars[i] + ' Stability' for i in range(len(w_vars))]
headers += ['std', 'logit_stability', 'gini_stability' ]
writer.writerow(headers)
with file:
cols = []
cols += [num_experiments, batch_size, subset_ratio, max_train_steps]
cols += [dict_exp['test_accs'][i] for i in range(num_experiments)]
for key in dict_exp:
if key not in w_vars+ b_vars+ [stable_var]+ sparse_vars + ['adv_test_accs', 'preds']:
cols += [np.mean(dict_exp[key])]
cols += [np.mean(dict_exp['adv_test_accs'][rho]) for rho in args.robust_test]
cols += [args.is_stable, args.rho, args.train_size, args.l2, args.l0, args.network_size, args.lr]
cols += weights_nonzero
cols += weights_stability
cols += [std, logit_stability, gini_stability ]
print(cols)
writer.writerow(cols)
def print_layer_stability_ff(dict_exp, num_experiments):
stabilities = []
for i in range(len(w_vars)):
w_i = [dict_exp[w_vars[i]][experiment].reshape(-1) for experiment in range(num_experiments)]
w_stability = np.mean(np.std(w_i, axis=0), axis=0)
print(w_vars[i] + " std", w_stability)
stabilities = stabilities + [w_stability]
return stabilities
|
py | 1a54827c567f4e4fdca9936c7082e3e9d5fefb9e | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualApplianceSitesOperations:
"""VirtualApplianceSitesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
site_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
site_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified site from a Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of the Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param site_name: The name of the site.
:type site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
site_name=site_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
site_name: str,
**kwargs
) -> "_models.VirtualApplianceSite":
"""Gets the specified Virtual Appliance Site.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of the Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param site_name: The name of the site.
:type site_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualApplianceSite, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.VirtualApplianceSite
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualApplianceSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualApplianceSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
site_name: str,
parameters: "_models.VirtualApplianceSite",
**kwargs
) -> "_models.VirtualApplianceSite":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualApplianceSite"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualApplianceSite')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualApplianceSite', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualApplianceSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
site_name: str,
parameters: "_models.VirtualApplianceSite",
**kwargs
) -> AsyncLROPoller["_models.VirtualApplianceSite"]:
"""Creates or updates the specified Network Virtual Appliance Site.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of the Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param site_name: The name of the site.
:type site_name: str
:param parameters: Parameters supplied to the create or update Network Virtual Appliance Site
operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.VirtualApplianceSite
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualApplianceSite or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_11_01.models.VirtualApplianceSite]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualApplianceSite"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
site_name=site_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualApplianceSite', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'siteName': self._serialize.url("site_name", site_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites/{siteName}'} # type: ignore
def list(
self,
resource_group_name: str,
network_virtual_appliance_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkVirtualApplianceSiteListResult"]:
"""Lists all Network Virtual Appliance Sites in a Network Virtual Appliance resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of the Network Virtual Appliance.
:type network_virtual_appliance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceSiteListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_11_01.models.NetworkVirtualApplianceSiteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceSiteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceSiteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}/virtualApplianceSites'} # type: ignore
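# Illustrative usage sketch (not part of the generated code; the client
# attribute name `virtual_appliance_sites` below is an assumption for the
# example and may differ in the actual SDK surface):
#
#     async with NetworkManagementClient(credential, subscription_id) as client:
#         poller = await client.virtual_appliance_sites.begin_create_or_update(
#             resource_group_name="rg", network_virtual_appliance_name="nva",
#             site_name="site1", parameters=site_parameters)
#         site = await poller.result()
#         async for s in client.virtual_appliance_sites.list("rg", "nva"):
#             print(s.name)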
|
py | 1a5482918627c78d2882cb032e28ed3c4a29ee9c | from django.utils.module_loading import import_string
from betty.conf.app import settings
def get_cache_flusher():
if settings.BETTY_CACHE_FLUSHER:
if callable(settings.BETTY_CACHE_FLUSHER):
return settings.BETTY_CACHE_FLUSHER
else:
return import_string(settings.BETTY_CACHE_FLUSHER)
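# Illustrative sketch of how this helper is typically consumed (the dotted path
# and the flusher call signature — a list of paths — are assumptions for the
# example, not guaranteed by betty itself):
#
#     # settings.py
#     #     BETTY_CACHE_FLUSHER = "myproject.cache.flush_paths"  # dotted path
#     #     or a callable: BETTY_CACHE_FLUSHER = lambda paths: None
#
#     flusher = get_cache_flusher()
#     if flusher is not None:
#         flusher(["/images/123/original/animated.gif"])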
|
py | 1a54851979bb0898dda45a061220edbfb3701003 | # Generated by Django 2.2.24 on 2022-02-09 20:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0081_embedconfig_map_style'),
]
operations = [
migrations.AlterField(
model_name='facilityclaim',
name='facility_type',
field=models.CharField(blank=True, choices=[('final product assembly', 'Final Product Assembly'), ('office hq', 'Office / HQ'), ('printing product dyeing and laundering', 'Printing, Product Dyeing and Laundering'), ('raw material processing or production', 'Raw Material Processing or Production'), ('textile or material production', 'Textile or Material Production'), ('warehousing distribution', 'Warehousing / Distribution')], help_text='The editable facility type for this claim.', max_length=300, null=True, verbose_name='facility type'),
),
migrations.AlterField(
model_name='historicalfacilityclaim',
name='facility_type',
field=models.CharField(blank=True, choices=[('final product assembly', 'Final Product Assembly'), ('office hq', 'Office / HQ'), ('printing product dyeing and laundering', 'Printing, Product Dyeing and Laundering'), ('raw material processing or production', 'Raw Material Processing or Production'), ('textile or material production', 'Textile or Material Production'), ('warehousing distribution', 'Warehousing / Distribution')], help_text='The editable facility type for this claim.', max_length=300, null=True, verbose_name='facility type'),
),
]
|
py | 1a5487ecd891a0ea8cee78d4205f6004dc9007f2 | from __future__ import print_function, absolute_import, unicode_literals, division
import unittest2 as unittest
import os
import re
from local import get_env
import rds_log_dog.s3_utils
class Test(unittest.TestCase):
@classmethod
def setUpClass(self):
(self.function_stack_name, self.bucket_stack_name,
self.lambda_function_name, self.bucket_name) = get_env()
def test_count_s3_rds_logs_equals_rds_instances(self):
folder_result = rds_log_dog.s3_utils.list_folders(
self.bucket_name, 'rds_logs')
# discover # of rds instances
import boto3
client = boto3.client('rds')
response = client.describe_db_instances()
self.assertEqual(len(response['DBInstances']), len(
folder_result), "number of rds instances doesn't match number of folders in rds_logs/")
if __name__ == '__main__':
unittest.main()
|
py | 1a5487f82e23406191a15a99e3d9eda2b746d605 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
import threading
from ceilometerclient import client as ceilometer_client
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
LOG = logging.getLogger(__name__)
def get_flavor_names(request):
# TODO(lsmola) The flavors can be set per project,
# so it should show only valid ones.
try:
flavors = nova.flavor_list(request, None)
return [f.name for f in flavors]
except Exception:
return ['m1.tiny', 'm1.small', 'm1.medium',
'm1.large', 'm1.xlarge']
def is_iterable(var):
"""Return True if the given is list or tuple."""
return (isinstance(var, (list, tuple)) or
issubclass(var.__class__, (list, tuple)))
def make_query(user_id=None, tenant_id=None, resource_id=None,
user_ids=None, tenant_ids=None, resource_ids=None):
"""Returns query built from given parameters.
    This query can then be used for querying resources, meters and
statistics.
:Parameters:
- `user_id`: user_id, has a priority over list of ids
- `tenant_id`: tenant_id, has a priority over list of ids
- `resource_id`: resource_id, has a priority over list of ids
- `user_ids`: list of user_ids
- `tenant_ids`: list of tenant_ids
- `resource_ids`: list of resource_ids
"""
user_ids = user_ids or []
tenant_ids = tenant_ids or []
resource_ids = resource_ids or []
query = []
if user_id:
user_ids = [user_id]
for u_id in user_ids:
query.append({"field": "user_id", "op": "eq", "value": u_id})
if tenant_id:
tenant_ids = [tenant_id]
for t_id in tenant_ids:
query.append({"field": "project_id", "op": "eq", "value": t_id})
if resource_id:
resource_ids = [resource_id]
for r_id in resource_ids:
query.append({"field": "resource_id", "op": "eq", "value": r_id})
return query
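# Example of the query structure produced (values are illustrative):
#
#     make_query(tenant_id="p1", resource_ids=["r1", "r2"])
#     # -> [{"field": "project_id", "op": "eq", "value": "p1"},
#     #     {"field": "resource_id", "op": "eq", "value": "r1"},
#     #     {"field": "resource_id", "op": "eq", "value": "r2"}]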
class Meter(base.APIResourceWrapper):
"""Represents one Ceilometer meter."""
_attrs = ['name', 'type', 'unit', 'resource_id', 'user_id', 'project_id']
def __init__(self, apiresource):
super(Meter, self).__init__(apiresource)
self._label = self.name
self._description = ""
def augment(self, label=None, description=None):
if label:
self._label = label
if description:
self._description = description
@property
def description(self):
return self._description
@property
def label(self):
return self._label
class Resource(base.APIResourceWrapper):
"""Represents one Ceilometer resource."""
_attrs = ['resource_id', 'source', 'user_id', 'project_id', 'metadata',
'links']
def __init__(self, apiresource, ceilometer_usage=None):
super(Resource, self).__init__(apiresource)
# Save empty strings to IDs rather than None, so it gets
# serialized correctly. We don't want 'None' strings.
self.project_id = self.project_id or ""
self.user_id = self.user_id or ""
self.resource_id = self.resource_id or ""
self._id = "%s__%s__%s" % (self.project_id,
self.user_id,
self.resource_id)
# Meters with statistics data
self._meters = {}
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and self.project_id:
self._tenant = ceilometer_usage.get_tenant(self.project_id)
else:
self._tenant = None
if ceilometer_usage and self.user_id:
self._user = ceilometer_usage.get_user(self.user_id)
else:
self._user = None
self._query = make_query(tenant_id=self.project_id,
user_id=self.user_id,
resource_id=self.resource_id)
@property
def name(self):
name = self.metadata.get("name", None)
display_name = self.metadata.get("display_name", None)
return name or display_name or ""
@property
def id(self):
return self._id
@property
def tenant(self):
return self._tenant
@property
def user(self):
return self._user
@property
def resource(self):
return self.resource_id
@property
def query(self):
return self._query
@property
def meters(self):
return self._meters
def get_meter(self, meter_name):
return self._meters.get(meter_name, None)
def set_meter(self, meter_name, value):
self._meters[meter_name] = value
class ResourceAggregate(Resource):
"""Represents aggregate of more resources together.
Aggregate of resources can be obtained by specifying
multiple ids in one parameter or by not specifying
one parameter.
It can also be specified by query directly.
Example:
We can obtain an aggregate of resources by specifying
multiple resource_ids in resource_id parameter in init.
Or we can specify only tenant_id, which will return
all resources of that tenant.
"""
def __init__(self, tenant_id=None, user_id=None, resource_id=None,
tenant_ids=None, user_ids=None, resource_ids=None,
ceilometer_usage=None, query=None, identifier=None):
self._id = identifier
self.tenant_id = None
self.user_id = None
self.resource_id = None
# Meters with statistics data
self._meters = {}
if query:
self._query = query
else:
# TODO(lsmola) make parallel obtaining of tenant and user
# make the threading here, thread join into resource_list
if ceilometer_usage and tenant_id:
self.tenant_id = tenant_id
self._tenant = ceilometer_usage.get_tenant(tenant_id)
else:
self._tenant = None
if ceilometer_usage and user_id:
self.user_id = user_id
self._user = ceilometer_usage.get_user(user_id)
else:
self._user = None
if resource_id:
self.resource_id = resource_id
self._query = make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id,
tenant_ids=tenant_ids,
user_ids=user_ids,
resource_ids=resource_ids)
@property
def id(self):
return self._id
class Sample(base.APIResourceWrapper):
"""Represents one Ceilometer sample."""
_attrs = ['counter_name', 'user_id', 'resource_id', 'timestamp',
'resource_metadata', 'source', 'counter_unit', 'counter_volume',
              'project_id', 'counter_type']
@property
def instance(self):
display_name = self.resource_metadata.get('display_name', None)
instance_id = self.resource_metadata.get('instance_id', None)
return display_name or instance_id
@property
def name(self):
name = self.resource_metadata.get("name", None)
display_name = self.resource_metadata.get("display_name", None)
return name or display_name or ""
class Statistic(base.APIResourceWrapper):
"""Represents one Ceilometer statistic."""
_attrs = ['period', 'period_start', 'period_end',
'count', 'min', 'max', 'sum', 'avg',
'duration', 'duration_start', 'duration_end']
@memoized
def ceilometerclient(request):
"""Initialization of Ceilometer client."""
endpoint = base.url_for(request, 'metering')
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
return ceilometer_client.Client('2', endpoint,
token=(lambda: request.user.token.id),
insecure=insecure,
cacert=cacert)
def resource_list(request, query=None, ceilometer_usage_object=None):
"""List the resources."""
resources = ceilometerclient(request).resources.list(q=query)
return [Resource(r, ceilometer_usage_object) for r in resources]
def sample_list(request, meter_name, query=None, limit=None):
"""List the samples for this meters."""
samples = ceilometerclient(request).samples.list(meter_name=meter_name,
q=query, limit=limit)
return [Sample(s) for s in samples]
def meter_list(request, query=None):
"""List the user's meters."""
meters = ceilometerclient(request).meters.list(query)
return [Meter(m) for m in meters]
def statistic_list(request, meter_name, query=None, period=None):
"""List of statistics."""
statistics = ceilometerclient(request).\
statistics.list(meter_name=meter_name, q=query, period=period)
return [Statistic(s) for s in statistics]
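# Minimal usage sketch tying the helpers above together (assumes a Django
# `request` for an authenticated user; the meter name and period are examples):
#
#     query = make_query(tenant_id=request.user.tenant_id)
#     stats = statistic_list(request, "cpu_util", query=query, period=3600)
#     for stat in stats:
#         print(stat.period_start, stat.avg)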
class ThreadedUpdateResourceWithStatistics(threading.Thread):
"""Multithread wrapper for update_with_statistics method of
resource_usage.
    The join logic is placed in the process_list class method. All resources
    will have their statistics attributes filled in separate threads.
The resource_usage object is shared between threads. Each thread is
updating one Resource.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `resources`: List of Resource or ResourceAggregate object,
that will be filled by statistic data.
- `resource_usage`: Wrapping resource usage object, that holds
all statistics data.
- `meter_names`: List of meter names of the statistics we want.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will be
returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the attribute name of the stats.
E.g. (avg, max, min...) If None is given, whole
        statistic object is returned.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
# TODO(lsmola) Can be removed once Ceilometer supports sample-api
# and group-by, so all of this optimization will not be necessary.
    # It is planned for somewhere around the I release.
def __init__(self, resource_usage, resource, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
super(ThreadedUpdateResourceWithStatistics, self).__init__()
self.resource_usage = resource_usage
self.resource = resource
self.meter_names = meter_names
self.period = period
self.stats_attr = stats_attr
self.additional_query = additional_query
def run(self):
# Run the job
self.resource_usage.update_with_statistics(
self.resource,
meter_names=self.meter_names, period=self.period,
stats_attr=self.stats_attr, additional_query=self.additional_query)
@classmethod
def process_list(cls, resource_usage, resources, meter_names=None,
period=None, filter_func=None, stats_attr=None,
additional_query=None):
threads = []
for resource in resources:
# add statistics data into resource
thread = cls(resource_usage, resource, meter_names=meter_names,
period=period, stats_attr=stats_attr,
additional_query=additional_query)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
class CeilometerUsage(object):
"""Represents wrapper of any Ceilometer queries.
One instance of this class should be shared between resources
as this class provides a place where users and tenants are
cached. So there are no duplicate queries to API.
This class also wraps Ceilometer API calls and provides parallel
HTTP calls to API.
    This class should also serve as a reasonable abstraction that will
    absorb a large amount of future optimization of the Ceilometer
    service without changing the interface.
"""
def __init__(self, request):
self._request = request
# Cached users and tenants.
self._users = {}
self._tenants = {}
def get_user(self, user_id):
"""Returns user fetched from API.
Caching the result, so it doesn't contact API twice with the
same query.
"""
user = self._users.get(user_id, None)
if not user:
user = keystone.user_get(self._request, user_id)
# caching the user, for later use
self._users[user_id] = user
return user
def preload_all_users(self):
"""Preloads all users into dictionary.
It's more effective to preload all users, rather than fetching many
users by separate API get calls.
"""
users = keystone.user_list(self._request)
# Cache all users on right indexes, this is more effective than to
# obtain large number of users one by one by keystone.user_get
for u in users:
self._users[u.id] = u
def get_tenant(self, tenant_id):
"""Returns tenant fetched from API.
Caching the result, so it doesn't contact API twice with the
same query.
"""
tenant = self._tenants.get(tenant_id, None)
if not tenant:
tenant = keystone.tenant_get(self._request, tenant_id)
# caching the tenant for later use
self._tenants[tenant_id] = tenant
return tenant
def preload_all_tenants(self):
"""Preloads all tenants into dictionary.
It's more effective to preload all tenants, rather than fetching each
tenant by separate API get calls.
"""
tenants, more = keystone.tenant_list(self._request)
# Cache all tenants on right indexes, this is more effective than to
# obtain large number of tenants one by one by keystone.tenant_get
for t in tenants:
self._tenants[t.id] = t
def global_data_get(self, used_cls=None, query=None,
with_statistics=False, additional_query=None,
with_users_and_tenants=True):
"""Obtaining a resources for table view.
It obtains resources with statistics data according to declaration
in used_cls class.
:Parameters:
        - `used_cls`: Class wrapper for usage data. It acts as a wrapper for
settings needed. See the call of this method for
details.
- `query`: Explicit query definition for fetching the resources. If
no query is provided, it takes a default_query from
used_cls. If no default query is provided, it fetches
all the resources and filters them by meters defined
in used_cls.
        - `with_statistics`: Define whether statistics data from the meters
defined in used_cls should be fetched.
Can be used to first obtain only the pure
resources, then with the statistics data by
AJAX.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
default_query = used_cls.default_query
query = query or default_query
filter_func = None
def filter_resources(resource):
"""Method for filtering resources by their links.rel attr.
The links.rel attributes contain all meters the resource has.
"""
for link in resource.links:
if link['rel'] in used_cls.meters:
return True
return False
if not query:
            # Not all resource types can be obtained by query; if there is no
            # query, we filter all resources with this function.
filter_func = filter_resources
if with_statistics:
# Will add statistic data into resources.
resources = self.resources_with_statistics(
query,
used_cls.meters,
filter_func=filter_func,
stats_attr=used_cls.stats_attr,
additional_query=additional_query,
with_users_and_tenants=with_users_and_tenants)
else:
# Will load only resources without statistical data.
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
return [used_cls(resource) for resource in resources]
def query_from_object_id(self, object_id):
"""Obtaining a query from resource id.
        The query can then be used to identify a resource in resources or meters
        API calls. The ID is built in the Resource initializer, or returned
        by Datatable into UpdateRow functionality.
"""
try:
tenant_id, user_id, resource_id = object_id.split("__")
except ValueError:
return []
return make_query(tenant_id=tenant_id, user_id=user_id,
resource_id=resource_id)
def update_with_statistics(self, resource, meter_names=None, period=None,
stats_attr=None, additional_query=None):
"""Adding statistical data into one Resource or ResourceAggregate.
It adds each statistic of each meter_names into the resource
attributes. Attribute name is the meter name with replaced '.' to '_'.
:Parameters:
- `resource`: Resource or ResourceAggregate object, that will
be filled by statistic data.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
            statistic is returned. If given, a faceted result will be
            returned, divided into given periods. Periods with no
data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
if not meter_names:
raise ValueError("meter_names and resources must be defined to be "
"able to obtain the statistics.")
# query for identifying one resource in meters
query = resource.query
if additional_query:
if not is_iterable(additional_query):
raise ValueError("Additional query must be list of"
" conditions. See the docs for format.")
query = query + additional_query
        # TODO(lsmola) a thread for each meter will probably be overkill,
        # but I should test, say, a thread pool with 100 threads
        # and apply it only to this code.
# Though I do expect Ceilometer will support bulk requests,
# so all of this optimization will not be necessary.
for meter in meter_names:
statistics = statistic_list(self._request, meter,
query=query, period=period)
meter = meter.replace(".", "_")
if statistics:
if stats_attr:
# I want to load only a specific attribute
resource.set_meter(
meter,
getattr(statistics[0], stats_attr, None))
else:
# I want a dictionary of all statistics
resource.set_meter(meter, statistics)
else:
resource.set_meter(meter, None)
return resource
def resources(self, query=None, filter_func=None,
with_users_and_tenants=False):
"""Obtaining resources with the query or filter_func.
Obtains resources and also fetch tenants and users associated
with those resources if with_users_and_tenants flag is true.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
if with_users_and_tenants:
ceilometer_usage_object = self
else:
ceilometer_usage_object = None
resources = resource_list(
self._request,
query=query, ceilometer_usage_object=ceilometer_usage_object)
if filter_func:
resources = [resource for resource in resources if
filter_func(resource)]
return resources
def resources_with_statistics(self, query=None, meter_names=None,
period=None, filter_func=None,
stats_attr=None, additional_query=None,
with_users_and_tenants=False):
"""Obtaining resources with statistics data inside.
:Parameters:
- `query`: Query for fetching the Ceilometer Resources.
- `filter_func`: Callable for filtering of the obtained
resources.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
- `with_users_and_tenants`: If true a user and a tenant object will
be added to each resource object.
"""
resources = self.resources(
query, filter_func=filter_func,
with_users_and_tenants=with_users_and_tenants)
ThreadedUpdateResourceWithStatistics.process_list(
self, resources,
meter_names=meter_names, period=period, stats_attr=stats_attr,
additional_query=additional_query)
return resources
def resource_aggregates(self, queries=None):
"""Obtaining resource aggregates with queries.
        Representing a resource aggregate by a query is the most general way
        to obtain resource aggregates.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
"""
resource_aggregates = []
for identifier, query in queries.items():
resource_aggregates.append(ResourceAggregate(query=query,
ceilometer_usage=None,
identifier=identifier))
return resource_aggregates
def resource_aggregates_with_statistics(self, queries=None,
meter_names=None, period=None,
filter_func=None, stats_attr=None,
additional_query=None):
"""Obtaining resource aggregates with statistics data inside.
:Parameters:
- `queries`: Dictionary of named queries that defines a bulk of
resource aggregates.
- `meter_names`: List of meter names of which we want the
statistics.
- `period`: In seconds. If no period is given, only one aggregate
statistic is returned. If given, a faceted result will
be returned, divided into given periods. Periods with
no data are ignored.
- `stats_attr`: String representing the specific name of the stats.
E.g. (avg, max, min...) If defined, meter attribute
will contain just the one value. If None is given,
meter attribute will contain the whole Statistic
object.
- `additional_query`: Additional query for the statistics.
E.g. timespan, etc.
"""
resource_aggregates = self.resource_aggregates(queries)
ThreadedUpdateResourceWithStatistics.process_list(
self,
resource_aggregates, meter_names=meter_names, period=period,
stats_attr=stats_attr, additional_query=additional_query)
return resource_aggregates
def diff_lists(a, b):
if not a:
return []
elif not b:
return a
else:
return list(set(a) - set(b))
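# Example (illustrative): diff_lists uses set arithmetic, so the result order
# is not guaranteed:
#
#     diff_lists(["cpu", "memory", "vcpus"], ["memory"])  # -> ["cpu", "vcpus"] (any order)
#     diff_lists([], ["memory"])                          # -> []
#     diff_lists(["cpu"], None)                           # -> ["cpu"]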
class Meters(object):
"""Class for listing of available meters.
    It lists the meters defined in this class that are available
    in the Ceilometer meter_list.
    It also stores information that is not available in Ceilometer, i.e.
    label and description.
"""
def __init__(self, request=None, ceilometer_meter_list=None):
# Storing the request.
self._request = request
# Storing the Ceilometer meter list
if ceilometer_meter_list:
self._ceilometer_meter_list = ceilometer_meter_list
else:
try:
self._ceilometer_meter_list = meter_list(request)
except Exception:
self._ceilometer_meter_list = []
exceptions.handle(self._request,
_('Unable to retrieve Ceilometer meter '
'list.'))
# Storing the meters info categorized by their services.
self._nova_meters_info = self._get_nova_meters_info()
self._neutron_meters_info = self._get_neutron_meters_info()
self._glance_meters_info = self._get_glance_meters_info()
self._cinder_meters_info = self._get_cinder_meters_info()
self._swift_meters_info = self._get_swift_meters_info()
self._kwapi_meters_info = self._get_kwapi_meters_info()
self._ipmi_meters_info = self._get_ipmi_meters_info()
# Storing the meters info of all services together.
all_services_meters = (self._nova_meters_info,
self._neutron_meters_info,
self._glance_meters_info,
self._cinder_meters_info,
self._swift_meters_info,
self._kwapi_meters_info,
self._ipmi_meters_info)
self._all_meters_info = {}
for service_meters in all_services_meters:
self._all_meters_info.update(dict([(meter_name, meter_info)
for meter_name, meter_info
in service_meters.items()]))
# Here will be the cached Meter objects, that will be reused for
# repeated listing.
self._cached_meters = {}
def list_all(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=only_meters,
except_meters=except_meters)
def list_nova(self, except_meters=None):
"""Returns a list of meters tied to nova.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._nova_meters_info.keys(),
except_meters=except_meters)
def list_neutron(self, except_meters=None):
"""Returns a list of meters tied to neutron.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._neutron_meters_info.keys(),
except_meters=except_meters)
def list_glance(self, except_meters=None):
"""Returns a list of meters tied to glance.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._glance_meters_info.keys(),
except_meters=except_meters)
def list_cinder(self, except_meters=None):
"""Returns a list of meters tied to cinder.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._cinder_meters_info.keys(),
except_meters=except_meters)
def list_swift(self, except_meters=None):
"""Returns a list of meters tied to swift.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._swift_meters_info.keys(),
except_meters=except_meters)
def list_kwapi(self, except_meters=None):
"""Returns a list of meters tied to kwapi.
:Parameters:
- `except_meters`: The list of meter names we don't want to show.
"""
return self._list(only_meters=self._kwapi_meters_info.keys(),
except_meters=except_meters)
def list_ipmi(self, except_meters=None):
"""Returns a list of meters tied to ipmi
:Parameters:
- `except_meters`: The list of meter names we don't want to show
"""
return self._list(only_meters=self._ipmi_meters_info.keys(),
except_meters=except_meters)
def _list(self, only_meters=None, except_meters=None):
"""Returns a list of meters based on the meters names.
:Parameters:
- `only_meters`: The list of meter names we want to show.
- `except_meters`: The list of meter names we don't want to show.
"""
# Get all wanted meter names.
if only_meters:
meter_names = only_meters
else:
meter_names = [meter_name for meter_name
in self._all_meters_info.keys()]
meter_names = diff_lists(meter_names, except_meters)
# Collect meters for wanted meter names.
return self._get_meters(meter_names)
def _get_meters(self, meter_names):
"""Obtain meters based on meter_names.
The meters that do not exist in Ceilometer meter list are left out.
:Parameters:
- `meter_names`: A list of meter names we want to fetch.
"""
meters = []
for meter_name in meter_names:
meter = self._get_meter(meter_name)
if meter:
meters.append(meter)
return meters
def _get_meter(self, meter_name):
"""Obtains a meter.
Obtains meter either from cache or from Ceilometer meter list
joined with statically defined meter info like label and description.
:Parameters:
- `meter_name`: A meter name we want to fetch.
"""
meter = self._cached_meters.get(meter_name, None)
if not meter:
meter_candidates = [m for m in self._ceilometer_meter_list
if m.name == meter_name]
if meter_candidates:
meter_info = self._all_meters_info.get(meter_name, None)
if meter_info:
label = meter_info["label"]
description = meter_info["description"]
else:
label = ""
description = ""
meter = meter_candidates[0]
meter.augment(label=label, description=description)
self._cached_meters[meter_name] = meter
return meter
def _get_nova_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
meters_info = OrderedDict([
("instance", {
'label': '',
'description': _("Existence of instance"),
}),
("instance:<type>", {
'label': '',
'description': _("Existence of instance <type> "
"(openstack types)"),
}),
("memory", {
'label': '',
'description': _("Volume of RAM"),
}),
("memory.usage", {
'label': '',
'description': _("Volume of RAM used"),
}),
("cpu", {
'label': '',
'description': _("CPU time used"),
}),
("cpu_util", {
'label': '',
'description': _("Average CPU utilization"),
}),
("vcpus", {
'label': '',
'description': _("Number of VCPUs"),
}),
("disk.read.requests", {
'label': '',
'description': _("Number of read requests"),
}),
("disk.write.requests", {
'label': '',
'description': _("Number of write requests"),
}),
("disk.read.bytes", {
'label': '',
'description': _("Volume of reads"),
}),
("disk.write.bytes", {
'label': '',
'description': _("Volume of writes"),
}),
("disk.read.requests.rate", {
'label': '',
'description': _("Average rate of read requests"),
}),
("disk.write.requests.rate", {
'label': '',
'description': _("Average rate of write requests"),
}),
("disk.read.bytes.rate", {
'label': '',
'description': _("Average rate of reads"),
}),
("disk.write.bytes.rate", {
'label': '',
'description': _("Average volume of writes"),
}),
("disk.root.size", {
'label': '',
'description': _("Size of root disk"),
}),
("disk.ephemeral.size", {
'label': '',
'description': _("Size of ephemeral disk"),
}),
("network.incoming.bytes", {
'label': '',
'description': _("Number of incoming bytes "
"on the network for a VM interface"),
}),
("network.outgoing.bytes", {
'label': '',
'description': _("Number of outgoing bytes "
"on the network for a VM interface"),
}),
("network.incoming.packets", {
'label': '',
'description': _("Number of incoming "
"packets for a VM interface"),
}),
("network.outgoing.packets", {
'label': '',
'description': _("Number of outgoing "
"packets for a VM interface"),
}),
("network.incoming.bytes.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"bytes on a VM network interface"),
}),
("network.outgoing.bytes.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"bytes on a VM network interface"),
}),
("network.incoming.packets.rate", {
'label': '',
'description': _("Average rate per sec of incoming "
"packets on a VM network interface"),
}),
("network.outgoing.packets.rate", {
'label': '',
'description': _("Average rate per sec of outgoing "
"packets on a VM network interface"),
}),
])
# Adding flavor based meters into meters_info dict
# TODO(lsmola) this kind of meter will be probably deprecated
# https://bugs.launchpad.net/ceilometer/+bug/1208365 . Delete it then.
for flavor in get_flavor_names(self._request):
name = 'instance:%s' % flavor
meters_info[name] = dict(meters_info["instance:<type>"])
meters_info[name]['description'] = (
_('Duration of instance type %s (openstack flavor)') %
flavor)
# TODO(lsmola) allow to set specific in local_settings. For all meters
# because users can have their own agents and meters.
return meters_info
def _get_neutron_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('network', {
'label': '',
'description': _("Existence of network"),
}),
('network.create', {
'label': '',
'description': _("Creation requests for this network"),
}),
('network.update', {
'label': '',
'description': _("Update requests for this network"),
}),
('subnet', {
'label': '',
'description': _("Existence of subnet"),
}),
('subnet.create', {
'label': '',
'description': _("Creation requests for this subnet"),
}),
('subnet.update', {
'label': '',
'description': _("Update requests for this subnet"),
}),
('port', {
'label': '',
'description': _("Existence of port"),
}),
('port.create', {
'label': '',
'description': _("Creation requests for this port"),
}),
('port.update', {
'label': '',
'description': _("Update requests for this port"),
}),
('router', {
'label': '',
'description': _("Existence of router"),
}),
('router.create', {
'label': '',
'description': _("Creation requests for this router"),
}),
('router.update', {
'label': '',
'description': _("Update requests for this router"),
}),
('ip.floating', {
'label': '',
'description': _("Existence of floating ip"),
}),
('ip.floating.create', {
'label': '',
'description': _("Creation requests for this floating ip"),
}),
('ip.floating.update', {
'label': '',
'description': _("Update requests for this floating ip"),
}),
])
def _get_glance_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('image', {
'label': '',
'description': _("Image existence check"),
}),
('image.size', {
'label': '',
'description': _("Uploaded image size"),
}),
('image.update', {
'label': '',
'description': _("Number of image updates"),
}),
('image.upload', {
'label': '',
'description': _("Number of image uploads"),
}),
('image.delete', {
'label': '',
'description': _("Number of image deletions"),
}),
('image.download', {
'label': '',
'description': _("Image is downloaded"),
}),
('image.serve', {
'label': '',
'description': _("Image is served out"),
}),
])
def _get_cinder_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('volume', {
'label': '',
'description': _("Existence of volume"),
}),
('volume.size', {
'label': '',
'description': _("Size of volume"),
}),
])
def _get_swift_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('storage.objects', {
'label': '',
'description': _("Number of objects"),
}),
('storage.objects.size', {
'label': '',
'description': _("Total size of stored objects"),
}),
('storage.objects.containers', {
'label': '',
'description': _("Number of containers"),
}),
('storage.objects.incoming.bytes', {
'label': '',
'description': _("Number of incoming bytes"),
}),
('storage.objects.outgoing.bytes', {
'label': '',
'description': _("Number of outgoing bytes"),
}),
('storage.api.request', {
'label': '',
'description': _("Number of API requests against swift"),
}),
])
def _get_kwapi_meters_info(self):
"""Returns additional info for each meter.
That will be used for augmenting the Ceilometer meter.
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('energy', {
'label': '',
'description': _("Amount of energy"),
}),
('power', {
'label': '',
'description': _("Power consumption"),
}),
])
def _get_ipmi_meters_info(self):
"""Returns additional info for each meter
That will be used for augmenting the Ceilometer meter
"""
# TODO(lsmola) Unless the Ceilometer will provide the information
# below, I need to define it as a static here. I will be joining this
# to info that I am able to obtain from Ceilometer meters, hopefully
# some day it will be supported all.
return OrderedDict([
('hardware.ipmi.node.power', {
'label': '',
'description': _("System Current Power"),
}),
('hardware.ipmi.fan', {
'label': '',
'description': _("Fan RPM"),
}),
('hardware.ipmi.temperature', {
'label': '',
'description': _("Sensor Temperature Reading"),
}),
('hardware.ipmi.current', {
'label': '',
'description': _("Sensor Current Reading"),
}),
('hardware.ipmi.voltage', {
'label': '',
'description': _("Sensor Voltage Reading"),
}),
('hardware.ipmi.node.temperature', {
'label': '',
'description': _("System Temperature Reading"),
}),
('hardware.ipmi.node.outlet_temperature', {
'label': '',
'description': _("System Outlet Temperature Reading"),
}),
('hardware.ipmi.node.airflow', {
'label': '',
'description': _("System Airflow Reading"),
}),
('hardware.ipmi.node.cups', {
'label': '',
'description': _("System CUPS Reading"),
}),
('hardware.ipmi.node.cpu_util', {
'label': '',
'description': _("System CPU Utility Reading"),
}),
('hardware.ipmi.node.mem_util', {
'label': '',
'description': _("System Memory Utility Reading"),
}),
('hardware.ipmi.node.io_util', {
'label': '',
'description': _("System IO Utility Reading"),
}),
])
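# Illustrative usage sketch of the classes defined above (assumes an
# authenticated Django `request`; meter names and ids are examples):
#
#     ceilometer_usage = CeilometerUsage(request)
#     ceilometer_usage.preload_all_tenants()
#     resources = ceilometer_usage.resources_with_statistics(
#         query=make_query(tenant_id="p1"),
#         meter_names=["cpu_util", "memory.usage"],
#         period=3600,
#         stats_attr="avg")
#
#     meters = Meters(request)
#     nova_meters = meters.list_nova(except_meters=["vcpus"])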
|
py | 1a548854f603addbbf75efdf09fe9319d717482b | import datetime
import logging
import os
from itertools import groupby
from math import ceil
from django.db.models import Max
from django.db.models import Sum
from le_utils.constants import content_kinds
from sqlalchemy import and_
from sqlalchemy import cast
from sqlalchemy import exists
from sqlalchemy import false
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import select
from .paths import get_content_file_name
from .paths import get_content_storage_file_path
from .sqlalchemybridge import Bridge
from .sqlalchemybridge import filter_by_uuids
from kolibri.core.content.apps import KolibriContentConfig
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import LocalFile
from kolibri.core.content.utils.sqlalchemybridge import filter_by_checksums
from kolibri.core.device.models import ContentCacheKey
logger = logging.getLogger(__name__)
CONTENT_APP_NAME = KolibriContentConfig.label
CHUNKSIZE = 10000
def _generate_MPTT_descendants_statement(mptt_values, ContentNodeTable):
"""
This logic is modified from:
https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L137
in order to render the result as a SQL Alchemy expression that we can use
in other queries.
"""
queries = []
# Group the resultant mptt data by tree_id and parent_id,
# this will allow us to consolidate contiguous siblings to reduce
# the total number of constraints.
# This logic is verbatim from Django MPTT, only the query construction
# has been translated from Django Q statements to SQL Alchemy and_ statements.
for group in groupby(
mptt_values,
key=lambda n: (
# tree id
n[0],
# parent id
n[1],
),
):
next_lft = None
for node in list(group[1]):
tree = node[0]
lft = min_val = node[2]
rght = max_val = node[3]
if next_lft is None:
next_lft = rght + 1
min_max = {"min": min_val, "max": max_val}
elif lft == next_lft:
if min_val < min_max["min"]:
min_max["min"] = min_val
if max_val > min_max["max"]:
min_max["max"] = max_val
next_lft = rght + 1
elif lft != next_lft:
queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
min_max = {"min": min_val, "max": max_val}
next_lft = rght + 1
queries.append(
and_(
ContentNodeTable.c.tree_id == tree,
ContentNodeTable.c.lft >= min_max["min"],
ContentNodeTable.c.rght <= min_max["max"],
)
)
return queries
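# Worked example of the grouping above (values are illustrative): given MPTT
# rows (tree_id, parent_id, lft, rght) for three siblings
#     (1, 10, 2, 3), (1, 10, 4, 7), (1, 10, 8, 9)
# each lft equals the previous rght + 1, so the rows are contiguous and
# collapse into a single constraint:
#     tree_id == 1 AND lft >= 2 AND rght <= 9
# A gap in the lft sequence starts a new (min, max) range and hence a new
# constraint.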
def _MPTT_descendant_ids_statement(
bridge, channel_id, node_ids, min_boundary, max_boundary
):
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
# Setup list to collect queries
or_queries = []
# First we fetch a list of non-topic ids from the specified node ids
# that match the specified tree boundary ranges
non_topic_results = connection.execute(
select([ContentNodeTable.c.id]).where(
and_(
ContentNodeTable.c.channel_id == channel_id,
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Also filter by the boundary conditions
# We are only interested in non-topic nodes that
# are inside the range
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
# Produce an id list for non topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
).fetchall()
non_topic_node_ids = [result[0] for result in non_topic_results]
# If we have any node ids that are for non-topics, then we add an explicit query
# to match against those node ids
if non_topic_node_ids:
or_queries.append(filter_by_uuids(ContentNodeTable.c.id, non_topic_node_ids))
# Now get the relevant MPTT values from the database for the specified node_ids
# for topic nodes in the specified lft/rght range.
# Query modified from:
# https://github.com/django-mptt/django-mptt/blob/38d46c26ca362c471b097ab96a3616b9b20fb883/mptt/managers.py#L123
mptt_values = connection.execute(
select(
[
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
ContentNodeTable.c.rght,
]
)
.order_by(
ContentNodeTable.c.tree_id,
ContentNodeTable.c.parent_id,
ContentNodeTable.c.lft,
)
.where(
and_(
ContentNodeTable.c.channel_id == channel_id,
filter_by_uuids(ContentNodeTable.c.id, node_ids),
# Add constraints specific to our requirements, in terms of batching:
# Also filter by the boundary conditions
# We are only interested in nodes that are ancestors of
# the nodes in the range, but they could be ancestors of any node
# in this range, so we filter the lft value by being less than
# or equal to the max_boundary, and the rght value by being
# greater than or equal to the min_boundary.
ContentNodeTable.c.lft <= max_boundary,
ContentNodeTable.c.rght >= min_boundary,
# And topics:
# Only select values for descendant constraints from topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
).fetchall()
# Extend the constraints we are filtering by with ones generated from the relevant
# MPTT values we have queried above.
or_queries.extend(
_generate_MPTT_descendants_statement(mptt_values, ContentNodeTable)
)
if not or_queries:
# No constraints that apply in this range, so therefore this query should always
# evaluate to False, because nothing can match it.
return select([ContentNodeTable.c.id]).where(false())
# Return a query that ors each of the constraints
return select([ContentNodeTable.c.id]).where(or_(*or_queries))
def _create_batch_update_statement(
bridge, channel_id, min_boundary, max_boundary, node_ids, exclude_node_ids
):
ContentNodeTable = bridge.get_table(ContentNode)
# Restrict the update statement to nodes falling within the boundaries
batch_statement = ContentNodeTable.update().where(
and_(
# Only update leaf nodes (non topics)
ContentNodeTable.c.kind != content_kinds.TOPIC,
# Only update nodes in the channel we specified
ContentNodeTable.c.channel_id == channel_id,
# Only select nodes inside the boundary conditions
ContentNodeTable.c.rght >= min_boundary,
ContentNodeTable.c.rght <= max_boundary,
)
)
if node_ids is not None:
# Construct a statement that restricts which nodes we update
# in this batch by the specified inclusion constraints
node_ids_statement = _MPTT_descendant_ids_statement(
bridge, channel_id, node_ids, min_boundary, max_boundary
)
# Add this statement to the query
batch_statement = batch_statement.where(
ContentNodeTable.c.id.in_(node_ids_statement)
)
if exclude_node_ids is not None:
# Construct a statement that restricts nodes we update
# in this batch by the specified exclusion constraints
exclude_node_ids_statement = _MPTT_descendant_ids_statement(
bridge, channel_id, exclude_node_ids, min_boundary, max_boundary
)
# Add this statement to the query
batch_statement = batch_statement.where(
~ContentNodeTable.c.id.in_(exclude_node_ids_statement)
)
return batch_statement
def _calculate_batch_params(bridge, channel_id, node_ids, exclude_node_ids):
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
# To chunk the tree, we first find the full extent of the tree - this gives the
# highest rght value for this channel.
max_rght = connection.execute(
select([func.max(ContentNodeTable.c.rght)]).where(
ContentNodeTable.c.channel_id == channel_id
)
).scalar()
# Count the total number of constraints
constraint_count = len(node_ids or []) + len(exclude_node_ids or [])
# Aim for a constraint per batch count of about 250 on average
# This means that there will be at most 750 parameters from the constraints
# and should therefore also limit the overall SQL expression size.
dynamic_chunksize = int(
min(CHUNKSIZE, ceil(250 * max_rght / (constraint_count or 1)))
)
return max_rght, dynamic_chunksize
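# Worked example of the batch sizing above (numbers are illustrative): with
# max_rght = 100000 and 5000 inclusion/exclusion constraints,
#     dynamic_chunksize = min(10000, ceil(250 * 100000 / 5000)) = 5000
# so each batch spans 5000 rght values and, assuming the constraints are spread
# evenly across the tree, touches roughly 250 of them, keeping the generated
# SQL expression for any single batch bounded.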
def set_leaf_nodes_invisible(channel_id, node_ids=None, exclude_node_ids=None):
"""
Set nodes in a channel as unavailable.
With no additional arguments, this will hide an entire channel.
With the additional nodes arguments, it will selectively flag nodes
as unavailable, based on the passed in ids, setting them as unavailable if
they are in node_ids, or descendants of those nodes, but not in
exclude_node_ids or descendants of those nodes.
"""
bridge = Bridge(app_name=CONTENT_APP_NAME)
connection = bridge.get_connection()
# Start a counter for the while loop
min_boundary = 1
# Calculate batch parameters
max_rght, dynamic_chunksize = _calculate_batch_params(
bridge, channel_id, node_ids, exclude_node_ids
)
logger.info(
"Removing availability of non-topic ContentNode objects in {} batches of {}".format(
int(ceil(max_rght / dynamic_chunksize)), dynamic_chunksize
)
)
while min_boundary < max_rght:
batch_statement = _create_batch_update_statement(
bridge,
channel_id,
min_boundary,
min_boundary + dynamic_chunksize,
node_ids,
exclude_node_ids,
)
# Execute the update for this batch
connection.execute(
batch_statement.values(available=False).execution_options(autocommit=True)
)
min_boundary += dynamic_chunksize
bridge.end()
def set_leaf_node_availability_from_local_file_availability(
channel_id, node_ids=None, exclude_node_ids=None
):
"""
Set nodes in a channel as available, based on their required files.
With no additional arguments, this will make every node in the channel
available or unavailable based on whether the files needed to render
those nodes are present on disk.
With the additional nodes arguments, it will selectively flag nodes
based on the passed in ids, marking their availability if
they are in node_ids, or descendants of those nodes, but not in
exclude_node_ids or descendants of those nodes.
Nodes in the channel not captured by the constraints will not have
their availability changed either way.
"""
bridge = Bridge(app_name=CONTENT_APP_NAME)
# SQL Alchemy reference to the content node table
ContentNodeTable = bridge.get_table(ContentNode)
# SQL Alchemy reference to the file table - a mapping from
# contentnodes to the files that they use
FileTable = bridge.get_table(File)
# SQL Alchemy reference to the localfile table which tracks
# information about the files on disk, such as availability
LocalFileTable = bridge.get_table(LocalFile)
connection = bridge.get_connection()
    # This statement defines the update condition for the contentnode:
    # running exists() on it (as is done below) will produce either
    # True, when the contentnode has the required files available
    # for rendering, or False otherwise.
contentnode_statement = (
        # We could select any property here, as it's the existence that matters.
select([1]).select_from(
# This does the first step in the many to many lookup for File
# and LocalFile.
FileTable.join(
LocalFileTable,
and_(
# This does the actual correlation between file and local file
FileTable.c.local_file_id == LocalFileTable.c.id,
# This only joins on LocalFile objects that we know
# have associated files on disk.
LocalFileTable.c.available == True, # noqa
),
)
)
# Only look at files that are required (not supplementary)
.where(FileTable.c.supplementary == False)
# Correlate between the contentnode id and the foreign key
# to the content node on the file table to complete the
# many to many lookup
.where(ContentNodeTable.c.id == FileTable.c.contentnode_id)
)
# Start a counter for the while loop
min_boundary = 1
# Calculate batch parameters
max_rght, dynamic_chunksize = _calculate_batch_params(
bridge, channel_id, node_ids, exclude_node_ids
)
logger.info(
"Setting availability of non-topic ContentNode objects based on LocalFile availability in {} batches of {}".format(
int(ceil(max_rght / dynamic_chunksize)), dynamic_chunksize
)
)
while min_boundary < max_rght:
batch_statement = _create_batch_update_statement(
bridge,
channel_id,
min_boundary,
min_boundary + dynamic_chunksize,
node_ids,
exclude_node_ids,
)
# Execute the update for this batch
connection.execute(
batch_statement.values(
available=exists(contentnode_statement)
).execution_options(autocommit=True)
)
min_boundary += dynamic_chunksize
bridge.end()
def mark_local_files_as_unavailable(checksums, destination=None):
mark_local_files_availability(checksums, False, destination=destination)
def mark_local_files_as_available(checksums, destination=None):
"""
Shortcut method to update database if we are sure that the files are available.
Can be used after successful downloads to flag availability without having to do expensive disk reads.
"""
mark_local_files_availability(checksums, True, destination=destination)
def mark_local_files_availability(checksums, availability, destination=None):
if checksums:
bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
LocalFileTable = bridge.get_table(LocalFile)
logger.info(
"Setting availability to {availability} of {number} LocalFile objects based on passed in checksums".format(
number=len(checksums), availability=availability
)
)
connection = bridge.get_connection()
trans = connection.begin()
for i in range(0, len(checksums), CHUNKSIZE):
connection.execute(
LocalFileTable.update()
.where(
filter_by_checksums(
LocalFileTable.c.id, checksums[i : i + CHUNKSIZE]
)
)
.values(available=availability)
)
trans.commit()
bridge.end()
def _check_file_availability(files):
checksums_to_set_available = []
checksums_to_set_unavailable = []
for file in files:
try:
# Update if the file exists, *and* the localfile is set as unavailable.
if os.path.exists(
get_content_storage_file_path(
get_content_file_name({"id": file[0], "extension": file[2]})
)
):
if not file[1]:
checksums_to_set_available.append(file[0])
# Update if the file does not exist, *and* the localfile is set as available.
else:
if file[1]:
checksums_to_set_unavailable.append(file[0])
except InvalidStorageFilenameError:
continue
return checksums_to_set_available, checksums_to_set_unavailable
def set_local_file_availability_from_disk(checksums=None, destination=None):
if type(checksums) == list and len(checksums) > CHUNKSIZE:
for i in range(0, len(checksums), CHUNKSIZE):
set_local_file_availability_from_disk(
checksums=checksums[i : i + CHUNKSIZE], destination=destination
)
return
bridge = Bridge(app_name=CONTENT_APP_NAME, sqlite_file_path=destination)
LocalFileTable = bridge.get_table(LocalFile)
query = select(
[LocalFileTable.c.id, LocalFileTable.c.available, LocalFileTable.c.extension]
)
if checksums is None:
logger.info(
"Setting availability of LocalFile objects based on disk availability"
)
elif type(checksums) == list:
logger.info(
"Setting availability of {number} LocalFile objects based on disk availability".format(
number=len(checksums)
)
)
query = query.where(filter_by_checksums(LocalFileTable.c.id, checksums))
else:
logger.info(
"Setting availability of LocalFile object with checksum {checksum} based on disk availability".format(
checksum=checksums
)
)
query = query.where(LocalFileTable.c.id == checksums)
connection = bridge.get_connection()
files = connection.execute(query).fetchall()
checksums_to_set_available, checksums_to_set_unavailable = _check_file_availability(
files
)
bridge.end()
mark_local_files_as_available(checksums_to_set_available, destination=destination)
mark_local_files_as_unavailable(
checksums_to_set_unavailable, destination=destination
)
def recurse_annotation_up_tree(channel_id):
bridge = Bridge(app_name=CONTENT_APP_NAME)
ContentNodeClass = bridge.get_class(ContentNode)
ContentNodeTable = bridge.get_table(ContentNode)
connection = bridge.get_connection()
node_depth = (
bridge.session.query(func.max(ContentNodeClass.level))
.filter_by(channel_id=channel_id)
.scalar()
)
logger.info(
"Annotating ContentNode objects with children for {levels} levels".format(
levels=node_depth
)
)
child = ContentNodeTable.alias()
# start a transaction
trans = connection.begin()
start = datetime.datetime.now()
# Update all leaf ContentNodes to have num_coach_content to 1 or 0
# Update all leaf ContentNodes to have on_device_resources to 1 or 0
connection.execute(
ContentNodeTable.update()
.where(
and_(
# In this channel
ContentNodeTable.c.channel_id == channel_id,
# That are not topics
ContentNodeTable.c.kind != content_kinds.TOPIC,
)
)
.values(
num_coach_contents=cast(ContentNodeTable.c.coach_content, Integer()),
on_device_resources=cast(ContentNodeTable.c.available, Integer()),
)
)
# Before starting set availability to False on all topics.
connection.execute(
ContentNodeTable.update()
.where(
and_(
# In this channel
ContentNodeTable.c.channel_id == channel_id,
# That are topics
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
.values(available=False)
)
# Expression to capture all available child nodes of a contentnode
available_nodes = select([child.c.available]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expressions for annotation of coach content
# Expression that will resolve a boolean value for all the available children
# of a content node, whereby if they all have coach_content flagged on them, it will be true,
# but otherwise false.
# Everything after the select statement should be identical to the available_nodes expression above.
if bridge.engine.name == "sqlite":
# Use a min function to simulate an AND.
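        # e.g. MIN over the children's 0/1 coach_content values is 1 only when every
        # available child is coach content, and 0 otherwise - matching bool_and on postgres.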
coach_content_nodes = select([func.min(child.c.coach_content)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
elif bridge.engine.name == "postgresql":
# Use the postgres boolean AND operator
coach_content_nodes = select([func.bool_and(child.c.coach_content)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of coach contents for each child node
# of a contentnode
coach_content_num = select([func.sum(child.c.num_coach_contents)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Expression that sums the total number of on_device_resources for each child node
# of a contentnode
on_device_num = select([func.sum(child.c.on_device_resources)]).where(
and_(
child.c.available == True, # noqa
ContentNodeTable.c.id == child.c.parent_id,
)
)
# Go from the deepest level to the shallowest
for level in range(node_depth, 0, -1):
logger.info(
"Annotating ContentNode objects with children for level {level}".format(
level=level
)
)
# Only modify topic availability here
connection.execute(
ContentNodeTable.update()
.where(
and_(
ContentNodeTable.c.level == level - 1,
ContentNodeTable.c.channel_id == channel_id,
ContentNodeTable.c.kind == content_kinds.TOPIC,
)
)
# Because we have set availability to False on all topics as a starting point
# we only need to make updates to topics with available children.
.where(exists(available_nodes))
.values(
available=exists(available_nodes),
coach_content=coach_content_nodes,
num_coach_contents=coach_content_num,
on_device_resources=on_device_num,
)
)
# commit the transaction
trans.commit()
elapsed = datetime.datetime.now() - start
logger.debug(
"Recursive topic tree annotation took {} seconds".format(elapsed.seconds)
)
bridge.end()
def calculate_dummy_progress_for_annotation(node_ids, exclude_node_ids, total_progress):
num_annotation_constraints = len(node_ids or []) + len(exclude_node_ids or [])
# Calculate a percentage of the total progress to denote to annotation
# between 1 and 10
annotation_proportion = min(10, max(1, int(num_annotation_constraints / 500)))
# Create some progress proportional to annotation task
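    # Worked example: 1500 node constraints -> proportion = min(10, max(1, 3)) = 3,
    # so with total_progress = 100 this returns int(3 * 100 / 97) = 3 extra progress units.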
return int(annotation_proportion * total_progress / (100 - annotation_proportion))
def propagate_forced_localfile_removal(localfiles_list):
files = File.objects.filter(supplementary=False, local_file__in=localfiles_list)
ContentNode.objects.filter(files__in=files).update(available=False)
for channel_id in ChannelMetadata.objects.all().values_list("id", flat=True):
recurse_annotation_up_tree(channel_id)
def update_content_metadata(
channel_id, node_ids=None, exclude_node_ids=None, public=None
):
set_leaf_node_availability_from_local_file_availability(
channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids
)
recurse_annotation_up_tree(channel_id)
set_channel_metadata_fields(channel_id, public=public)
ContentCacheKey.update_cache_key()
def set_content_visibility(
channel_id, checksums, node_ids=None, exclude_node_ids=None, public=None
):
mark_local_files_as_available(checksums)
update_content_metadata(
channel_id, node_ids=node_ids, exclude_node_ids=exclude_node_ids, public=public
)
def set_content_visibility_from_disk(channel_id):
set_local_file_availability_from_disk()
update_content_metadata(channel_id)
def set_content_invisible(channel_id, node_ids, exclude_node_ids):
set_leaf_nodes_invisible(channel_id, node_ids, exclude_node_ids)
recurse_annotation_up_tree(channel_id)
set_channel_metadata_fields(channel_id)
ContentCacheKey.update_cache_key()
def set_channel_metadata_fields(channel_id, public=None):
channel = ChannelMetadata.objects.get(id=channel_id)
calculate_published_size(channel)
calculate_total_resource_count(channel)
calculate_included_languages(channel)
calculate_next_order(channel)
if public is not None:
channel.public = public
channel.save()
def files_for_nodes(nodes):
return LocalFile.objects.filter(files__contentnode__in=nodes)
def total_file_size(files_or_nodes):
if issubclass(files_or_nodes.model, LocalFile):
localfiles = files_or_nodes
elif issubclass(files_or_nodes.model, ContentNode):
localfiles = files_for_nodes(files_or_nodes)
else:
raise TypeError("Expected queryset for LocalFile or ContentNode")
return localfiles.distinct().aggregate(Sum("file_size"))["file_size__sum"] or 0
def calculate_published_size(channel):
content_nodes = ContentNode.objects.filter(channel_id=channel.id)
channel.published_size = total_file_size(
files_for_nodes(content_nodes).filter(available=True)
)
channel.save()
def calculate_total_resource_count(channel):
content_nodes = ContentNode.objects.filter(channel_id=channel.id)
channel.total_resource_count = (
content_nodes.filter(available=True)
.exclude(kind=content_kinds.TOPIC)
.dedupe_by_content_id()
.count()
)
channel.save()
def calculate_included_languages(channel):
content_nodes = ContentNode.objects.filter(
channel_id=channel.id, available=True
).exclude(lang=None)
languages = content_nodes.order_by("lang").values_list("lang", flat=True).distinct()
channel.included_languages.add(*list(languages))
def calculate_next_order(channel, model=ChannelMetadata):
if channel.order is None or channel.order == 0:
max_order = model.objects.aggregate(Max("order")).get("order__max", 0)
if max_order is None:
max_order = 0
channel.order = max_order + 1
channel.save()
|
py | 1a54892cef65e65a93f56c76ca1afcd073fcfc83 | """ Evo-LeViT in PyTorch
A PyTorch implementation of Evo-LeViT as described in
'Evo-ViT: Slow-Fast Token Evolution for Dynamic Vision Transformer'
The code is modified from LeViT as described in
'LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference' - https://arxiv.org/abs/2104.01136
The official code of LeViT is released and available at https://github.com/facebookresearch/LeViT
"""
import torch
import utils
import torch.nn as nn
from timm.models.vision_transformer import trunc_normal_
from timm.models.registry import register_model
specification = {
'EvoLeViT_128S': {
'C': '128_256_384', 'D': 16, 'N': '4_6_8', 'X': '2_3_4', 'drop_path': 0,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth'},
'EvoLeViT_128': {
'C': '128_256_384', 'D': 16, 'N': '4_8_12', 'X': '4_4_4', 'drop_path': 0,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth'},
'EvoLeViT_192': {
'C': '192_288_384', 'D': 32, 'N': '3_5_6', 'X': '4_4_4', 'drop_path': 0,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth'},
'EvoLeViT_256': {
'C': '256_384_512', 'D': 32, 'N': '4_6_8', 'X': '4_4_4', 'drop_path': 0,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth'},
'EvoLeViT_384': {
'C': '384_512_768', 'D': 32, 'N': '6_9_12', 'X': '4_4_4', 'drop_path': 0.1,
'weights': 'https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth'},
}
prune_ratio_list = {
'EvoLeViT_128S': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
'EvoLeViT_128': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]],
'EvoLeViT_192': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]],
'EvoLeViT_256': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]],
'EvoLeViT_384': [[1, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5]],
}
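# Token keep ratios: the first list gives a per-block ratio for the sequential blocks
# (1 = keep all tokens, 0.5 = keep the top half ranked by global attention), and the
# second list gives the per-stage ratio used when stage-wise pruning is enabled.
# (This reading is inferred from how prune_ratio is consumed in LeViT.__init__ below.)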
__all__ = list(specification.keys())
@register_model
def EvoLeViT_128S(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_128S'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_128S'])
@register_model
def EvoLeViT_128(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_128'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_128'])
@register_model
def EvoLeViT_192(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_192'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_192'])
@register_model
def EvoLeViT_256(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_256'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_256'])
@register_model
def EvoLeViT_384(num_classes=1000, distillation=True,
pretrained=False, fuse=False):
return model_factory(**specification['EvoLeViT_384'], num_classes=num_classes,
distillation=distillation, pretrained=pretrained, fuse=fuse,
prune_ratio=prune_ratio_list['EvoLeViT_384'])
global_attn = 0
ori_indices = None
learn_tradeoff_mode = True
def easy_gather(x, indices):
# x: B,N,C; indices: B,N
B, N, C = x.shape
N_new = indices.shape[1]
offset = torch.arange(B, dtype=torch.long, device=x.device).view(B, 1) * N
indices = indices + offset
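    # e.g. with B=2, N=3 and indices=[[2, 0, 1], [1, 2, 0]] the offsets [0, 3] turn the
    # per-batch indices into flat ones [[2, 0, 1], [4, 5, 3]], so a single lookup over the
    # (B*N, C) view reorders each batch independently (illustrative values only).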
out = x.reshape(B * N, C)[indices.view(-1)].reshape(B, N_new, C)
return out
def merge_tokens(x_drop, score):
# score B,N
# scale
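    # Collapse the pruned tokens into one representative token: a weighted average in which
    # each token's weight is its share of the accumulated global attention score.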
weight = score / torch.sum(score, dim=1, keepdim=True)
x_drop = weight.unsqueeze(-1) * x_drop
return torch.sum(x_drop, dim=1, keepdim=True)
class CatModule(torch.nn.Module):
def __init__(self, m1, m2, prune_ratio, N):
super().__init__()
self.m1 = m1
self.m2 = m2
self.prune_ratio = prune_ratio
# self.i = i
if prune_ratio < 1.0:
N_ = N - int(N * prune_ratio)
self.drop_fc = nn.AdaptiveAvgPool1d(1)
# self.recover_fc=nn.Linear(1,N_)
def set_prune_ratio(self, prune_ratio):
self.prune_ratio = prune_ratio
def forward(self, x_):
global global_attn # ga
global ori_indices # oi
if self.prune_ratio < 1:
x = x_[:, 1:] # split out cls token
N = x.shape[1]
N_ = int(N * self.prune_ratio)
indices = torch.argsort(global_attn, dim=1, descending=True)
x_ga_oi = torch.cat((x, global_attn.unsqueeze(-1), ori_indices.unsqueeze(-1)), dim=-1)
x_ga_oi = easy_gather(x_ga_oi, indices)
x_sorted, global_attn, ori_indices = x_ga_oi[:, :, :-2], x_ga_oi[:, :, -2], x_ga_oi[:, :, -1]
if self.training:
x_ = torch.cat((x_[:, :1], x_sorted), dim=1)
else:
x_[:, 1:] = x_sorted
x = x_[:, :N_ + 1]
x_drop = x_[:, N_ + 1:]
add_token = merge_tokens(x_drop, global_attn[:, N_:]) # B,1,C
x = torch.cat((x, add_token), dim=1) # B,N+1,C
x, raw_x1 = self.m1(x)
x, raw_x2 = self.m2(x)
x = x[:, :-1]
# fast update via skip connection
add_token1 = raw_x1[:, -1:]
add_token2 = raw_x2[:, -1:]
x_drop = x_drop + add_token1.expand(-1, x_drop.shape[1], -1) + add_token2.expand(-1, x_drop.shape[1], -1)
x_ = torch.cat((x, x_drop), dim=1)
# x_[:, N_ + 1:] = x_drop
# x_[:, :N_ + 1] = x
else:
x_, _ = self.m1(x_)
x_, _ = self.m2(x_)
return x_
class StageModule(torch.nn.Module):
def __init__(self, m, prune_ratio):
super().__init__()
self.m = m
self.prune_ratio = prune_ratio
def forward(self, x_):
global global_attn # ga
global ori_indices # oi
if isinstance(x_, tuple):
x_ = x_[0]
if self.prune_ratio < 1:
x = x_[:, 1:] # split out cls token
N = x.shape[1]
N_ = int(N * self.prune_ratio)
indices = torch.argsort(global_attn, dim=1, descending=True)
x_ga_oi = torch.cat((x, global_attn.unsqueeze(-1), ori_indices.unsqueeze(-1)), dim=-1)
x_ga_oi = easy_gather(x_ga_oi, indices)
x_sorted, global_attn, ori_indices = x_ga_oi[:, :, :-2], x_ga_oi[:, :, -2], x_ga_oi[:, :, -1]
if self.training:
x_ = torch.cat((x_[:, :1], x_sorted), dim=1)
else:
x_[:, 1:] = x_sorted
x = x_[:, :N_ + 1]
x_drop = x_[:, N_ + 1:]
merge_weight = global_attn[:, N_:]
add_token = merge_tokens(x_drop, merge_weight) # B,1,C
x = torch.cat((x, add_token), dim=1) # B,N+1,C
raw_total = 0
for blk in self.m:
x, raw = blk(x)
raw_total = raw_total + raw[:, -1:]
x_drop = x_drop + raw_total.expand(-1, x_drop.shape[1], -1)
x = x[:, :-1]
if self.training:
x_ = torch.cat((x, x_drop), dim=1)
else:
x_[:, N_ + 1:] = x_drop
x_[:, :N_ + 1] = x
else:
x_ = self.m(x_)
return x_
class Conv2d_BN(torch.nn.Sequential):
def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
groups=1, bn_weight_init=1, resolution=-10000):
super().__init__()
self.add_module('c', torch.nn.Conv2d(
a, b, ks, stride, pad, dilation, groups, bias=False))
bn = torch.nn.BatchNorm2d(b)
torch.nn.init.constant_(bn.weight, bn_weight_init)
torch.nn.init.constant_(bn.bias, 0)
self.add_module('bn', bn)
@torch.no_grad()
def fuse(self):
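        # Fold the BatchNorm into the preceding convolution:
        # w' = w * gamma / sqrt(running_var + eps), b' = beta - running_mean * gamma / sqrt(running_var + eps)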
c, bn = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps) ** 0.5
m = torch.nn.Conv2d(w.size(1), w.size(
0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation,
groups=self.c.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class Linear_BN(torch.nn.Sequential):
def __init__(self, a, b, bn_weight_init=1, resolution=-100000):
super().__init__()
self.add_module('c', torch.nn.Linear(a, b, bias=False))
bn = torch.nn.BatchNorm1d(b)
torch.nn.init.constant_(bn.weight, bn_weight_init)
torch.nn.init.constant_(bn.bias, 0)
self.add_module('bn', bn)
@torch.no_grad()
def fuse(self):
l, bn = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
w = l.weight * w[:, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps) ** 0.5
m = torch.nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
def forward(self, x):
l, bn = self._modules.values()
x = l(x)
return bn(x.flatten(0, 1)).reshape_as(x)
class BN_Linear(torch.nn.Sequential):
def __init__(self, a, b, bias=True, std=0.02):
super().__init__()
self.add_module('bn', torch.nn.BatchNorm1d(a))
l = torch.nn.Linear(a, b, bias=bias)
trunc_normal_(l.weight, std=std)
if bias:
torch.nn.init.constant_(l.bias, 0)
self.add_module('l', l)
@torch.no_grad()
def fuse(self):
bn, l = self._modules.values()
w = bn.weight / (bn.running_var + bn.eps) ** 0.5
b = bn.bias - self.bn.running_mean * \
self.bn.weight / (bn.running_var + bn.eps) ** 0.5
w = l.weight * w[None, :]
if l.bias is None:
b = b @ self.l.weight.T
else:
b = (l.weight @ b[:, None]).view(-1) + self.l.bias
m = torch.nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
def b16(n, activation, resolution=224):
return torch.nn.Sequential(
Conv2d_BN(3, n // 8, 3, 2, 1, resolution=resolution),
activation(),
Conv2d_BN(n // 8, n // 4, 3, 2, 1, resolution=resolution // 2),
activation(),
Conv2d_BN(n // 4, n // 2, 3, 2, 1, resolution=resolution // 4),
activation(),
Conv2d_BN(n // 2, n, 3, 2, 1, resolution=resolution // 8))
class Residual(torch.nn.Module):
def __init__(self, m, drop, out_raw=False):
super().__init__()
self.m = m
self.drop = drop
self.out_raw = out_raw
def set_prune_ratio(self, prune_ratio):
pass
def forward(self, x):
if isinstance(x, tuple):
x = x[0]
if self.training and self.drop > 0:
raw = self.m(x) * torch.rand(x.size(0), 1, 1,
device=x.device).ge_(self.drop).div(1 - self.drop).detach()
else:
raw = self.m(x)
if self.out_raw:
return x + raw, raw
else:
return x + raw
class Attention(torch.nn.Module):
def __init__(self, dim, key_dim, num_heads=8,
attn_ratio=4,
activation=None,
resolution=14, posembed=False, global_attn_tradeoff=0.5):
super().__init__()
self.tradeoff = global_attn_tradeoff
self.learn_tradeoff = torch.nn.Parameter(torch.Tensor([0]))
self.sigmoid = torch.nn.Sigmoid()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.nh_kd = nh_kd = key_dim * num_heads
self.d = int(attn_ratio * key_dim)
self.dh = int(attn_ratio * key_dim) * num_heads
self.attn_ratio = attn_ratio
h = self.dh + nh_kd * 2
self.qkv = Linear_BN(dim, h, resolution=resolution)
self.proj = torch.nn.Sequential(activation(), Linear_BN(
self.dh, dim, bn_weight_init=0, resolution=resolution))
self.pos_embed = posembed
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and hasattr(self, 'ab'):
del self.ab
def forward(self, x): # x (B,N,C)
global global_attn
global learn_tradeoff_mode
B, N, C = x.shape
qkv = self.qkv(x)
q, k, v = qkv.view(B, N, self.num_heads, -
1).split([self.key_dim, self.key_dim, self.d], dim=3)
q = q.permute(0, 2, 1, 3)
k = k.permute(0, 2, 1, 3)
v = v.permute(0, 2, 1, 3)
attn_raw = (q @ k.transpose(-2, -1)) * self.scale
attn = attn_raw.softmax(dim=-1)
# update global attn
if learn_tradeoff_mode:
tradeoff = self.sigmoid(self.learn_tradeoff)
else:
tradeoff = self.tradeoff
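        # Update the running global attention as a convex mix of its previous value and the
        # current CLS-to-patch attention, weighted by the (optionally learned) tradeoff;
        # scores belonging to already-pruned tokens are carried over unchanged.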
if isinstance(global_attn, int):
cls_attn = torch.mean(attn[:, :, 0, 1:], dim=1) # B,N
global_attn = cls_attn
else:
if global_attn.shape[1] - N + 2 == 1:
# no additional token and no pruning
cls_attn = torch.mean(attn[:, :, 0, 1:], dim=1)
global_attn = (1 - tradeoff) * global_attn + tradeoff * cls_attn
else:
cls_attn = torch.mean(attn[:, :, 0, 1:-1], dim=1)
if self.training:
temp_attn = (1 - tradeoff) * global_attn[:, :N - 2] + tradeoff * cls_attn
global_attn = torch.cat((temp_attn, global_attn[:, N - 2:]), dim=1)
else:
global_attn[:, :N - 2] = (1 - tradeoff) * global_attn[:, :N - 2] + tradeoff * cls_attn
x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh)
x = self.proj(x)
return x
class Subsample(torch.nn.Module):
def __init__(self, stride, resolution):
super().__init__()
self.stride = stride
self.resolution = resolution
def forward(self, x, with_cls=True):
if with_cls:
B, N, C = x.shape
x1 = x[:, 1:, :]
x1 = x1.view(B, self.resolution, self.resolution, C)[
:, ::self.stride, ::self.stride].reshape(B, -1, C)
x = torch.cat((x[:, :1, :], x1), dim=1)
else:
B, N, C = x.shape
x = x.view(B, self.resolution, self.resolution, C)[
:, ::self.stride, ::self.stride].reshape(B, -1, C)
return x
class AttentionSubsample(torch.nn.Module):
def __init__(self, in_dim, out_dim, key_dim, num_heads=8,
attn_ratio=2,
activation=None,
stride=2,
resolution=14, resolution_=7, posembed=False, global_attn_tradeoff=0.5):
super().__init__()
self.tradeoff = global_attn_tradeoff
self.learn_tradeoff = torch.nn.Parameter(torch.Tensor([0]))
self.sigmoid = torch.nn.Sigmoid()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.nh_kd = nh_kd = key_dim * num_heads
self.d = int(attn_ratio * key_dim)
self.dh = int(attn_ratio * key_dim) * self.num_heads
self.attn_ratio = attn_ratio
self.resolution_ = resolution_
self.resolution_2 = resolution_ ** 2
h = self.dh + nh_kd
self.kv = Linear_BN(in_dim, h, resolution=resolution)
self.q = torch.nn.Sequential(
Subsample(stride, resolution),
Linear_BN(in_dim, nh_kd, resolution=resolution_))
self.proj = torch.nn.Sequential(activation(), Linear_BN(
self.dh, out_dim, resolution=resolution_))
self.pos_embed = posembed
if posembed:
self.poss = nn.Parameter(torch.zeros(1, resolution ** 2 + 1, in_dim))
trunc_normal_(self.poss, std=.02)
self.stride = stride
self.resolution = resolution
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and hasattr(self, 'ab'):
del self.ab
def set_prune_ratio(self, prune_ratio):
pass
def forward(self, x):
global global_attn # ga
global ori_indices # oi
global learn_tradeoff_mode
if isinstance(x, tuple):
x = x[0]
# recover sequence
old_global_scale = torch.sum(global_attn, dim=1, keepdim=True)
x_patch = x[:, 1:]
indices = torch.argsort(ori_indices, dim=1)
x_ga_oi = torch.cat((x_patch, global_attn.unsqueeze(-1), ori_indices.unsqueeze(-1)), dim=-1)
x_ga_oi = easy_gather(x_ga_oi, indices)
x_patch, ga_oi = x_ga_oi[:, :, :-2], x_ga_oi[:, :, -2:]
# subsample global attn and ori indices
ga_oi = self.q[0](ga_oi, False)
global_attn, ori_indices = ga_oi[:, :, 0], ga_oi[:, :, 1]
# global_attn, ori_indices = ga_oi[:, :, 0], ga_oi[:, :, 1]
if self.training:
x = torch.cat((x[:, :1], x_patch), dim=1)
else:
x[:, 1:] = x_patch
x = x + self.poss
B, N, C = x.shape
k, v = self.kv(x).view(B, N, self.num_heads, -
1).split([self.key_dim, self.d], dim=3)
k = k.permute(0, 2, 1, 3) # BHNC
v = v.permute(0, 2, 1, 3) # BHNC
q = self.q(x).view(B, self.resolution_2 + 1, self.num_heads,
self.key_dim).permute(0, 2, 1, 3)
attn_raw = (q @ k.transpose(-2, -1)) * self.scale
attn = attn_raw.softmax(dim=-1)
cls_attn = torch.mean(attn[:, :, 0, 1:], dim=1) # B,N
cls_attn = self.q[0](cls_attn.unsqueeze(-1), False).squeeze(-1)
if learn_tradeoff_mode:
tradeoff = self.sigmoid(self.learn_tradeoff)
else:
tradeoff = self.tradeoff
global_attn = (1 - tradeoff) * global_attn + tradeoff * cls_attn
# normalize global attention
new_global_scale = torch.sum(global_attn, dim=1, keepdim=True)
scale = old_global_scale / new_global_scale
global_attn = global_attn * scale
x = (attn @ v).transpose(1, 2).reshape(B, -1, self.dh)
x = self.proj(x)
return x
class LeViT(torch.nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dim=[192],
key_dim=[64],
depth=[12],
num_heads=[3],
attn_ratio=[2],
mlp_ratio=[2],
hybrid_backbone=None,
down_ops=[],
attention_activation=torch.nn.Hardswish,
mlp_activation=torch.nn.Hardswish,
distillation=True,
drop_path=0, prune_ratio=None):
super().__init__()
self.stage_wise_prune = True
self.num_classes = num_classes
self.num_features = embed_dim[-1]
self.embed_dim = embed_dim
self.distillation = distillation
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim[0]))
self.patch_embed = hybrid_backbone
self.pos_embed = True
self.blocks = []
self.stage_blocks = []
down_ops.append([''])
resolution = img_size // patch_size
if self.pos_embed:
self.poss = nn.Parameter(torch.zeros(1, resolution ** 2 + 1, embed_dim[0]))
trunc_normal_(self.poss, std=.02)
self.prune_ratio = prune_ratio[0]
self.stage_prune_ratio = prune_ratio[1]
layer_index = -1
n = 14
j = 0
for i, (ed, kd, dpth, nh, ar, mr, do) in enumerate(
zip(embed_dim, key_dim, depth, num_heads, attn_ratio, mlp_ratio, down_ops)):
stage_subblocks = []
for _ in range(dpth):
layer_index += 1
m1 = Residual(Attention(
ed, kd, nh,
attn_ratio=ar,
activation=attention_activation,
resolution=resolution,
posembed=self.pos_embed
), drop_path, out_raw=True)
if self.prune_ratio[layer_index] == 1:
self.stage_blocks.append(m1)
else:
stage_subblocks.append(m1)
if mr > 0:
h = int(ed * mr)
m2 = Residual(torch.nn.Sequential(
Linear_BN(ed, h, resolution=resolution),
mlp_activation(),
Linear_BN(h, ed, bn_weight_init=0,
resolution=resolution),
), drop_path, out_raw=True)
else:
m2 = torch.nn.Identity()
if self.prune_ratio[layer_index] == 1:
self.stage_blocks.append(m2)
else:
stage_subblocks.append(m2)
self.blocks.append(CatModule(m1, m2, prune_ratio=self.prune_ratio[layer_index], N=n ** 2))
if self.prune_ratio[layer_index] < 1:
j = j + 1
if len(stage_subblocks) != 0:
stage_subblocks = torch.nn.ModuleList(stage_subblocks)
self.stage_blocks.append(StageModule(stage_subblocks, prune_ratio=self.stage_prune_ratio[i]))
if do[0] == 'Subsample':
n = int((n + 1) / 2)
# ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride)
resolution_ = (resolution - 1) // do[5] + 1
subsample = AttentionSubsample(
*embed_dim[i:i + 2], key_dim=do[1], num_heads=do[2],
attn_ratio=do[3],
activation=attention_activation,
stride=do[5],
resolution=resolution,
resolution_=resolution_,
posembed=self.pos_embed)
self.blocks.append(subsample)
self.stage_blocks.append(subsample)
resolution = resolution_
if do[4] > 0: # mlp_ratio
h = int(embed_dim[i + 1] * do[4])
ffn = Residual(torch.nn.Sequential(
Linear_BN(embed_dim[i + 1], h,
resolution=resolution),
mlp_activation(),
Linear_BN(
h, embed_dim[i + 1], bn_weight_init=0, resolution=resolution),
), drop_path)
self.blocks.append(ffn)
self.stage_blocks.append(ffn)
self.blocks = torch.nn.Sequential(*self.blocks)
self.stage_blocks = torch.nn.Sequential(*self.stage_blocks)
# Classifier head
self.head = BN_Linear(
embed_dim[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
if distillation:
self.head_dist = BN_Linear(
embed_dim[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
self.clsc = True
if self.clsc:
self.head_cls = BN_Linear(
embed_dim[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
if distillation:
self.head_cls_dist = BN_Linear(
embed_dim[-1], num_classes) if num_classes > 0 else torch.nn.Identity()
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'poss' in x}
def set_learn_tradeoff(self, mode):
global learn_tradeoff_mode
learn_tradeoff_mode = mode
def set_prune_ratio(self, mode):
pass
def remove_cls(self):
if hasattr(self, 'head_cls'):
del self.head_cls
if hasattr(self, 'head_cls_dist'):
del self.head_cls_dist
def forward(self, x):
global global_attn
global ori_indices
global learn_tradeoff_mode
global_attn = 0
x = self.patch_embed(x)
x = x.flatten(2).transpose(1, 2)
ori_indices = torch.arange(x.shape[1], dtype=torch.long, device=x.device).unsqueeze(0)
ori_indices = ori_indices.expand(x.shape[0], -1)
cls_token = self.cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_token, x), 1)
if self.pos_embed:
x = x + self.poss
if self.stage_wise_prune:
x = self.stage_blocks(x)
else:
x = self.blocks(x)
cls = x[:, 0, :]
x = x[:, 1:, :]
x = x.mean(1)
if self.distillation:
x = self.head(x), self.head_dist(x)
if self.clsc:
if self.training:
xcls = self.head_cls(cls)
xcls_dist = self.head_cls_dist(cls)
return x[0], x[1], xcls, xcls_dist
else:
return (x[0] + x[1]) / 2
if not self.training:
x = (x[0] + x[1]) / 2
else:
x = self.head(x)
return x
def model_factory(C, D, X, N, drop_path, weights,
num_classes, distillation, pretrained, fuse, prune_ratio):
embed_dim = [int(x) for x in C.split('_')]
num_heads = [int(x) for x in N.split('_')]
depth = [int(x) for x in X.split('_')]
act = torch.nn.Hardswish
model = LeViT(
patch_size=16,
embed_dim=embed_dim,
num_heads=num_heads,
key_dim=[D] * 3,
depth=depth,
attn_ratio=[2, 2, 2],
mlp_ratio=[2, 2, 2],
down_ops=[
# ('Subsample',key_dim, num_heads, attn_ratio, mlp_ratio, stride)
['Subsample', D, embed_dim[0] // D, 4, 2, 2],
['Subsample', D, embed_dim[1] // D, 4, 2, 2],
],
attention_activation=act,
mlp_activation=act,
hybrid_backbone=b16(embed_dim[0], activation=act),
num_classes=num_classes,
drop_path=drop_path,
distillation=distillation,
prune_ratio=prune_ratio
)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
weights, map_location='cpu')
model.load_state_dict(checkpoint['model'])
if fuse:
utils.replace_batchnorm(model)
return model
if __name__ == '__main__':
for name in specification:
net = globals()[name](fuse=False, pretrained=False)
net.eval()
net.remove_cls()
net(torch.randn(4, 3, 224, 224))
print(name, 'Parameters:', sum(p.numel() for p in net.parameters() if p.requires_grad))
|
py | 1a5489ab16601f7d0665e5ad8c68e2c4e811d534 | # Generated by Django 2.2 on 2020-09-12 18:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0003_auto_20200802_1750'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='address',
),
migrations.AddField(
model_name='customer',
name='address_line_1',
field=models.CharField(max_length=250, null=True),
),
migrations.AddField(
model_name='customer',
name='address_line_2',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='customer',
name='city',
field=models.CharField(max_length=250, null=True),
),
migrations.AddField(
model_name='customer',
name='country',
field=models.CharField(default='Nigeria', max_length=250),
),
migrations.AddField(
model_name='customer',
name='email',
field=models.EmailField(blank=True, max_length=250, null=True),
),
migrations.AddField(
model_name='customer',
name='first_name',
field=models.CharField(max_length=250, null=True),
),
migrations.AddField(
model_name='customer',
name='last_name',
field=models.CharField(max_length=250, null=True),
),
migrations.AddField(
model_name='customer',
name='postal_code',
field=models.PositiveIntegerField(null=True),
),
migrations.AddField(
model_name='customer',
name='state',
field=models.CharField(max_length=250, null=True),
),
migrations.CreateModel(
name='DeliveryAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=250, null=True)),
('last_name', models.CharField(max_length=250, null=True)),
('email', models.EmailField(blank=True, max_length=250, null=True)),
('address_line_1', models.CharField(max_length=250, null=True)),
('address_line_2', models.CharField(blank=True, max_length=250, null=True)),
('city', models.CharField(max_length=250, null=True)),
('state', models.CharField(max_length=250, null=True)),
('country', models.CharField(default='Nigeria', max_length=250)),
('postal_code', models.PositiveIntegerField(null=True)),
('phone_number', models.PositiveIntegerField(null=True)),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='account.Customer')),
],
),
migrations.CreateModel(
name='DefaultAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('DeliveryAddress', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='account.DeliveryAddress')),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='account.Customer')),
],
),
]
|
py | 1a548ae67fe9da05111ee335470e6c9cb5d41c2d | from django.contrib import admin
from .models import Movie
# Register your models here.
admin.site.register(Movie) |
py | 1a548d72cf17c4853a45dff41264abc683933f12 | import ssl
import threading
import time
import websocket # pip3 install websocket_client
websocket.enableTrace(True)
def on_wss_msg(evt):
print(evt)
class SubscribeClient(websocket.WebSocketApp):
def __init__(self, url, on_wss_open, on_wss_msg):
self.url = url
self.reconnect_intv_sec = 60
super().__init__(url=url,
on_open=on_wss_open,
on_message=on_wss_msg,
on_error=self.on_wss_error,
on_close=self.on_wss_close)
def on_wss_open(self):
print(self.__class__.__name__ + ': on_wss_open')
def on_wss_cont_message(self, msg, cont):
print(self.__class__.__name__ + ': on_wss_cont_message')
def on_wss_error(self, evt):
print(self.__class__.__name__ + ': on_wss_error: msg = ' + str(evt))
print(evt)
def on_wss_close(self):
print(self.__class__.__name__ + ': on_wss_close')
def do_start(self):
try:
while True:
print('Starting SubscribeClient url = ' + self.url)
self.run_forever(ping_timeout=30, sslopt={"cert_reqs": ssl.CERT_NONE})
print(f'Sleep {self.reconnect_intv_sec} seconds and connect again....')
time.sleep(self.reconnect_intv_sec)
except Exception as e:
print('SubscribeClient: run_forever exception ' + str(e))
def start(self):
threading.Thread(target=self.do_start).start()
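# Minimal usage sketch; 'wss://example.com/stream' is a placeholder endpoint, not a real one.
if __name__ == '__main__':
    client = SubscribeClient('wss://example.com/stream',
                             on_wss_open=None, on_wss_msg=on_wss_msg)
    client.start()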
|
py | 1a548d997c35d8b41d450eade72a03c464b51f34 | from usaspending_api.references.models.agency import Agency
from usaspending_api.references.models.cfda import Cfda
from usaspending_api.references.models.cgac import CGAC
from usaspending_api.references.models.city_county_state_code import CityCountyStateCode
from usaspending_api.references.models.definition import Definition
from usaspending_api.references.models.filter_hash import FilterHash
from usaspending_api.references.models.frec import FREC
from usaspending_api.references.models.frec_map import FrecMap
from usaspending_api.references.models.gtas_total_obligation import GTASTotalObligation
from usaspending_api.references.models.naics import NAICS
from usaspending_api.references.models.object_class import ObjectClass
from usaspending_api.references.models.overall_totals import OverallTotals
from usaspending_api.references.models.psc import PSC
from usaspending_api.references.models.ref_country_code import RefCountryCode
from usaspending_api.references.models.ref_program_activity import RefProgramActivity
from usaspending_api.references.models.rosetta import Rosetta
from usaspending_api.references.models.subtier_agency import SubtierAgency
from usaspending_api.references.models.toptier_agency import ToptierAgency
__all__ = [
"Agency",
"Cfda",
"CGAC",
"CityCountyStateCode",
"Definition",
"FilterHash",
"FREC",
"FrecMap",
"GTASTotalObligation",
"NAICS",
"ObjectClass",
"OverallTotals",
"PSC",
"RefCountryCode",
"RefProgramActivity",
"Rosetta",
"SubtierAgency",
"ToptierAgency",
]
|
py | 1a548e57c3f1c07d54c1dd6a5ea48ec369ca2474 | # encoding:utf-8
import sys
sys.path.append("..")
from mf import MF
from utility.matrix import SimMatrix
from utility.similarity import cosine_sp
class ItemCF(MF):
"""
    Item-based collaborative filtering (ItemCF).
    Predicts a user's rating for an item from the ratings the user gave to the
    top-n most similar items, with cosine similarity computed between item rating columns.
Sarwar B, Karypis G, Konstan J, et al. Item-based collaborative filtering recommendation algorithms[C]//Proceedings of the 10th international conference on World Wide Web. ACM, 2001: 285-295.
"""
def __init__(self):
super(ItemCF, self).__init__()
self.config.n = 10
# self.init_model()
def init_model(self, k):
super(ItemCF, self).init_model(k)
# def init_model(self):
# self.item_sim = SimMatrix()
# for i_test in self.rg.testSet_i:
# for i_train in self.rg.item:
# if i_test != i_train:
# if self.item_sim.contains(i_test, i_train):
# continue
# sim = cosine_sp(self.rg.get_col(i_test), self.rg.get_col(i_train))
# self.item_sim.set(i_test, i_train, sim)
def predict(self, u, i):
item_sim = dict()
for i_train in self.rg.item:
if i != i_train:
if i_train in item_sim:
continue
sim = cosine_sp(self.rg.get_col(i), self.rg.get_col(i_train))
item_sim[i_train] = sim
matchItems = sorted(item_sim.items(), key=lambda x: x[1], reverse=True)
itemCount = self.config.n
if itemCount > len(matchItems):
itemCount = len(matchItems)
sum, denom = 0, 0
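        # Mean-centered weighted prediction (the standard ItemCF formulation):
        # pred(u, i) = mean(i) + sum_j sim(i, j) * (r_uj - mean(j)) / sum_j sim(i, j),
        # taken over the top-n most similar items j that user u has rated.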
for n in range(itemCount):
similarItem = matchItems[n][0]
if self.rg.containsUserItem(u, similarItem):
similarity = matchItems[n][1]
rating = self.rg.trainSet_u[u][similarItem]
sum += similarity * (rating - self.rg.itemMeans[similarItem])
denom += similarity
if sum == 0:
if not self.rg.containsItem(i):
return self.rg.globalMean
return self.rg.itemMeans[i]
pred = self.rg.itemMeans[i] + sum / float(denom)
# print('finished user:'+str(u)+" item:"+str(i))
return pred
pass
if __name__ == '__main__':
ic = ItemCF()
ic.init_model(0)
print(ic.predict_model())
print(ic.predict_model_cold_users())
ic.init_model(1)
print(ic.predict_model())
print(ic.predict_model_cold_users())
|
py | 1a548f1837a4cd46ff57f7ea4b301628adb9f77f | #!/usr/bin/env python
import json
import os
import requests
from typing import List, Dict
from typing_extensions import Final
# 1 page fetches 100 proposals. Remember to increment the number below periodically
# to match the number of currently open proposals on
# https://github.com/godotengine/godot-proposals/issues.
NUM_PAGES: Final = 15
def main() -> None:
# Change to the directory where the script is located,
# so that the script can be run from any location.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
print("[*] Fetching proposal JSON pages...")
all_proposals: List[Dict] = []
for page in range(1, NUM_PAGES + 1):
print(f" Requesting batch of proposals {page}/{NUM_PAGES}...")
request = requests.get(
f"https://api.github.com/repos/godotengine/godot-proposals/issues?state=open&per_page=100&page={page}",
headers={"Accept": "application/vnd.github.squirrel-girl-preview"},
)
request_dict = json.loads(request.text)
for proposal in request_dict:
# Only include fields we use on the frontend.
all_proposals.append(
{
"id": proposal["id"],
"number": proposal["number"],
"title": proposal["title"],
"created_at": proposal["created_at"],
"html_url": proposal["html_url"],
"user": {"login": proposal["user"]["login"]},
"comments": proposal["comments"],
"reactions": {
"+1": proposal["reactions"]["+1"],
"-1": proposal["reactions"]["-1"],
},
}
)
print("[*] Saving proposals.json...")
with open("proposals.json", "w") as file:
json.dump(all_proposals, file)
print("[*] Success!")
if __name__ == "__main__":
main()
|
py | 1a548f304e062d9cb32145c503139c7d71369ca5 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureWorkloadJobExtendedInfo(Model):
"""Azure VM workload-specific additional information for job.
:param tasks_list: List of tasks for this job
:type tasks_list:
list[~azure.mgmt.recoveryservicesbackup.models.AzureWorkloadJobTaskDetails]
:param property_bag: Job properties.
:type property_bag: dict[str, str]
:param dynamic_error_message: Non localized error message on job
execution.
:type dynamic_error_message: str
"""
_attribute_map = {
'tasks_list': {'key': 'tasksList', 'type': '[AzureWorkloadJobTaskDetails]'},
'property_bag': {'key': 'propertyBag', 'type': '{str}'},
'dynamic_error_message': {'key': 'dynamicErrorMessage', 'type': 'str'},
}
def __init__(self, **kwargs):
super(AzureWorkloadJobExtendedInfo, self).__init__(**kwargs)
self.tasks_list = kwargs.get('tasks_list', None)
self.property_bag = kwargs.get('property_bag', None)
self.dynamic_error_message = kwargs.get('dynamic_error_message', None)
|
py | 1a5492156a1dca031f7c31dfec8899f817206cbe | from django.urls import path
from . import views
urlpatterns = [
path('', views.WelcomeTemplateView.as_view(), name='welcome'),
path('about/', views.AboutTemplateView.as_view(), name='about'),
path('cookies/', views.CookiesTemplateView.as_view(), name='cookies'),
]
|
py | 1a5493f3f4b4708fb127277985acfac72ff8b686 | import cgi
import datetime
from paste.fixture import AppError
from pylons import config
from pylons import c
from genshi.core import escape as genshi_escape
from difflib import unified_diff
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal
from ckan.tests import *
from ckan.tests.html_check import HtmlCheckMethods
from ckan.tests.pylons_controller import PylonsTestCase
from base import FunctionalTestCase
import ckan.model as model
from ckan.lib.create_test_data import CreateTestData
import ckan.lib.helpers as h
import ckan.lib.search as search
from ckan.logic.action import get, update
from ckan.controllers.package import PackageController
from ckan.plugins import SingletonPlugin, implements, IPackageController
from ckan import plugins
from ckan.rating import set_rating
from ckan.lib.search.common import SolrSettings
class MockPackageControllerPlugin(SingletonPlugin):
implements(IPackageController)
def __init__(self):
from collections import defaultdict
self.calls = defaultdict(int)
def read(self, entity):
self.calls['read'] += 1
def create(self, entity):
self.calls['create'] += 1
def edit(self, entity):
self.calls['edit'] += 1
def authz_add_role(self, object_role):
self.calls['authz_add_role'] += 1
def authz_remove_role(self, object_role):
self.calls['authz_remove_role'] += 1
def delete(self, entity):
self.calls['delete'] += 1
def before_search(self, search_params):
self.calls['before_search'] += 1
return search_params
def after_search(self, search_results, search_params):
self.calls['after_search'] += 1
return search_results
def before_index(self, data_dict):
self.calls['before_index'] += 1
return data_dict
def before_view(self, data_dict):
self.calls['before_view'] += 1
return data_dict
def after_create(self, context, data_dict):
self.calls['after_create'] += 1
self.id_in_dict = 'id' in data_dict
return data_dict
def after_update(self, context, data_dict):
self.calls['after_update'] += 1
return data_dict
def after_delete(self, context, data_dict):
self.calls['after_delete'] += 1
return data_dict
def after_show(self, context, data_dict):
self.calls['after_show'] += 1
return data_dict
def update_facet_titles(self, facet_titles):
return facet_titles
existing_extra_html = ('<label class="field_opt" for="Package-%(package_id)s-extras-%(key)s">%(capitalized_key)s</label>', '<input id="Package-%(package_id)s-extras-%(key)s" name="Package-%(package_id)s-extras-%(key)s" size="20" type="text" value="%(value)s">')
class TestPackageBase(FunctionalTestCase):
key1 = u'key1 Less-than: < Umlaut: \xfc'
value1 = u'value1 Less-than: < Umlaut: \xfc'
# Note: Can't put a quotation mark in key1 or value1 because
# paste.fixture doesn't unescape the value in an input field
# on form submission. (But it works in real life.)
def _assert_form_errors(self, res):
self.check_tag(res, '<form', 'has-errors')
assert 'field_error' in res, res
def diff_responses(self, res1, res2):
return self.diff_html(res1.body, res2.body)
def diff_html(self, html1, html2):
return '\n'.join(unified_diff(html1.split('\n'),
html2.split('\n')))
class TestPackageForm(TestPackageBase):
'''Inherit this in tests for these form testing methods'''
def _check_package_read(self, res, **params):
assert not 'Error' in res, res
assert u'%s - Datasets' % params['title'] in res, res
main_res = self.main_div(res)
main_div = main_res
main_div_str = main_div.encode('utf8')
assert params['name'] in main_div, main_div_str
assert params['title'] in main_div, main_div_str
assert params['version'] in main_div, main_div_str
self.check_named_element(main_div, 'a', 'href="%s"' % params['url'])
prefix = 'Dataset-%s-' % params.get('id', '')
for res_index, values in self._get_resource_values(params['resources'], by_resource=True):
self.check_named_element(main_div, 'tr', *values)
assert params['notes'] in main_div, main_div_str
license = model.Package.get_license_register()[params['license_id']]
assert license.title in main_div, (license.title, main_div_str)
tag_names = list(params['tags'])
self.check_named_element(main_div, 'ul', *tag_names)
if params.has_key('state'):
assert 'State: %s' % params['state'] in main_div.replace('</strong>', ''), main_div_str
if isinstance(params['extras'], dict):
extras = []
for key, value in params['extras'].items():
extras.append((key, value, False))
elif isinstance(params['extras'], (list, tuple)):
extras = params['extras']
else:
raise NotImplementedError
for key, value, deleted in extras:
if not deleted:
key_in_html_body = self.escape_for_html_body(key)
value_in_html_body = self.escape_for_html_body(value)
self.check_named_element(main_div, 'tr', key_in_html_body, value_in_html_body)
else:
self.check_named_element(main_div, 'tr', '!' + key)
self.check_named_element(main_div, 'tr', '!' + value)
def _get_resource_values(self, resources, by_resource=False):
assert isinstance(resources, (list, tuple))
for res_index, resource in enumerate(resources):
if by_resource:
values = []
for i, res_field in enumerate(model.Resource.get_columns(extra_columns = False)):
if isinstance(resource, (str, unicode)):
expected_value = resource if res_field == 'url' else ''
elif hasattr(resource, res_field):
expected_value = getattr(resource, res_field)
elif isinstance(resource, (list, tuple)):
expected_value = resource[i]
elif isinstance(resource, dict):
expected_value = resource.get(res_field, u'')
else:
                raise NotImplementedError
if not by_resource:
yield (res_index, res_field, expected_value)
else:
values.append(expected_value)
if by_resource:
yield(res_index, values)
def escape_for_html_body(self, unescaped_str):
# just deal with chars in tests
        return unescaped_str.replace('<', '&lt;')
def check_form_filled_correctly(self, res, **params):
if params.has_key('pkg'):
for key, value in params['pkg'].as_dict().items():
if key == 'license':
key = 'license_id'
params[key] = value
prefix = ''
main_res = self.main_div(res)
self.check_tag(main_res, prefix+'name', params['name'])
self.check_tag(main_res, prefix+'title', params['title'])
self.check_tag(main_res, prefix+'version', params['version'])
self.check_tag(main_res, prefix+'url', params['url'])
#for res_index, res_field, expected_value in self._get_resource_values(params['resources']):
# ## only check fields that are on the form
# if res_field not in ['url', 'id', 'description', 'hash']:
# continue
# self.check_tag(main_res, '%sresources__%i__%s' % (prefix, res_index, res_field), expected_value)
self.check_tag_and_data(main_res, prefix+'notes', params['notes'])
self.check_tag_and_data(main_res, 'selected', params['license_id'])
if isinstance(params['tags'], (str, unicode)):
tags = map(lambda s: s.strip(), params['tags'].split(','))
else:
tags = params['tags']
for tag in tags:
self.check_tag(main_res, prefix+'tag_string', tag)
if params.has_key('state'):
self.check_tag_and_data(main_res, 'selected', str(params['state']))
if isinstance(params['extras'], dict):
extras = []
for key, value in params['extras'].items():
extras.append((key, value, False))
else:
extras = params['extras']
for num, (key, value, deleted) in enumerate(sorted(extras)):
key_in_html_body = self.escape_for_html_body(key)
value_in_html_body = self.escape_for_html_body(value)
key_escaped = genshi_escape(key)
value_escaped = genshi_escape(value)
self.check_tag(main_res, 'extras__%s__key' % num, key_in_html_body)
self.check_tag(main_res, 'extras__%s__value' % num, value_escaped)
if deleted:
self.check_tag(main_res, 'extras__%s__deleted' % num, 'checked')
assert params['log_message'] in main_res, main_res
def _check_redirect(self, return_url_param, expected_redirect,
pkg_name_to_edit='',extra_environ=None):
'''
@param return_url_param - encoded url to be given as param - if None
then assume redirect is specified in pylons config
@param expected_redirect - url we expect to redirect to (but <NAME>
not yet substituted)
@param pkg_name_to_edit - '' means create a new dataset
'''
try:
new_name = u'new-name'
offset_params = {'controller':'package'}
if pkg_name_to_edit:
pkg_name = pkg_name_to_edit
pkg = model.Package.by_name(pkg_name)
assert pkg
pkg_id = pkg.id
offset_params['action'] = 'edit'
offset_params['id'] = pkg_name_to_edit
else:
offset_params['action'] = 'new'
pkg_id = ''
if return_url_param:
offset_params['return_to'] = return_url_param
offset = url_for(**offset_params)
res = self.app.get(offset, extra_environ=extra_environ)
assert 'Datasets -' in res
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = new_name
res = fv.submit('save', status=302, extra_environ=extra_environ)
assert not 'Error' in res, res
redirected_to = dict(res.headers).get('Location') or dict(res.headers)['location']
expected_redirect_url = expected_redirect.replace('<NAME>', new_name)
assert redirected_to == expected_redirect_url, \
'Redirected to %s but should have been %s' % \
(redirected_to, expected_redirect_url)
finally:
# revert name change or pkg creation
pkg = model.Package.by_name(new_name)
if pkg:
rev = model.repo.new_revision()
if pkg_name_to_edit:
pkg.name = pkg_name_to_edit
else:
pkg.purge()
model.repo.commit_and_remove()
class TestReadOnly(TestPackageForm, HtmlCheckMethods, PylonsTestCase):
@classmethod
def setup_class(cls):
PylonsTestCase.setup_class()
CreateTestData.create()
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
def test_read(self):
name = u'annakarenina'
c.hide_welcome_message = True
offset = url_for(controller='package', action='read', id=name)
res = self.app.get(offset)
# check you get the same html when specifying the pkg by id
# instead of by name
offset = url_for(controller='package', action='read', id=self.anna.id)
res_by_id = self.app.get(offset)
# just check the stuff in the package div
pkg_by_name_main = self.named_div('dataset', res)
pkg_by_id_main = self.named_div('dataset', res_by_id)
# rename some things which may be in the wrong order sometimes
txt_order_non_deterministic = (u'Flexible \u30a1', 'russian', 'tolstoy', 'david', 'roger')
        for txt in txt_order_non_deterministic:
            # replace in both rendered strings; rebinding the loop variable would be a no-op
            pkg_by_name_main = pkg_by_name_main.replace(txt, 'placeholder')
            pkg_by_id_main = pkg_by_id_main.replace(txt, 'placeholder')
print pkg_by_name_main
res_diff = self.diff_html(pkg_by_name_main, pkg_by_id_main)
assert not res_diff, res_diff.encode('utf8')
# not true as language selection link return url differs:
#assert len(res_by_id.body) == len(res.body)
# only retrieve after app has been called
anna = self.anna
assert name in res
assert anna.version in res
assert anna.url in res
assert 'Some test notes' in res
self.check_named_element(res, 'a',
'http://ckan.net/',
'target="_blank"',
'rel="nofollow"')
assert '<strong>Some bolded text.</strong>' in res
        self.check_tag_and_data(res, 'left arrow', '&lt;')
self.check_tag_and_data(res, 'umlaut', u'\xfc')
#assert 'OKD Compliant::' in res
assert u'Flexible \u30a1' in res, res
assert 'russian' in res
assert 'david' in res
assert 'roger' in res
assert 'genre' in res, res
assert 'romantic novel' in res, res
assert 'original media' in res, res
assert 'book' in res, res
assert 'This dataset satisfies the Open Definition' in res, res
def test_read_war_rdf(self):
name = u'warandpeace'
offset = url_for(controller='package', action='read', id=name + ".rdf")
res = self.app.get(offset)
assert '<dct:title>A Wonderful Story</dct:title>' in res, res
def test_read_war(self):
name = u'warandpeace'
c.hide_welcome_message = True
offset = url_for(controller='package', action='read', id=name)
res = self.app.get(offset)
assert 'This dataset is Not Open' in res, res
def test_read_nonexistentpackage(self):
name = 'anonexistentpackage'
offset = url_for(controller='package', action='read', id=name)
res = self.app.get(offset, status=404)
def test_read_internal_links(self):
pkg_name = u'link-test',
CreateTestData.create_arbitrary([
{'name':pkg_name,
'notes':'Decoy link here: decoy:decoy, real links here: package:pkg-1, ' \
'tag:tag_1 group:test-group-1 and a multi-word tag: tag:"multi word with punctuation."',
}
])
offset = url_for(controller='package', action='read', id=pkg_name)
res = self.app.get(offset)
def check_link(res, controller, id):
id_in_uri = id.strip('"').replace(' ', '%20') # remove quotes and percent-encode spaces
self.check_tag_and_data(res, 'a ', '/%s/%s' % (controller, id_in_uri),
'%s:%s' % (controller, id))
check_link(res, 'package', 'pkg-1')
check_link(res, 'tag', 'tag_1')
check_link(res, 'tag', '"multi word with punctuation."')
check_link(res, 'group', 'test-group-1')
assert 'decoy</a>' not in res, res
assert 'decoy"' not in res, res
#res = self.app.get(offset)
#assert 'Datasets' in res
#name = u'annakarenina'
#title = u'A Novel By Tolstoy'
#assert title in res
#res = res.click(title)
#assert '%s - Datasets' % title in res, res
#main_div = self.main_div(res)
#assert title in main_div, main_div.encode('utf8')
def test_history(self):
name = 'annakarenina'
offset = url_for(controller='package', action='history', id=name)
res = self.app.get(offset)
assert 'History' in res
assert 'Revisions' in res
assert name in res
def test_read_plugin_hook(self):
plugin = MockPackageControllerPlugin()
plugins.load(plugin)
name = u'annakarenina'
offset = url_for(controller='package', action='read', id=name)
res = self.app.get(offset)
assert plugin.calls['read'] == 1, plugin.calls
assert plugin.calls['after_show'] == 1, plugin.calls
plugins.unload(plugin)
def test_resource_list(self):
# TODO restore this test. It doesn't make much sense with the
# present resource list design.
name = 'annakarenina'
cache_url = 'http://thedatahub.org/test_cache_url.csv'
# add a cache_url to the first resource in the package
context = {'model': model, 'session': model.Session, 'user': 'testsysadmin'}
data = {'id': 'annakarenina'}
pkg = get.package_show(context, data)
pkg['resources'][0]['cache_url'] = cache_url
# FIXME need to pretend to be called by the api
context['api_version'] = 3
update.package_update(context, pkg)
# check that the cache url is included on the dataset view page
offset = url_for(controller='package', action='read', id=name)
res = self.app.get(offset)
#assert '[cached]'in res
#assert cache_url in res
class TestReadAtRevision(FunctionalTestCase, HtmlCheckMethods):
@classmethod
def setup_class(cls):
cls.before = datetime.datetime(2010, 1, 1)
cls.date1 = datetime.datetime(2011, 1, 1)
cls.date2 = datetime.datetime(2011, 1, 2)
cls.date3 = datetime.datetime(2011, 1, 3)
cls.today = datetime.datetime.now()
cls.pkg_name = u'testpkg'
# create dataset
rev = model.repo.new_revision()
rev.timestamp = cls.date1
pkg = model.Package(name=cls.pkg_name, title=u'title1')
model.Session.add(pkg)
model.setup_default_user_roles(pkg)
model.repo.commit_and_remove()
# edit dataset
rev = model.repo.new_revision()
rev.timestamp = cls.date2
pkg = model.Package.by_name(cls.pkg_name)
pkg.title = u'title2'
pkg.add_tag_by_name(u'tag 2')
pkg.extras = {'key2': u'value2'}
model.repo.commit_and_remove()
# edit dataset again
rev = model.repo.new_revision()
rev.timestamp = cls.date3
pkg = model.Package.by_name(cls.pkg_name)
pkg.title = u'title3'
pkg.add_tag_by_name(u'tag3.')
pkg.extras['key2'] = u'value3'
model.repo.commit_and_remove()
cls.offset = url_for(controller='package',
action='read',
id=cls.pkg_name)
pkg = model.Package.by_name(cls.pkg_name)
cls.revision_ids = [rev[0].id for rev in pkg.all_related_revisions[::-1]]
# revision order is reversed to be chronological
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
def test_read_normally(self):
res = self.app.get(self.offset, status=200)
pkg_html = self.named_div('dataset', res)
side_html = self.named_div('sidebar', res)
print 'PKG', pkg_html
assert 'title3' in res
assert 'key2' in pkg_html
assert 'value3' in pkg_html
print 'SIDE', side_html
assert 'tag3.' in side_html
assert 'tag 2' in side_html
def test_read_date1(self):
offset = self.offset + self.date1.strftime('@%Y-%m-%d')
res = self.app.get(offset, status=200)
pkg_html = self.named_div('dataset', res)
side_html = self.named_div('sidebar', res)
assert 'title1' in res, res
assert 'key2' not in pkg_html, pkg_html
assert 'value3' not in pkg_html, pkg_html
assert 'tag3.' not in side_html, side_html
assert 'tag 2' not in side_html, side_html
def test_read_date2(self):
date2_plus_3h = self.date2 + datetime.timedelta(hours=3)
offset = self.offset + date2_plus_3h.strftime('@%Y-%m-%d')
res = self.app.get(offset, status=200)
pkg_html = self.named_div('dataset', res)
side_html = self.named_div('sidebar', res)
print 'PKG', pkg_html
assert 'title2' in res
assert 'key2' in pkg_html
assert 'value2' in pkg_html
print 'SIDE', side_html
assert 'tag3.' not in side_html
assert 'tag 2' in side_html
def test_read_date3(self):
offset = self.offset + self.date3.strftime('@%Y-%m-%d-%H-%M')
res = self.app.get(offset, status=200)
pkg_html = self.named_div('dataset', res)
side_html = self.named_div('sidebar', res)
print 'PKG', pkg_html
assert 'title3' in res
assert 'key2' in pkg_html
assert 'value3' in pkg_html
print 'SIDE', side_html
assert 'tag3.' in side_html
assert 'tag 2' in side_html
def test_read_date_before_created(self):
offset = self.offset + self.before.strftime('@%Y-%m-%d')
res = self.app.get(offset, status=404)
def test_read_date_invalid(self):
res = self.app.get(self.offset + self.date3.strftime('@%Y-%m'),
status=400)
res = self.app.get(self.offset + self.date3.strftime('@%Y'),
status=400)
res = self.app.get(self.offset + self.date3.strftime('@%Y@%m'),
status=400)
def test_read_revision1(self):
offset = self.offset + '@%s' % self.revision_ids[0]
res = self.app.get(offset, status=200)
main_html = self.main_div(res)
pkg_html = self.named_div('dataset', res)
side_html = self.named_div('sidebar', res)
print 'MAIN', main_html
assert 'This is an old revision of this dataset' in main_html
assert 'at Jan 01, 2011, 00:00' in main_html
self.check_named_element(main_html, 'a', 'href="/dataset/%s"' % self.pkg_name)
print 'PKG', pkg_html
assert 'title1' in res
assert 'key2' not in pkg_html
assert 'value3' not in pkg_html
print 'SIDE', side_html
assert 'tag3.' not in side_html
assert 'tag 2' not in side_html
def test_read_revision2(self):
offset = self.offset + '@%s' % self.revision_ids[1]
res = self.app.get(offset, status=200)
main_html = self.main_div(res)
pkg_html = self.named_div('dataset', res)
side_html = self.named_div('sidebar', res)
print 'MAIN', main_html
assert 'This is an old revision of this dataset' in main_html
assert 'at Jan 02, 2011, 00:00' in main_html
self.check_named_element(main_html, 'a', 'href="/dataset/%s"' % self.pkg_name)
print 'PKG', pkg_html
assert 'title2' in res
assert 'key2' in pkg_html
assert 'value2' in pkg_html
print 'SIDE', side_html
assert 'tag3.' not in side_html
assert 'tag 2' in side_html
def test_read_revision3(self):
offset = self.offset + '@%s' % self.revision_ids[2]
res = self.app.get(offset, status=200)
main_html = self.main_div(res)
pkg_html = self.named_div('dataset', res)
side_html = self.named_div('sidebar', res)
print 'MAIN', main_html
assert 'This is an old revision of this dataset' not in main_html
assert 'This is the current revision of this dataset' in main_html
assert 'at Jan 03, 2011, 00:00' in main_html
self.check_named_element(main_html, 'a', 'href="/dataset/%s"' % self.pkg_name)
print 'PKG', pkg_html
assert 'title3' in res
assert 'key2' in pkg_html
assert 'value3' in pkg_html
print 'SIDE', side_html
assert 'tag3.' in side_html
assert 'tag 2' in side_html
def test_read_bad_revision(self):
# this revision doesn't exist in the db
offset = self.offset + '@ccab6798-1f4b-4a22-bcf5-462703aa4594'
res = self.app.get(offset, status=404)
class TestEdit(TestPackageForm):
editpkg_name = u'editpkgtest'
@classmethod
def setup_class(self):
CreateTestData.create()
self._reset_data()
def setup(self):
if not self.res:
self.res = self.app.get(self.offset,extra_environ=self.extra_environ_admin)
model.Session.remove()
@classmethod
def _reset_data(self):
model.Session.remove()
model.repo.rebuild_db()
CreateTestData.create()
CreateTestData.create_arbitrary(
{'name':self.editpkg_name,
'url':u'editpkgurl.com',
'tags':[u'mytesttag'],
'resources':[{'url':u'url escape: & umlaut: \xfc quote: "',
'description':u'description escape: & umlaut: \xfc quote "',
}],
'admins':[u'testadmin'],
})
self.editpkg = model.Package.by_name(self.editpkg_name)
self.pkgid = self.editpkg.id
self.offset = url_for(controller='package', action='edit', id=self.editpkg_name)
self.editpkg = model.Package.by_name(self.editpkg_name)
self.admin = model.User.by_name(u'testsysadmin')
self.extra_environ_admin = {'REMOTE_USER': self.admin.name.encode('utf8')}
self.extra_environ_russianfan = {'REMOTE_USER': 'russianfan'}
self.res = None # gets refreshed by setup
model.Session.remove()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
def test_edit_basic(self):
# just the absolute basics
try:
self.res = self.app.get(self.offset,extra_environ=self.extra_environ_admin)
assert 'Edit - Datasets' in self.res, self.res
new_name = u'new-name'
new_title = u'New Title'
fv = self.res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = new_name
fv[prefix + 'title'] = new_title
res = fv.submit('save',extra_environ=self.extra_environ_admin)
# get redirected ...
res = res.follow()
offset = url_for(controller='package', action='read', id=new_name)
res = self.app.get(offset)
assert '%s - Datasets' % new_title in res, res
pkg = model.Package.by_name(new_name)
assert pkg
assert pkg.title == new_title
finally:
self._reset_data()
def test_edit(self):
# just the key fields
try:
self.res = self.app.get(self.offset,extra_environ=self.extra_environ_admin)
assert 'Edit - Datasets' in self.res, self.res
assert self.editpkg.notes in self.res
new_name = u'new-name'
new_title = u'A Short Description of this Dataset'
newurl = u'http://www.editpkgnewurl.com'
new_download_url = newurl + u'/download/'
newlicense_id = u'cc-by'
newversion = u'0.9b'
fv = self.res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = new_name
fv[prefix + 'title'] = new_title
fv[prefix + 'url'] = newurl
#fv[prefix + 'resources__0__url'] = new_download_url
fv[prefix + 'license_id'] = newlicense_id
fv[prefix + 'version'] = newversion
res = fv.submit('save',extra_environ=self.extra_environ_admin)
# get redirected ...
res = res.follow()
model.Session.remove()
offset = url_for(controller='package', action='read', id=new_name)
res = self.app.get(offset)
assert '%s - Datasets' % new_title in res, res
pkg = model.Package.by_name(new_name)
assert pkg.title == new_title
assert pkg.url == newurl
#assert pkg.resources[0].url == new_download_url
assert pkg.version == newversion
assert newlicense_id == pkg.license.id
finally:
self._reset_data()
def test_edit_basic_pkg_by_id(self):
try:
pkg = model.Package.by_name(u'editpkgtest')
offset = url_for(controller='package', action='edit', id=pkg.id)
res = self.app.get(offset, extra_environ=self.extra_environ_admin)
#assert res.body == self.res.body, self.diff_responses(res, self.res)
assert 'Edit - Datasets' in res, res
assert pkg.name in res
new_name = u'new-name'
new_title = u'A Short Description of this Dataset'
fv = self.res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = new_name
fv[prefix + 'title'] = new_title
res = fv.submit('save', extra_environ=self.extra_environ_admin)
# get redirected ...
res = res.follow()
offset = url_for(controller='package', action='read', id=new_name)
res = self.app.get(offset)
assert '%s - Datasets' % new_title in res, res
pkg = model.Package.by_name(new_name)
assert pkg
finally:
self._reset_data()
def test_edit_2_not_groups(self):
# not allowed to edit groups for now
prefix = 'Dataset-%s-' % self.pkgid
fv = self.res.forms['dataset-edit']
assert not fv.fields.has_key(prefix + 'groups')
def test_edit_2_tags_and_groups(self):
# testing tag updating
newtagnames = [u'russian', u'tolstoy', u'superb book']
newtags = newtagnames
tagvalues = ','.join(newtags)
fv = self.res.forms['dataset-edit']
prefix = ''
fv[prefix + 'tag_string'] = tagvalues
exp_log_message = 'test_edit_2: making some changes'
fv['log_message'] = exp_log_message
res = fv.submit('save', extra_environ=self.extra_environ_admin)
# get redirected ...
res = res.follow()
assert '%s - Datasets' % self.editpkg_name in res
pkg = model.Package.by_name(self.editpkg.name)
assert len(pkg.get_tags()) == len(newtagnames)
outtags = [ tag.name for tag in pkg.get_tags() ]
for tag in newtags:
assert tag in outtags
rev = model.Revision.youngest(model.Session)
assert rev.author == self.admin.name
assert rev.message == exp_log_message
def test_missing_fields(self):
# User edits and a field is left out in the commit parameters.
# (Spammers can cause this)
fv = self.res.forms['dataset-edit']
del fv.fields['log_message']
res = fv.submit('save', status=400, extra_environ=self.extra_environ_admin)
fv = self.res.forms['dataset-edit']
prefix = ''
del fv.fields[prefix + 'license_id']
res = fv.submit('save', status=400, extra_environ=self.extra_environ_admin)
def test_redirect_after_edit_using_param(self):
return_url = 'http://random.site.com/dataset/<NAME>?test=param'
# It's useful to know that this url encodes to:
# 'http%3A%2F%2Frandom.site.com%2Fdataset%2F%3CNAME%3E%3Ftest%3Dparam'
expected_redirect = return_url
self._check_redirect(return_url, expected_redirect,
pkg_name_to_edit=self.editpkg_name, extra_environ=self.extra_environ_admin)
def test_redirect_after_edit_using_config(self):
return_url = '' # redirect comes from test.ini setting
expected_redirect = config['package_edit_return_url']
self._check_redirect(return_url, expected_redirect,
pkg_name_to_edit=self.editpkg_name, extra_environ=self.extra_environ_admin)
def test_edit_all_fields(self):
try:
# Create new item
rev = model.repo.new_revision()
pkg_name = u'new_editpkgtest'
pkg = model.Package(name=pkg_name)
pkg.title = u'This is a Test Title'
pkg.url = u'editpkgurl.com'
pr1 = model.Resource(url=u'editpkgurl1',
format=u'plain text', description=u'Full text',
hash=u'123abc',)
pr2 = model.Resource(url=u'editpkgurl2',
format=u'plain text2', description=u'Full text2',
hash=u'456abc',)
pkg.resources.append(pr1)
pkg.resources.append(pr2)
pkg.notes= u'this is editpkg'
pkg.version = u'2.2'
t1 = model.Tag(name=u'one')
t2 = model.Tag(name=u'two words')
pkg.add_tags([t1, t2])
pkg.state = model.State.DELETED
pkg.license_id = u'other-open'
extras = {'key1':'value1', 'key2':'value2', 'key3':'value3'}
for key, value in extras.items():
pkg.extras[unicode(key)] = unicode(value)
for obj in [pkg, t1, t2, pr1, pr2]:
model.Session.add(obj)
model.repo.commit_and_remove()
pkg = model.Package.by_name(pkg_name)
model.setup_default_user_roles(pkg, [self.admin])
model.repo.commit_and_remove()
# Edit it
offset = url_for(controller='package', action='edit', id=pkg.name)
res = self.app.get(offset, status=200, extra_environ={'REMOTE_USER':'testsysadmin'})
assert 'Edit - Datasets' in res, res
# Check form is correctly filled
pkg = model.Package.by_name(pkg_name)
self.check_form_filled_correctly(res, pkg=pkg, log_message='')
# Amend form
name = u'test_name'
title = u'Test Title'
version = u'1.1'
url = u'http://something.com/somewhere.zip'
resources = ((u'http://something.com/somewhere-else.xml', u'xml', u'Best', u'hash1', 'alt'),
(u'http://something.com/somewhere-else2.xml', u'xml2', u'Best2', u'hash2', 'alt'),
)
assert len(resources[0]) == 5
notes = u'Very important'
license_id = u'odc-by'
state = model.State.ACTIVE
tags = (u'tag1', u'tag2', u'tag 3')
tags_txt = u','.join(tags)
extra_changed = 'key1', self.value1 + ' CHANGED', False
extra_new = 'newkey', 'newvalue', False
log_message = 'This is a comment'
assert not model.Package.by_name(name)
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix+'name'] = name
fv[prefix+'title'] = title
fv[prefix+'version'] = version
fv[prefix+'url'] = url
# TODO consider removing this test entirely, or hardcoding column names
#for res_index, resource in enumerate(resources):
# for field_index, res_field in enumerate(model.Resource.get_columns()):
# fv[prefix+'resources__%s__%s' % (res_index, res_field)] = resource[field_index]
fv[prefix+'notes'] = notes
fv[prefix+'license_id'] = license_id
fv[prefix+'tag_string'] = tags_txt
fv[prefix+'state'] = state
fv[prefix+'extras__0__value'] = extra_changed[1].encode('utf8')
fv[prefix+'extras__3__key'] = extra_new[0].encode('utf8')
fv[prefix+'extras__3__value'] = extra_new[1].encode('utf8')
fv[prefix+'extras__2__deleted'] = True
fv['log_message'] = log_message
extras = (('key2', extras['key2'], False),
extra_changed,
extra_new,
('key3', extras['key3'], True))
res = fv.submit('save', extra_environ={'REMOTE_USER':'testsysadmin'})
# Check dataset page
assert not 'Error' in res, res
# Check dataset object
pkg = model.Package.by_name(name)
assert pkg.name == name
assert pkg.title == title
assert pkg.version == version
assert pkg.url == url
# TODO consider removing this test entirely, or hardcoding column names
#for res_index, resource in enumerate(resources):
# for field_index, res_field in enumerate(model.Resource.get_columns()):
# assert getattr(pkg.resources[res_index], res_field) == resource[field_index]
assert pkg.notes == notes
assert pkg.license.id == license_id
saved_tagnames = [str(tag.name) for tag in pkg.get_tags()]
saved_tagnames.sort()
expected_tagnames = list(tags)
expected_tagnames.sort()
assert saved_tagnames == expected_tagnames
assert pkg.state == state
assert len(pkg.extras) == len([extra for extra in extras if not extra[-1]])
for key, value, deleted in extras:
if not deleted:
assert pkg.extras[key] == value
# for some reason environ['REMOTE_ADDR'] is undefined
rev = model.Revision.youngest(model.Session)
assert rev.author == 'testsysadmin', rev.author
assert rev.message == log_message
# TODO: reinstate once fixed in code
exp_log_message = u'Creating dataset %s' % name
#assert rev.message == exp_log_message
finally:
self._reset_data()
# NB: Cannot test resources now because it is all javascript!
## def test_edit_invalid_resource(self):
## try:
## # Create new dataset
## pkg_name = u'test_res'
## CreateTestData.create_arbitrary({'name': pkg_name,
## 'resources': [{'url': '1.pdf'}]})
## # Edit it
## pkg = model.Package.by_name(pkg_name)
## offset = url_for(controller='package', action='edit', id=pkg.name)
## res = self.app.get(offset, status=200, extra_environ={'REMOTE_USER':'testadmin'})
## assert 'Edit - Datasets' in res, res
## pkg = model.Package.by_name(pkg_name)
## # Amend form
## fv = res.forms['dataset-edit']
## fv['resources__0__size'] = 'abc'
## res = fv.submit('save', extra_environ={'REMOTE_USER':'testadmin'})
## # Check dataset page
## assert 'Errors in form' in res, res
## assert 'Package resource(s) invalid' in res, res
## assert 'Resource 1' in res, res
## finally:
## self._reset_data()
def test_edit_bad_log_message(self):
fv = self.res.forms['dataset-edit']
prefix = ''
fv['log_message'] = u'Free enlargements: http://drugs.com/' # spam
res = fv.submit('save', extra_environ=self.extra_environ_admin)
assert 'Error' in res, res
self.check_tag(res, '<form', 'has-errors')
assert 'No links are allowed' in res, res
def test_edit_bad_name(self):
fv = self.res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = u'a' # invalid name
res = fv.submit('save', extra_environ=self.extra_environ_admin)
assert 'Error' in res, res
assert 'Name must be at least 2 characters long' in res, res
# Ensure there is an error at the top of the form and by the field
self._assert_form_errors(res)
def test_edit_plugin_hook(self):
# just the absolute basics
try:
plugin = MockPackageControllerPlugin()
plugins.load(plugin)
res = self.app.get(self.offset, extra_environ=self.extra_environ_admin)
new_name = u'new-name'
new_title = u'New Title'
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = new_name
fv[prefix + 'title'] = new_title
res = fv.submit('save', extra_environ=self.extra_environ_admin)
# get redirected ...
assert plugin.calls['edit'] == 1, plugin.calls
plugins.unload(plugin)
finally:
self._reset_data()
def test_after_update_plugin_hook(self):
# just the absolute basics
try:
plugin = MockPackageControllerPlugin()
plugins.load(plugin)
res = self.app.get(self.offset, extra_environ=self.extra_environ_admin)
new_name = u'new-name'
new_title = u'New Title'
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = new_name
fv[prefix + 'title'] = new_title
res = fv.submit('save', extra_environ=self.extra_environ_admin)
# get redirected ...
assert plugin.calls['after_update'] == 1, plugin.calls
assert plugin.calls['after_create'] == 0, plugin.calls
plugins.unload(plugin)
finally:
self._reset_data()
def test_edit_700_groups_add(self):
try:
pkg = model.Package.by_name(u'editpkgtest')
grp = model.Group.by_name(u'roger')
assert len(pkg.get_groups()) == 0
offset = url_for(controller='package', action='edit', id=pkg.name)
res = self.app.get(offset, extra_environ=self.extra_environ_admin)
prefix = ''
field_name = prefix + "groups__0__id"
assert field_name in res
fv = res.forms['dataset-edit']
fv[prefix + 'groups__0__id'] = grp.id
res = fv.submit('save', extra_environ=self.extra_environ_admin)
res = res.follow()
pkg = model.Package.by_name(u'editpkgtest')
assert len(pkg.get_groups()) == 1, pkg.get_groups()
assert 'roger' in res, res
finally:
self._reset_data()
def test_edit_700_groups_remove(self):
try:
pkg = model.Package.by_name(u'editpkgtest')
assert len(pkg.get_groups()) == 0
grp = model.Group.by_name(u'roger')
model.repo.new_revision()
model.Session.add(model.Member(table_id=pkg.id, table_name='package', group=grp))
model.repo.commit_and_remove()
pkg = model.Package.by_name(u'editpkgtest')
assert len(pkg.get_groups()) == 1
offset = url_for(controller='package', action='edit', id=pkg.name)
res = self.app.get(offset, extra_environ=self.extra_environ_admin)
prefix = ''
field_name = prefix + "groups__0__id"
fv = res.forms['dataset-edit']
print field_name
fv[field_name] = False
res = fv.submit('save', extra_environ=self.extra_environ_admin)
model.repo.commit_and_remove()
pkg = model.Package.by_name(u'editpkgtest')
assert len(pkg.get_groups()) == 0
finally:
self._reset_data()
def test_edit_404(self):
self.offset = url_for(controller='package', action='edit', id='random_name')
self.res = self.app.get(self.offset, status=404)
def test_edit_indexerror(self):
bad_solr_url = 'http://127.0.0.1/badsolrurl'
solr_url = SolrSettings.get()[0]
try:
SolrSettings.init(bad_solr_url)
plugins.load('synchronous_search')
fv = self.res.forms['dataset-edit']
prefix = ''
fv['log_message'] = u'Test log message'
res = fv.submit('save', status=500, extra_environ=self.extra_environ_admin)
assert 'Unable to update search index' in res, res
finally:
plugins.unload('synchronous_search')
SolrSettings.init(solr_url)
def test_edit_pkg_with_relationships(self):
# 1786
try:
# add a relationship to a package
pkg = model.Package.by_name(self.editpkg_name)
anna = model.Package.by_name(u'annakarenina')
model.repo.new_revision()
pkg.add_relationship(u'depends_on', anna)
model.repo.commit_and_remove()
# check relationship before the test
rels = model.Package.by_name(self.editpkg_name).get_relationships()
assert_equal(str(rels), '[<*PackageRelationship editpkgtest depends_on annakarenina>]')
# edit the package
self.offset = url_for(controller='package', action='edit', id=self.editpkg_name)
self.res = self.app.get(self.offset, extra_environ=self.extra_environ_admin)
fv = self.res.forms['dataset-edit']
fv['title'] = u'New Title'
res = fv.submit('save')
# check relationship still exists
rels = model.Package.by_name(self.editpkg_name).get_relationships()
assert_equal(str(rels), '[<*PackageRelationship editpkgtest depends_on annakarenina>]')
finally:
self._reset_data()
class TestDelete(TestPackageForm):
pkg_names = []
@classmethod
def setup_class(self):
model.repo.init_db()
CreateTestData.create()
CreateTestData.create_test_user()
self.admin = model.User.by_name(u'testsysadmin')
self.extra_environ_admin = {'REMOTE_USER': self.admin.name.encode('utf8')}
self.extra_environ_tester = {'REMOTE_USER': 'tester'}
@classmethod
def teardown_class(self):
self.purge_packages(self.pkg_names)
model.repo.rebuild_db()
def test_delete(self):
plugin = MockPackageControllerPlugin()
plugins.load(plugin)
offset = url_for(controller='package', action='delete',
id='warandpeace')
self.app.post(offset, extra_environ=self.extra_environ_tester, status=401)
self.app.post(offset, extra_environ=self.extra_environ_admin)
assert model.Package.get('warandpeace').state == u'deleted'
assert plugin.calls['delete'] == 1
assert plugin.calls['after_delete'] == 1
plugins.unload(plugin)
class TestNew(TestPackageForm):
pkg_names = []
@classmethod
def setup_class(self):
model.repo.init_db()
CreateTestData.create_test_user()
# self.admin = model.User.by_name(u'russianfan')
# self.extra_environ_admin = {'REMOTE_USER': self.admin.name.encode('utf8')}
self.extra_environ_tester = {'REMOTE_USER': 'tester'}
@classmethod
def teardown_class(self):
self.purge_packages(self.pkg_names)
model.repo.rebuild_db()
def test_new_with_params_1(self):
offset = url_for(controller='package', action='new',
url='http://xxx.org', name='xxx.org')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
form = res.forms['dataset-edit']
assert_equal(form['url'].value, 'http://xxx.org')
assert_equal(form['name'].value, 'xxx.org')
def test_new_without_resource(self):
# new dataset
prefix = ''
name = u'test_no_res'
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
fv = res.forms['dataset-edit']
fv[prefix+'name'] = name
# submit
self.pkg_names.append(name)
res = fv.submit('save', extra_environ=self.extra_environ_tester)
# check dataset page
assert not 'Error' in res, res
res = res.follow()
res1 = self.main_div(res).replace('</strong>', '')
assert '<td><a href="">' not in res1, res1
# check object created
pkg = model.Package.by_name(name)
assert pkg
assert pkg.name == name
assert not pkg.resources, pkg.resources
def test_new(self):
assert not model.Package.by_name(u'annakarenina')
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
assert 'Add - Datasets' in res
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = 'annakarenina'
self.pkg_names.append('annakarenina')
res = fv.submit('save')
assert not 'Error' in res, res
def test_new_bad_name(self):
offset = url_for(controller='package', action='new', id=None)
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
assert 'Add - Datasets' in res
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = u'a' # invalid name
self.pkg_names.append('a')
res = fv.submit('save', extra_environ=self.extra_environ_tester)
assert 'Error' in res, res
assert 'Name must be at least 2 characters long' in res, res
self._assert_form_errors(res)
def test_new_no_name(self):
offset = url_for(controller='package', action='new', id=None)
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
assert 'Add - Datasets' in res
fv = res.forms['dataset-edit']
prefix = ''
# don't set a name
res = fv.submit('save', extra_environ=self.extra_environ_tester)
assert 'Error' in res, res
assert 'URL: Missing value' in res, res
self._assert_form_errors(res)
def test_new_bad_param(self):
offset = url_for(controller='package', action='new', __bad_parameter='value')
res = self.app.post(offset, {'save':'1'},
status=400, extra_environ=self.extra_environ_tester)
assert 'Integrity Error' in res.body
def test_redirect_after_new_using_param(self):
return_url = 'http://random.site.com/dataset/<NAME>?test=param'
# It's useful to know that this url encodes to:
# 'http%3A%2F%2Frandom.site.com%2Fdataset%2F%3CNAME%3E%3Ftest%3Dparam'
expected_redirect = return_url
self._check_redirect(return_url, expected_redirect,
pkg_name_to_edit='', extra_environ=self.extra_environ_tester)
def test_redirect_after_new_using_config(self):
return_url = '' # redirect comes from test.ini setting
expected_redirect = config['package_new_return_url']
self._check_redirect(return_url, expected_redirect,
pkg_name_to_edit='', extra_environ=self.extra_environ_tester)
def test_new_all_fields(self):
name = u'test_name2'
title = u'Test Title'
version = u'1.1'
url = u'http://something.com/somewhere.zip'
download_url = u'http://something.com/somewhere-else.zip'
notes = u'Very important'
license_id = u'odc-by'
tags = (u'tag1', u'tag2.', u'tag 3', u'SomeCaps')
tags_txt = u','.join(tags)
extras = {self.key1:self.value1, 'key2':'value2', 'key3':'value3'}
log_message = 'This is a comment'
assert not model.Package.by_name(name)
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
assert 'Add - Datasets' in res
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix+'name'] = name
fv[prefix+'title'] = title
fv[prefix+'version'] = version
fv[prefix+'url'] = url
#fv[prefix+'resources__0__url'] = download_url
#fv[prefix+'resources__0__description'] = u'description escape: & umlaut: \xfc quote "'.encode('utf8')
fv[prefix+'notes'] = notes
fv[prefix+'license_id'] = license_id
fv[prefix+'tag_string'] = tags_txt
for i, extra in enumerate(sorted(extras.items())):
fv[prefix+'extras__%s__key' % i] = extra[0].encode('utf8')
fv[prefix+'extras__%s__value' % i] = extra[1].encode('utf8')
fv['log_message'] = log_message
# Submit
fv = res.forms['dataset-edit']
self.pkg_names.append(name)
res = fv.submit('save', extra_environ=self.extra_environ_tester)
# Check dataset page
assert not 'Error' in res, res
# Check dataset object
pkg = model.Package.by_name(name)
assert pkg.name == name
assert pkg.title == title
assert pkg.version == version
assert pkg.url == url
#assert pkg.resources[0].url == download_url
assert pkg.notes == notes
assert pkg.license.id == license_id
saved_tagnames = [str(tag.name) for tag in pkg.get_tags()]
saved_tagnames.sort()
expected_tagnames = sorted(tags)
assert saved_tagnames == expected_tagnames, '%r != %r' % (saved_tagnames, expected_tagnames)
saved_groupnames = [str(group.name) for group in pkg.get_groups()]
assert len(pkg.extras) == len(extras)
for key, value in extras.items():
assert pkg.extras[key] == value
# for some reason environ['REMOTE_ADDR'] is undefined
rev = model.Revision.youngest(model.Session)
assert rev.author == 'tester'
assert rev.message == log_message
# TODO: reinstate once fixed in code
exp_log_message = u'Creating dataset %s' % name
# assert rev.message == exp_log_message
def test_new_existing_name(self):
# test that creating a dataset with an existing name results in an error
# create initial dataset
pkgname = u'testpkg'
pkgtitle = u'mytesttitle'
assert not model.Package.by_name(pkgname)
offset = url_for(controller='package', action='new', id=None)
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
assert 'Add - Datasets' in res
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = pkgname
self.pkg_names.append(pkgname)
res = fv.submit('save', extra_environ=self.extra_environ_tester)
assert not 'Error' in res, res
assert model.Package.by_name(pkgname)
# create duplicate dataset
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
assert 'Add - Datasets' in res
fv = res.forms['dataset-edit']
fv[prefix+'name'] = pkgname
fv[prefix+'title'] = pkgtitle
res = fv.submit('save', extra_environ=self.extra_environ_tester)
assert 'Error' in res, res
assert 'That URL is already in use.' in res, res
self._assert_form_errors(res)
def test_missing_fields(self):
# A field is left out in the commit parameters.
# (Spammers can cause this)
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
assert 'Add - Datasets' in res, res
prefix = ''
fv = res.forms['dataset-edit']
fv[prefix + 'name'] = 'anything'
del fv.fields['log_message']
self.pkg_names.append('anything')
res = fv.submit('save', status=400, extra_environ=self.extra_environ_tester)
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
assert 'Add - Datasets' in res
fv = res.forms['dataset-edit']
fv[prefix + 'name'] = 'anything'
prefix = ''
del fv.fields[prefix + 'notes']
# NOTE Missing dropdown fields don't cause a KeyError in
# _serialized_value, so they wouldn't register as an error here the way
# the text field tested here does.
res = fv.submit('save', status=400, extra_environ=self.extra_environ_tester)
def test_new_plugin_hook(self):
plugin = MockPackageControllerPlugin()
plugins.load(plugin)
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
new_name = u'plugged'
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = new_name
res = fv.submit('save', extra_environ=self.extra_environ_tester)
# get redirected ...
assert plugin.calls['edit'] == 0, plugin.calls
assert plugin.calls['create'] == 1, plugin.calls
plugins.unload(plugin)
def test_after_create_plugin_hook(self):
plugin = MockPackageControllerPlugin()
plugins.load(plugin)
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
new_name = u'plugged2'
fv = res.forms['dataset-edit']
prefix = ''
fv[prefix + 'name'] = new_name
res = fv.submit('save', extra_environ=self.extra_environ_tester)
# get redirected ...
assert plugin.calls['after_update'] == 0, plugin.calls
assert plugin.calls['after_create'] == 1, plugin.calls
assert plugin.id_in_dict
plugins.unload(plugin)
def test_new_indexerror(self):
bad_solr_url = 'http://127.0.0.1/badsolrurl'
solr_url = SolrSettings.get()[0]
try:
SolrSettings.init(bad_solr_url)
plugins.load('synchronous_search')
new_package_name = u'new-package-missing-solr'
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
fv = res.forms['dataset-edit']
fv['name'] = new_package_name
# this package shouldn't actually be created but
# add it to the list to purge just in case
self.pkg_names.append(new_package_name)
res = fv.submit('save', status=500, extra_environ=self.extra_environ_tester)
assert 'Unable to add package to search index' in res, res
finally:
plugins.unload('synchronous_search')
SolrSettings.init(solr_url)
def test_change_locale(self):
offset = url_for(controller='package', action='new')
res = self.app.get(offset, extra_environ=self.extra_environ_tester)
res = self.app.get('/de/dataset/new', extra_environ=self.extra_environ_tester)
try:
assert 'Datensatz' in res.body, res.body
finally:
self.clear_language_setting()
class TestSearch(TestPackageForm):
pkg_names = []
@classmethod
def setup_class(self):
model.repo.init_db()
@classmethod
def teardown_class(self):
self.purge_packages(self.pkg_names)
model.repo.rebuild_db()
def test_search_plugin_hooks(self):
plugin = MockPackageControllerPlugin()
plugins.load(plugin)
offset = url_for(controller='package', action='search')
res = self.app.get(offset)
# get redirected ...
assert plugin.calls['before_search'] == 1, plugin.calls
assert plugin.calls['after_search'] == 1, plugin.calls
plugins.unload(plugin)
class TestNewPreview(TestPackageBase):
pkgname = u'testpkg'
pkgtitle = u'mytesttitle'
@classmethod
def setup_class(self):
pass
model.repo.init_db()
@classmethod
def teardown_class(self):
self.purge_packages([self.pkgname])
model.repo.rebuild_db()
class TestNonActivePackages(TestPackageBase):
@classmethod
def setup_class(self):
CreateTestData.create()
self.non_active_name = u'test_nonactive'
pkg = model.Package(name=self.non_active_name)
model.repo.new_revision()
model.Session.add(pkg)
model.repo.commit_and_remove()
pkg = model.Session.query(model.Package).filter_by(name=self.non_active_name).one()
admin = model.User.by_name(u'joeadmin')
model.setup_default_user_roles(pkg, [admin])
model.repo.commit_and_remove()
model.repo.new_revision()
pkg = model.Session.query(model.Package).filter_by(name=self.non_active_name).one()
pkg.delete() # becomes non active
model.repo.commit_and_remove()
@classmethod
def teardown_class(self):
model.repo.rebuild_db()
def test_read(self):
offset = url_for(controller='package', action='read', id=self.non_active_name)
res = self.app.get(offset, status=[302, 401])
def test_read_as_admin(self):
offset = url_for(controller='package', action='read', id=self.non_active_name)
res = self.app.get(offset, status=200, extra_environ={'REMOTE_USER':'testsysadmin'})
class TestRevisions(TestPackageBase):
@classmethod
def setup_class(cls):
model.Session.remove()
model.repo.init_db()
cls.name = u'revisiontest1'
# create pkg
cls.notes = [u'Written by Puccini', u'Written by Rossini', u'Not written at all', u'Written again', u'Written off']
rev = model.repo.new_revision()
cls.pkg1 = model.Package(name=cls.name)
cls.pkg1.notes = cls.notes[0]
model.Session.add(cls.pkg1)
model.setup_default_user_roles(cls.pkg1)
model.repo.commit_and_remove()
# edit pkg
for i in range(5)[1:]:
rev = model.repo.new_revision()
pkg1 = model.Package.by_name(cls.name)
pkg1.notes = cls.notes[i]
model.repo.commit_and_remove()
cls.pkg1 = model.Package.by_name(cls.name)
cls.revision_ids = [rev[0].id for rev in cls.pkg1.all_related_revisions]
# revision ids are newest first
cls.revision_timestamps = [rev[0].timestamp for rev in cls.pkg1.all_related_revisions]
cls.offset = url_for(controller='package', action='history', id=cls.pkg1.name)
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
def test_0_read_history(self):
res = self.app.get(self.offset)
main_res = self.main_div(res)
assert self.pkg1.name in main_res, main_res
assert 'radio' in main_res, main_res
latest_rev = self.pkg1.all_revisions[0]
oldest_rev = self.pkg1.all_revisions[-1]
first_radio_checked_html = '<input checked="checked" id="selected1_%s"' % latest_rev.revision_id
assert first_radio_checked_html in main_res, '%s %s' % (first_radio_checked_html, main_res)
last_radio_checked_html = '<input checked="checked" id="selected2_%s"' % oldest_rev.revision_id
assert last_radio_checked_html in main_res, '%s %s' % (last_radio_checked_html, main_res)
def test_1_do_diff(self):
res = self.app.get(self.offset)
form = res.forms['dataset-revisions']
res = form.submit()
res = res.follow()
main_res = self.main_div(res)
assert 'form-errors' not in main_res.lower(), main_res
assert 'Revision Differences' in main_res, main_res
assert self.pkg1.name in main_res, main_res
assert '<tr><td>notes</td><td><pre>- Written by Puccini\n+ Written off</pre></td></tr>' in main_res, main_res
def test_2_atom_feed(self):
offset = "%s?format=atom" % self.offset
res = self.app.get(offset)
assert '<feed' in res, res
assert 'xmlns="http://www.w3.org/2005/Atom"' in res, res
assert '</feed>' in res, res
def test_3_history_revision_link(self):
res = self.app.get(self.offset)
res = res.click('%s' % self.revision_ids[2][:4])
main_res = self.main_div(res)
assert 'Revision: %s' % self.revision_ids[2] in main_res
def test_4_history_revision_package_link(self):
res = self.app.get(self.offset)
url = str(self.revision_timestamps[1])[-6:]
res = res.click(href=url)
main_html = self.main_div(res)
assert 'This is an old revision of this dataset' in main_html
class TestMarkdownHtmlWhitelist(TestPackageForm):
pkg_name = u'markdownhtmlwhitelisttest'
pkg_notes = u'''
<table width="100%" border="1">
<tr>
<td rowspan="2"><b>Description</b></td>
<td rowspan="2"><b>Documentation</b></td>
<td colspan="2"><b><center>Data -- Pkzipped</center></b> </td>
</tr>
<tr>
<td><b>SAS .tpt</b></td>
<td><b>ASCII CSV</b> </td>
</tr>
<tr>
<td><b>Overview</b></td>
<td><A HREF="http://www.nber.org/patents/subcategories.txt">subcategory.txt</A></td>
<td colspan="2"><center>--</center></td>
</tr>
<script><!--
alert('Hello world!');
//-->
</script>
'''
def setup(self):
model.Session.remove()
model.repo.init_db()
rev = model.repo.new_revision()
CreateTestData.create_arbitrary(
{'name':self.pkg_name,
'notes':self.pkg_notes,
'admins':[u'testadmin']}
)
self.pkg = model.Package.by_name(self.pkg_name)
self.pkg_id = self.pkg.id
offset = url_for(controller='package', action='read', id=self.pkg_name)
self.res = self.app.get(offset)
def teardown(self):
model.repo.rebuild_db()
def test_markdown_html_whitelist(self):
self.body = str(self.res)
self.assert_fragment('<table width="100%" border="1">')
self.assert_fragment('<td rowspan="2"><b>Description</b></td>')
self.assert_fragment('<a href="http://www.nber.org/patents/subcategories.txt" target="_blank" rel="nofollow">subcategory.txt</a>')
self.assert_fragment('<td colspan="2"><center>--</center></td>')
self.fail_if_fragment('<script>')
def assert_fragment(self, fragment):
assert fragment in self.body, (fragment, self.body)
def fail_if_fragment(self, fragment):
assert fragment not in self.body, (fragment, self.body)
class TestAutocomplete(PylonsTestCase, TestPackageBase):
@classmethod
def setup_class(cls):
PylonsTestCase.setup_class()
CreateTestData.create()
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
def test_package_autocomplete(self):
query = 'a'
res = self.app.get('/dataset/autocomplete?q=%s' % query)
expected = ['A Wonderful Story (warandpeace)|warandpeace','annakarenina|annakarenina']
received = sorted(res.body.split('\n'))
assert expected == received
|
py | 1a549560261a63320b9227e19aa83105f1786024 | c = float(input('Temperatura em °C: '))
f = (((c*9)/5)+32)
print('{:.1f}°C é igual a {:.1f}°F'.format(c,f)) |
py | 1a5495bace1d31b8d971ae58823deccfe90bf2ed | # -*- coding: utf-8 -*-
# Copyright (c) 2020, omar jaber and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class EmployeeLanguageRequirement(Document):
pass
|
py | 1a54960739a3872b2b492d79b065685890e1ab7a | __title__ = 'splitwise'
__description__ = 'Splitwise Python SDK'
__version__ = '2.2.0'
__url__ = 'https://github.com/namaggarwal/splitwise'
__download_url__ = 'https://github.com/namaggarwal/splitwise/tarball/v'+__version__
__build__ = 0x022400
__author__ = 'Naman Aggarwal'
__author_email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Naman Aggarwal'
|
py | 1a5496163f45869e885b7e83f1abbd11d805c662 | from . import config
from . import tensor
from . import utils
from . import metrics
from . import tests
from . import visual
from . import objectives
from . import optimizers
from .features import speech
from .features import image
from .features import text
from model import model
from dataset import dataset, batch
from trainer import trainer
from . import nnet
__version__ = "0.1.0" |
py | 1a54971add3b19c956d634ac54d3113b0837201c | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-24 15:58
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Profile', '0002_delete_user'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, help_text='', primary_key=True, serialize=False, verbose_name='ID')),
('user', models.OneToOneField(help_text='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 1a54984607092211a8ad246b1ec96a5effbf111d | from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class SystemConfig(AppConfig):
"""Default app config"""
name = "apps.api.system"
verbose_name = _("System")
def ready(self):
from . import signals # noqa: F401 # pylint: disable=unused-import
|
py | 1a5498b4c8da99c175b8dcc246e6355f5819d6d6 | # Copyright 2020 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing NVIDIA Driver installation.
"""
import re
from absl import flags
from absl import logging
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import os_types
from perfkitbenchmarker import regex_util
NVIDIA_DRIVER_LOCATION_BASE = 'https://us.download.nvidia.com/tesla'
NVIDIA_TESLA_K80 = 'k80'
NVIDIA_TESLA_P4 = 'p4'
NVIDIA_TESLA_P100 = 'p100'
NVIDIA_TESLA_V100 = 'v100'
NVIDIA_TESLA_T4 = 't4'
NVIDIA_TESLA_A100 = 'a100'
"""Default GPU clocks and autoboost configurations.
Base_clock is the default clock speeds when setting the GPU clocks. Max_clock
is currently unused. The clock speeds are in the format of
[memory_clock in MHz, graphics_clock in MHz].
"""
GPU_DEFAULTS = {
NVIDIA_TESLA_K80: {
'base_clock': [2505, 562],
'max_clock': [2505, 875],
'autoboost_enabled': True,
},
NVIDIA_TESLA_P4: {
'base_clock': [3003, 885],
'max_clock': [3003, 1531],
'autoboost_enabled': None,
},
NVIDIA_TESLA_P100: {
'base_clock': [715, 1189],
'max_clock': [715, 1328],
'autoboost_enabled': None,
},
NVIDIA_TESLA_V100: {
'base_clock': [877, 1312],
'max_clock': [877, 1530],
'autoboost_enabled': None,
},
NVIDIA_TESLA_T4: {
'base_clock': [5001, 585],
'max_clock': [5001, 1590],
'autoboost_enabled': None,
},
NVIDIA_TESLA_A100: {
'base_clock': [1215, 1410],
'max_clock': [1215, 1410],
'autoboost_enabled': None,
},
}
EXTRACT_CLOCK_SPEEDS_REGEX = r'(\d*).*,\s*(\d*)'
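# A quick illustration of the two constants above. GPU_DEFAULTS stores
# [memory_clock, graphics_clock] in MHz, so for example
# GPU_DEFAULTS[NVIDIA_TESLA_V100]['base_clock'] == [877, 1312] means an
# 877 MHz memory clock and a 1312 MHz graphics clock.
# EXTRACT_CLOCK_SPEEDS_REGEX is applied in QueryGpuClockSpeed below; on a csv
# line of the assumed form "877 MHz, 1312 MHz" it captures ('877', '1312').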
flag_util.DEFINE_integerlist('gpu_clock_speeds',
None,
'desired gpu clock speeds in the form '
'[memory clock, graphics clock]')
flags.DEFINE_boolean('gpu_autoboost_enabled', None,
'whether gpu autoboost is enabled')
flags.DEFINE_string(
'nvidia_driver_version', '495.29.05',
'The version of nvidia driver to install. '
'For example, "418.67" or "418.87.01."')
flags.DEFINE_boolean('nvidia_driver_force_install', False,
'Whether to install NVIDIA driver, even if it is already '
'installed.')
flags.DEFINE_string('nvidia_driver_x_library_path', '/usr/lib',
'X library path for nvidia driver installation')
flags.DEFINE_string('nvidia_driver_x_module_path', '/usr/lib/xorg/modules',
'X module path for nvidia driver installation')
flags.DEFINE_boolean('nvidia_driver_persistence_mode', None,
'whether to enable persistence mode on the NVIDIA GPU')
FLAGS = flags.FLAGS
class UnsupportedClockSpeedError(Exception):
pass
class NvidiaSmiParseOutputError(Exception):
pass
class HeterogeneousGpuTypesError(Exception):
pass
class UnsupportedGpuTypeError(Exception):
pass
def CheckNvidiaGpuExists(vm):
"""Returns whether NVIDIA GPU exists or not on the vm.
Args:
vm: The virtual machine to check.
Returns:
True or False depending on whether NVIDIA GPU exists.
"""
# PKB only supports NVIDIA driver on DEBIAN for now.
if vm.BASE_OS_TYPE != os_types.DEBIAN:
return False
vm.Install('pciutils')
output, _ = vm.RemoteCommand('sudo lspci', should_log=True)
regex = re.compile(r'3D controller: NVIDIA Corporation')
return regex.search(output) is not None
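# Illustrative `sudo lspci` line that the regex above is meant to match
# (device name and revision vary by GPU; only the
# "3D controller: NVIDIA Corporation" fragment is required):
#   00:04.0 3D controller: NVIDIA Corporation GK210GL [Tesla K80] (rev a1)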
def CheckNvidiaSmiExists(vm):
"""Returns whether nvidia-smi is installed or not on a VM.
Args:
vm: The virtual to check.
Returns:
True or False depending on whether nvidia-smi command exists.
"""
# PKB only supports NVIDIA driver on DEBIAN for now.
if vm.BASE_OS_TYPE != os_types.DEBIAN:
return False
resp, _ = vm.RemoteHostCommand('command -v nvidia-smi',
ignore_failure=True,
suppress_warning=True)
return bool(resp.rstrip())
def GetDriverVersion(vm):
"""Returns the NVIDIA driver version as a string.
Args:
vm: Virtual machine to query.
Returns:
String containing NVIDIA driver version installed.
Raises:
NvidiaSmiParseOutputError: If nvidia-smi output cannot be parsed.
"""
stdout, _ = vm.RemoteCommand('nvidia-smi', should_log=True)
regex = r'Driver Version\:\s+(\S+)'
match = re.search(regex, stdout)
if match:
return str(match.group(1))
raise NvidiaSmiParseOutputError('Unable to parse driver version from {}'
.format(stdout))
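# Illustrative nvidia-smi banner line that the driver-version regex above
# targets (layout assumed from typical nvidia-smi output); here it would
# extract "495.29.05":
#   | NVIDIA-SMI 495.29.05    Driver Version: 495.29.05    CUDA Version: 11.5 |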
def GetGpuType(vm):
"""Return the type of NVIDIA gpu(s) installed on the vm.
Args:
vm: Virtual machine to query.
Returns:
Type of gpus installed on the vm as a string.
Raises:
NvidiaSmiParseOutputError: If nvidia-smi output cannot be parsed.
HeterogeneousGpuTypesError: If more than one gpu type is detected.
UnsupportedClockSpeedError: If gpu type is not supported.
Example:
If 'nvidia-smi -L' returns:
GPU 0: Tesla V100-SXM2-16GB (UUID: GPU-1a046bb9-e456-45d3-5a35-52da392d09a5)
GPU 1: Tesla V100-SXM2-16GB (UUID: GPU-56cf4732-054c-4e40-9680-0ec27e97d21c)
GPU 2: Tesla V100-SXM2-16GB (UUID: GPU-4c7685ad-4b3a-8adc-ce20-f3a945127a8a)
GPU 3: Tesla V100-SXM2-16GB (UUID: GPU-0b034e63-22be-454b-b395-382e2d324728)
GPU 4: Tesla V100-SXM2-16GB (UUID: GPU-b0861159-4727-ef2f-ff66-73a765f4ecb6)
GPU 5: Tesla V100-SXM2-16GB (UUID: GPU-16ccaf51-1d1f-babe-9f3d-377e900bf37e)
GPU 6: Tesla V100-SXM2-16GB (UUID: GPU-6eba1fa6-de10-80e9-ec5f-4b8beeff7e12)
GPU 7: Tesla V100-SXM2-16GB (UUID: GPU-cba5a243-219c-df12-013e-1dbc98a8b0de)
GetGpuType() will return:
['V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB',
'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB', 'V100-SXM2-16GB']
"""
stdout, _ = vm.RemoteCommand('nvidia-smi -L', should_log=True)
try:
gpu_types = []
for line in stdout.splitlines():
if not line:
continue
splitted = line.split()
if splitted[2] in ('Tesla', 'NVIDIA'):
gpu_types.append(splitted[3])
else:
gpu_types.append(splitted[2])
except:
raise NvidiaSmiParseOutputError('Unable to parse gpu type from {}'
.format(stdout))
if any(gpu_type != gpu_types[0] for gpu_type in gpu_types):
raise HeterogeneousGpuTypesError(
'PKB only supports one type of gpu per VM')
if 'K80' in gpu_types[0]:
return NVIDIA_TESLA_K80
if 'P4' in gpu_types[0]:
return NVIDIA_TESLA_P4
if 'P100' in gpu_types[0]:
return NVIDIA_TESLA_P100
if 'V100' in gpu_types[0]:
return NVIDIA_TESLA_V100
if 'T4' in gpu_types[0]:
return NVIDIA_TESLA_T4
if 'A100' in gpu_types[0]:
return NVIDIA_TESLA_A100
raise UnsupportedClockSpeedError(
'Gpu type {0} is not supported by PKB'.format(gpu_types[0]))
def QueryNumberOfGpus(vm):
"""Returns the number of NVIDIA GPUs on the system.
Args:
vm: Virtual machine to query.
Returns:
Integer indicating the number of NVIDIA GPUs present on the vm.
"""
stdout, _ = vm.RemoteCommand('sudo nvidia-smi --query-gpu=count --id=0 '
'--format=csv', should_log=True)
return int(stdout.split()[1])
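# Illustrative stdout parsed above; with --format=csv the output is assumed to
# be a header row followed by the value, e.g.
#   count
#   8
# so stdout.split()[1] yields "8".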
def GetPeerToPeerTopology(vm):
"""Returns a string specifying which GPUs can access each other via p2p.
Args:
vm: Virtual machine to operate on.
Example:
If p2p topology from nvidia-smi topo -p2p r looks like this:
0 1 2 3
0 X OK NS NS
1 OK X NS NS
2 NS NS X OK
3 NS NS OK X
GetTopology will return 'Y Y N N;Y Y N N;N N Y Y;N N Y Y'
"""
stdout, _ = vm.RemoteCommand('nvidia-smi topo -p2p r', should_log=True)
lines = [line.split() for line in stdout.splitlines()]
num_gpus = len(lines[0])
results = []
for idx, line in enumerate(lines[1:]):
if idx >= num_gpus:
break
results.append(' '.join(line[1:]))
# Delimit each GPU result with semicolons,
# and simplify the result character set to 'Y' and 'N'.
return (';'.join(results)
.replace('X', 'Y') # replace X (self) with Y
.replace('OK', 'Y') # replace OK with Y
.replace('NS', 'N')) # replace NS (not supported) with N
def SetAndConfirmGpuClocks(vm):
"""Sets and confirms the GPU clock speed and autoboost policy.
The clock values are provided either by the gpu_pcie_bandwidth_clock_speeds
flags, or from gpu-specific defaults. If a device is queried and its
clock speed does not align with what it was just set to, an exception will
be raised.
Args:
vm: The virtual machine to operate on.
Raises:
UnsupportedClockSpeedError: If a GPU did not accept the
provided clock speeds.
"""
gpu_type = GetGpuType(vm)
gpu_clock_speeds = GPU_DEFAULTS[gpu_type]['base_clock']
autoboost_enabled = GPU_DEFAULTS[gpu_type]['autoboost_enabled']
if FLAGS.gpu_clock_speeds is not None:
gpu_clock_speeds = FLAGS.gpu_clock_speeds
if FLAGS.gpu_autoboost_enabled is not None:
autoboost_enabled = FLAGS.gpu_autoboost_enabled
desired_memory_clock = gpu_clock_speeds[0]
desired_graphics_clock = gpu_clock_speeds[1]
EnablePersistenceMode(vm)
SetGpuClockSpeed(vm, desired_memory_clock, desired_graphics_clock)
SetAutoboostDefaultPolicy(vm, autoboost_enabled)
num_gpus = QueryNumberOfGpus(vm)
for i in range(num_gpus):
if QueryGpuClockSpeed(vm, i) != (desired_memory_clock,
desired_graphics_clock):
raise UnsupportedClockSpeedError(
'Unrecoverable error setting GPU #{} clock speed to {},{}'.format(
i, desired_memory_clock, desired_graphics_clock))
def SetGpuClockSpeed(vm, memory_clock_speed, graphics_clock_speed):
"""Sets autoboost and memory and graphics clocks to the specified frequency.
Args:
vm: Virtual machine to operate on.
memory_clock_speed: Desired speed of the memory clock, in MHz.
graphics_clock_speed: Desired speed of the graphics clock, in MHz.
"""
num_gpus = QueryNumberOfGpus(vm)
for device_id in range(num_gpus):
current_clock_speeds = QueryGpuClockSpeed(vm, device_id)
if current_clock_speeds != (memory_clock_speed, graphics_clock_speed):
vm.RemoteCommand('sudo nvidia-smi -ac {},{} --id={}'.format(
memory_clock_speed,
graphics_clock_speed,
device_id
))
def QueryGpuClockSpeed(vm, device_id):
"""Returns the value of the memory and graphics clock.
All clock values are in MHz.
Args:
vm: Virtual machine to operate on.
device_id: Id of GPU device to query.
Returns:
Tuple of clock speeds in MHz in the form (memory clock, graphics clock).
"""
query = ('sudo nvidia-smi --query-gpu=clocks.applications.memory,'
'clocks.applications.graphics --format=csv --id={0}'
.format(device_id))
stdout, _ = vm.RemoteCommand(query, should_log=True)
clock_speeds = stdout.splitlines()[1]
matches = regex_util.ExtractAllMatches(EXTRACT_CLOCK_SPEEDS_REGEX,
clock_speeds)[0]
return (int(matches[0]), int(matches[1]))
def EnablePersistenceMode(vm):
"""Enables persistence mode on the NVIDIA driver.
Args:
vm: Virtual machine to operate on.
"""
vm.RemoteCommand('sudo nvidia-smi -pm 1')
def SetAutoboostDefaultPolicy(vm, autoboost_enabled):
"""Sets the autoboost policy to the specified value.
For each GPU on the VM, this function will set the autoboost policy
to the value specified by autoboost_enabled.
Args:
vm: Virtual machine to operate on.
autoboost_enabled: Bool or None. Value (if any) to set autoboost policy to
"""
if autoboost_enabled is None:
return
num_gpus = QueryNumberOfGpus(vm)
for device_id in range(num_gpus):
current_state = QueryAutoboostPolicy(vm, device_id)
if current_state['autoboost_default'] != autoboost_enabled:
vm.RemoteCommand('sudo nvidia-smi --auto-boost-default={0} --id={1}'
.format(1 if autoboost_enabled else 0, device_id))
def QueryAutoboostPolicy(vm, device_id):
"""Returns the state of autoboost and autoboost_default.
Args:
vm: Virtual machine to operate on.
device_id: Id of GPU device to query.
Returns:
Dict containing values for autoboost and autoboost_default.
Values can be True (autoboost on), False (autoboost off),
and None (autoboost not supported).
Raises:
NvidiaSmiParseOutputError: If output from nvidia-smi can not be parsed.
"""
autoboost_regex = r'Auto Boost\s*:\s*(\S+)'
autoboost_default_regex = r'Auto Boost Default\s*:\s*(\S+)'
query = 'sudo nvidia-smi -q -d CLOCK --id={0}'.format(device_id)
stdout, _ = vm.RemoteCommand(query, should_log=True)
autoboost_match = re.search(autoboost_regex, stdout)
autoboost_default_match = re.search(autoboost_default_regex, stdout)
nvidia_smi_output_string_to_value = {
'On': True,
'Off': False,
'N/A': None,
}
if (autoboost_match is None) or (autoboost_default_match is None):
raise NvidiaSmiParseOutputError('Unable to parse Auto Boost policy from {}'
.format(stdout))
return {
'autoboost': nvidia_smi_output_string_to_value[
autoboost_match.group(1)],
'autoboost_default': nvidia_smi_output_string_to_value[
autoboost_default_match.group(1)]
}
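# Illustrative fragment of `nvidia-smi -q -d CLOCK` output that the two
# regexes above target (exact spacing varies by driver version):
#   Auto Boost              : N/A
#   Auto Boost Default      : N/A
# QueryAutoboostPolicy maps this to
# {'autoboost': None, 'autoboost_default': None}.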
def GetMetadata(vm):
"""Returns gpu-specific metadata as a dict.
Args:
vm: Virtual machine to operate on.
Returns:
A dict of gpu-specific metadata.
"""
clock_speeds = QueryGpuClockSpeed(vm, 0)
autoboost_policy = QueryAutoboostPolicy(vm, 0)
return {
'gpu_memory_clock': clock_speeds[0],
'gpu_graphics_clock': clock_speeds[1],
'gpu_autoboost': autoboost_policy['autoboost'],
'gpu_autoboost_default': autoboost_policy['autoboost_default'],
'nvidia_driver_version': GetDriverVersion(vm),
'gpu_type': GetGpuType(vm),
'num_gpus': QueryNumberOfGpus(vm),
'peer_to_peer_gpu_topology': GetPeerToPeerTopology(vm),
}
def DoPostInstallActions(vm):
"""Perform post NVIDIA driver install action on the vm.
Args:
vm: The virtual machine to operate on.
"""
SetAndConfirmGpuClocks(vm)
def Install(vm):
"""Install NVIDIA GPU driver on the vm.
Args:
vm: The virtual machine to install NVIDIA driver on.
"""
version_to_install = FLAGS.nvidia_driver_version
if not version_to_install:
logging.info('--nvidia_driver_version unset. Not installing.')
return
elif not FLAGS.nvidia_driver_force_install and CheckNvidiaSmiExists(vm):
logging.warn('NVIDIA drivers already detected. Not installing.')
return
location = ('{base}/{version}/NVIDIA-Linux-x86_64-{version}.run'
.format(base=NVIDIA_DRIVER_LOCATION_BASE,
version=version_to_install))
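# For example, with the default --nvidia_driver_version of 495.29.05 the
# location above resolves to:
# https://us.download.nvidia.com/tesla/495.29.05/NVIDIA-Linux-x86_64-495.29.05.run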
vm.Install('wget')
tokens = re.split('/', location)
filename = tokens[-1]
vm.RemoteCommand('wget {location} && chmod 755 {filename} '
.format(location=location, filename=filename),
should_log=True)
vm.RemoteCommand('sudo ./{filename} -q -x-module-path={x_module_path} '
'--ui=none -x-library-path={x_library_path} '
'--no-install-compat32-libs'
.format(filename=filename,
x_module_path=FLAGS.nvidia_driver_x_module_path,
x_library_path=FLAGS.nvidia_driver_x_library_path),
should_log=True)
if FLAGS.nvidia_driver_persistence_mode:
EnablePersistenceMode(vm)
|
py | 1a5498f81765fb1eac207c52c6344cd3eedbeb35 | import sys
import typing
def main() -> typing.NoReturn:
n = int(input())
(*t,) = map(int, sys.stdin.read().split())
print(min(t))
main()
|
py | 1a5499eeea88613f0a2176c3b04ded3032c268d7 | #!/usr/bin/env python3
import rich.markup
from pwncat.db import Fact
from pwncat.modules import ModuleFailed
from pwncat.platform.windows import Windows, PowershellError
from pwncat.modules.enumerate import EnumerateModule
class InstalledProgramData(Fact):
def __init__(self, source, path: str):
super().__init__(source=source, types=["system.programs"])
self.path: str = path
def title(self, session):
return f"{rich.markup.escape(repr(self.path))}"
class Module(EnumerateModule):
"""Enumerate the current Windows Defender settings on the target"""
PROVIDES = ["system.programs"]
PLATFORM = [Windows]
def enumerate(self, session):
try:
program_files = session.platform.powershell(
'Get-ChildItem "C:\\Program Files","C:\\Program Files (x86)" -ErrorAction SilentlyContinue | Select Fullname'
)[0]
if not isinstance(program_files, list):
program_files = [program_files]
for path in program_files:
yield InstalledProgramData(self.name, path["FullName"])
except (PowershellError, IndexError) as exc:
raise ModuleFailed(
f"failed to list program file directories: {exc}"
) from exc
|
py | 1a549a1e9c11ac0290ef46a757209c1f94fa3ed8 | from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.embeddings import Embedding
# from keras import optimizers
from preprocess.unsw import generate_dataset
from netlearner.utils import quantile_transform
import numpy as np
generate_dataset(one_hot_encode=False)
raw_train_dataset = np.load('UNSW/train_dataset.npy')
train_labels = np.load('UNSW/train_labels.npy')
raw_valid_dataset = np.load('UNSW/valid_dataset.npy')
valid_labels = np.load('UNSW/valid_labels.npy')
raw_test_dataset = np.load('UNSW/test_dataset.npy')
test_labels = np.load('UNSW/test_labels.npy')
embedded_features = raw_train_dataset[:, -3:]
print(embedded_features.shape)
vocabulary_dim = int(np.amax(embedded_features)) + 1
embedding_dim = int(np.log2(vocabulary_dim)) + 1
num_features = embedded_features.shape[1]
print("|V| =", vocabulary_dim)
print("|E| =", embedding_dim)
print("|F| =", num_features)
model1 = Sequential()
model1.add(Embedding(vocabulary_dim, embedding_dim, input_length=num_features))
model1.add(Flatten())
model1.compile('rmsprop', 'mse')
train_embeddings = model1.predict(embedded_features)
valid_embeddings = model1.predict(raw_valid_dataset[:, -3:])
test_embeddings = model1.predict(raw_test_dataset[:, -3:])
print(train_embeddings.shape)
print(test_embeddings.shape)
columns = np.array(list(range(1, 6)) + list(range(8, 16)) + list(range(17, 19)) +
list(range(23, 25)) + [26])
[train_dataset, valid_dataset, test_dataset] = quantile_transform(
raw_train_dataset[:, :-3],
raw_valid_dataset[:, :-3],
raw_test_dataset[:, :-3], columns)
X_train = np.concatenate((train_dataset, train_embeddings), axis=1)
X_valid = np.concatenate((valid_dataset, valid_embeddings), axis=1)
X_test = np.concatenate((test_dataset, test_embeddings), axis=1)
print(X_train.shape, X_test.shape)
num_features = X_train.shape[1]
num_classes = train_labels.shape[1]
model2 = Sequential()
model2.add(Dense(400, input_dim=num_features))
model2.add(Activation('relu'))
model2.add(Dropout(0.8))
model2.add(Dense(512))
model2.add(Activation('relu'))
model2.add(Dropout(0.8))
model2.add(Dense(640))
model2.add(Activation('relu'))
model2.add(Dense(num_classes))
model2.add(Activation('softmax'))
# adam = optimizers.Adam(lr=0.001, decay=0.002)
model2.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model2.fit(X_train, train_labels,
batch_size=100,
epochs=160,
verbose=1,
validation_data=(X_valid, valid_labels))
score = model2.evaluate(X_test, test_labels, batch_size=100, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
py | 1a549b6d629159ca4b12947ed942b068334cafd6 | from setuptools import setup, find_packages
setup(
name = 'x-transformers',
packages = find_packages(exclude=['examples']),
version = '0.12.3',
license='MIT',
description = 'X-Transformers - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
url = 'https://github.com/lucidrains/x-transformers',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers'
],
install_requires=[
'torch>=1.6',
'einops>=0.3',
'entmax'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
py | 1a549bb637028049432f626b882830fe8563486f | #encoding:utf-8
import datetime
import csv
import logging
from multiprocessing import Process
import time
import yaml
from croniter import croniter
from supplier import supply
logger = logging.getLogger(__name__)
def read_own_cron(own_cron_filename, config):
with open(own_cron_filename) as tsv_file:
tsv_reader = csv.DictReader(tsv_file, delimiter='\t')
for row in tsv_reader:
now = datetime.datetime.now()
cron = croniter(row['MASK'])
# prev_run = cron.get_current(datetime.datetime)
prev_run = cron.get_prev(datetime.datetime)
prev_run = cron.get_next(datetime.datetime)
diff = now - prev_run
diff_seconds = diff.total_seconds()
if diff_seconds >= 0.0 and diff_seconds <= 59.9:
# print(row['submodule_name'], diff_seconds)
# supply(row['submodule_name'], config)
supplying_process = Process(target=supply, args=(row['submodule_name'], config))
supplying_process.start()
time.sleep(2)
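# Illustrative layout of the tab-separated cron_file consumed above; the
# column names come from the DictReader lookups, while the rows (and their
# order) are made-up examples:
#   submodule_name    MASK
#   weather_feed      */5 * * * *
#   daily_digest      0 9 * * *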
def main(config_filename):
with open(config_filename) as config_file:
config = yaml.load(config_file.read())
read_own_cron(config['cron_file'], config)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='configs/prod.yml')
args = parser.parse_args()
main(args.config)
|
py | 1a549d773e7ab6f3815f7a06d2bc455c25f9684e | # 基于signals自定义钩子
import django.dispatch
# 1. Define the hook (receiver) function
def func(sender, request, **kwargs):
pass
# 2. Define the signal
post_before = django.dispatch.Signal(providing_args=["args", "kwargs"])
# 3. Register the hook with the signal
post_before.connect(func)
def func(sender, request, instance, **kwargs):
pass
patch_before = django.dispatch.Signal(providing_args=["args", "kwargs"])
patch_before.connect(func)
valid_after = django.dispatch.Signal(providing_args=["args", "kwargs"])
valid_after.connect(func)
patch_save = django.dispatch.Signal(providing_args=["args", "kwargs"])
patch_save.connect(func)
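# Hedged usage sketch (not part of the original module): a view or handler
# would fire one of the signals above. Signal.send() calls every connected
# receiver synchronously and returns a list of (receiver, response) pairs.
def example_fire_post_before(request=None):
    return post_before.send(sender=None, request=request)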
|
py | 1a549e429eab352518d2cdf9b6a0a42259e5d58a | __author__ = 'Lambert Justo'
import glob # for getting botcogs
import discord
from discord.ext import commands
#import schmoobot.src.credentials as credentials
import credentials
from botcogs.utils import check
#from schmoobot.src.botcogs.utils import check
bot_prefix = "!"
formatter = commands.HelpFormatter(show_check_failure=False)
bot = commands.Bot(command_prefix=bot_prefix, formatter=formatter,
description="Schmoo Bot!", pm_help=None)
prev = None
def list_cogs():
"""
Gets all modules from [cwd]/botcogs/ and puts them in a list to return in the
form of ["botcogs.module1", "botcogs.module2", "botcogs.module3", ...]
:return: list of strings where each module J.py is of form "botcogs.J"
"""
cogs = glob.glob("botcogs/*.py")
cog_list = []
for c in cogs:
cog_list.append("botcogs." +
c.replace("/","\\").split("\\")[1].replace(".py", ""))
return cog_list
# cogs_list = list_cogs()
cogs_list = ["botcogs.rng", "botcogs.searches", "botcogs.misc", "botcogs.mash", "botcogs.blackjack"] # FOR NOW, KEEP A HARDCODED LIST
def load_cog(cog_name : str):
cog_name = cog_name.strip()
if cog_name not in cogs_list:
print("Couldn't find module", cog_name)
return
try:
bot.load_extension(cog_name)
except (ImportError, discord.ClientException, AttributeError) as e:
print("Failed to load cog", cog_name, " due to", str(e))
return
@bot.event
async def on_ready():
print("Logged in as:")
print("name:", bot.user.name)
print("id:", bot.user.id)
print("--------------------------------------------")
@check.is_owner()
@bot.command()
async def load(extension_name : str):
print("got to load function")
"""
attempt to load a cog - a cog is a module that has commands
"""
# strip any whitespace
extension_name = extension_name.strip()
# check if the extension is in the list of loaded botcogs
if extension_name not in cogs_list:
output = "Couldn't find module " + extension_name
await bot.say(output)
return
# attempt to load the extension
try:
bot.load_extension(extension_name)
except (ImportError, discord.ClientException, AttributeError) as e:
output = "Failed to load cog " + extension_name + " due to ", str(e)
await bot.say(output)
return
output = "Loaded " + extension_name + " successfully!"
await bot.say(output)
"""
@bot.group(name="set", pass_context=True)
async def __set__(context):
if context.invoked_subcommand is None:
pass
"""
@bot.command()
@check.is_owner()
async def load_all():
for elem in cogs_list:
load_cog(elem)
await bot.say("Loaded " + str(cogs_list) + " successfully!")
await bot.say("Active commands (syntax: '![command] [extra_argument]'): "
+ str(list(bot.commands.keys())))
@bot.command()
@check.is_owner()
async def unload(extension_name : str):
"""attempt to load an extension (plugin"""
# extension_name = botcogs.<module_name>
extension_name = extension_name.strip()
try:
bot.unload_extension(extension_name)
except Exception as e:
await bot.say("Failed to unload cog " + extension_name + " due to " + str(e))
await bot.say("Unloaded " + extension_name + " successfully!")
@bot.command()
@check.is_owner()
async def reload(extension_name : str):
extension_name = "botcogs." + extension_name
if extension_name not in cogs_list:
await bot.say("Failed to find cog " + str(extension_name))
return
try:
bot.unload_extension(extension_name)
load_cog(extension_name)
except Exception as e:
await bot.say("Failed to reload cog " + extension_name + " due to " + str(e))
return
await bot.say("Reloaded " + extension_name + " successfully!")
@bot.event
async def on_message(message):
await bot.process_commands(message)
@bot.event
async def on_command(command, context):
# not even sure why this is here
pass
@bot.command()
@check.is_owner()
async def bye():
await bot.say("Bye-bye!")
await bot.logout()
bot.run(credentials.email, credentials.password)
|
py | 1a549e52343676749e09447f148bbf8aa7f9f74a | # Generated by Django 2.2.5 on 2019-12-22 12:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
("rooms", "0001_initial"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name="room",
name="host",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="rooms",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AddField(
model_name="room",
name="house_rules",
field=models.ManyToManyField(
blank=True, related_name="rooms", to="rooms.HouseRule"
),
),
migrations.AddField(
model_name="room",
name="room_type",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="rooms",
to="rooms.RoomType",
),
),
migrations.AddField(
model_name="photo",
name="room",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="photos",
to="rooms.Room",
),
),
]
|
py | 1a549e593dd07c11e7ab1454519d8f3cd72152f7 | import re
def Filter(**kargs):
value = kargs['value']
def filt(data, sr, sctx, sccol, matches):
for m in matches:
m['dup'] = value
return matches
return filt
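# Hedged usage sketch (not part of the original module): Filter(value=...)
# builds a match post-processor that tags every match dict with a 'dup' flag.
if __name__ == '__main__':
    tag_dups = Filter(value=True)
    print(tag_dups(None, None, None, None, [{'text': 'x'}]))
    # -> [{'text': 'x', 'dup': True}]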
|
py | 1a549f5a1d1f4d66244b03b7b81365c3d1dd05e3 | from SeismicReduction import *
set_seed(42) # set seed to standardise results
### Data loading:
dataholder = DataHolder("Glitne", [1300, 1502, 2], [1500, 2002, 2])
dataholder.add_near('./data/3d_nearstack.sgy')
dataholder.add_far('./data/3d_farstack.sgy')
dataholder.add_horizon('./data/Top_Heimdal_subset.txt')
### Processing:
processor = Processor(dataholder)
processed_data = processor(flatten=[True, 12, 52], crop=[False, 120, 200], normalise=True)
### Model analysis:
## PCA
pca = PcaModel(processed_data)
pca.reduce(2)
pca.to_2d()
## UMAP
umap = UmapModel(processed_data)
umap.reduce(umap_neighbours=50, umap_dist=0.01)
## vae
vae = VaeModel(processed_data)
vae.reduce(epochs=50, hidden_size=2, lr=0.0005, plot_loss=False)
vae.to_2d()
## bvae
bvae = BVaeModel(processed_data)
bvae.reduce(epochs=50, hidden_size=2, lr=0.0005, beta=7, plot_loss=False)
bvae.to_2d()
## Visualisation
plot_agent(vae, attr='FF', cmap='magma', vmin=-3, save_path=False)
plot_agent(bvae, attr='FF', cmap='hot', save_path=False)
plot_agent(vae, attr='FF', cmap='magma', save_path=False)
plot_agent(bvae, attr='FF', cmap='winter',save_path=False) |
py | 1a549f875d4492936edf943f4e02cca0ba48a625 | import gdb.printing
class SmallStringPrinter:
"""Print an llvm::SmallString object."""
def __init__(self, val):
self.val = val
def to_string(self):
begin = self.val['BeginX']
end = self.val['EndX']
return begin.cast(gdb.lookup_type("char").pointer()).string(length = end - begin)
def display_hint (self):
return 'string'
class StringRefPrinter:
"""Print an llvm::StringRef object."""
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['Data'].string(length = self.val['Length'])
def display_hint (self):
return 'string'
class SmallVectorPrinter:
"""Print an llvm::SmallVector object."""
class _iterator:
def __init__(self, begin, end):
self.cur = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def next(self):
if self.cur == self.end:
raise StopIteration
count = self.count
self.count = self.count + 1
cur = self.cur
self.cur = self.cur + 1
return '[%d]' % count, cur.dereference()
__next__ = next
def __init__(self, val):
self.val = val
def children(self):
t = self.val.type.template_argument(0).pointer()
begin = self.val['BeginX'].cast(t)
end = self.val['EndX'].cast(t)
return self._iterator(begin, end)
def to_string(self):
t = self.val.type.template_argument(0).pointer()
begin = self.val['BeginX'].cast(t)
end = self.val['EndX'].cast(t)
capacity = self.val['CapacityX'].cast(t)
return 'llvm::SmallVector of length %d, capacity %d' % (end - begin, capacity - begin)
def display_hint (self):
return 'array'
class ArrayRefPrinter:
"""Print an llvm::ArrayRef object."""
class _iterator:
def __init__(self, begin, end):
self.cur = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def next(self):
if self.cur == self.end:
raise StopIteration
count = self.count
self.count = self.count + 1
cur = self.cur
self.cur = self.cur + 1
return '[%d]' % count, cur.dereference()
__next__ = next
def __init__(self, val):
self.val = val
def children(self):
data = self.val['Data']
return self._iterator(data, data + self.val['Length'])
def to_string(self):
return 'llvm::ArrayRef of length %d' % (self.val['Length'])
def display_hint (self):
return 'array'
class OptionalPrinter:
"""Print an llvm::Optional object."""
def __init__(self, value):
self.value = value
class _iterator:
def __init__(self, member, empty):
self.member = member
self.done = empty
def __iter__(self):
return self
def next(self):
if self.done:
raise StopIteration
self.done = True
return ('value', self.member.dereference())
def children(self):
if not self.value['hasVal']:
return self._iterator('', True)
return self._iterator(self.value['storage']['buffer'].address.cast(self.value.type.template_argument(0).pointer()), False)
def to_string(self):
return 'llvm::Optional is %sinitialized' % ('' if self.value['hasVal'] else 'not ')
class DenseMapPrinter:
"Print a DenseMap"
class _iterator:
def __init__(self, key_info_t, begin, end):
self.key_info_t = key_info_t
self.cur = begin
self.end = end
self.advancePastEmptyBuckets()
self.first = True
def __iter__(self):
return self
def advancePastEmptyBuckets(self):
# disabled until the comments below can be addressed
# keeping as notes/posterity/hints for future contributors
return
n = self.key_info_t.name
is_equal = gdb.parse_and_eval(n + '::isEqual')
empty = gdb.parse_and_eval(n + '::getEmptyKey()')
tombstone = gdb.parse_and_eval(n + '::getTombstoneKey()')
# the following is invalid, GDB fails with:
# Python Exception <class 'gdb.error'> Attempt to take address of value
# not located in memory.
# because isEqual took parameter (for the unsigned long key I was testing)
            # by const ref, and GDB could not take the address of the resulting
            # temporary (hence the error quoted above).
# It's also not entirely general - we should be accessing the "getFirst()"
# member function, not the 'first' member variable, but I've yet to figure
# out how to find/call member functions (especially (const) overloaded
# ones) on a gdb.Value.
while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
self.cur = self.cur + 1
def next(self):
if self.cur == self.end:
raise StopIteration
cur = self.cur
v = cur.dereference()['first' if self.first else 'second']
if not self.first:
self.cur = self.cur + 1
self.advancePastEmptyBuckets()
self.first = True
else:
self.first = False
return 'x', v
def __init__(self, val):
self.val = val
def children(self):
t = self.val.type.template_argument(3).pointer()
begin = self.val['Buckets'].cast(t)
end = (begin + self.val['NumBuckets']).cast(t)
return self._iterator(self.val.type.template_argument(2), begin, end)
def to_string(self):
return 'llvm::DenseMap with %d elements' % (self.val['NumEntries'])
def display_hint(self):
return 'map'
class TwinePrinter:
"Print a Twine"
def __init__(self, val):
self._val = val
def display_hint(self):
return 'string'
def string_from_pretty_printer_lookup(self, val):
'''Lookup the default pretty-printer for val and use it.
If no pretty-printer is defined for the type of val, print an error and
return a placeholder string.'''
pp = gdb.default_visualizer(val)
if pp:
s = pp.to_string()
# The pretty-printer may return a LazyString instead of an actual Python
# string. Convert it to a Python string. However, GDB doesn't seem to
# register the LazyString type, so we can't check
# "type(s) == gdb.LazyString".
if 'LazyString' in type(s).__name__:
s = s.value().address.string()
else:
print(('No pretty printer for {} found. The resulting Twine ' +
'representation will be incomplete.').format(val.type.name))
s = '(missing {})'.format(val.type.name)
return s
def is_twine_kind(self, kind, expected):
if not kind.endswith(expected):
return False
# apparently some GDB versions add the NodeKind:: namespace
# (happens for me on GDB 7.11)
return kind in ('llvm::Twine::' + expected,
'llvm::Twine::NodeKind::' + expected)
def string_from_child(self, child, kind):
'''Return the string representation of the Twine::Child child.'''
if self.is_twine_kind(kind, 'EmptyKind') or self.is_twine_kind(kind, 'NullKind'):
return ''
if self.is_twine_kind(kind, 'TwineKind'):
return self.string_from_twine_object(child['twine'].dereference())
if self.is_twine_kind(kind, 'CStringKind'):
return child['cString'].string()
if self.is_twine_kind(kind, 'StdStringKind'):
val = child['stdString'].dereference()
return self.string_from_pretty_printer_lookup(val)
if self.is_twine_kind(kind, 'StringRefKind'):
val = child['stringRef'].dereference()
pp = StringRefPrinter(val)
return pp.to_string()
if self.is_twine_kind(kind, 'SmallStringKind'):
val = child['smallString'].dereference()
pp = SmallStringPrinter(val)
return pp.to_string()
if self.is_twine_kind(kind, 'CharKind'):
return chr(child['character'])
if self.is_twine_kind(kind, 'DecUIKind'):
return str(child['decUI'])
if self.is_twine_kind(kind, 'DecIKind'):
return str(child['decI'])
if self.is_twine_kind(kind, 'DecULKind'):
return str(child['decUL'].dereference())
if self.is_twine_kind(kind, 'DecLKind'):
return str(child['decL'].dereference())
if self.is_twine_kind(kind, 'DecULLKind'):
return str(child['decULL'].dereference())
if self.is_twine_kind(kind, 'DecLLKind'):
return str(child['decLL'].dereference())
if self.is_twine_kind(kind, 'UHexKind'):
val = child['uHex'].dereference()
return hex(int(val))
print(('Unhandled NodeKind {} in Twine pretty-printer. The result will be '
'incomplete.').format(kind))
return '(unhandled {})'.format(kind)
def string_from_twine_object(self, twine):
'''Return the string representation of the Twine object twine.'''
lhs_str = ''
rhs_str = ''
lhs = twine['LHS']
rhs = twine['RHS']
lhs_kind = str(twine['LHSKind'])
rhs_kind = str(twine['RHSKind'])
lhs_str = self.string_from_child(lhs, lhs_kind)
rhs_str = self.string_from_child(rhs, rhs_kind)
return lhs_str + rhs_str
def to_string(self):
return self.string_from_twine_object(self._val)
pp = gdb.printing.RegexpCollectionPrettyPrinter("LLVMSupport")
pp.add_printer('llvm::SmallString', '^llvm::SmallString<.*>$', SmallStringPrinter)
pp.add_printer('llvm::StringRef', '^llvm::StringRef$', StringRefPrinter)
pp.add_printer('llvm::SmallVectorImpl', '^llvm::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
pp.add_printer('llvm::ArrayRef', '^llvm::(Const)?ArrayRef<.*>$', ArrayRefPrinter)
pp.add_printer('llvm::Optional', '^llvm::Optional<.*>$', OptionalPrinter)
pp.add_printer('llvm::DenseMap', '^llvm::DenseMap<.*>$', DenseMapPrinter)
pp.add_printer('llvm::Twine', '^llvm::Twine$', TwinePrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
|
py | 1a54a0bfde60b24a21c678bd5c2450ed7e412960 | from win32._ts import * |
py | 1a54a16130bc637db1fe84e9fc257ecc95fe2a62 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import tqdm
tqdm.monitor_interval = 0 # workaround for https://github.com/tqdm/tqdm/issues/481
class SimpleTqdm():
def __init__(self, iterable=None, total=None, **kwargs):
self.iterable = list(iterable) if iterable is not None else None
self.total = len(self.iterable) if self.iterable is not None else total
assert self.iterable is not None or self.total is not None
self.current_step = 0
self.print_frequency = max(self.total // 50, 1)
self.desc = ""
def set_description_str(self, desc):
self.desc = desc
def set_description(self, desc):
self.desc = desc
def update(self, steps):
last_print_step = (self.current_step // self.print_frequency) * self.print_frequency
i = 1
while last_print_step + i * self.print_frequency <= self.current_step + steps:
print("*", end='')
i += 1
self.current_step += steps
def close(self):
print("\n" + self.desc)
def __iter__(self):
assert self.iterable is not None
self.index = 0
return self
def __next__(self):
if self.index < self.total:
element = self.iterable[self.index]
self.update(1)
self.index += 1
return element
else:
self.close()
raise StopIteration
def tqdm_notebook_failsafe(*args, **kwargs):
try:
return tqdm.tqdm_notebook(*args, **kwargs)
except:
# tqdm is broken on Google Colab
return SimpleTqdm(*args, **kwargs)
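if __name__ == '__main__':
    # Hedged usage sketch: iterate with the failsafe progress bar. Outside a
    # notebook this falls back to SimpleTqdm, which prints '*' roughly every
    # 2% of the total number of steps.
    for _ in tqdm_notebook_failsafe(range(200)):
        pass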
|
py | 1a54a282a4e02fee15b483d99f74eea570df3f33 | # [8 kyu] Century From Year
#
# Author: Hsins
# Date: 2019/12/21
import math
def century(year):
return math.ceil(year / 100)
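# Quick hedged sanity checks (values mirror the usual kata examples):
assert century(1705) == 18
assert century(1900) == 19
assert century(2000) == 20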
|
py | 1a54a2844c395e037a321a789f7015e642a0faa6 | #!/usr/bin/env python
"""\
@file md5check.py
@brief Replacement for message template compatibility verifier.
$LicenseInfo:firstyear=2010&license=viewerlgpl$
Second Life Viewer Source Code
Copyright (C) 2010-2011, Linden Research, Inc.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2.1 of the License only.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
$/LicenseInfo$
"""
import sys
import hashlib
if len(sys.argv) != 3:
print """Usage: %s --create|<hash-digest> <file>
Creates an md5sum hash digest of the specified file content
and compares it with the given hash digest.
If --create is used instead of a hash digest, it will simply
print out the hash digest of specified file content.
""" % sys.argv[0]
sys.exit(1)
if sys.argv[2] == '-':
fh = sys.stdin
filename = "<stdin>"
else:
filename = sys.argv[2]
fh = open(filename)
hexdigest = hashlib.md5(fh.read()).hexdigest()
if sys.argv[1] == '--create':
print hexdigest
elif hexdigest == sys.argv[1]:
print "md5sum check passed:", filename
else:
print "md5sum check FAILED:", filename
sys.exit(1)
|
py | 1a54a29242630552ad96105e649e0c374f787df3 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
from unittest import TestCase
from SciDataTool import DataTime, Data1D, DataLinspace, VectorField
from pyleecan.Classes.SolutionData import SolutionData
from pyleecan.Classes.SolutionMat import SolutionMat
from pyleecan.Classes.SolutionVector import SolutionVector
@pytest.mark.MeshSol
def test_SolutionMat():
""" Tests for get_field method from SolutionMat class"""
DELTA = 1e-10
field = np.zeros((2, 3, 2))
field[:, :, 0] = np.array([[1, 2, 3], [2, 3, 4]])
field[:, :, 1] = np.array([[11, 21, 31], [21, 31, 41]])
solution = SolutionMat()
solution.field = field
solution.axis_name = ["time", "indice", "z"]
solution.axis_size = [2, 3, 2]
field = solution.get_field()
correction = field
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
np.testing.assert_almost_equal(result, 0, err_msg=msg)
field = solution.get_field("time[1]", "indice[1,2]", is_squeeze=True)
correction = np.array([[3, 4]])
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
np.testing.assert_almost_equal(result, 0, err_msg=msg)
field = solution.get_field("z[1]", "indice[1,2]", is_squeeze=True)
correction = np.array([[21, 31]])
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
np.testing.assert_almost_equal(result, 0, err_msg=msg)
@pytest.mark.MeshSol
def test_SolutionVector():
DELTA = 1e-10
Indices_Cell = Data1D(name="indice", values=[0, 1, 2, 4], is_components=True)
Time = DataLinspace(
name="time",
unit="s",
initial=0,
final=1,
number=10,
)
H = np.ones((10, 4, 2))
# Store the results for H
componentsH = {}
Hx_data = DataTime(
name="Magnetic Field Hx",
unit="A/m",
symbol="Hx",
axes=[Time, Indices_Cell],
values=H[:, :, 0],
)
componentsH["comp_x"] = Hx_data
Hy_data = DataTime(
name="Magnetic Field Hy",
unit="A/m",
symbol="Hy",
axes=[Time, Indices_Cell],
values=H[:, :, 1],
)
componentsH["comp_y"] = Hy_data
vecH = VectorField(name="Magnetic Field", symbol="H", components=componentsH)
solution = SolutionVector(field=vecH, type_cell="triangle", label="H")
field = solution.get_field()
correction = np.ones((10, 4, 2))
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
np.testing.assert_almost_equal(result, 0, err_msg=msg)
field = solution.get_field("time[0]", "indice[1,2]")
correction = np.ones((2, 2))
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
np.testing.assert_almost_equal(result, 0, err_msg=msg)
@pytest.mark.MeshSol
def test_SolutionData():
DELTA = 1e-10
Indices_Cell = Data1D(name="indice", values=[0, 1, 2, 4], is_components=True)
Time = DataLinspace(
name="time",
unit="s",
initial=0,
final=1,
number=10,
)
# Store the results for H
H = DataTime(
name="Magnetic Field Hx",
unit="A/m",
symbol="Hx",
axes=[Time, Indices_Cell],
values=np.ones((10, 4)),
)
solution = SolutionData(field=H, type_cell="triangle", label="H")
field = solution.get_field()
correction = np.ones((10, 4))
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
np.testing.assert_almost_equal(result, 0, err_msg=msg)
field = solution.get_field("time[0]", "indice[1,2]")
correction = correction[0, 1:3]
result = np.sum(np.abs(correction - field))
msg = "Wrong result: returned " + str(field) + ", expected: " + str(correction)
np.testing.assert_almost_equal(result, 0, err_msg=msg)
if __name__ == "__main__":
test_SolutionMat()
test_SolutionData()
test_SolutionVector()
# test_plot_contour_2group()
|
py | 1a54a35aed5d379676944fd97e3e35bd9b173b34 | """nio encryption module.
Encryption is handled mostly transparently to the user.
The main thing users need to worry about is device verification.
While device verification is handled in the Client classes of nio the classes
that are used to introspect OlmDevices or device authentication sessions are
documented here.
"""
import sys
from .._compat import package_installed
from .attachments import encrypt_attachment, decrypt_attachment
if sys.version_info >= (3, 5):
from .async_attachments import (AsyncDataT, async_encrypt_attachment,
async_generator_from_data,)
if package_installed("olm"):
from .sessions import (
OlmAccount,
Session,
OutboundSession,
InboundSession,
OutboundGroupSession,
InboundGroupSession,
OlmDevice,
OutgoingKeyRequest,
TrustState
)
from .memorystores import (
SessionStore,
GroupSessionStore,
DeviceStore
)
from .log import logger
from .olm_machine import Olm
from .sas import Sas, SasState
ENCRYPTION_ENABLED = True
else:
ENCRYPTION_ENABLED = False
|
py | 1a54a48e19e9ddb907264df193d49de95e20e589 | # coding: utf-8
# flake8: noqa
"""
HubDB endpoints
HubDB is a relational data store that presents data as rows, columns, and cells in a table, much like a spreadsheet. HubDB tables can be added or modified [in the HubSpot CMS](https://knowledge.hubspot.com/cos-general/how-to-edit-hubdb-tables), but you can also use the API endpoints documented here. For more information on HubDB tables and using their data on a HubSpot site, see the [CMS developers site](https://designers.hubspot.com/docs/tools/hubdb). You can also see the [documentation for dynamic pages](https://designers.hubspot.com/docs/tutorials/how-to-build-dynamic-pages-with-hubdb) for more details about the `useForPages` field. HubDB tables now support `DRAFT` and `PUBLISHED` versions. This allows you to update data in the table, either for testing or to allow for a manual approval process, without affecting any live pages using the existing data. Draft data can be reviewed and published by a user working in HubSpot or published via the API. Draft data can also be discarded, allowing users to go back to the live version of the data without disrupting it. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "1.0.0"
# import apis into sdk package
from hubspot.cms.hubdb.api.default_api import DefaultApi
# import ApiClient
from hubspot.cms.hubdb.api_client import ApiClient
from hubspot.cms.hubdb.configuration import Configuration
from hubspot.cms.hubdb.exceptions import OpenApiException
from hubspot.cms.hubdb.exceptions import ApiTypeError
from hubspot.cms.hubdb.exceptions import ApiValueError
from hubspot.cms.hubdb.exceptions import ApiKeyError
from hubspot.cms.hubdb.exceptions import ApiException
# import models into sdk package
from hubspot.cms.hubdb.models.batch_input_hub_db_table_row_v3 import (
BatchInputHubDbTableRowV3,
)
from hubspot.cms.hubdb.models.batch_input_json_node import BatchInputJsonNode
from hubspot.cms.hubdb.models.batch_input_string import BatchInputString
from hubspot.cms.hubdb.models.batch_response_hub_db_table_row_v3_with_errors import (
BatchResponseHubDbTableRowV3WithErrors,
)
from hubspot.cms.hubdb.models.collection_response_with_total_hub_db_table_row_v3_forward_paging import (
CollectionResponseWithTotalHubDbTableRowV3ForwardPaging,
)
from hubspot.cms.hubdb.models.collection_response_with_total_hub_db_table_v3_forward_paging import (
CollectionResponseWithTotalHubDbTableV3ForwardPaging,
)
from hubspot.cms.hubdb.models.column import Column
from hubspot.cms.hubdb.models.column_input import ColumnInput
from hubspot.cms.hubdb.models.error import Error
from hubspot.cms.hubdb.models.error_detail import ErrorDetail
from hubspot.cms.hubdb.models.foreign_id import ForeignId
from hubspot.cms.hubdb.models.forward_paging import ForwardPaging
from hubspot.cms.hubdb.models.hub_db_table_clone_request import HubDbTableCloneRequest
from hubspot.cms.hubdb.models.hub_db_table_row_v3 import HubDbTableRowV3
from hubspot.cms.hubdb.models.hub_db_table_row_v3_input import HubDbTableRowV3Input
from hubspot.cms.hubdb.models.hub_db_table_v3 import HubDbTableV3
from hubspot.cms.hubdb.models.hub_db_table_v3_input import HubDbTableV3Input
from hubspot.cms.hubdb.models.hub_db_table_v3_live_input import HubDbTableV3LiveInput
from hubspot.cms.hubdb.models.import_result import ImportResult
from hubspot.cms.hubdb.models.next_page import NextPage
from hubspot.cms.hubdb.models.option import Option
from hubspot.cms.hubdb.models.simple_user import SimpleUser
|
py | 1a54a5605302154186ec1d96983e1e735b6c3c8d | """Forms."""
from flask_wtf import Form
from flask_wtf.file import (FileField, FileAllowed)
from wtforms import (
BooleanField, StringField, PasswordField, SubmitField, TextAreaField)
import wtforms.validators as validators
from flask.ext.pagedown.fields import PageDownField
class LoginForm(Form):
"""Form to login."""
email = StringField(
'Email',
validators=[validators.DataRequired(), validators.Email()])
password = PasswordField('Password',
validators=[validators.DataRequired()])
submit = SubmitField('Login')
class EditGroupForm(Form):
"""Form to edit group."""
# churchtools
where = StringField('Treffpunkt')
when = StringField('Treffzeit')
audience = StringField('Zielgruppe')
# metadata
description = PageDownField('Beschreibung',
validators=[validators.Length(max=700)])
group_image = FileField('Bild',
validators=[FileAllowed(['jpg'], 'Nur JPGs')])
# submit
submit = SubmitField('Submit')
class EditIndexForm(Form):
"""Form to edit frontpage."""
first_row_image = FileField('Erste Reihe Bild',
validators=[FileAllowed(['jpg'], 'Nur JPGs')])
first_row_link = StringField('Erste Reihe Link',
validators=[validators.URL(require_tld=True)])
second_row_image = FileField('Zweite Reihe Bild',
validators=[FileAllowed(['jpg'], 'Nur JPGs')])
second_row_link = StringField(
'Zweite Reihe Link',
validators=[validators.URL(require_tld=True)])
third_row_left_image = FileField(
'Dritte Reihe Links Bild',
validators=[FileAllowed(['jpg'], 'Nur JPGs')])
third_row_left_link = StringField(
'Dritte Reihe Links Link',
validators=[validators.URL(require_tld=True)])
third_row_right_image = FileField(
'Dritte Reihe Rechts Bild',
validators=[FileAllowed(['jpg'], 'Nur JPGs')])
third_row_right_link = StringField(
'Dritte Reihe Rechts Link',
validators=[validators.URL(require_tld=True)])
# submit
submit = SubmitField('Submit')
class AddPrayerForm(Form):
"""Form to add prayer."""
body = PageDownField(
'',
validators=[validators.DataRequired(), validators.Length(max=700)])
name = StringField('Name', validators=[validators.Length(max=120)])
active = BooleanField('Aktiv')
submit = SubmitField('Submit')
class EditProfileForm(Form):
"""Form to edit profile."""
# churchtools
password = PasswordField('Neues Password',
validators=[validators.EqualTo('confirm')])
confirm = PasswordField('Confirm Password')
street = StringField('Strasse')
postal_code = StringField('PLZ')
city = StringField('Ort')
# metadata
bio = PageDownField('Bio', validators=[validators.Length(max=700)])
user_image = FileField('Bild',
validators=[FileAllowed(['jpg'], 'Nur JPGs')])
twitter = StringField('Twitter',
validators=[validators.optional(), validators.URL()])
facebook = StringField(
'Facebook',
validators=[validators.optional(), validators.URL()])
# submit
submit = SubmitField('Submit')
class MailForm(Form):
"""Form to send mail."""
subject = StringField('Betreff', validators=[validators.DataRequired()])
body = TextAreaField('Nachricht', validators=[validators.DataRequired()])
submit = SubmitField('Submit')
class AddWhatsUp(Form):
"""Form to add whatsup post."""
subject = StringField(
'Subject',
validators=[validators.DataRequired(), validators.Length(max=120)])
body = TextAreaField(
'Body',
validators=[validators.DataRequired(), validators.Length(max=700)])
submit = SubmitField('Submit')
class AddWhatsUpComment(Form):
"""Form to add whatsup comment."""
body = TextAreaField(
'Kommentar',
validators=[validators.DataRequired(), validators.Length(max=700)])
submit = SubmitField('Submit')
class SearchForm(Form):
"""Form to search."""
search = StringField('search', validators=[validators.DataRequired()])
|
py | 1a54a56ca3685a98e5df2e15bc84f6eaed269ee4 | # -*- coding: UTF-8 -*-
# @Time : 04/02/2020 10:58
# @Author : BubblyYi
# @FileName: seeds_net_data_provider_aug.py
# @Software: PyCharm
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch
import pandas as pd
import os
import numpy as np
import SimpleITK as sitk
import random
class DataGenerater(Dataset):
def __init__(self,data_path, pre_fix_path, transform = None, flag = '', target_transform = None):
self.flag = flag
data = []
print("csv path:",data_path)
csv_data = pd.read_csv(data_path)
x_data = csv_data['patch_name']
if self.flag == 'train' or self.flag == 'val':
proximity = csv_data["proximity"]
for i in range(len(x_data)):
if pre_fix_path is None:
                    data.append((x_data[i], proximity[i]))
else:
temp = os.path.join(pre_fix_path,x_data[i])
data.append((temp, proximity[i]))
else:
for i in range(len(x_data)):
if pre_fix_path is None:
data.append(x_data[i])
else:
temp = os.path.join(pre_fix_path, x_data[i])
data.append(temp)
self.data = data
self.transform = transform
self.target_transform = target_transform
self.p_gaussian_noise = 0.2
def __getitem__(self, index):
if self.flag == 'train' or self.flag == 'val':
data_path, p = self.data[index]
img = sitk.GetArrayFromImage(sitk.ReadImage(data_path, sitk.sitkFloat32))
proximity = p
upper_bound = np.percentile(img, 99.5)
lower_bound = np.percentile(img, 00.5)
img = np.clip(img, lower_bound, upper_bound)
if self.flag=='train':
if np.random.uniform() <= self.p_gaussian_noise:
img = self.augment_gaussian_noise(img)
mean_intensity = np.mean(img)
std_intensity = np.std(img)
img = (img - mean_intensity) / (std_intensity+1e-9)
img = img.astype(np.float32)
img = torch.from_numpy(img)
return img.unsqueeze(0), proximity
elif self.flag == 'test':
data_path = self.data[index]
img = sitk.GetArrayFromImage(sitk.ReadImage(data_path, sitk.sitkFloat32))
upper_bound = np.percentile(img, 99.5)
lower_bound = np.percentile(img, 00.5)
img = np.clip(img, lower_bound, upper_bound)
mean_intensity = np.mean(img)
std_intensity = np.std(img)
            # avoid division by zero
img = (img - mean_intensity) / (std_intensity+1e-9)
img = torch.from_numpy(img)
return img.unsqueeze(0)
def augment_gaussian_noise(self,data_sample, noise_variance=(0, 0.1)):
if noise_variance[0] == noise_variance[1]:
variance = noise_variance[0]
else:
variance = random.uniform(noise_variance[0], noise_variance[1])
data_sample = data_sample + np.random.normal(0.0, variance, size=data_sample.shape)
return data_sample
def __len__(self):
return len(self.data) |
py | 1a54a57a3dc613073e6f5391261d62bdde2d82c6 | import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="cmid", parent_name="surface", **kwargs):
super(CmidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
**kwargs,
)
|
py | 1a54a76db422302621ac696372901e4fe8acce16 | import torch
import os
from skimage import io, transform
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torch.autograd import Variable
from torchvision.utils import save_image
import matplotlib.pyplot as plt
import seaborn as sns
from torch.nn.modules.module import _addindent
import numpy as np
import re
from tqdm import tqdm
from scipy.interpolate import interp1d
from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error, mean_absolute_error, classification_report, roc_curve, roc_auc_score
from torch.utils.data import DataLoader
from torchsummary import summary
batch_size = 128
epochs = 2000
no_cuda = False
seed = 1
log_interval = 50
cuda = not no_cuda and torch.cuda.is_available()
torch.manual_seed(seed)
device = torch.device("cuda" if cuda else "cpu")
print(device)
kwargs = {'num_workers': 4, 'pin_memory': True} if cuda else {}
# HIGH res
train_root = 'data/deepfake_bgr/train/'
TRANSFORM_IMG = transforms.Compose([
transforms.Resize(100),
# transforms.CenterCrop(100),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
TRANSFORM_IMG_TEST = transforms.Compose([
transforms.Resize(100),
# transforms.CenterCrop(100),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(train_root, transform=TRANSFORM_IMG),
batch_size=batch_size, shuffle=True)
# for evaluation/testing
def mse_loss_cal(input, target, avg_batch=True):
ret = torch.mean((input - target) ** 2)
return ret.item()
class VAE_CNN(nn.Module):
def __init__(self):
super(VAE_CNN, self).__init__()
# Encoder
self.conv1 = nn.Conv2d(3, 16, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=3,
stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(64)
self.conv4 = nn.Conv2d(64, 16, kernel_size=3,
stride=2, padding=1, bias=False)
self.bn4 = nn.BatchNorm2d(16)
#self.drop = nn.Dropout(0.2)
# Latent vectors mu and sigma
self.fc1 = nn.Linear(25 * 25 * 16, 1024)
self.fc_bn1 = nn.BatchNorm1d(1024)
self.fc21 = nn.Linear(1024, 1024)
self.fc22 = nn.Linear(1024, 1024)
# Sampling vector
self.fc3 = nn.Linear(1024, 1024)
self.fc_bn3 = nn.BatchNorm1d(1024)
self.fc4 = nn.Linear(1024, 25 * 25 * 16)
self.fc_bn4 = nn.BatchNorm1d(25 * 25 * 16)
self.relu = nn.ReLU()
# Decoder
self.conv5 = nn.ConvTranspose2d(
16, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn5 = nn.BatchNorm2d(64)
self.conv6 = nn.ConvTranspose2d(
64, 32, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
self.bn6 = nn.BatchNorm2d(32)
self.conv7 = nn.ConvTranspose2d(
32, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn7 = nn.BatchNorm2d(16)
self.conv8 = nn.ConvTranspose2d(
16, 3, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False)
def encode(self, x):
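        # Two of the four convs use stride 2, so a 100x100 input becomes
        # 16 feature maps of 25x25 -- matching the 25 * 25 * 16 flatten below.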
conv1 = self.relu(self.bn1(self.conv1(x)))
conv2 = self.relu(self.bn2(self.conv2(conv1)))
conv3 = self.relu(self.bn3(self.conv3(conv2)))
conv4 = self.relu(self.bn4(self.conv4(conv3)))
conv4 = conv4.view(-1, 25 * 25 * 16)
fc1 = self.relu(self.fc_bn1(self.fc1(conv4)))
r1 = self.fc21(fc1)
r2 = self.fc22(fc1)
return r1, r2
def reparameterize(self, mu, logvar):
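        # Reparameterization trick: draw eps ~ N(0, I) and return
        # mu + exp(0.5 * logvar) * eps so gradients flow through mu and logvar.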
std = logvar.mul(0.50).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
def decode(self, z):
fc3 = self.relu(self.fc_bn3(self.fc3(z)))
fc4 = self.relu(self.fc_bn4(self.fc4(fc3)))
fc4 = fc4.view(-1, 16, 25, 25)
conv5 = self.relu(self.bn5(self.conv5(fc4)))
conv6 = self.relu(self.bn6(self.conv6(conv5)))
conv7 = self.relu(self.bn7(self.conv7(conv6)))
conv8 = self.conv8(conv7)
return conv8.view(-1, 3, 100, 100)
def forward(self, x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
class customLoss(nn.Module):
def __init__(self):
super(customLoss, self).__init__()
self.mse_loss = nn.MSELoss(reduction="sum")
def forward(self, x_recon, x, mu, logvar):
loss_MSE = self.mse_loss(x_recon, x)
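        # KL divergence between N(mu, sigma^2) and the standard normal prior,
        # summed over the batch: -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)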
loss_KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return loss_MSE + loss_KLD
model = VAE_CNN().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
loss_mse = customLoss()
train_losses = []
print(summary(model, (3, 100, 100)))
#ckpt = torch.load("dfdc/vae_pytorch_dfdc_FT_.pt")
# model.load_state_dict(ckpt)
#model = model.to(device)
for epoch in range(1, epochs + 1):
# train(epoch)
model.train()
train_loss = 0
for batch_idx, (data, _) in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
#permute = [2, 1, 0]
#data = data[:, permute, :, :]
recon_batch, mu, logvar = model(data)
loss = loss_mse(recon_batch, data, mu, logvar)
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
loss.item() / len(data)))
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, train_loss / len(train_loader.dataset)))
train_losses.append(train_loss / len(train_loader.dataset))
# EVALUATE YOUR MODEL HERE
# model.eval()
# with torch.no_grad():
plt.figure(figsize=(15, 10))
plt.plot(range(len(train_losses[1:])), train_losses[1:], c="dodgerblue")
plt.title("Loss per epoch", fontsize=18)
plt.xlabel("epoch", fontsize=18)
plt.ylabel("loss", fontsize=18)
plt.legend(['Train. Loss'], fontsize=18)
plt.show()
|
py | 1a54a7d720eccd7dcaf63b00f05d3059f7fd367c | #!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):10319")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__main__':
main()
|
py | 1a54a81b3b54a946f39720899150ad8eea437668 | #!/usr/bin/python
# Classification (U)
"""Program: get_status.py
Description: Unit testing of get_status in elastic_db_admin.py.
Usage:
test/unit/elastic_db_admin/get_status.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
import mock
# Local
sys.path.append(os.getcwd())
import elastic_db_admin
import lib.gen_libs as gen_libs
import version
__version__ = version.__version__
class ElasticSearchStatus(object):
"""Class: ElasticSearchStatus
Description: Class representation of the ElasticSearchStatus class.
Methods:
__init__ -> Initialize configuration environment.
get_mem_status -> Holder for ElasticSearchStatus.get_mem_status method.
get_nodes -> Stub holder for ElasticSearchStatus.get_nodes method.
get_cluster -> Stub holder for ElasticSearchStatus.get_cluster method.
get_all -> Stub holder for ElasticSearchStatus.get_all method.
"""
def __init__(self, hosts, port):
"""Method: __init__
Description: Initialization instance of the class.
Arguments:
(input) hosts -> Host name.
(input) port -> Port number.
"""
self.hosts = hosts
self.port = port
def get_mem_status(self):
"""Method: get_mem_status
Description: Holder for ElasticSearchStatus.get_mem_status method.
Arguments:
"""
return {"memory": "memory_status"}
def get_nodes(self):
"""Method: get_nodes
Description: Stub holder for ElasticSearchStatus.get_nodes method.
Arguments:
"""
return {"node": "node_name"}
def get_cluster(self):
"""Method: get_cluster
Description: Stub holder for ElasticSearchStatus.get_cluster method.
Arguments:
"""
return {"cluster": "cluster_name"}
def get_all(self):
"""Method: get_all
Description: Stub holder for ElasticSearchStatus.get_all method.
Arguments:
"""
return True
class ElasticSearch(object):
"""Class: ElasticSearch
Description: Class representation of the ElasticSearch class.
Methods:
__init__ -> Initialize configuration environment.
"""
def __init__(self):
"""Method: __init__
Description: Initialization instance of the class.
Arguments:
"""
self.hosts = ["nodename1", "nodename2"]
self.port = 9200
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp -> Initialization for unit testing.
test_empty_display_list -> Test with empty display list.
test_incorrect_option -> Test with incorrect option.
test_one_option -> Test with one option.
test_all -> Test with all option.
test_no_options -> Test with no options.
test_display_all -> Test with display all option.
test_display_default -> Test with display default option.
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.els = ElasticSearch()
self.args_array = {"-D": ["all"]}
self.args_array2 = {"-D": ["memory"]}
self.args_array3 = {"-D": []}
self.args_array4 = {"-D": [], "-j": True}
self.args_array5 = {"-D": ["all"], "-j": True}
self.args_array6 = {"-D": ["memory"], "-j": True}
self.args_array7 = {"-D": ["incorrect"], "-j": True}
self.args_array8 = {"-D": []}
self.status_call = {"memory": "get_mem_status"}
@mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus")
def test_empty_display_list(self, mock_class):
"""Function: test_empty_display_list
Description: Test with empty display list.
Arguments:
"""
mock_class.return_value = ElasticSearchStatus(self.els.hosts,
self.els.port)
with gen_libs.no_std_out():
self.assertFalse(
elastic_db_admin.get_status(
self.els, status_call=self.status_call,
args_array=self.args_array8))
@mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus")
def test_incorrect_option(self, mock_class):
"""Function: test_incorrect_option
Description: Test with incorrect option.
Arguments:
"""
mock_class.return_value = ElasticSearchStatus(self.els.hosts,
self.els.port)
with gen_libs.no_std_out():
self.assertFalse(
elastic_db_admin.get_status(
self.els, status_call=self.status_call,
args_array=self.args_array7))
@mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus")
def test_one_option(self, mock_class):
"""Function: test_one_option
Description: Test with one option.
Arguments:
"""
mock_class.return_value = ElasticSearchStatus(self.els.hosts,
self.els.port)
with gen_libs.no_std_out():
self.assertFalse(
elastic_db_admin.get_status(
self.els, status_call=self.status_call,
args_array=self.args_array6))
@mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus")
def test_all(self, mock_class):
"""Function: test_all
Description: Test with all option.
Arguments:
"""
mock_class.return_value = ElasticSearchStatus(self.els.hosts,
self.els.port)
with gen_libs.no_std_out():
self.assertFalse(
elastic_db_admin.get_status(
self.els, status_call=self.status_call,
args_array=self.args_array5))
@mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus")
def test_no_options(self, mock_class):
"""Function: test_no_options
Description: Test with no options.
Arguments:
"""
mock_class.return_value = ElasticSearchStatus(self.els.hosts,
self.els.port)
with gen_libs.no_std_out():
self.assertFalse(
elastic_db_admin.get_status(
self.els, status_call=self.status_call,
args_array=self.args_array4))
@mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus")
def test_display_all(self, mock_class):
"""Function: test_display_all
Description: Test with display all option.
Arguments:
"""
mock_class.return_value = ElasticSearchStatus(self.els.hosts,
self.els.port)
with gen_libs.no_std_out():
self.assertFalse(
elastic_db_admin.get_status(
self.els, status_call=self.status_call,
args_array=self.args_array))
@mock.patch("elastic_db_admin.elastic_class.ElasticSearchStatus")
def test_display_default(self, mock_class):
"""Function: test_display_default
Description: Test with display default option.
Arguments:
"""
mock_class.return_value = ElasticSearchStatus(self.els.hosts,
self.els.port)
with gen_libs.no_std_out():
self.assertFalse(
elastic_db_admin.get_status(
self.els, status_call=self.status_call, args_array={}))
if __name__ == "__main__":
unittest.main()
|
py | 1a54a87ebccafe8981eb22ee1e0dc5384f25db8a | from enum import Enum
from typing import Optional
import numpy as np
from pydantic import PrivateAttr, validator
from ..events import EventedModel
from ..events.custom_types import Array
from ..translations import trans
from .colorbars import make_colorbar
from .standardize_color import transform_color
class ColormapInterpolationMode(str, Enum):
"""INTERPOLATION: Interpolation mode for colormaps.
Selects an interpolation mode for the colormap.
* linear: colors are defined by linear interpolation between
colors of neighboring controls points.
* zero: colors are defined by the value of the color in the
bin between by neighboring controls points.
"""
LINEAR = 'linear'
ZERO = 'zero'
class Colormap(EventedModel):
"""Colormap that relates intensity values to colors.
Attributes
----------
colors : array, shape (N, 4)
Data used in the colormap.
name : str
Name of the colormap.
display_name : str
Display name of the colormap.
controls : array, shape (N,) or (N+1,)
Control points of the colormap.
interpolation : str
Colormap interpolation mode, either 'linear' or
'zero'. If 'linear', ncontrols = ncolors (one
color per control point). If 'zero', ncontrols
= ncolors+1 (one color per bin).
"""
# fields
colors: Array[float, (-1, 4)]
name: str = 'custom'
_display_name: Optional[str] = PrivateAttr(None)
interpolation: ColormapInterpolationMode = ColormapInterpolationMode.LINEAR
controls: Array[float, (-1,)] = None
def __init__(self, colors, display_name: Optional[str] = None, **data):
if display_name is None:
display_name = data.get('name', 'custom')
super().__init__(colors=colors, **data)
self._display_name = display_name
# validators
@validator('colors', pre=True)
def _ensure_color_array(cls, v):
return transform_color(v)
# controls validator must be called even if None for correct initialization
@validator('controls', pre=True, always=True)
def _check_controls(cls, v, values):
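        # 'zero' interpolation holds one color per bin, so it needs one more
        # control point than colors; 'linear' uses exactly one per color.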
if v is None or len(v) == 0:
n_controls = len(values['colors']) + int(
values['interpolation'] == ColormapInterpolationMode.ZERO
)
return np.linspace(0, 1, n_controls)
return v
def __iter__(self):
yield from (self.colors, self.controls, self.interpolation)
def map(self, values):
values = np.atleast_1d(values)
if self.interpolation == ColormapInterpolationMode.LINEAR:
# One color per control point
cols = [
np.interp(values, self.controls, self.colors[:, i])
for i in range(4)
]
cols = np.stack(cols, axis=1)
elif self.interpolation == ColormapInterpolationMode.ZERO:
# One color per bin
indices = np.clip(
np.searchsorted(self.controls, values) - 1, 0, len(self.colors)
)
cols = self.colors[indices.astype(np.int32)]
else:
raise ValueError(
trans._(
'Unrecognized Colormap Interpolation Mode',
deferred=True,
)
)
return cols
@property
def colorbar(self):
return make_colorbar(self)
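if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): a two-color
    # linear colormap maps scalar intensities to RGBA rows.
    _demo = Colormap(colors=[[0, 0, 0, 1], [1, 1, 1, 1]], name='gray-demo')
    print(_demo.map([0.0, 0.5, 1.0]))  # rows running from black to white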
|
py | 1a54a8b8fd08571d7597ac4d86df5e3e02479d77 | import sys
import gzip
import json
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: python3 convert_mrqa_to_fgc.py <mrqa-format-input-fpath> <fgc-format-output-fpath>')
exit(1)
input_fpath = sys.argv[1]
output_fpath = sys.argv[2]
# Read MRQA-format data
with gzip.open(input_fpath) as f:
jsonl_data = f.readlines()
data_info = json.loads(jsonl_data[0])
dataset = data_info['header']['dataset']
line_count = len(jsonl_data) - 1
# Convert MRQA-format to FGC-format
new_data = []
for di, jsonl_line in enumerate(jsonl_data[1:], start=1):
# PQA (Outer loop)
new_PQA = {}
PQA = json.loads(jsonl_line)
DID = '%d' % di
DTEXT = PQA['context']
new_PQA['DID'] = DID
new_PQA['DTEXT'] = DTEXT
new_PQA['QUESTIONS'] = []
# QA (Middle loop)
for qi, QA in enumerate(PQA['qas'], start=1):
new_QA = {'AMODE': 'Single-Span-Extraction', 'ATYPE': ''}
QID = '%s-%d' % (DID, qi)
QTEXT = QA['question']
new_QA['QID'] = QID
new_QA['QTEXT'] = QTEXT
# Inner A (Inner loop)
answer_map = {}
new_ANSWER, new_ASPAN = [], []
for A in QA['detected_answers']:
ATEXT = A['text']
start = A['char_spans'][0][0]
end = A['char_spans'][0][1]
# ANSWER
if ATEXT not in answer_map:
answer_map[ATEXT] = len(answer_map)
new_ANSWER.append({'ATEXT': ATEXT, 'ATOKEN': [{'text': ATEXT, 'start': start}]})
else:
ai = answer_map[ATEXT]
atoken_info = {'text': ATEXT, 'start': start}
if atoken_info not in new_ANSWER[ai]['ATOKEN']:
new_ANSWER[ai]['ATOKEN'].append(atoken_info)
# ASPAN
aspan_info = {'text': ATEXT, 'start': start, 'end': end}
if aspan_info not in new_ASPAN:
new_ASPAN.append(aspan_info)
new_QA['ANSWER'] = new_ANSWER
new_QA['ASPAN'] = new_ASPAN
new_PQA['QUESTIONS'].append(new_QA)
new_data.append(new_PQA)
print('%s: %d/%d (%.2f%%)\r' % (dataset, di, line_count, 100*di/line_count), end='')
print()
# Save FGC-format data as JSON
with open(output_fpath, 'w') as f:
json.dump(new_data, f)
|
py | 1a54a91390a003e1612615bb4ab3cd4505f90f57 | #AUTOGENERATED! DO NOT EDIT! File to edit: dev/60_medical_imaging.ipynb (unless otherwise specified).
__all__ = ['DcmDataset', 'DcmTag', 'DcmMultiValue', 'dcmread', 'pixels', 'scaled_px', 'array_freqhist_bins',
'dicom_windows', 'show']
#Cell
from ..test import *
from ..basics import *
from ..vision import models
import pydicom
from pydicom.dataset import Dataset as DcmDataset
from pydicom.tag import BaseTag as DcmTag
from pydicom.multival import MultiValue as DcmMultiValue
from scipy import ndimage
import skimage
#Cell
@patch
def dcmread(self:Path): return pydicom.dcmread(str(self))
#Cell
@patch_property
def pixels(self:DcmDataset):
"`pixel_array` as a tensor"
return tensor(self.pixel_array.astype(np.float32))
#Cell
@patch_property
def scaled_px(self:DcmDataset):
"`pixels` scaled by `RescaleSlope` and `RescaleIntercept"
img = self.pixels
return img*self.RescaleSlope + self.RescaleIntercept
#Cell
def array_freqhist_bins(self, n_bins=100):
imsd = np.sort(self.flatten())
t = np.array([0.001])
t = np.append(t, np.arange(n_bins)/n_bins+(1/2/n_bins))
t = np.append(t, 0.999)
    t = (len(imsd)*t+0.5).astype(np.int64)
return np.unique(imsd[t])
#Cell
@patch
def freqhist_bins(self:Tensor, n_bins=100):
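    # Hedged note: returns the (unique) pixel values sitting at evenly spaced
    # quantiles of the sorted tensor, so hist_scaled can spread contrast evenly.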
imsd = self.view(-1).sort()[0]
t = torch.cat([tensor([0.001]),
torch.arange(n_bins).float()/n_bins+(1/2/n_bins),
tensor([0.999])])
t = (len(imsd)*t).long()
return imsd[t].unique()
#Cell
@patch
def hist_scaled(self:Tensor, brks=None):
if brks is None: brks = self.freqhist_bins()
ys = torch.linspace(0., 1., len(brks))
return self.flatten().interp_1d(brks, ys).reshape(self.shape).clamp(0.,1.)
#Cell
@patch
def hist_scaled_px(self:DcmDataset, brks=None, min_px=None, max_px=None):
px = self.scaled_px
if min_px is not None: px[px<min_px] = min_px
if max_px is not None: px[px>max_px] = max_px
return px.hist_scaled(brks=brks)
#Cell
@patch
def windowed(self:DcmDataset, w, l):
px = self.scaled_px.float()
px_min = l - w//2
px_max = l + w//2
px[px<px_min] = px_min
px[px>px_max] = px_max
return (px-px_min) / (px_max-px_min)
#Cell
# From https://radiopaedia.org/articles/windowing-ct
dicom_windows = types.SimpleNamespace(
brain=(80,40),
subdural=(200,80),
stroke=(8,32),
brain_bone=(2800,600),
brain_soft=(375,40),
lungs=(1500,-600),
mediastinum=(350,50),
abdomen_soft=(400,50),
liver=(150,30),
spine_soft=(250,50),
spine_bone=(1800,400)
)
#Cell
@patch
@delegates(show_image)
def show(self:DcmDataset, scale=True, cmap=plt.cm.bone, min_px=-1100, max_px=None, **kwargs):
px = (self.windowed(*scale) if isinstance(scale,tuple)
else self.hist_scaled_px(min_px=min_px,max_px=max_px,brks=scale) if isinstance(scale,(ndarray,Tensor))
else self.hist_scaled_px(min_px=min_px,max_px=max_px) if scale
else self.scaled_px)
show_image(px, cmap=cmap, **kwargs)
#Cell
@patch
def zoom(self:DcmDataset, ratio):
data_d = ndimage.zoom(self.pixel_array, ratio)
self.PixelData = data_d.tobytes()
self.Rows,self.Columns = data_d.shape
#Cell
def _cast_dicom_special(x):
cls = type(x)
if not cls.__module__.startswith('pydicom'): return x
return cls.__base__(x)
def _split_elem(res,k,v):
if not isinstance(v,DcmMultiValue): return
res[f'Multi{k}'] = 1
for i,o in enumerate(v): res[f'{k}{"" if i==0 else i}']=o
#Cell
@patch
def pct_in_window(dcm:DcmDataset, w, l):
"% of pixels in the window `(w,l)`"
px = dcm.scaled_px
return ((px > l-w//2) & (px < l+w//2)).float().mean().item()
#Cell
@patch
def as_dict(self:DcmDataset, px_summ=True, window=dicom_windows.brain):
pxdata = (0x7fe0,0x0010)
vals = [self[o] for o in self.keys() if o != pxdata]
its = [(v.keyword,v.value) for v in vals]
res = dict(its)
res['fname'] = self.filename
for k,v in its: _split_elem(res,k,v)
if not px_summ: return res
stats = 'min','max','mean','std'
try:
pxs = self.pixel_array
for f in stats: res['img_'+f] = getattr(pxs,f)()
res['img_pct_window'] = self.pct_in_window(*window)
except Exception as e:
for f in stats: res['img_'+f] = 0
print(res,e)
for k in res: res[k] = _cast_dicom_special(res[k])
return res
#Cell
def _dcm2dict(fn, **kwargs): return fn.dcmread().as_dict(**kwargs)
#Cell
@delegates(parallel)
def _from_dicoms(cls, fns, n_workers=0, **kwargs):
return pd.DataFrame(parallel(_dcm2dict, fns, n_workers=n_workers, **kwargs))
pd.DataFrame.from_dicoms = classmethod(_from_dicoms) |
py | 1a54a94def44626bb8a27857befe4e6b4785d710 | '''OpenGL extension EXT.separate_shader_objects
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.separate_shader_objects to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/separate_shader_objects.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.separate_shader_objects import *
from OpenGL.raw.GLES2.EXT.separate_shader_objects import _EXTENSION_NAME
def glInitSeparateShaderObjectsEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glCreateShaderProgramvEXT.strings size not checked against count
glCreateShaderProgramvEXT=wrapper.wrapper(glCreateShaderProgramvEXT).setInputArraySize(
'strings', None
)
# INPUT glDeleteProgramPipelinesEXT.pipelines size not checked against n
glDeleteProgramPipelinesEXT=wrapper.wrapper(glDeleteProgramPipelinesEXT).setInputArraySize(
'pipelines', None
)
# INPUT glGenProgramPipelinesEXT.pipelines size not checked against n
glGenProgramPipelinesEXT=wrapper.wrapper(glGenProgramPipelinesEXT).setInputArraySize(
'pipelines', None
)
# INPUT glGetProgramPipelineInfoLogEXT.infoLog size not checked against bufSize
glGetProgramPipelineInfoLogEXT=wrapper.wrapper(glGetProgramPipelineInfoLogEXT).setInputArraySize(
'infoLog', None
).setInputArraySize(
'length', 1
)
# INPUT glProgramUniform1fvEXT.value size not checked against count
glProgramUniform1fvEXT=wrapper.wrapper(glProgramUniform1fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform1ivEXT.value size not checked against count
glProgramUniform1ivEXT=wrapper.wrapper(glProgramUniform1ivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2fvEXT.value size not checked against count*2
glProgramUniform2fvEXT=wrapper.wrapper(glProgramUniform2fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2ivEXT.value size not checked against count*2
glProgramUniform2ivEXT=wrapper.wrapper(glProgramUniform2ivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3fvEXT.value size not checked against count*3
glProgramUniform3fvEXT=wrapper.wrapper(glProgramUniform3fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3ivEXT.value size not checked against count*3
glProgramUniform3ivEXT=wrapper.wrapper(glProgramUniform3ivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4fvEXT.value size not checked against count*4
glProgramUniform4fvEXT=wrapper.wrapper(glProgramUniform4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4ivEXT.value size not checked against count*4
glProgramUniform4ivEXT=wrapper.wrapper(glProgramUniform4ivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2fvEXT.value size not checked against count*4
glProgramUniformMatrix2fvEXT=wrapper.wrapper(glProgramUniformMatrix2fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3fvEXT.value size not checked against count*9
glProgramUniformMatrix3fvEXT=wrapper.wrapper(glProgramUniformMatrix3fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4fvEXT.value size not checked against count*16
glProgramUniformMatrix4fvEXT=wrapper.wrapper(glProgramUniformMatrix4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform1uivEXT.value size not checked against count
glProgramUniform1uivEXT=wrapper.wrapper(glProgramUniform1uivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform2uivEXT.value size not checked against count*2
glProgramUniform2uivEXT=wrapper.wrapper(glProgramUniform2uivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform3uivEXT.value size not checked against count*3
glProgramUniform3uivEXT=wrapper.wrapper(glProgramUniform3uivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniform4uivEXT.value size not checked against count*4
glProgramUniform4uivEXT=wrapper.wrapper(glProgramUniform4uivEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4fvEXT.value size not checked against count*16
glProgramUniformMatrix4fvEXT=wrapper.wrapper(glProgramUniformMatrix4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x3fvEXT.value size not checked against count*6
glProgramUniformMatrix2x3fvEXT=wrapper.wrapper(glProgramUniformMatrix2x3fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x2fvEXT.value size not checked against count*6
glProgramUniformMatrix3x2fvEXT=wrapper.wrapper(glProgramUniformMatrix3x2fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix2x4fvEXT.value size not checked against count*8
glProgramUniformMatrix2x4fvEXT=wrapper.wrapper(glProgramUniformMatrix2x4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x2fvEXT.value size not checked against count*8
glProgramUniformMatrix4x2fvEXT=wrapper.wrapper(glProgramUniformMatrix4x2fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix3x4fvEXT.value size not checked against count*12
glProgramUniformMatrix3x4fvEXT=wrapper.wrapper(glProgramUniformMatrix3x4fvEXT).setInputArraySize(
'value', None
)
# INPUT glProgramUniformMatrix4x3fvEXT.value size not checked against count*12
glProgramUniformMatrix4x3fvEXT=wrapper.wrapper(glProgramUniformMatrix4x3fvEXT).setInputArraySize(
'value', None
)
### END AUTOGENERATED SECTION |
py | 1a54aa081f074901fb54422276106c08c53ee86b | # py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.
"""Functions for loading data and writing summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
import io
import numpy as np
import pandas as pd
import scipy.io
import xmltodict
class Format(Enum):
"""Enumerates supported file formats."""
MOT16 = 'mot16'
"""Milan, Anton, et al. "Mot16: A benchmark for multi-object tracking." arXiv preprint arXiv:1603.00831 (2016)."""
MOT15_2D = 'mot15-2D'
"""Leal-Taixe, Laura, et al. "MOTChallenge 2015: Towards a benchmark for multi-target tracking." arXiv preprint arXiv:1504.01942 (2015)."""
VATIC_TXT = 'vatic-txt'
"""Vondrick, Carl, Donald Patterson, and Deva Ramanan. "Efficiently scaling up crowdsourced video annotation." International Journal of Computer Vision 101.1 (2013): 184-204.
https://github.com/cvondrick/vatic
"""
DETRAC_MAT = 'detrac-mat'
"""Wen, Longyin et al. "UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking." arXiv preprint arXiv:arXiv:1511.04136 (2016).
http://detrac-db.rit.albany.edu/download
"""
DETRAC_XML = 'detrac-xml'
"""Wen, Longyin et al. "UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking." arXiv preprint arXiv:arXiv:1511.04136 (2016).
http://detrac-db.rit.albany.edu/download
"""
def load_motchallenge(fname, **kwargs):
r"""Load MOT challenge data.
Params
------
fname : str
Filename to load data from
Kwargs
------
sep : str
Allowed field separators, defaults to '\s+|\t+|,'
min_confidence : float
Rows with confidence less than this threshold are removed.
Defaults to -1. You should set this to 1 when loading
ground truth MOTChallenge data, so that invalid rectangles in
the ground truth are not considered during matching.
Returns
------
df : pandas.DataFrame
The returned dataframe has the following columns
'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'
The dataframe is indexed by ('FrameId', 'Id')
"""
sep = kwargs.pop('sep', r'\s+|\t+|,')
min_confidence = kwargs.pop('min_confidence', -1)
df = pd.read_csv(
fname,
sep=sep,
index_col=[0, 1],
skipinitialspace=True,
header=None,
names=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'],
engine='python'
)
# Account for matlab convention.
df[['X', 'Y']] -= (1, 1)
# Removed trailing column
del df['unused']
# Remove all rows without sufficient confidence
return df[df['Confidence'] >= min_confidence]
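# Hedged usage sketch (file paths are hypothetical): ground truth is loaded with
# min_confidence=1 so that invalid boxes are dropped, as recommended in the
# docstring above, while detector output keeps the default of -1.
#
#   gt = load_motchallenge('MOT16-02/gt/gt.txt', min_confidence=1)
#   ts = load_motchallenge('res/MOT16-02.txt')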
def load_vatictxt(fname, **kwargs):
"""Load Vatic text format.
Loads the vatic CSV text having the following columns per row
0 Track ID. All rows with the same ID belong to the same path.
1 xmin. The top left x-coordinate of the bounding box.
2 ymin. The top left y-coordinate of the bounding box.
3 xmax. The bottom right x-coordinate of the bounding box.
4 ymax. The bottom right y-coordinate of the bounding box.
5 frame. The frame that this annotation represents.
6 lost. If 1, the annotation is outside of the view screen.
7 occluded. If 1, the annotation is occluded.
8 generated. If 1, the annotation was automatically interpolated.
9 label. The label for this annotation, enclosed in quotation marks.
10+ attributes. Each column after this is an attribute set in the current frame
Params
------
fname : str
Filename to load data from
Returns
------
df : pandas.DataFrame
The returned dataframe has the following columns
'X', 'Y', 'Width', 'Height', 'Lost', 'Occluded', 'Generated', 'ClassId', '<Attr1>', '<Attr2>', ...
where <Attr1> is placeholder for the actual attribute name capitalized (first letter). The order of attribute
columns is sorted in attribute name. The dataframe is indexed by ('FrameId', 'Id')
"""
# pylint: disable=too-many-locals
sep = kwargs.pop('sep', ' ')
with io.open(fname) as f:
# First time going over file, we collect the set of all variable activities
activities = set()
for line in f:
for c in line.rstrip().split(sep)[10:]:
activities.add(c)
activitylist = sorted(list(activities))
# Second time we construct artificial binary columns for each activity
data = []
f.seek(0)
for line in f:
fields = line.rstrip().split()
attrs = ['0'] * len(activitylist)
for a in fields[10:]:
attrs[activitylist.index(a)] = '1'
fields = fields[:10]
fields.extend(attrs)
data.append(' '.join(fields))
strdata = '\n'.join(data)
dtype = {
'Id': np.int64,
'X': np.float32,
'Y': np.float32,
'Width': np.float32,
'Height': np.float32,
'FrameId': np.int64,
'Lost': bool,
'Occluded': bool,
'Generated': bool,
'ClassId': str,
}
# Remove quotes from activities
activitylist = [a.replace('\"', '').capitalize() for a in activitylist]
# Add dtypes for activities
for a in activitylist:
dtype[a] = bool
# Read from CSV
names = ['Id', 'X', 'Y', 'Width', 'Height', 'FrameId', 'Lost', 'Occluded', 'Generated', 'ClassId']
names.extend(activitylist)
df = pd.read_csv(io.StringIO(strdata), names=names, index_col=['FrameId', 'Id'], header=None, sep=' ')
# Correct Width and Height which are actually XMax, Ymax in files.
w = df['Width'] - df['X']
h = df['Height'] - df['Y']
df['Width'] = w
df['Height'] = h
return df
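# Hedged usage sketch (path and attribute name are hypothetical): each VATIC
# attribute becomes its own capitalized boolean column, e.g.
#
#   df = load_vatictxt('annotations.txt')
#   walking = df['Walking']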
def load_detrac_mat(fname):
"""Loads UA-DETRAC annotations data from mat files
Competition Site: http://detrac-db.rit.albany.edu/download
File contains a nested structure of 2d arrays for indexed by frame id
and Object ID. Separate arrays for top, left, width and height are given.
Params
------
fname : str
Filename to load data from
Kwargs
------
Currently none of these arguments used.
Returns
------
df : pandas.DataFrame
The returned dataframe has the following columns
'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'
The dataframe is indexed by ('FrameId', 'Id')
"""
matData = scipy.io.loadmat(fname)
frameList = matData['gtInfo'][0][0][4][0]
leftArray = matData['gtInfo'][0][0][0].astype(np.float32)
topArray = matData['gtInfo'][0][0][1].astype(np.float32)
widthArray = matData['gtInfo'][0][0][3].astype(np.float32)
heightArray = matData['gtInfo'][0][0][2].astype(np.float32)
parsedGT = []
for f in frameList:
ids = [i + 1 for i, v in enumerate(leftArray[f - 1]) if v > 0]
for i in ids:
row = []
row.append(f)
row.append(i)
row.append(leftArray[f - 1, i - 1] - widthArray[f - 1, i - 1] / 2)
row.append(topArray[f - 1, i - 1] - heightArray[f - 1, i - 1])
row.append(widthArray[f - 1, i - 1])
row.append(heightArray[f - 1, i - 1])
row.append(1)
row.append(-1)
row.append(-1)
row.append(-1)
parsedGT.append(row)
df = pd.DataFrame(parsedGT,
columns=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'])
df.set_index(['FrameId', 'Id'], inplace=True)
# Account for matlab convention.
df[['X', 'Y']] -= (1, 1)
# Removed trailing column
del df['unused']
return df
def load_detrac_xml(fname):
"""Loads UA-DETRAC annotations data from xml files
Competition Site: http://detrac-db.rit.albany.edu/download
Params
------
fname : str
Filename to load data from
Kwargs
------
Currently none of these arguments used.
Returns
------
df : pandas.DataFrame
The returned dataframe has the following columns
'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'
The dataframe is indexed by ('FrameId', 'Id')
"""
with io.open(fname) as fd:
doc = xmltodict.parse(fd.read())
frameList = doc['sequence']['frame']
parsedGT = []
for f in frameList:
fid = int(f['@num'])
targetList = f['target_list']['target']
if not isinstance(targetList, list):
targetList = [targetList]
for t in targetList:
row = []
row.append(fid)
row.append(int(t['@id']))
row.append(float(t['box']['@left']))
row.append(float(t['box']['@top']))
row.append(float(t['box']['@width']))
row.append(float(t['box']['@height']))
row.append(1)
row.append(-1)
row.append(-1)
row.append(-1)
parsedGT.append(row)
df = pd.DataFrame(parsedGT,
columns=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'])
df.set_index(['FrameId', 'Id'], inplace=True)
# Account for matlab convention.
df[['X', 'Y']] -= (1, 1)
# Removed trailing column
del df['unused']
return df
def loadtxt(fname, fmt=Format.MOT15_2D, **kwargs):
"""Load data from any known format."""
fmt = Format(fmt)
switcher = {
Format.MOT16: load_motchallenge,
Format.MOT15_2D: load_motchallenge,
Format.VATIC_TXT: load_vatictxt,
Format.DETRAC_MAT: load_detrac_mat,
Format.DETRAC_XML: load_detrac_xml
}
func = switcher.get(fmt)
return func(fname, **kwargs)
def render_summary(summary, formatters=None, namemap=None, buf=None):
"""Render metrics summary to console friendly tabular output.
Params
------
summary : pd.DataFrame
Dataframe containing summaries in rows.
Kwargs
------
buf : StringIO-like, optional
Buffer to write to
formatters : dict, optional
        Dictionary defining custom formatters for individual metrics.
I.e `{'mota': '{:.2%}'.format}`. You can get preset formatters
from MetricsHost.formatters
namemap : dict, optional
Dictionary defining new metric names for display. I.e
`{'num_false_positives': 'FP'}`.
Returns
-------
string
Formatted string
"""
if namemap is not None:
summary = summary.rename(columns=namemap)
if formatters is not None:
formatters = {namemap.get(c, c): f for c, f in formatters.items()}
output = summary.to_string(
buf=buf,
formatters=formatters,
)
return output
motchallenge_metric_names = {
'idf1': 'IDF1',
'idp': 'IDP',
'idr': 'IDR',
'recall': 'Rcll',
'precision': 'Prcn',
'num_unique_objects': 'GT',
'mostly_tracked': 'MT',
'partially_tracked': 'PT',
'mostly_lost': 'ML',
'num_false_positives': 'FP',
'num_misses': 'FN',
'num_switches': 'IDs',
'num_fragmentations': 'FM',
'mota': 'MOTA',
'motp': 'MOTP',
'num_transfer': 'IDt',
'num_ascend': 'IDa',
'num_migrate': 'IDm',
}
"""A list mappings for metric names to comply with MOTChallenge."""
|
py | 1a54aa19422822e888fa8b0c37203188d5e19274 | #
# Copyright 2014 Quantopian, Inc.
# Modifications Copyright 2018 Alpaca
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import pandas as pd
from pylivetrader.errors import (
SymbolNotFound,
OrderDuringInitialize,
TradingControlViolation,
RegisterTradingControlPostInit,
)
import pylivetrader.protocol as proto
from pylivetrader.misc import events
from pylivetrader.algorithm import Algorithm
from pylivetrader.executor.executor import AlgorithmExecutor
from pylivetrader.misc.api_context import LiveTraderAPI
from pylivetrader.loader import get_functions
from unittest.mock import Mock
def get_algo(script, **kwargs):
functions = get_functions(script)
return Algorithm(
backend='pylivetrader.testing.fixtures',
**functions, **kwargs,
)
def simulate_init_and_handle(algo):
algo._assets_from_source = \
algo.asset_finder.retrieve_all(algo.asset_finder.sids)
if not algo.initialized:
algo.initialize()
algo.initialized = True
algo.executor = AlgorithmExecutor(algo, algo.data_portal)
dt_to_use = pd.Timestamp(
'2018/08/13 9:30', tz='America/New_York').tz_convert('UTC')
with LiveTraderAPI(algo):
algo.on_dt_changed(dt_to_use)
algo.executor.current_data.datetime = dt_to_use
algo.before_trading_start(algo.executor.current_data)
algo.handle_data(algo.executor.current_data)
def test_algorithm_init():
# check init
algo = Algorithm(backend='pylivetrader.testing.fixtures')
assert not algo.initialized
algo = get_algo('''
def initialize(ctx):
pass
def handle_data(ctx, data):
pass
''')
simulate_init_and_handle(algo)
def test_algorithm_get_datetime():
algo = get_algo('''
import pandas as pd
def initialize(ctx):
pass
def handle_data(ctx, data):
dt = get_datetime()
assert dt == pd.Timestamp(
'2018/08/13 9:30', tz='America/New_York').tz_convert('UTC')
''')
simulate_init_and_handle(algo)
def test_before_trading_start():
algo = get_algo('''
def before_trading_start(ctx, data):
record(value=1)
''')
simulate_init_and_handle(algo)
assert algo.recorded_vars['value'] == 1
def test_datetime_bad_params():
algo = get_algo("""
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
""")
with pytest.raises(TypeError):
simulate_init_and_handle(algo)
def test_schedule():
algo = get_algo("""
def scheduled(context, data):
pass
def initialize(context):
schedule_function(
scheduled,
date_rules.every_day(),
time_rules.market_open(minutes=1)
)
""")
simulate_init_and_handle(algo)
assert algo.event_manager._events[-1].callback.__name__ == 'scheduled'
assert isinstance(algo.event_manager._events[-1].rule, events.OncePerDay)
def test_asset_lookup():
algo = get_algo("""
def initialize(context):
assert symbol('ASSET1').sid == 'asset-1'
""")
simulate_init_and_handle(algo)
algo = get_algo("""
def initialize(context):
symbol('INVALID')
""")
with pytest.raises(SymbolNotFound):
simulate_init_and_handle(algo)
with pytest.raises(TypeError):
algo.symbol(1)
with pytest.raises(TypeError):
algo.symbol((1,))
with pytest.raises(TypeError):
algo.symbol([1])
with pytest.raises(TypeError):
algo.symbol({1})
with pytest.raises(TypeError):
algo.symbol({"foo": "bar"})
@pytest.mark.parametrize('func, amt, expect', [
('order', 1, 1),
('order_value', 1, 1),
('order_target', 1, 1),
('order_percent', 0.1, 1),
('order_percent', 0.2, 2),
('order_target_percent', 0.1, 1),
('order_target_value', 1, 1),
])
def test_order(func, amt, expect):
algo = get_algo('')
simulate_init_and_handle(algo)
target = algo.sid('asset-1')
def assert_order(asset, amount, style):
assert asset == target
assert amount == expect
class portfolio:
portfolio_value = 1000.0
positions = proto.Positions()
algo._backend.portfolio = portfolio()
algo._backend.order = assert_order
getattr(algo, func)(target, amt)
def test_order_in_init():
"""
Test that calling order in initialize
will raise an error.
"""
with pytest.raises(OrderDuringInitialize):
algo = get_algo('''
def initialize(ctx):
order(sid('asset-1'), 1)
''')
simulate_init_and_handle(algo)
def test_portfolio_in_init():
"""
Test that accessing portfolio in init doesn't break.
"""
algo = get_algo('''
def initialize(ctx):
ctx.portfolio
''')
algo._backend.portfolio = {}
simulate_init_and_handle(algo)
def test_account_in_init():
"""
Test that accessing portfolio in init doesn't break.
"""
algo = get_algo('''
def initialize(ctx):
ctx.account
''')
algo._backend.account = {}
simulate_init_and_handle(algo)
def test_long_only():
algo = get_algo('''
def initialize(ctx):
set_long_only()
''')
simulate_init_and_handle(algo)
class portfolio:
portfolio_value = 1000.0
positions = proto.Positions()
class order:
id = 'oid'
algo._backend.portfolio = portfolio
algo._backend.order = lambda *args, **kwrags: order()
with pytest.raises(TradingControlViolation):
algo.order(algo.sid('asset-1'), -1)
algo.order(algo.sid('asset-1'), 1)
def test_post_init():
algo = get_algo('')
simulate_init_and_handle(algo)
with pytest.raises(RegisterTradingControlPostInit):
algo.set_max_position_size(algo.sid('asset-1'), 1, 1)
with pytest.raises(RegisterTradingControlPostInit):
algo.set_max_order_size(algo.sid('asset-1'), 1, 1)
with pytest.raises(RegisterTradingControlPostInit):
algo.set_max_order_count(1)
with pytest.raises(RegisterTradingControlPostInit):
algo.set_long_only()
def test_state_restore():
algo = get_algo('''
def handle_data(ctx, data):
ctx.value = 1
''')
simulate_init_and_handle(algo)
algo = get_algo('''
def handle_data(ctx, data):
ctx.value = 1
''')
algo.initialize()
assert algo.value == 1
# should fail with checksum check
algo = get_algo('''
def handle_data(ctx, data):
ctx.value = 1
''', algoname='invalid', statefile='algo-state.pkl')
with pytest.raises(ValueError):
algo.initialize()
def test_pipeline():
algo = get_algo('')
pipe = Mock()
algo.attach_pipeline(pipe, 'mock')
import sys
pkg = 'pipeline_live.engine'
if pkg in sys.modules:
del sys.modules[pkg]
with pytest.raises(RuntimeError):
algo.pipeline_output('mock')
mod = Mock()
sys.modules[pkg] = mod
eng = Mock()
def ctor(list_symbols):
symbols = list_symbols()
assert symbols[0] == 'ASSET0'
return eng
mod.LivePipelineEngine = ctor
eng.run_pipeline.return_value = pd.DataFrame(
[[42.0]], index=['ASSET0'], columns=['close'])
res = algo.pipeline_output('mock')
assert res.index[0].symbol == 'ASSET0'
del sys.modules[pkg]
def test_backend_param():
class Backend:
pass
bknd = Backend()
algo = Algorithm(backend=bknd)
assert algo._backend == bknd
with pytest.raises(RuntimeError):
Algorithm(backend='foo.does.not.exist')
|
py | 1a54abea92865f0c5f41e90a49ff8b85202805db | #!/usr/bin/env python
from jinja2 import Environment, FileSystemLoader
import ipaddress
from ipaddress import *
import ipaddress
import re
import yaml
loader= FileSystemLoader(".")
ENV = Environment(loader=loader)
def get_config(configfile):
"""Pulls YAML configuration from file and returns dict object"""
with open(configfile) as _:
return yaml.load(_, Loader=yaml.FullLoader)
def gen_snippet(snippet, config):
"""Renders a config snippet.
"config" represents the portion of the YAML
file applicable to this snippet"""
template = ENV.get_template('./snippets/Configuration/' + snippet + ".j2")
return template.render(config)
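# Hedged usage sketch (file and snippet names are hypothetical): the YAML config
# is split per snippet and rendered against ./snippets/Configuration/<name>.j2.
#
#   config = get_config('config.yml')
#   print(gen_snippet('interfaces', config['interfaces']))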
def printHostNameError():
    print('Invalid input for variable hostslist: hostname not supported or hostnames are in the wrong order')
"""
def supportedInputKeys(inputKeys, supportedInputKeys):
for inputKey in inputKeys:
try:
supportedInputKeys.index(inputKey)
except ValueError:
return -1
return 1
"""
def printHostName(HostName):
return "################### " + HostName + " ###################\n"
def buildIpAddress(subnet,ipOffset ):
return str( ipaddress.ip_network(subnet).network_address + ipOffset )
def buildPrefixlen(subnet):
return str( ipaddress.ip_network(subnet).prefixlen )
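# For example: buildIpAddress('10.0.0.0/24', 5) -> '10.0.0.5'
# and buildPrefixlen('10.0.0.0/24') -> '24'.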
def isInt(s):
try:
int(s)
return True
except ValueError:
return False
def isOspfArea(str):
if not isInt(str):
splitedOspfArea = str.split(".")
if len(splitedOspfArea) == 4:
for OspfAreaBits in splitedOspfArea:
if not isInt(OspfAreaBits):
return False
else:
return False
return True
def isSubnet(str):
try:
ipaddress.ip_network(str)
return True
except (AddressValueError, NetmaskValueError):
return False
except ValueError:
return False
|
py | 1a54accdfff03d35bebc32252820ef87c0c77f50 | import pyunitwizard as puw
puw.configure.load_library(['pint', 'simtk.unit'])
puw.configure.set_default_form('pint')
puw.configure.set_standard_units(['nm', 'ps', 'K', 'mole', 'amu', 'e',
'kJ/mol', 'kJ/(mol*nm**2)', 'N', 'degrees'])
|
py | 1a54ace422acec1035a1e59c0438a580b6dbfb1f | from django.contrib import admin
from .models import Flan, ContactForm, Category, Post, Comment
@admin.register(Flan)
class FlanAdmin(admin.ModelAdmin):
list_display = ('name','description','is_private','price')
prepopulated_fields = {'slug': ('name',),}
list_filter = ('is_private',)
search_fields = ('name',)
admin.site.register(ContactForm)
admin.site.register(Category)
@admin.register(Post)
class AuthorAdmin(admin.ModelAdmin):
list_display = ('title','id','status','slug','author')
prepopulated_fields = {'slug': ('title',),}
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = ('post','name','email','publish','status')
list_filter = ('status','publish')
search_fields = ('name','email','content')
# Register your models here.
|
py | 1a54ade49b5337f6480115ef4072b71a96734dd8 | # python3
from itertools import product
from sys import stdin
def partition3(values):
assert 1 <= len(values) <= 20
assert all(1 <= v <= 30 for v in values)
    total = sum(values)
    if total % 3 != 0:
        return 0
    # Simple exhaustive search: assign every value to one of three buckets and
    # check for equal sums (3**n assignments; fine for small n, slow near n=20).
    for buckets in product(range(3), repeat=len(values)):
        sums = [0, 0, 0]
        for v, b in zip(values, buckets):
            sums[b] += v
        if sums[0] == sums[1] == sums[2]:
            return 1
    return 0
if __name__ == '__main__':
input_n, *input_values = list(map(int, stdin.read().split()))
assert input_n == len(input_values)
print(partition3(input_values))
|
py | 1a54ae312b333bc0484dd8f1e32302f85f6cf43e | import sys
import threading
import time
from io import StringIO
from typing import Optional
from labml.internal.api import ApiCaller, ApiDataSource, Packet
WARMUP_COMMITS = 5
class ApiLogs(ApiDataSource):
api_caller: Optional[ApiCaller]
frequency: float
def __init__(self):
super().__init__()
self.api_caller = None
self.frequency = 1
self.last_committed = time.time()
self.commits_count = 0
self.data = {}
self.lock = threading.Lock()
def set_api(self, api_caller: ApiCaller, *, frequency: float):
self.api_caller = api_caller
self.frequency = frequency
self.check_and_flush()
def check_and_flush(self):
if self.api_caller is None:
return
with self.lock:
if not self.data:
return
t = time.time()
freq = self.frequency
if self.commits_count < WARMUP_COMMITS:
freq /= 2 ** (WARMUP_COMMITS - self.commits_count)
if self.data.get('stderr', '') != '' or self.commits_count == 0 or t - self.last_committed > freq:
self.commits_count += 1
self.api_caller.has_data(self)
def _clean(self, data: str):
last_newline = None
remove = []
for i in range(len(data)):
if data[i] == '\r':
if i + 1 < len(data) and data[i + 1] == '\n':
remove.append((i, i))
elif last_newline is not None:
remove.append((last_newline + 1, i))
last_newline = i
elif data[i] == '\n':
last_newline = i
res = []
offset = 0
for r in remove:
if offset < r[0]:
res.append(data[offset: r[0]])
offset = r[1] + 1
res.append(data[offset:])
return ''.join(res)
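    # For example, progress-bar style output such as "12%\r45%\r99%\n" collapses
    # to "12%\r99%\n": text overwritten between two carriage returns on the same
    # line is dropped before the logs are sent.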
def get_data_packet(self) -> Packet:
with self.lock:
self.last_committed = time.time()
self.data['time'] = time.time()
for type_ in ['stdout', 'logger']:
if type_ not in self.data:
continue
self.data[type_] = self._clean(self.data[type_])
packet = Packet(self.data)
self.data = {}
return packet
def outputs(self, *,
stdout_: str = '',
stderr_: str = '',
logger_: str = ''):
with self.lock:
if stdout_ != '':
self.data['stdout'] = self.data.get('stdout', '') + stdout_
if stderr_ != '':
self.data['stderr'] = self.data.get('stderr', '') + stderr_
if logger_ != '':
self.data['logger'] = self.data.get('logger', '') + logger_
self.check_and_flush()
API_LOGS = ApiLogs()
class OutputStream(StringIO):
def write(self, *args, **kwargs): # real signature unknown
super().write(*args, **kwargs)
save = StringIO()
save.write(*args, **kwargs)
API_LOGS.outputs(**{self.type_: save.getvalue()})
self.original.write(*args, **kwargs)
def __init__(self, original, type_): # real signature unknown
super().__init__()
self.type_ = type_
self.original = original
_original_stdout_write = sys.stdout.write
_original_stderr_write = sys.stderr.write
def _write_stdout(*args, **kwargs):
_original_stdout_write(*args, **kwargs)
save = StringIO()
save.write(*args, **kwargs)
API_LOGS.outputs(stdout_=save.getvalue())
def _write_stderr(*args, **kwargs):
_original_stderr_write(*args, **kwargs)
save = StringIO()
save.write(*args, **kwargs)
API_LOGS.outputs(stderr_=save.getvalue())
def capture():
sys.stdout.write = _write_stdout
sys.stderr.write = _write_stderr
capture()
|
py | 1a54ae7a1607addbeab835355ac871a9b82abf3e | model = Model()
i1 = Input("input", "TENSOR_FLOAT32", "{1, 24, 1}")
squeezeDims = Parameter("squeezeDims", "TENSOR_INT32", "{1}", [2])
output = Output("output", "TENSOR_FLOAT32", "{1, 24}")
model = model.Operation("SQUEEZE", i1, squeezeDims).To(output)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
output0 = {output: # output 0
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]}
# Instantiate an example
Example((input0, output0))
|
py | 1a54ae7f1d6af29d59f8169ea6667d3473f74ce4 | class NFCTransactionDialog(AnimatedPopup):
mode = OptionProperty('send', options=('send','receive'))
scanner = ObjectProperty(None)
def __init__(self, **kwargs):
# Delayed Init
global NFCSCanner
if NFCSCanner is None:
from electrum_xuez_gui.kivy.nfc_scanner import NFCScanner
self.scanner = NFCSCanner
super(NFCTransactionDialog, self).__init__(**kwargs)
self.scanner.nfc_init()
self.scanner.bind()
def on_parent(self, instance, value):
sctr = self.ids.sctr
if value:
def _cmp(*l):
anim = Animation(rotation=2, scale=1, opacity=1)
anim.start(sctr)
anim.bind(on_complete=_start)
def _start(*l):
anim = Animation(rotation=350, scale=2, opacity=0)
anim.start(sctr)
anim.bind(on_complete=_cmp)
_start()
return
Animation.cancel_all(sctr)
|
py | 1a54aea66ada4b284f61c2d61c3cb845b4b2840c | """
Demenchuk G.M., variant 6, standard assignments
"""
import math
import matplotlib.pyplot as plt
class GraphClass:
"""
    Loop-operator assignment variant, plus a plot.
    Class that draws the plot for the loop-operator
    assignment variant.
"""
def __init__(self):
self.graph()
def graph(self):
args = ([], [])
x = 0.2
end_cycle = 0.8
while x != end_cycle:
obj = MathUpper(x)
args[0].append(x)
args[1].append(obj.result)
x = round(x + 0.1, 2)
ax = plt.figure().gca()
ax.plot(args[0], args[1], linewidth=2, marker="o")
plt.show()
class MathUpper:
"""
    Conditional-operator assignment variant.
    Class for the first assignment
    on conditional operators.
"""
def __init__(self, x):
self.x = x
self.getter()
def getter(self):
x = self.x
upper = x ** 3 * math.e ** (x - 1)
lower = x ** 3 - math.fabs(x)
if lower == 0:
print("Знаменатель равен нулю, деление на 0!")
self.result = 0
return
first = upper / lower
log_sqrt = math.sqrt(x) - x
if log_sqrt >= 0:
buf_log = math.log(log_sqrt, 2)
else:
print("Выражение в log[sqrt(x)-x,2] меньше 0!")
self.result = 0
return
self.result = first - buf_log
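    # In other words, for a given x this computes
    # x**3 * e**(x - 1) / (x**3 - |x|) - log2(sqrt(x) - x),
    # guarding against a zero denominator and a negative log argument.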
class CycleClass:
"""
    Loop-operator assignment variant.
    Class that calls MathUpper in a loop.
"""
def __init__(self):
self.cycle()
def cycle(self):
x = 0.2
end_cycle = 0.8
while x != end_cycle:
obj = MathUpper(x)
print("x=", x, "result = ", obj.result)
x = round(x + 0.1, 2)
pass
def main():
try:
x = float(input("Введите x: "))
except:
print("Проблема ввода данных!")
return
obj = MathUpper(x)
print("\n*Условные операторы*")
print("Результат:" + str(obj.result))
print("\n*Операторы цикла*")
CycleClass()
GraphClass()
if __name__ == "__main__":
main()
|
py | 1a54af556e95323a7e06b8391496ea4f78fe7318 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class MetricDefinitionsOperations(object):
"""MetricDefinitionsOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2018-01-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-01-01"
self.config = config
def list(
self, resource_uri, metricnamespace=None, custom_headers=None, raw=False, **operation_config):
"""Lists the metric definitions for the resource.
:param resource_uri: The identifier of the resource.
:type resource_uri: str
:param metricnamespace: Metric namespace to query metric definitions
for.
:type metricnamespace: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of MetricDefinition
:rtype:
~azure.mgmt.monitor.v2018_01_01.models.MetricDefinitionPaged[~azure.mgmt.monitor.v2018_01_01.models.MetricDefinition]
:raises:
:class:`ErrorResponseException<azure.mgmt.monitor.v2018_01_01.models.ErrorResponseException>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if metricnamespace is not None:
query_parameters['metricnamespace'] = self._serialize.query("metricnamespace", metricnamespace, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.MetricDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/{resourceUri}/providers/microsoft.insights/metricDefinitions'}
|
py | 1a54af7d24e710ce77fc50cfb53ae28a3eeeeae0 | import networkx as nx
from parse import read_input_file, write_output_file
from utils import is_valid_solution, calculate_score
import sys
from os.path import basename, normpath
import glob
import heapq
import os
import copy
import random
#from Queue import PriorityQueue
def remove_edge(G, i, j):
G.remove_edge(i, j)
if nx.is_connected(G):
return G
return False
def remove_node(G, i):
G.remove_node(i)
if nx.is_connected(G):
return G
return False
def remove_edges(G, edges):
G.remove_edge_from(edges)
if nx.is_connected(G):
return G
return False
def remove_nodes(G, nodes):
G.remove_node_from(nodes)
if nx.is_connected(G):
return G
return False
def min_cut(G, size):
cut_value, partition = nx.minimum_cut(G, 0, size-1)
return cut_value, partition
'''def try_remove(pack, size, num_c=1, num_k=15):
#score = pack[0]
lst = pack[1]
#G = lst[0]
c = lst[1]
k = lst[2]
if len(c) >= num_c and len(k) >= num_k:
return pack
elif len(c) >= num_c:
return try_edge(pack, size)
else:
return try_node(pack, size)'''
def try_node_random(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
if len(path) < 3:
return None
index = random.randint(1, len(path)-2)
G = remove_node(G, path[index])
node_to_remove = path[index]
if node_to_remove:
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
C.append(node_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_node_first(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
if len(path) < 3:
return None
G = remove_node(G, path[1])
node_to_remove = path[1]
if node_to_remove:
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
C.append(node_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_node_last(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
if len(path) < 3:
return None
G = remove_node(G, path[-2])
node_to_remove = path[-2]
if node_to_remove:
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
C.append(node_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_node_gain(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
if len(path) < 3:
return None
path_edges = []
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
node_to_remove = None
dis_gain = float('-inf')
for i in range(len(path_edges)-1):
len_i = G.edges[path_edges[i][0], path_edges[i][1]]['weight'] + G.edges[path_edges[i+1][0], path_edges[i+1][1]]['weight']
paths_len = []
for j in nx.edge_disjoint_paths(G, path_edges[i][0], path_edges[i+1][1]):
path_len = 0
for k in range(len(j)-1):
path_len += G.edges[j[k], j[k+1]]['weight']
paths_len.append(path_len)
if len(paths_len)==1:
continue
paths_len.sort()
paths_len = paths_len[1:]
dis_gain_i = min(paths_len) - len_i
if dis_gain_i > dis_gain:
dis_gain = dis_gain_i
if path_edges[i][0] == 0:
node_to_remove = path_edges[i][1]
elif path_edges[i][1] == size - 1:
node_to_remove = path_edges[i][0]
else:
if G.edges[path_edges[i-1][0], path_edges[i-1][1]]['weight'] < \
G.edges[path_edges[i+1][0], path_edges[i+1][1]]['weight']:
node_to_remove = path_edges[i][0]
else:
node_to_remove = path_edges[i][1]
if node_to_remove:
G = remove_node(G, node_to_remove)
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
C.append(node_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_node_lightest(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
if len(path) < 3:
return None
path_edges = []
weight = float('inf')
index = 0
for i in range(len(path) - 1):
path_edges.append((path[i], path[i + 1]))
if G.edges[path[i], path[i + 1]]['weight'] < weight:
weight = G.edges[path[i], path[i + 1]]['weight']
index = i
edge_to_remove = path_edges[index]
if edge_to_remove[0] == 0:
G = remove_node(G, edge_to_remove[1])
node_to_remove = edge_to_remove[1]
elif edge_to_remove[1] == size-1:
G = remove_node(G, edge_to_remove[0])
node_to_remove = edge_to_remove[0]
else:
if G[path[path.index(edge_to_remove[0])-1]][edge_to_remove[0]]['weight'] < G[edge_to_remove[1]][path[path.index(edge_to_remove[1])+1]]['weight']:
G = remove_node(G, edge_to_remove[0])
node_to_remove = edge_to_remove[0]
else:
G = remove_node(G, edge_to_remove[1])
node_to_remove = edge_to_remove[1]
if node_to_remove and G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
C.append(node_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_node_cut(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
if len(path) < 3:
return None
path_edges = []
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
min_cut_set = list(nx.connectivity.minimum_edge_cut(G, 0, size - 1))
edge_to_remove = None
for i in path_edges:
if i in min_cut_set:
edge_to_remove = i
break
if edge_to_remove[0] == 0:
G = remove_node(G, edge_to_remove[1])
node_to_remove = edge_to_remove[1]
elif edge_to_remove[1] == size-1:
G = remove_node(G, edge_to_remove[0])
node_to_remove = edge_to_remove[0]
else:
if G[path[path.index(edge_to_remove[0])-1]][edge_to_remove[0]]['weight'] < G[edge_to_remove[1]][path[path.index(edge_to_remove[1])+1]]['weight']:
G = remove_node(G, edge_to_remove[0])
node_to_remove = edge_to_remove[0]
else:
G = remove_node(G, edge_to_remove[1])
            node_to_remove = edge_to_remove[1]  # record the node that was actually removed
if node_to_remove and G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
C.append(node_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_edge_gain(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
path_edges = []
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
edge_to_remove = None
dis_gain = float('-inf')
for i in range(len(path_edges)):
len_i = G.edges[path_edges[i][0], path_edges[i][1]]['weight']
paths_len = []
for j in nx.edge_disjoint_paths(G, path_edges[i][0], path_edges[i][1]):
path_len = 0
for k in range(len(j)-1):
path_len += G.edges[j[k], j[k+1]]['weight']
paths_len.append(path_len)
if len(paths_len)==1:
continue
paths_len.sort()
paths_len = paths_len[1:]
dis_gain_i = min(paths_len) - len_i
if dis_gain_i > dis_gain:
dis_gain = dis_gain_i
edge_to_remove = path_edges[i]
if edge_to_remove:
G = remove_edge(G, edge_to_remove[0], edge_to_remove[1])
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
K.append(edge_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_edge_cut(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
path_edges = []
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
min_cut_set = list(nx.connectivity.minimum_edge_cut(G, 0, size - 1))
edge_to_remove = None
for i in path_edges:
if i in min_cut_set:
edge_to_remove = i
break
if edge_to_remove:
G = remove_edge(G, edge_to_remove[0], edge_to_remove[1])
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
K.append(edge_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
'''
def try_edge_cut_2(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
path_edges = []
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
min_cut_set = list(nx.connectivity.minimum_st_edge_cut(G, 0, size - 1))
edge_to_remove = None
for i in path_edges:
if i in min_cut_set:
edge_to_remove = i
min_cut_set.remove(i)
break
if edge_to_remove:
weight = float('inf')
second_edge = None
neighbor = list(G.neighbors(edge_to_remove[0]))
edge_set = [(edge_to_remove[0], j) for j in neighbor if j != edge_to_remove[1]]
for i in edge_set:
temp = G.edges[i[0], i[1]]['weight']
if temp < weight:
weight = temp
second_edge = i
G = remove_edge(G, edge_to_remove[0], edge_to_remove[1])
if G and second_edge:
G = remove_edge(G, second_edge[0], second_edge[1])
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
K.append(edge_to_remove)
K.append(second_edge)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
'''
def try_edge_random(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
path_edges = []
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
edge_to_remove = path_edges[random.randint(0, len(path_edges)-1)]
if edge_to_remove:
G = remove_edge(G, edge_to_remove[0], edge_to_remove[1])
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
K.append(edge_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_edge_first(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
path_edges = []
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
edge_to_remove = path_edges[0]
if edge_to_remove:
G = remove_edge(G, edge_to_remove[0], edge_to_remove[1])
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
K.append(edge_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_edge_last(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
path_edges = []
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
edge_to_remove = path_edges[-1]
if edge_to_remove:
G = remove_edge(G, edge_to_remove[0], edge_to_remove[1])
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
K.append(edge_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
def try_edge_lightest(pack, size, datum):
lst = pack[3]
G = lst[0]
C = lst[1]
K = lst[2]
path = nx.dijkstra_path(G, 0, size-1)
path_edges = []
weight = float('inf')
index = 0
for i in range(len(path)-1):
path_edges.append((path[i], path[i+1]))
if G.edges[path[i], path[i+1]]['weight'] < weight:
weight = G.edges[path[i], path[i+1]]['weight']
index = i
edge_to_remove = path_edges[index]
if edge_to_remove:
G = remove_edge(G, edge_to_remove[0], edge_to_remove[1])
if G:
value = nx.dijkstra_path_length(G, 0, size - 1)
diff = value - datum
K.append(edge_to_remove)
opts = len(C)*2 + len(K)
new_pack = (diff, diff/opts, random.random(), [G, C, K])
return new_pack
return None
'''
#########################################
#########################################
#########################################
make sure to change num_c and num_k when changing graph size
small graph num_c = 1 and num_k = 15
medium graph num_c = 3 and num_k = 50
large graph num_c = 5 and num_k = 100
#########################################
#########################################
#########################################
'''
def is_finished(pack, num_c=3, num_k=50):
lst = pack[3]
C = lst[1]
K = lst[2]
return num_c - len(C), num_k - len(K)
def clean_graph(G):
edges = list(G.edges)
for i in edges:
if i[0] == i[1]:
G.remove_edge(i[0], i[1])
return G
def prune_pq(pq):
respq = [i for i in pq if i]
numspq1 = []
numspq2 = []
cachespq = []
for i in respq:
if i[0] not in numspq1 and i[1] not in numspq2:
numspq1.append(i[0])
numspq2.append(i[1])
cachespq.append(i)
return cachespq
def solve(G):
"""
Args:
G: networkx.Graph
Returns:
c: list of cities to remove
k: list of edges to remove
"""
size = G.number_of_nodes()
datum = nx.dijkstra_path_length(G, 0, size - 1)
pq = []
G_prime = copy.deepcopy(G)
G_prime = clean_graph(G_prime)
cache = (0, 0, 0, [G_prime, [], []])
heapq.heappush(pq, cache)
count = 0
count1 = 0
score = float('inf')
while pq:
caches = []
if len(pq) <= 512:
#cache = heapq.nlargest(1, pq)[0]
#if score == cache[0]:
#cache = heapq.heappop(pq)
'''tries the smallest or the largest#############'''
#if count1 & 2 == 1:
#cache = heapq.nlargest(1, pq)[0]
#else:
#cache = heapq.heappop(pq)
'''only pops the smallest###############'''
cache = heapq.heappop(pq)
#score = cache[0]
c, k = is_finished(cache)
'''edit the combination of these transitions##########################'''
if c > 0:
caches.append(try_node_random(copy.deepcopy(cache), size, datum))
caches.append(try_node_last(copy.deepcopy(cache), size, datum))
caches.append(try_node_first(copy.deepcopy(cache), size, datum))
caches.append(try_node_gain(copy.deepcopy(cache), size, datum))
caches.append(try_node_cut(copy.deepcopy(cache), size, datum))
caches.append(try_edge_random(copy.deepcopy(cache), size, datum))
caches.append(try_node_lightest(copy.deepcopy(cache), size, datum))
#if k > 1:
#caches.append(try_edge_cut_2(copy.deepcopy(cache), size, datum))
if k > 0:
#caches.append(try_edge_first(copy.deepcopy(cache), size, datum))
#caches.append(try_edge_last(copy.deepcopy(cache), size, datum))
caches.append(try_edge_gain(copy.deepcopy(cache), size, datum))
caches.append(try_edge_cut(copy.deepcopy(cache), size, datum))
caches.append(try_edge_random(copy.deepcopy(cache), size, datum))
caches.append(try_edge_random(copy.deepcopy(cache), size, datum))
caches.append(try_edge_lightest(copy.deepcopy(cache), size, datum))
res = [i for i in caches if i]
nums = []
caches = []
for i in res:
if i[0] not in nums:
nums.append(i[0])
caches.append(i)
if len(caches) > 0:
caches.sort(reverse=True)
for i in range(len(caches)):
if caches[i]:
heapq.heappush(pq, caches[i])
count += 1
pq = prune_pq(pq)
'''edit the width of searching#################################'''
if len(pq) > 64:
while len(pq) > 64:
heapq.heappop(pq)
'''edit the schedule of narrowing##############################'''
if count > 500:
while len(pq) > 32:
heapq.heappop(pq)
if count > 1000:
while len(pq) > 16:
heapq.heappop(pq)
elif count > 1500:
while len(pq) > 8:
heapq.heappop(pq)
elif count > 2000:
while len(pq) > 4:
heapq.heappop(pq)
count1 += 1
print(cache[0])
#print(count)
print(len(pq))
score = cache[0]
print(cache[0])
C = cache[3][1]
K = cache[3][2]
return C, K
pass
input_dir = r"C:\Users\antho\Desktop\untitled\inputs\medium"
output_dir = r"C:\Users\antho\Desktop\untitled\outputs\medium4"
input_file = os.listdir(input_dir)
if __name__ == '__main__':
q = 0
for g in input_file:
if q >= 118:
c, k = None, None
input_path = input_dir + '/' + g
G = read_input_file(input_path)
c, k = solve(G)
assert is_valid_solution(G, c, k)
print("Shortest Path Difference: {}".format(calculate_score(G, c, k)))
write_output_file(G, c, k, output_dir + "//" + g[:-3] + '.out')
q += 1
#if __name__ == '__main__':
# for med_g in graph_med:
# input_path = med_dir + '/' + med_g
# G = read_input_file(input_path)
# c, k = solve(G)
# assert is_valid_solution(G, c, k)
# print("Shortest Path Difference: {}".format(calculate_score(G, c, k)))
# output_dir = r"C:\Users\antho\Desktop\170\project\outputs\medium"
# write_output_file(G, c, k, output_dir + "//" + med_g[:-3] + '.out')
# Here's an example of how to run your solver.
# Usage: python3 solver.py test.in
'''
if __name__ == '__main__':
assert len(sys.argv) == 2
path = sys.argv[1]
G = read_input_file(path)
c, k = solve(G)
assert is_valid_solution(G, c, k)
print("Shortest Path Difference: {}".format(calculate_score(G, c, k)))
write_output_file(G, c, k, 'outputs/small-1.out')'''
# For testing a folder of inputs to create a folder of outputs, you can use glob (need to import it)
# if __name__ == '__main__':
# inputs = glob.glob('inputs/*')
# for input_path in inputs:
# output_path = 'outputs/' + basename(normpath(input_path))[:-3] + '.out'
# G = read_input_file(input_path)
# c, k = solve(G)
# assert is_valid_solution(G, c, k)
# distance = calculate_score(G, c, k)
# write_output_file(G, c, k, output_path)
|
py | 1a54afa29f1cb6256074980c6b4384a71e42973d | from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object
class W_ProcObject(W_Object):
classdef = ClassDef("Proc", W_Object.classdef)
def __init__(self, space, bytecode, w_self, lexical_scope, cells, block,
parent_interp, top_parent_interp, regexp_match_cell,
is_lambda):
W_Object.__init__(self, space)
self.bytecode = bytecode
self.w_self = w_self
self.lexical_scope = lexical_scope
self.cells = cells
self.block = block
self.parent_interp = parent_interp
self.top_parent_interp = top_parent_interp
self.regexp_match_cell = regexp_match_cell
self.is_lambda = is_lambda
def copy(self, space, w_self=None, lexical_scope=None, is_lambda=False):
return W_ProcObject(
space, self.bytecode,
w_self or self.w_self,
lexical_scope or self.lexical_scope,
self.cells, self.block, self.parent_interp, self.top_parent_interp,
self.regexp_match_cell,
is_lambda or self.is_lambda
)
@classdef.singleton_method("new")
def method_new(self, space, block):
if block is None:
raise space.error(space.w_ArgumentError, "tried to create Proc object without a block")
return block.copy(space)
method_allocate = classdef.undefine_allocator()
@classdef.method("yield")
@classdef.method("===")
@classdef.method("[]")
@classdef.method("call")
def method_call(self, space, args_w, block):
from topaz.interpreter import RaiseReturn, RaiseBreak
try:
return space.invoke_block(self, args_w, block_arg=block)
except RaiseReturn as e:
if self.is_lambda:
return e.w_value
else:
raise
except RaiseBreak as e:
if self.is_lambda:
return e.w_value
else:
raise space.error(space.w_LocalJumpError, "break from proc-closure")
@classdef.method("lambda?")
def method_lambda(self, space):
return space.newbool(self.is_lambda)
@classdef.method("arity")
def method_arity(self, space):
return space.newint(self.bytecode.arity(negative_defaults=self.is_lambda))
@classdef.method("binding")
def method_binding(self, space):
return space.newbinding_fromblock(self)
|
py | 1a54b02fb8e1ab546c4c9774e505086acd02def3 | #
# Copyright 2013-2014 eNovance <[email protected]>
#
# Authors: Mehdi Abaakouk <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from ceilometerclient.v2 import alarms
import eventlet
from oslo.config import fixture as fixture_config
from oslo.utils import timeutils
import six
from ceilometer.alarm import rpc as rpc_alarm
from ceilometer.alarm.storage import models
from ceilometer import messaging
from ceilometer.tests import base as tests_base
class FakeNotifier(object):
def __init__(self, transport):
self.rpc = messaging.get_rpc_server(
transport, "alarm_notifier", self)
self.notified = []
def start(self, expected_length):
self.expected_length = expected_length
self.rpc.start()
def notify_alarm(self, context, data):
self.notified.append(data)
if len(self.notified) == self.expected_length:
self.rpc.stop()
class TestRPCAlarmNotifier(tests_base.BaseTestCase):
def setUp(self):
super(TestRPCAlarmNotifier, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.setup_messaging(self.CONF)
self.notifier_server = FakeNotifier(self.transport)
self.notifier = rpc_alarm.RPCAlarmNotifier()
self.alarms = [
alarms.Alarm(None, info={
'name': 'instance_running_hot',
'meter_name': 'cpu_util',
'comparison_operator': 'gt',
'threshold': 80.0,
'evaluation_periods': 5,
'statistic': 'avg',
'state': 'ok',
'ok_actions': ['http://host:8080/path'],
'user_id': 'foobar',
'project_id': 'snafu',
'period': 60,
'alarm_id': str(uuid.uuid4()),
'matching_metadata':{'resource_id':
'my_instance'}
}),
alarms.Alarm(None, info={
'name': 'group_running_idle',
'meter_name': 'cpu_util',
'comparison_operator': 'le',
'threshold': 10.0,
'statistic': 'max',
'evaluation_periods': 4,
'state': 'insufficient data',
'insufficient_data_actions': ['http://other_host/path'],
'user_id': 'foobar',
'project_id': 'snafu',
'period': 300,
'alarm_id': str(uuid.uuid4()),
'matching_metadata':{'metadata.user_metadata.AS':
'my_group'}
}),
]
def test_rpc_target(self):
topic = self.notifier.client.target.topic
self.assertEqual('alarm_notifier', topic)
def test_notify_alarm(self):
self.notifier_server.start(2)
previous = ['alarm', 'ok']
for i, a in enumerate(self.alarms):
self.notifier.notify(a, previous[i], "what? %d" % i,
{'fire': '%d' % i})
self.notifier_server.rpc.wait()
self.assertEqual(2, len(self.notifier_server.notified))
for i, a in enumerate(self.alarms):
actions = getattr(a, models.Alarm.ALARM_ACTIONS_MAP[a.state])
self.assertEqual(self.alarms[i].alarm_id,
self.notifier_server.notified[i]["alarm_id"])
self.assertEqual(actions,
self.notifier_server.notified[i]["actions"])
self.assertEqual(previous[i],
self.notifier_server.notified[i]["previous"])
self.assertEqual(self.alarms[i].state,
self.notifier_server.notified[i]["current"])
self.assertEqual("what? %d" % i,
self.notifier_server.notified[i]["reason"])
self.assertEqual({'fire': '%d' % i},
self.notifier_server.notified[i]["reason_data"])
def test_notify_non_string_reason(self):
self.notifier_server.start(1)
self.notifier.notify(self.alarms[0], 'ok', 42, {})
self.notifier_server.rpc.wait()
reason = self.notifier_server.notified[0]['reason']
self.assertIsInstance(reason, six.string_types)
def test_notify_no_actions(self):
alarm = alarms.Alarm(None, info={
'name': 'instance_running_hot',
'meter_name': 'cpu_util',
'comparison_operator': 'gt',
'threshold': 80.0,
'evaluation_periods': 5,
'statistic': 'avg',
'state': 'ok',
'user_id': 'foobar',
'project_id': 'snafu',
'period': 60,
'ok_actions': [],
'alarm_id': str(uuid.uuid4()),
'matching_metadata': {'resource_id':
'my_instance'}
})
self.notifier.notify(alarm, 'alarm', "what?", {})
self.assertEqual(0, len(self.notifier_server.notified))
class FakeCoordinator(object):
def __init__(self, transport):
self.rpc = messaging.get_rpc_server(
transport, "alarm_partition_coordination", self)
self.notified = []
def presence(self, context, data):
self._record('presence', data)
def allocate(self, context, data):
self._record('allocate', data)
def assign(self, context, data):
self._record('assign', data)
def _record(self, method, data):
self.notified.append((method, data))
self.rpc.stop()
class TestRPCAlarmPartitionCoordination(tests_base.BaseTestCase):
def setUp(self):
super(TestRPCAlarmPartitionCoordination, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.setup_messaging(self.CONF)
self.coordinator_server = FakeCoordinator(self.transport)
self.coordinator_server.rpc.start()
eventlet.sleep() # must be sure that fanout queue is created
self.coordination = rpc_alarm.RPCAlarmPartitionCoordination()
self.alarms = [
alarms.Alarm(None, info={
'name': 'instance_running_hot',
'meter_name': 'cpu_util',
'comparison_operator': 'gt',
'threshold': 80.0,
'evaluation_periods': 5,
'statistic': 'avg',
'state': 'ok',
'ok_actions': ['http://host:8080/path'],
'user_id': 'foobar',
'project_id': 'snafu',
'period': 60,
'alarm_id': str(uuid.uuid4()),
'matching_metadata':{'resource_id':
'my_instance'}
}),
alarms.Alarm(None, info={
'name': 'group_running_idle',
'meter_name': 'cpu_util',
'comparison_operator': 'le',
'threshold': 10.0,
'statistic': 'max',
'evaluation_periods': 4,
'state': 'insufficient data',
'insufficient_data_actions': ['http://other_host/path'],
'user_id': 'foobar',
'project_id': 'snafu',
'period': 300,
'alarm_id': str(uuid.uuid4()),
'matching_metadata':{'metadata.user_metadata.AS':
'my_group'}
}),
]
def test_coordination_presence(self):
id = str(uuid.uuid4())
priority = float(timeutils.utcnow().strftime('%s.%f'))
self.coordination.presence(id, priority)
self.coordinator_server.rpc.wait()
method, args = self.coordinator_server.notified[0]
self.assertEqual(id, args['uuid'])
self.assertEqual(priority, args['priority'])
self.assertEqual('presence', method)
def test_coordination_assign(self):
id = str(uuid.uuid4())
self.coordination.assign(id, self.alarms)
self.coordinator_server.rpc.wait()
method, args = self.coordinator_server.notified[0]
self.assertEqual(id, args['uuid'])
self.assertEqual(2, len(args['alarms']))
self.assertEqual('assign', method)
def test_coordination_allocate(self):
id = str(uuid.uuid4())
self.coordination.allocate(id, self.alarms)
self.coordinator_server.rpc.wait()
method, args = self.coordinator_server.notified[0]
self.assertEqual(id, args['uuid'])
self.assertEqual(2, len(args['alarms']))
self.assertEqual('allocate', method)
|
py | 1a54b042e856ba0cf340924267502abd3522ffbf | import random
#file1=open("edge.txt","r")
#file2=open("edge_sssp.txt","w");
#for line in file1:
# file2.write(line.replace("\n","\t")+str(random.randint(1,9))+"\n")
#file1.close()
#file2.close()
file1=open("node.txt","r")
file2=open("node_bp.txt","w")
i=0;
for line in file1:
if i==0:
file2.write(line.replace("\n","\t")+"0.8\n")
else:
file2.write(line.replace("\n","\t")+"0.8\n")
i=i+1
file1.close()
file2.close()
|
py | 1a54b0a76a101a02e17489015f44689187d1831e | import datetime
import os
# =================================================
# Background Information
# -------------------------------------------------
mip = "cmip5"
exp = "historical"
frequency = "mo"
realm = "atm"
# =================================================
# Analysis Options
# -------------------------------------------------
variability_mode = "NAM" # Available domains: NAM, NAO, SAM, PNA, PDO
seasons = [
"DJF",
"MAM",
"JJA",
"SON",
] # Available seasons: DJF, MAM, JJA, SON, monthly, yearly
RemoveDomainMean = True # Remove Domain Mean from each time step (default=True)
EofScaling = False # Convert EOF pattern as unit variance (default=False)
landmask = False # Maskout land region thus consider only ocean grid (default=False)
ConvEOF = True # Calculate conventioanl EOF for model
CBF = True # Calculate Common Basis Function (CBF) for model
# =================================================
# Miscellaneous
# -------------------------------------------------
update_json = True # False
debug = False # False
# =================================================
# Observation
# -------------------------------------------------
reference_data_name = "NOAA-CIRES_20CR"
reference_data_path = os.path.join(
"/p/user_pub/PCMDIobs/PCMDIobs2/atmos/mon/psl/20CR/gn/v20200707",
"psl_mon_20CR_BE_gn_v20200707_187101-201212.nc",
)
varOBS = "psl"
ObsUnitsAdjust = (True, "divide", 100.0) # Pa to hPa; or (False, 0, 0)
osyear = 1900
oeyear = 2005
eofn_obs = 1
# =================================================
# Models
# -------------------------------------------------
modpath = os.path.join(
"/p/user_pub/pmp/pmp_results/pmp_v1.1.2/additional_xmls/latest/v20200116",
"%(mip)/%(exp)/atmos/mon/%(variable)",
"%(mip).%(exp).%(model).%(realization).mon.%(variable).xml",
)
modnames = [
"ACCESS1-0",
"ACCESS1-3",
"BCC-CSM1-1",
"BCC-CSM1-1-M",
"BNU-ESM",
"CanCM4",
"CanESM2",
"CCSM4",
"CESM1-BGC",
"CESM1-CAM5",
"CESM1-FASTCHEM",
"CESM1-WACCM",
"CMCC-CESM",
"CMCC-CM",
"CMCC-CMS",
"CNRM-CM5",
"CNRM-CM5-2",
"CSIRO-Mk3-6-0",
"EC-EARTH",
"FGOALS-g2",
"FGOALS-s2",
"FIO-ESM",
"FIO-ESM",
"GFDL-CM2p1",
"GFDL-CM3",
"GFDL-ESM2G",
"GFDL-ESM2M",
"GISS-E2-H",
"GISS-E2-H-CC",
"GISS-E2-R",
"GISS-E2-R-CC",
"HadCM3",
"HadGEM2-AO",
"HadGEM2-CC",
"HadGEM2-ES",
"INMCM4",
"IPSL-CM5A-LR",
"IPSL-CM5A-MR",
"IPSL-CM5B-LR",
"MIROC-ESM",
"MIROC-ESM-CHEM",
"MIROC4h",
"MIROC5",
"MPI-ESM-LR",
"MPI-ESM-MR",
"MPI-ESM-P",
"NorESM1-M",
"NorESM1-ME",
]
modnames = ["all"]
# modnames = ['ACCESS1-0']
realization = "*" # realizations
# realization = 'r1i1p1'
varModel = "psl"
ModUnitsAdjust = (True, "divide", 100.0) # Pa to hPa
msyear = 1900
meyear = 2005
eofn_mod = 1
# =================================================
# Output
# -------------------------------------------------
case_id = "{:v%Y%m%d}".format(datetime.datetime.now())
pmprdir = "/p/user_pub/pmp/pmp_results/pmp_v1.1.2"
if debug:
pmprdir = "/work/lee1043/imsi/result_test"
results_dir = os.path.join(
pmprdir,
"%(output_type)",
"variability_modes",
"%(mip)",
"%(exp)",
"%(case_id)",
"%(variability_mode)",
"%(reference_data_name)",
)
nc_out = True # Write output in NetCDF
plot = True # Create map graphics
|
py | 1a54b117db06556e9d67044c96ded32508db8c04 | import numpy as np
def get_samples_complex(fp, n):
    # Read n interleaved (real, imag) int8 sample pairs from the file object
    # and return them as a complex64 array, or None on a short read.
    z = fp.read(2 * n)
    if len(z) != 2 * n:
        return None
    # np.frombuffer replaces the deprecated np.fromstring for binary input
    s = np.frombuffer(z, dtype='int8')
    s.shape = (n, 2)
    x = np.empty(n, dtype='c8')
    x.real = s[:, 0]
    x.imag = s[:, 1]
    return x
|
py | 1a54b141931780046d09a15b40dc26bb3efcaf12 | import sys
import os
import cfnresponse
import boto3
import botocore
import json
import logging
logger = logging.getLogger()
logger.setLevel(os.getenv("LOG_LEVEL", "DEBUG"))
def lambda_handler(event, context):
    result = cfnresponse.SUCCESS
    reason = None
    role_arns = {}
    missing_roles = []
    try:
        logger.debug("Received event: {}".format(json.dumps(event)))
        client = boto3.client("iam")
        # Pull identifiers from the request (passed as Properties in the custom resource)
        role_names = event["ResourceProperties"].get("RoleNames", [])
if event["RequestType"] in ["Create", "Update"]:
for name in role_names:
key = name.split("-")[-1] # Strip the leading ProjectName from role name
try:
logger.debug(f"Checking Account Roles for {name}")
role = client.get_role(RoleName=name)["Role"]
role_arn = role["Arn"]
logger.debug(f"Role already exists: {role_arn}")
role_arns[key + "Arn"] = role_arn
role_arns[key + "Name"] = role["RoleName"]
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] in ["NoSuchEntity", "AccessDenied"]:
logger.error(f"{name} Role does not exist")
# The roles should be deployed all at once or not at all (via the supplied template);
# therefore, it does not make sense to proceed with the deployment if one of them is missing
result = cfnresponse.FAILED
missing_roles.append(name)
else:
logger.error("Uncaught boto exception", e)
result = cfnresponse.FAILED
elif event["RequestType"] == "Delete":
logger.info("Delete request - NOOP")
result = cfnresponse.SUCCESS
except Exception as e:
logger.error("Error: {}".format(e))
result = cfnresponse.FAILED
responseData = role_arns
if result == cfnresponse.FAILED:
reason = ("Required roles were not found in account; please use or refer to the ast-iam-role template for a "
"list of required roles. The following roles were not found: " + ", ".join(missing_roles))
logger.info("Returning response of: {}, with result of: {}".format(result, responseData))
sys.stdout.flush()
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html
cfnresponse.send(event, context, result, responseData, reason=reason)
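# The dict below is a minimal, illustrative sketch (not read by the handler) of
# the custom-resource event shape lambda_handler() consumes; only the fields the
# handler actually uses are shown, and the role names are hypothetical.
_EXAMPLE_EVENT = {
    "RequestType": "Create",
    "ResourceProperties": {
        "RoleNames": ["MyProject-DeploymentRole", "MyProject-ExecutionRole"]
    },
}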
|
py | 1a54b227d1280d53944ef2dbe0063b87437ed111 | import math
from typing import Sequence, Tuple
import torch
from torch import nn
from torch.nn import functional as F
from tensorfn.config import config_model
from pydantic import StrictInt, StrictFloat
from .layer import DropPath, tuple2, PositionwiseFeedForward
LayerNorm = lambda x: nn.LayerNorm(x, eps=1e-6)
def patchify(input, size):
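    # (B, H, W, D) -> (B, H // size, W // size, size * size * D): each
    # non-overlapping size x size patch is folded into the channel dimension.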
batch, height, width, dim = input.shape
return (
input.view(batch, height // size, size, width // size, size, dim)
.permute(0, 1, 3, 2, 4, 5)
.reshape(batch, height // size, width // size, -1)
)
class PositionalEncodingGenerator(nn.Module):
def __init__(self, dim):
super().__init__()
self.proj = nn.Conv2d(dim, dim, 3, padding=1, bias=False, groups=dim)
def forward(self, input):
out = input.permute(0, 3, 1, 2)
out = self.proj(out) + out
out = out.permute(0, 2, 3, 1)
return out
class MultiHeadedAttention(nn.Module):
def __init__(self, dim, n_head, reduction=1, dropout=0):
super().__init__()
self.dim_head = dim // n_head
self.n_head = n_head
self.linear_q = nn.Linear(dim, dim, bias=False)
self.linear_kv = nn.Linear(dim, dim * 2, bias=False)
self.linear = nn.Linear(dim, dim)
self.dropout = dropout
self.reduction = reduction
if self.reduction > 1:
self.reduce_conv = nn.Conv2d(
dim, dim, self.reduction, stride=self.reduction
)
def forward(self, input):
batch_size, height, width, _ = input.shape
def reshape(input):
return input.reshape(batch_size, -1, self.n_head, self.dim_head).transpose(
1, 2
)
query = reshape(self.linear_q(input))
if self.reduction > 1:
dim = input.shape[-1]
reduc = input.transpose(1, 2).reshape(batch_size, dim, height, width)
reduc = self.reduce_conv(reduc).reshape(batch_size, dim, -1).transpose(1, 2)
kv = reduc
else:
kv = input
key, value = self.linear_kv(kv).chunk(2, dim=2)
key = reshape(key).transpose(2, 3)
value = reshape(value)
score = query @ key / math.sqrt(self.dim_head)
attn = F.softmax(score, 3)
attn = F.dropout(attn, self.dropout, training=self.training)
out = attn @ value
out = out.transpose(1, 2).reshape(
batch_size, height, width, self.dim_head * self.n_head
)
out = self.linear(out)
return out
class MultiHeadedLocalAttention(nn.Module):
def __init__(self, dim, n_head, dim_head, window_size, dropout=0):
super().__init__()
self.dim_head = dim_head
self.n_head = n_head
self.weight = nn.Linear(dim, n_head * dim_head * 3, bias=True)
self.linear = nn.Linear(n_head * dim_head, dim)
self.window_size = window_size
self.dropout = dropout
def forward(self, input):
batch, height, width, dim = input.shape
h_stride = height // self.window_size
w_stride = width // self.window_size
window = self.window_size
def reshape(input):
return (
input.reshape(
batch,
h_stride,
window,
w_stride,
window,
self.n_head,
self.dim_head,
)
.permute(0, 1, 3, 5, 2, 4, 6)
.reshape(batch, -1, self.n_head, window * window, self.dim_head)
)
query, key, value = self.weight(input).chunk(3, dim=-1) # B, S, H, W^2, D
query = reshape(query)
key = reshape(key).transpose(-2, -1)
value = reshape(value)
score = query @ key / math.sqrt(self.dim_head) # B, S, H, W^2, W^2
attn = F.softmax(score, -1)
attn = F.dropout(attn, self.dropout, training=self.training)
out = attn @ value # B, S, H, W^2, D
out = (
out.view(
batch, h_stride, w_stride, self.n_head, window, window, self.dim_head
)
.permute(0, 1, 4, 2, 5, 3, 6)
.reshape(batch, height, width, self.n_head * self.dim_head)
)
out = self.linear(out)
return out
class TransformerLayer(nn.Module):
def __init__(
self,
dim,
n_head,
dim_head,
dim_ff,
window_size,
activation=nn.SiLU,
drop_ff=0,
drop_attn=0,
drop_path=0,
):
super().__init__()
self.norm_attn_local = LayerNorm(dim)
self.attn_local = MultiHeadedLocalAttention(
dim, n_head, dim_head, window_size, drop_attn
)
self.norm_ff_local = LayerNorm(dim)
self.ff_local = PositionwiseFeedForward(
dim, dim_ff, activation=activation, dropout=drop_ff
)
self.norm_attn_global = LayerNorm(dim)
self.attn_global = MultiHeadedAttention(dim, n_head, window_size, drop_attn)
self.norm_ff_global = LayerNorm(dim)
self.ff_global = PositionwiseFeedForward(
dim, dim_ff, activation=activation, dropout=drop_ff
)
self.drop_path = DropPath(drop_path)
def set_drop_path(self, p):
self.drop_path.p = p
def forward(self, input):
out = input + self.drop_path(self.attn_local(self.norm_attn_local(input)))
out = out + self.drop_path(self.ff_local(self.norm_ff_local(out)))
out = out + self.drop_path(self.attn_global(self.norm_attn_global(out)))
out = out + self.drop_path(self.ff_global(self.norm_ff_global(out)))
return out
class PatchEmbedding(nn.Module):
def __init__(self, in_dim, out_dim, window_size):
super().__init__()
self.window_size = window_size
self.linear = nn.Linear(in_dim * window_size * window_size, out_dim)
self.norm = nn.LayerNorm(out_dim)
def forward(self, input):
out = patchify(input, self.window_size)
out = self.linear(out)
out = self.norm(out)
return out
def reduce_size(size, reduction):
return (size[0] // reduction, size[1] // reduction)
@config_model(name="twins_svt", namespace="model", use_type=True)
class TwinsSVT(nn.Module):
def __init__(
self,
n_class: StrictInt,
depths: Tuple[StrictInt, StrictInt, StrictInt, StrictInt],
dims: Tuple[StrictInt, StrictInt, StrictInt, StrictInt],
dim_head: StrictInt,
n_heads: Tuple[StrictInt, StrictInt, StrictInt, StrictInt],
dim_ffs: Tuple[StrictInt, StrictInt, StrictInt, StrictInt],
window_size: StrictInt,
drop_ff: StrictFloat = 0.0,
drop_attn: StrictFloat = 0.0,
drop_path: StrictFloat = 0.0,
):
super().__init__()
self.depths = depths
def make_block(i, in_dim, reduction):
return self.make_block(
depths[i],
in_dim,
dims[i],
n_heads[i],
dim_head,
dim_ffs[i],
window_size,
reduction,
drop_ff,
drop_attn,
)
self.block1 = make_block(0, 3, 4)
self.block2 = make_block(1, dims[0], 2)
self.block3 = make_block(2, dims[1], 2)
self.block4 = make_block(3, dims[2], 2)
self.final_linear = nn.Sequential(nn.LayerNorm(dims[-1]))
linear = nn.Linear(dims[-1], n_class)
nn.init.normal_(linear.weight, std=0.02)
nn.init.zeros_(linear.bias)
self.classifier = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(1), linear)
self.apply(self.init_weights)
self.set_dropout(None, drop_path)
def set_dropout(self, dropout, drop_path):
n_blocks = sum(self.depths)
dp_rate = [drop_path * float(i) / n_blocks for i in range(n_blocks)]
i = 0
        # PatchEmbedding and PositionalEncodingGenerator modules have no
        # set_drop_path, so they are simply skipped.
        for blocks in (self.block1, self.block2, self.block3, self.block4):
            for block in blocks:
                try:
                    block.set_drop_path(dp_rate[i])
                    i += 1
                except:
                    continue
def init_weights(self, module):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=0.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
def make_block(
self,
depth,
in_dim,
dim,
n_head,
dim_head,
dim_ff,
window_size,
reduction,
drop_ff,
drop_attn,
):
block = [PatchEmbedding(in_dim, dim, reduction)]
for i in range(depth):
block.append(
TransformerLayer(
dim,
n_head,
dim_head,
dim_ff,
window_size,
drop_ff=drop_ff,
drop_attn=drop_attn,
)
)
if i == 0:
block.append(PositionalEncodingGenerator(dim))
return nn.Sequential(*block)
def forward(self, input):
out = self.block1(input.permute(0, 2, 3, 1))
out = self.block2(out)
out = self.block3(out)
out = self.block4(out)
out = self.final_linear(out).permute(0, 3, 1, 2)
out = self.classifier(out)
return out
|
py | 1a54b273fca55b21ca20058463e43ce47010cc68 | #!/usr/bin/env python
#
# esp-idf NVS partition generation tool. Tool helps in generating NVS-compatible
# partition binary, with key-value pair entries provided via a CSV file.
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division, print_function
import argparse
import array
import binascii
import codecs
import datetime
import distutils.dir_util
import os
import random
import struct
import sys
import zlib
from builtins import bytes, int, range
from io import open
from itertools import zip_longest
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
except ImportError:
    print('The cryptography package is not installed. '
          'Please refer to the Get Started section of the ESP-IDF Programming Guide for '
          'setting up the required packages.')
raise
VERSION1_PRINT = 'V1 - Multipage Blob Support Disabled'
VERSION2_PRINT = 'V2 - Multipage Blob Support Enabled'
def reverse_hexbytes(addr_tmp):
addr = []
reversed_bytes = ''
for i in range(0, len(addr_tmp), 2):
addr.append(addr_tmp[i:i + 2])
reversed_bytes = ''.join(reversed(addr))
return reversed_bytes
""" Class for standard NVS page structure """
class Page(object):
PAGE_PARAMS = {
'max_size': 4096,
'max_old_blob_size': 1984,
'max_new_blob_size': 4000,
'max_entries': 126
}
# Item type codes
U8 = 0x01
I8 = 0x11
U16 = 0x02
I16 = 0x12
U32 = 0x04
I32 = 0x14
U64 = 0x08
I64 = 0x18
SZ = 0x21
BLOB = 0x41
BLOB_DATA = 0x42
BLOB_IDX = 0x48
# Few Page constants
HEADER_SIZE = 32
BITMAPARRAY_OFFSET = 32
BITMAPARRAY_SIZE_IN_BYTES = 32
FIRST_ENTRY_OFFSET = 64
SINGLE_ENTRY_SIZE = 32
CHUNK_ANY = 0xFF
ACTIVE = 0xFFFFFFFE
FULL = 0xFFFFFFFC
VERSION1 = 0xFF
VERSION2 = 0xFE
def __init__(self, page_num, version, is_rsrv_page=False):
self.entry_num = 0
self.bitmap_array = array.array('B')
self.version = version
self.page_buf = bytearray(b'\xff') * Page.PAGE_PARAMS['max_size']
if not is_rsrv_page:
self.bitmap_array = self.create_bitmap_array()
self.set_header(page_num, version)
def set_header(self, page_num, version):
# set page state to active
page_header = bytearray(b'\xff') * 32
page_state_active_seq = Page.ACTIVE
struct.pack_into('<I', page_header, 0, page_state_active_seq)
# set page sequence number
struct.pack_into('<I', page_header, 4, page_num)
# set version
if version == Page.VERSION2:
page_header[8] = Page.VERSION2
elif version == Page.VERSION1:
page_header[8] = Page.VERSION1
# set header's CRC
crc_data = bytes(page_header[4:28])
crc = zlib.crc32(crc_data, 0xFFFFFFFF)
struct.pack_into('<I', page_header, 28, crc & 0xFFFFFFFF)
self.page_buf[0:len(page_header)] = page_header
def create_bitmap_array(self):
bitarray = array.array('B')
charsize = 32 # bitmaparray has 256 bits, hence 32 bytes
fill = 255 # Fill all 8 bits with 1's
bitarray.extend((fill,) * charsize)
return bitarray
def write_bitmaparray(self):
bitnum = self.entry_num * 2
byte_idx = bitnum // 8 # Find byte index in the array
bit_offset = bitnum & 7 # Find bit offset in given byte index
mask = ~(1 << bit_offset)
self.bitmap_array[byte_idx] &= mask
start_idx = Page.BITMAPARRAY_OFFSET
end_idx = Page.BITMAPARRAY_OFFSET + Page.BITMAPARRAY_SIZE_IN_BYTES
self.page_buf[start_idx:end_idx] = self.bitmap_array
def encrypt_entry(self, data_arr, tweak_arr, encr_key):
# Encrypt 32 bytes of data using AES-XTS encryption
backend = default_backend()
plain_text = codecs.decode(data_arr, 'hex')
tweak = codecs.decode(tweak_arr, 'hex')
cipher = Cipher(algorithms.AES(encr_key), modes.XTS(tweak), backend=backend)
encryptor = cipher.encryptor()
encrypted_data = encryptor.update(plain_text)
return encrypted_data
def encrypt_data(self, data_input, no_of_entries, nvs_obj):
# Set values needed for encryption and encrypt data byte wise
encr_data_to_write = bytearray()
data_len_needed = 64 # in hex
tweak_len_needed = 32 # in hex
key_len_needed = 64
init_tweak_val = '0'
init_data_val = 'f'
tweak_tmp = ''
encr_key_input = None
# Extract encryption key and tweak key from given key input
if len(nvs_obj.encr_key) == key_len_needed:
encr_key_input = nvs_obj.encr_key
else:
encr_key_input = codecs.decode(nvs_obj.encr_key, 'hex')
rel_addr = nvs_obj.page_num * Page.PAGE_PARAMS['max_size'] + Page.FIRST_ENTRY_OFFSET
if not isinstance(data_input, bytearray):
byte_arr = bytearray(b'\xff') * 32
byte_arr[0:len(data_input)] = data_input
data_input = byte_arr
data_input = binascii.hexlify(data_input)
entry_no = self.entry_num
start_idx = 0
end_idx = start_idx + 64
for _ in range(0, no_of_entries):
# Set tweak value
offset = entry_no * Page.SINGLE_ENTRY_SIZE
addr = hex(rel_addr + offset)[2:]
addr_len = len(addr)
if addr_len > 2:
if not addr_len % 2:
addr_tmp = addr
else:
addr_tmp = init_tweak_val + addr
tweak_tmp = reverse_hexbytes(addr_tmp)
tweak_val = tweak_tmp + (init_tweak_val * (tweak_len_needed - (len(tweak_tmp))))
else:
tweak_val = addr + (init_tweak_val * (tweak_len_needed - len(addr)))
# Encrypt data
data_bytes = data_input[start_idx:end_idx]
if type(data_bytes) == bytes:
data_bytes = data_bytes.decode()
data_val = data_bytes + (init_data_val * (data_len_needed - len(data_bytes)))
encr_data_ret = self.encrypt_entry(data_val, tweak_val, encr_key_input)
encr_data_to_write = encr_data_to_write + encr_data_ret
# Update values for encrypting next set of data bytes
start_idx = end_idx
end_idx = start_idx + 64
entry_no += 1
return encr_data_to_write
def write_entry_to_buf(self, data, entrycount,nvs_obj):
encr_data = bytearray()
if nvs_obj.encrypt:
encr_data_ret = self.encrypt_data(data, entrycount,nvs_obj)
encr_data[0:len(encr_data_ret)] = encr_data_ret
data = encr_data
data_offset = Page.FIRST_ENTRY_OFFSET + (Page.SINGLE_ENTRY_SIZE * self.entry_num)
start_idx = data_offset
end_idx = data_offset + len(data)
self.page_buf[start_idx:end_idx] = data
# Set bitmap array for entries in current page
for i in range(0, entrycount):
self.write_bitmaparray()
self.entry_num += 1
def set_crc_header(self, entry_struct):
crc_data = bytearray(b'28')
crc_data[0:4] = entry_struct[0:4]
crc_data[4:28] = entry_struct[8:32]
crc_data = bytes(crc_data)
crc = zlib.crc32(crc_data, 0xFFFFFFFF)
struct.pack_into('<I', entry_struct, 4, crc & 0xFFFFFFFF)
return entry_struct
def write_varlen_binary_data(self, entry_struct, ns_index, key, data, data_size, total_entry_count, encoding, nvs_obj):
chunk_start = 0
chunk_count = 0
chunk_index = Page.CHUNK_ANY
offset = 0
remaining_size = data_size
tailroom = None
while True:
chunk_size = 0
# Get the size available in current page
tailroom = (Page.PAGE_PARAMS['max_entries'] - self.entry_num - 1) * Page.SINGLE_ENTRY_SIZE
assert tailroom >= 0, 'Page overflow!!'
# Split the binary data into two and store a chunk of available size onto curr page
if tailroom < remaining_size:
chunk_size = tailroom
else:
chunk_size = remaining_size
remaining_size = remaining_size - chunk_size
# Change type of data to BLOB_DATA
entry_struct[1] = Page.BLOB_DATA
# Calculate no. of entries data chunk will require
datachunk_rounded_size = (chunk_size + 31) & ~31
datachunk_entry_count = datachunk_rounded_size // 32
datachunk_total_entry_count = datachunk_entry_count + 1 # +1 for the entry header
# Set Span
entry_struct[2] = datachunk_total_entry_count
# Update the chunkIndex
chunk_index = chunk_start + chunk_count
entry_struct[3] = chunk_index
# Set data chunk
data_chunk = data[offset:offset + chunk_size]
# Compute CRC of data chunk
struct.pack_into('<H', entry_struct, 24, chunk_size)
if type(data) != bytes:
data_chunk = bytes(data_chunk, encoding='utf8')
crc = zlib.crc32(data_chunk, 0xFFFFFFFF)
struct.pack_into('<I', entry_struct, 28, crc & 0xFFFFFFFF)
# compute crc of entry header
entry_struct = self.set_crc_header(entry_struct)
# write entry header
self.write_entry_to_buf(entry_struct, 1,nvs_obj)
# write actual data
self.write_entry_to_buf(data_chunk, datachunk_entry_count,nvs_obj)
chunk_count = chunk_count + 1
if remaining_size or (tailroom - chunk_size) < Page.SINGLE_ENTRY_SIZE:
nvs_obj.create_new_page()
self = nvs_obj.cur_page
offset = offset + chunk_size
# All chunks are stored, now store the index
if not remaining_size:
# Initialise data field to 0xff
data_array = bytearray(b'\xff') * 8
entry_struct[24:32] = data_array
# change type of data to BLOB_IDX
entry_struct[1] = Page.BLOB_IDX
# Set Span
entry_struct[2] = 1
# Update the chunkIndex
chunk_index = Page.CHUNK_ANY
entry_struct[3] = chunk_index
struct.pack_into('<I', entry_struct, 24, data_size)
entry_struct[28] = chunk_count
entry_struct[29] = chunk_start
# compute crc of entry header
entry_struct = self.set_crc_header(entry_struct)
# write last entry
self.write_entry_to_buf(entry_struct, 1,nvs_obj)
break
return entry_struct
def write_single_page_entry(self, entry_struct, data, datalen, data_entry_count, nvs_obj):
# compute CRC of data
struct.pack_into('<H', entry_struct, 24, datalen)
if type(data) != bytes:
data = bytes(data, encoding='utf8')
crc = zlib.crc32(data, 0xFFFFFFFF)
struct.pack_into('<I', entry_struct, 28, crc & 0xFFFFFFFF)
# compute crc of entry header
entry_struct = self.set_crc_header(entry_struct)
# write entry header
self.write_entry_to_buf(entry_struct, 1, nvs_obj)
# write actual data
self.write_entry_to_buf(data, data_entry_count, nvs_obj)
"""
Low-level function to write variable length data into page buffer. Data should be formatted
according to encoding specified.
"""
def write_varlen_data(self, key, data, encoding, ns_index,nvs_obj):
# Set size of data
datalen = len(data)
if datalen > Page.PAGE_PARAMS['max_old_blob_size']:
if self.version == Page.VERSION1:
raise InputError(' Input File: Size (%d) exceeds max allowed length `%s` bytes for key `%s`.'
% (datalen, Page.PAGE_PARAMS['max_old_blob_size'], key))
else:
if encoding == 'string':
raise InputError(' Input File: Size (%d) exceeds max allowed length `%s` bytes for key `%s`.'
% (datalen, Page.PAGE_PARAMS['max_old_blob_size'], key))
# Calculate no. of entries data will require
rounded_size = (datalen + 31) & ~31
data_entry_count = rounded_size // 32
total_entry_count = data_entry_count + 1 # +1 for the entry header
# Check if page is already full and new page is needed to be created right away
if self.entry_num >= Page.PAGE_PARAMS['max_entries']:
raise PageFullError()
elif (self.entry_num + total_entry_count) >= Page.PAGE_PARAMS['max_entries']:
if not (self.version == Page.VERSION2 and encoding in ['hex2bin', 'binary', 'base64']):
raise PageFullError()
# Entry header
entry_struct = bytearray(b'\xff') * 32
# Set Namespace Index
entry_struct[0] = ns_index
# Set Span
if self.version == Page.VERSION2:
if encoding == 'string':
entry_struct[2] = data_entry_count + 1
# Set Chunk Index
chunk_index = Page.CHUNK_ANY
entry_struct[3] = chunk_index
else:
entry_struct[2] = data_entry_count + 1
# set key
key_array = b'\x00' * 16
entry_struct[8:24] = key_array
entry_struct[8:8 + len(key)] = key.encode()
# set Type
if encoding == 'string':
entry_struct[1] = Page.SZ
elif encoding in ['hex2bin', 'binary', 'base64']:
entry_struct[1] = Page.BLOB
if self.version == Page.VERSION2 and (encoding in ['hex2bin', 'binary', 'base64']):
entry_struct = self.write_varlen_binary_data(entry_struct,ns_index,key,data,
datalen,total_entry_count, encoding, nvs_obj)
else:
self.write_single_page_entry(entry_struct, data, datalen, data_entry_count, nvs_obj)
""" Low-level function to write data of primitive type into page buffer. """
def write_primitive_data(self, key, data, encoding, ns_index,nvs_obj):
# Check if entry exceeds max number of entries allowed per page
if self.entry_num >= Page.PAGE_PARAMS['max_entries']:
raise PageFullError()
entry_struct = bytearray(b'\xff') * 32
entry_struct[0] = ns_index # namespace index
entry_struct[2] = 0x01 # Span
chunk_index = Page.CHUNK_ANY
entry_struct[3] = chunk_index
# write key
key_array = b'\x00' * 16
entry_struct[8:24] = key_array
entry_struct[8:8 + len(key)] = key.encode()
if encoding == 'u8':
entry_struct[1] = Page.U8
struct.pack_into('<B', entry_struct, 24, data)
elif encoding == 'i8':
entry_struct[1] = Page.I8
struct.pack_into('<b', entry_struct, 24, data)
elif encoding == 'u16':
entry_struct[1] = Page.U16
struct.pack_into('<H', entry_struct, 24, data)
elif encoding == 'i16':
entry_struct[1] = Page.I16
struct.pack_into('<h', entry_struct, 24, data)
elif encoding == 'u32':
entry_struct[1] = Page.U32
struct.pack_into('<I', entry_struct, 24, data)
elif encoding == 'i32':
entry_struct[1] = Page.I32
struct.pack_into('<i', entry_struct, 24, data)
elif encoding == 'u64':
entry_struct[1] = Page.U64
struct.pack_into('<Q', entry_struct, 24, data)
elif encoding == 'i64':
entry_struct[1] = Page.I64
struct.pack_into('<q', entry_struct, 24, data)
# Compute CRC
crc_data = bytearray(b'28')
crc_data[0:4] = entry_struct[0:4]
crc_data[4:28] = entry_struct[8:32]
crc_data = bytes(crc_data)
crc = zlib.crc32(crc_data, 0xFFFFFFFF)
struct.pack_into('<I', entry_struct, 4, crc & 0xFFFFFFFF)
# write to file
self.write_entry_to_buf(entry_struct, 1,nvs_obj)
""" Get page buffer data of a given page """
def get_data(self):
return self.page_buf
"""
NVS class encapsulates all NVS specific operations to create a binary with given key-value pairs.
Binary can later be flashed onto device via a flashing utility.
"""
class NVS(object):
def __init__(self, fout, input_size, version, encrypt=False, key_input=None):
self.size = input_size
self.encrypt = encrypt
self.encr_key = None
self.namespace_idx = 0
self.page_num = -1
self.pages = []
self.version = version
self.fout = fout
if self.encrypt:
self.encr_key = key_input
self.cur_page = self.create_new_page(version)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None and exc_value is None:
# Create pages for remaining available size
while True:
try:
self.create_new_page()
except InsufficientSizeError:
self.size = None
# Creating the last reserved page
self.create_new_page(is_rsrv_page=True)
break
result = self.get_binary_data()
self.fout.write(result)
def create_new_page(self, version=None, is_rsrv_page=False):
# Set previous page state to FULL before creating new page
if self.pages:
curr_page_state = struct.unpack('<I', self.cur_page.page_buf[0:4])[0]
if curr_page_state == Page.ACTIVE:
page_state_full_seq = Page.FULL
struct.pack_into('<I', self.cur_page.page_buf, 0, page_state_full_seq)
# Set version for NVS binary generated
version = self.version
# Update available size as each page is created
if self.size == 0:
            raise InsufficientSizeError('Error: Size parameter is less than the size of data in csv. Please increase size.')
if not is_rsrv_page:
self.size = self.size - Page.PAGE_PARAMS['max_size']
self.page_num += 1
# Set version for each page and page header
new_page = Page(self.page_num, version, is_rsrv_page)
self.pages.append(new_page)
self.cur_page = new_page
return new_page
"""
Write namespace entry and subsequently increase namespace count so that all upcoming entries
will be mapped to a new namespace.
"""
def write_namespace(self, key):
self.namespace_idx += 1
try:
self.cur_page.write_primitive_data(key, self.namespace_idx, 'u8', 0,self)
except PageFullError:
new_page = self.create_new_page()
new_page.write_primitive_data(key, self.namespace_idx, 'u8', 0,self)
"""
Write key-value pair. Function accepts value in the form of ascii character and converts
it into appropriate format before calling Page class's functions to write entry into NVS format.
Function handles PageFullError and creates a new page and re-invokes the function on a new page.
We don't have to guard re-invocation with try-except since no entry can span multiple pages.
"""
def write_entry(self, key, value, encoding):
if encoding == 'hex2bin':
value = value.strip()
if len(value) % 2 != 0:
raise InputError('%s: Invalid data length. Should be multiple of 2.' % key)
value = binascii.a2b_hex(value)
if encoding == 'base64':
value = binascii.a2b_base64(value)
if encoding == 'string':
if type(value) == bytes:
value = value.decode()
value += '\0'
encoding = encoding.lower()
varlen_encodings = ['string', 'binary', 'hex2bin', 'base64']
primitive_encodings = ['u8', 'i8', 'u16', 'i16', 'u32', 'i32', 'u64', 'i64']
if encoding in varlen_encodings:
try:
self.cur_page.write_varlen_data(key, value, encoding, self.namespace_idx,self)
except PageFullError:
new_page = self.create_new_page()
new_page.write_varlen_data(key, value, encoding, self.namespace_idx,self)
elif encoding in primitive_encodings:
try:
self.cur_page.write_primitive_data(key, int(value), encoding, self.namespace_idx,self)
except PageFullError:
new_page = self.create_new_page()
new_page.write_primitive_data(key, int(value), encoding, self.namespace_idx,self)
else:
raise InputError('%s: Unsupported encoding' % encoding)
""" Return accumulated data of all pages """
def get_binary_data(self):
data = bytearray()
for page in self.pages:
data += page.get_data()
return data
class PageFullError(RuntimeError):
"""
Represents error when current page doesn't have sufficient entries left
to accommodate current request
"""
def __init__(self):
super(PageFullError, self).__init__()
class InputError(RuntimeError):
"""
Represents error on the input
"""
def __init__(self, e):
print('\nError:')
super(InputError, self).__init__(e)
class InsufficientSizeError(RuntimeError):
"""
Represents error when NVS Partition size given is insufficient
    to accommodate the data in the given csv file
"""
def __init__(self, e):
super(InsufficientSizeError, self).__init__(e)
def nvs_open(result_obj, input_size, version=None, is_encrypt=False, key=None):
""" Wrapper to create and NVS class object. This object can later be used to set key-value pairs
:param result_obj: File/Stream object to dump resultant binary. If data is to be dumped into memory, one way is to use BytesIO object
:param input_size: Size of Partition
:return: NVS class instance
"""
return NVS(result_obj, input_size, version, encrypt=is_encrypt, key_input=key)
def write_entry(nvs_instance, key, datatype, encoding, value):
""" Wrapper to set key-value pair in NVS format
:param nvs_instance: Instance of an NVS class returned by nvs_open()
:param key: Key of the data
:param datatype: Data type. Valid values are "file", "data" and "namespace"
:param encoding: Data encoding. Valid values are "u8", "i8", "u16", "i16", "u32", "i32", "u64", "i64", "string", "binary", "hex2bin" and "base64"
:param value: Data value in ascii encoded string format for "data" datatype and filepath for "file" datatype
:return: None
"""
if datatype == 'file':
abs_file_path = value
if os.path.isabs(value) is False:
script_dir = os.getcwd()
abs_file_path = os.path.join(script_dir, value)
with open(abs_file_path, 'rb') as f:
value = f.read()
if datatype == 'namespace':
nvs_instance.write_namespace(key)
else:
nvs_instance.write_entry(key, value, encoding)
def nvs_close(nvs_instance):
""" Wrapper to finish writing to NVS and write data to file/stream object provided to nvs_open method
:param nvs_instance: Instance of NVS class returned by nvs_open()
:return: None
"""
nvs_instance.__exit__(None, None, None)
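# A minimal usage sketch of the wrapper API above (illustrative only, never
# invoked by this tool): it writes one hypothetical namespace plus one 'u8'
# value into an in-memory BytesIO target, as suggested in nvs_open()'s
# docstring, and returns the resulting partition image as bytes.
def _example_wrapper_usage():
    from io import BytesIO
    buf = BytesIO()
    nvs_obj = nvs_open(buf, check_size('0x4000'), version=Page.VERSION2)
    write_entry(nvs_obj, 'storage', 'namespace', '', '')   # open a namespace
    write_entry(nvs_obj, 'boot_count', 'data', 'u8', '7')  # one primitive entry
    nvs_close(nvs_obj)                                     # finalize and flush to buf
    return buf.getvalue()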
def check_size(size):
'''
Checks for input partition size
:param size: Input partition size
'''
try:
# Set size
input_size = int(size, 0)
if input_size % 4096 != 0:
sys.exit('Size of partition must be multiple of 4096')
# Update size as a page needs to be reserved of size 4KB
input_size = input_size - Page.PAGE_PARAMS['max_size']
if input_size < (2 * Page.PAGE_PARAMS['max_size']):
sys.exit('Minimum NVS partition size needed is 0x3000 bytes.')
return input_size
except Exception as e:
print(e)
sys.exit(0)
def set_target_filepath(outdir, filepath):
'''
Set target file path: <outdir>/<filepath>
:param outdir: Target output dir to store files
:param filepath: Path of target file
'''
bin_ext = '.bin'
# Expand if tilde(~) provided in path
outdir = os.path.expanduser(outdir)
if filepath:
key_file_name, ext = os.path.splitext(filepath)
if not ext:
filepath = key_file_name + bin_ext
elif bin_ext not in ext:
sys.exit('Error: `%s`. Only `%s` extension allowed.' % (filepath, bin_ext))
# Create dir if does not exist
if not (os.path.isdir(outdir)):
distutils.dir_util.mkpath(outdir)
filedir, filename = os.path.split(filepath)
filedir = os.path.join(outdir,filedir,'')
if filedir and not os.path.isdir(filedir):
distutils.dir_util.mkpath(filedir)
if os.path.isabs(filepath):
if not outdir == os.getcwd():
print('\nWarning: `%s` \n\t==> absolute path given so outdir is ignored for this file.' % filepath)
# Set to empty as outdir is ignored here
outdir = ''
# Set full path - outdir + filename
filepath = os.path.join(outdir, '') + filepath
return outdir, filepath
def encrypt(args):
'''
Generate encrypted NVS Partition
:param args: Command line arguments given
'''
key = None
bin_ext = '.bin'
check_size(args.size)
if (args.keygen is False) and (not args.inputkey):
sys.exit('Error. --keygen or --inputkey argument needed.')
elif args.keygen and args.inputkey:
sys.exit('Error. --keygen and --inputkey both are not allowed.')
elif not args.keygen and args.keyfile:
print('\nWarning:','--inputkey argument is given. --keyfile argument will be ignored...')
if args.inputkey:
# Check if key file has .bin extension
filename, ext = os.path.splitext(args.inputkey)
if bin_ext not in ext:
sys.exit('Error: `%s`. Only `%s` extension allowed.' % (args.inputkey, bin_ext))
key = bytearray()
with open(args.inputkey, 'rb') as key_f:
key = key_f.read(64)
# Generate encrypted NVS Partition
generate(args, is_encr_enabled=True, encr_key=key)
def decrypt_data(data_input, decr_key, page_num, entry_no, entry_size):
'''
Decrypt NVS data entry
'''
page_max_size = 4096
first_entry_offset = 64
init_tweak_val = '0'
tweak_len_needed = 32 # in hex
tweak_tmp = ''
data_input = binascii.hexlify(data_input)
rel_addr = page_num * page_max_size + first_entry_offset
# Set tweak value
offset = entry_no * entry_size
addr = hex(rel_addr + offset)[2:]
addr_len = len(addr)
if addr_len > 2:
if not addr_len % 2:
addr_tmp = addr
else:
addr_tmp = init_tweak_val + addr
tweak_tmp = reverse_hexbytes(addr_tmp)
tweak_val = tweak_tmp + (init_tweak_val * (tweak_len_needed - (len(tweak_tmp))))
else:
tweak_val = addr + (init_tweak_val * (tweak_len_needed - len(addr)))
if type(data_input) == bytes:
data_input = data_input.decode()
# Decrypt 32 bytes of data using AES-XTS decryption
backend = default_backend()
plain_text = codecs.decode(data_input, 'hex')
tweak = codecs.decode(tweak_val, 'hex')
cipher = Cipher(algorithms.AES(decr_key), modes.XTS(tweak), backend=backend)
decryptor = cipher.decryptor()
decrypted_data = decryptor.update(plain_text)
return decrypted_data
def decrypt(args):
'''
Decrypt encrypted NVS Partition
:param args: Command line arguments given
'''
bin_ext = '.bin'
nvs_read_bytes = 32
decrypted_entry_no = 0
file_entry_no = 0
page_num = 0
page_max_size = 4096
start_entry_offset = 0
empty_data_entry = bytearray(b'\xff') * nvs_read_bytes
# Check if key file has .bin extension
input_files = [args.input, args.key, args.output]
for filepath in input_files:
filename, ext = os.path.splitext(filepath)
if bin_ext not in ext:
sys.exit('Error: `%s`. Only `%s` extension allowed.' % (filepath, bin_ext))
with open(args.key,'rb') as decr_key_file:
decr_key = decr_key_file.read(64)
args.outdir, args.output = set_target_filepath(args.outdir, args.output)
output_buf = bytearray(b'\xff')
with open(args.input, 'rb') as input_file, open(args.output,'wb') as output_file:
while True:
if file_entry_no == 128:
decrypted_entry_no = 0
file_entry_no = 0
page_num += 1
data_entry = input_file.read(nvs_read_bytes)
if not data_entry:
break
if data_entry != empty_data_entry and file_entry_no not in [0,1]:
data_entry = decrypt_data(data_entry, decr_key, page_num, decrypted_entry_no, nvs_read_bytes)
decrypted_entry_no += 1
write_entry_no = ((page_num * page_max_size) + file_entry_no)
start_idx = start_entry_offset + (write_entry_no * nvs_read_bytes)
end_idx = nvs_read_bytes
output_buf[start_idx:end_idx] = data_entry
file_entry_no += 1
start_entry_offset += nvs_read_bytes
output_file.write(output_buf)
print('\nCreated NVS decrypted binary: ===>', args.output)
def generate_key(args):
'''
Generate encryption keys
:param args: Command line arguments given
'''
page_max_size = 4096
keys_dir = 'keys'
output_keyfile = None
bin_ext = '.bin'
if not args.keyfile:
timestamp = datetime.datetime.now().strftime('%m-%d_%H-%M')
args.keyfile = 'keys-' + timestamp + bin_ext
keys_outdir = os.path.join(args.outdir,keys_dir, '')
# Create keys/ dir in <outdir> if does not exist
if not (os.path.isdir(keys_outdir)):
distutils.dir_util.mkpath(keys_outdir)
keys_outdir, output_keyfile = set_target_filepath(keys_outdir, args.keyfile)
key = ''.join(random.choice('0123456789abcdef') for _ in range(128)).strip()
encr_key_bytes = codecs.decode(key, 'hex')
key_len = len(encr_key_bytes)
keys_buf = bytearray(b'\xff') * page_max_size
keys_buf[0:key_len] = encr_key_bytes
crc_data = keys_buf[0:key_len]
crc_data = bytes(crc_data)
crc = zlib.crc32(crc_data, 0xFFFFFFFF)
struct.pack_into('<I', keys_buf, key_len, crc & 0xFFFFFFFF)
with open(output_keyfile, 'wb') as output_keys_file:
output_keys_file.write(keys_buf)
print('\nCreated encryption keys: ===> ', output_keyfile)
return key
def generate(args, is_encr_enabled=False, encr_key=None):
'''
Generate NVS Partition
:param args: Command line arguments given
:param is_encr_enabled: Encryption enabled/disabled
:param encr_key: Key to encrypt NVS partition
'''
is_dir_new = False
bin_ext = '.bin'
input_size = check_size(args.size)
if args.version == 1:
args.version = Page.VERSION1
elif args.version == 2:
args.version = Page.VERSION2
# Check if key file has .bin extension
filename, ext = os.path.splitext(args.output)
if bin_ext not in ext:
sys.exit('Error: `%s`. Only `.bin` extension allowed.' % args.output)
args.outdir, args.output = set_target_filepath(args.outdir, args.output)
if is_encr_enabled and not encr_key:
encr_key = generate_key(args)
with open(args.input, 'rt', encoding='utf8') as input_file,\
open(args.output, 'wb') as output_file,\
nvs_open(output_file, input_size, args.version, is_encrypt=is_encr_enabled, key=encr_key) as nvs_obj:
if nvs_obj.version == Page.VERSION1:
version_set = VERSION1_PRINT
else:
version_set = VERSION2_PRINT
print('\nCreating NVS binary with version:', version_set)
line = input_file.readline().strip()
# Comments are skipped
while line.startswith('#'):
line = input_file.readline().strip()
if not isinstance(line, str):
line = line.encode('utf-8')
header = line.split(',')
while True:
line = input_file.readline().strip()
if not isinstance(line, str):
line = line.encode('utf-8')
value = line.split(',')
if len(value) == 1 and '' in value:
break
data = dict(zip_longest(header, value))
try:
# Check key length
if len(data['key']) > 15:
raise InputError('Length of key `{}` should be <= 15 characters.'.format(data['key']))
write_entry(nvs_obj, data['key'], data['type'], data['encoding'], data['value'])
except InputError as e:
print(e)
filedir, filename = os.path.split(args.output)
if filename:
print('\nWarning: NVS binary not created...')
os.remove(args.output)
if is_dir_new and not filedir == os.getcwd():
print('\nWarning: Output dir not created...')
os.rmdir(filedir)
sys.exit(-2)
print('\nCreated NVS binary: ===>', args.output)
def main():
parser = argparse.ArgumentParser(description='\nESP NVS partition generation utility', formatter_class=argparse.RawTextHelpFormatter)
subparser = parser.add_subparsers(title='Commands',
dest='command',
help='\nRun nvs_partition_gen.py {command} -h for additional help\n\n')
parser_gen = subparser.add_parser('generate',
help='Generate NVS partition',
formatter_class=argparse.RawTextHelpFormatter)
parser_gen.set_defaults(func=generate)
parser_gen.add_argument('input',
default=None,
help='Path to CSV file to parse')
parser_gen.add_argument('output',
default=None,
help='Path to output NVS binary file')
parser_gen.add_argument('size',
default=None,
help='Size of NVS partition in bytes\
\n(must be multiple of 4096)')
parser_gen.add_argument('--version',
choices=[1,2],
default=2,
type=int,
help='''Set multipage blob version.\
\nVersion 1 - Multipage blob support disabled.\
\nVersion 2 - Multipage blob support enabled.\
\nDefault: Version 2''')
parser_gen.add_argument('--outdir',
default=os.getcwd(),
help='Output directory to store files created\
\n(Default: current directory)')
parser_gen_key = subparser.add_parser('generate-key',
help='Generate keys for encryption',
formatter_class=argparse.RawTextHelpFormatter)
parser_gen_key.set_defaults(func=generate_key)
parser_gen_key.add_argument('--keyfile',
default=None,
help='Path to output encryption keys file')
parser_gen_key.add_argument('--outdir',
default=os.getcwd(),
help='Output directory to store files created.\
\n(Default: current directory)')
parser_encr = subparser.add_parser('encrypt',
help='Generate NVS encrypted partition',
formatter_class=argparse.RawTextHelpFormatter)
parser_encr.set_defaults(func=encrypt)
parser_encr.add_argument('input',
default=None,
help='Path to CSV file to parse')
parser_encr.add_argument('output',
default=None,
help='Path to output NVS binary file')
parser_encr.add_argument('size',
default=None,
help='Size of NVS partition in bytes\
\n(must be multiple of 4096)')
parser_encr.add_argument('--version',
choices=[1,2],
default=2,
type=int,
help='''Set multipage blob version.\
\nVersion 1 - Multipage blob support disabled.\
\nVersion 2 - Multipage blob support enabled.\
\nDefault: Version 2''')
parser_encr.add_argument('--keygen',
action='store_true',
default=False,
help='Generates key for encrypting NVS partition')
parser_encr.add_argument('--keyfile',
default=None,
help='Path to output encryption keys file')
parser_encr.add_argument('--inputkey',
default=None,
help='File having key for encrypting NVS partition')
parser_encr.add_argument('--outdir',
default=os.getcwd(),
help='Output directory to store files created.\
\n(Default: current directory)')
parser_decr = subparser.add_parser('decrypt',
help='Decrypt NVS encrypted partition',
formatter_class=argparse.RawTextHelpFormatter)
parser_decr.set_defaults(func=decrypt)
parser_decr.add_argument('input',
default=None,
help='Path to encrypted NVS partition file to parse')
parser_decr.add_argument('key',
default=None,
help='Path to file having keys for decryption')
parser_decr.add_argument('output',
default=None,
help='Path to output decrypted binary file')
parser_decr.add_argument('--outdir',
default=os.getcwd(),
help='Output directory to store files created.\
\n(Default: current directory)')
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
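# Illustrative invocations (file names and sizes below are placeholders, shown
# only to summarize the sub-commands defined above):
#   python nvs_partition_gen.py generate sample.csv sample.bin 0x3000
#   python nvs_partition_gen.py generate-key --keyfile encr_keys.bin
#   python nvs_partition_gen.py encrypt sample.csv sample_encr.bin 0x3000 --keygen
#   python nvs_partition_gen.py decrypt sample_encr.bin keys/encr_keys.bin sample_decr.bin
# The CSV parsed by `generate`/`encrypt` starts with the header line
# `key,type,encoding,value` (the keys read by write_entry() above), e.g.:
#   key,type,encoding,value
#   storage,namespace,,
#   boot_count,data,u8,7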
|
py | 1a54b28acedd9ff633d1db4868301520a6ba9dcb | import csv
import sys
#This program was written in Python 3.6.3 by Henry Caushi. You are free to use it for any reason, without my permission, without having to inform myself or anyone else
#This program was written to aid other programs by providing a list of all event IDs so that each appears only once
#List of all event IDs
list_ids = []
filename = "Higgs_Hunters_data_ALL.csv"
#Open the data file
f = open(filename,"r")
reader = csv.reader(f)
for row in reader:
#If an event ID is not already added to the list, add it to the list
if row[3] not in list_ids:
list_ids.append(row[3])
f.close()
#Open a new file, and dump the event IDs
f = open("List IDs.txt","w")
for row in list_ids:
f.write(row+"\n")
f.close()
|
py | 1a54b2d585824fe4ae3901e30d6faf682affc7e0 | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 21500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
|
py | 1a54b2d9ff285fe4e246dab55e01346336899a5d | import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
message = Mail(
from_email='[email protected]',
to_emails='[email protected]',
subject='Sending with Twilio SendGrid is Fun',
html_content='<strong>and easy to do anywhere, even with Python</strong>')
try:
sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
    print(str(e))  # Exception objects have no .message attribute in Python 3
|
py | 1a54b329c79178875b7a9c33abd65a56470386e2 | """
Density fitting and interpolation classes
"""
import numpy as np
from scipy.optimize import leastsq, least_squares, curve_fit
from scipy.interpolate import PchipInterpolator, CubicSpline
import pdb
# Idealised models
def sech(z):
return 2./(np.exp(z) + np.exp(-z))
def ideal_rho_tanh(z, rho0, drho, dp, L):
#return drho/2 - drho/2*np.tanh(dp + dp*z/L )
return drho/2 * (1 - np.tanh(dp + dp*z/L ) ) + rho0
#return drho/2 * (1 - np.tanh(z/L + 1 ) )
def lamb_tanh_rho(z, rho0, dp, z1, h1, H=None):
# Assumes z is negative down
if H is None:
H = z.min()
zhat = z-H
return rho0*(1 - dp*(1 + np.tanh( (zhat-z1)/h1) ) )
def single_tanh_rho(z, rho0, rho1, z1, h1,):
#return rho0 + rho1/2*(1-np.tanh( (z+z1)/h1))
return rho0 - rho1*np.tanh((z+z1)/h1)
def double_tanh_rho_orig(z, rho0, rho1, rho2, z1, z2, h1, h2):
"""
Seven parameter model
"""
return rho0 + rho1/2*(1-np.tanh( (z+z1)/h1)) +\
rho2/2*(1-np.tanh( (z+z2)/h2))
def double_tanh_rho(z, rho0, rho1, rho2, z1, z2, h1, h2):
"""
Seven parameter model
"""
#return rho0 + rho1/2*(1-np.tanh( (z+z1)/h1)) +\
# rho2/2*(1-np.tanh( (z+z2)/h2))
return rho0 - rho1*np.tanh((z+z1)/h1) -\
rho2*np.tanh((z+z2)/h2)
def double_tanh_rho_new(z, rho0, rho1, z1, z2, h1, h2):
"""
Six parameter model proposed by Andrew Manderson and Ed Cripps, UWA Stats
"""
return rho0 - rho1* (np.tanh((z+z1)/h1) +\
np.tanh((z+z1+z2)/h2))
def fdiff(coeffs, rho, z, density_func):
if density_func=='double_tanh':
soln = double_tanh_rho(z, *coeffs)
elif density_func=='double_tanh_new':
soln = double_tanh_rho_new(z, *coeffs)
elif density_func=='single_tanh':
soln = single_tanh_rho(z, *coeffs)
else:
soln = density_func(z, coeffs)
#print coeffs[-4], coeffs[-3], coeffs[-2], coeffs[-1]
return rho - soln
def fit_rho(rho, z, density_func='single_tanh', errmax=1.0,
bounds=None, initguess=None):
"""
Fits an analytical density profile to data
Uses a robust linear regression
Inputs:
---
rho: vector of density [Nz]
z : depth [Nz] w/ negative values i.e. 0 at surface, positive: up
Returns:
---
rhofit: best fit function at z locations
f0: tuple with analytical parameters
"""
status = 0
rho0 = rho.min()
#rhotry = rho
    # Use "least_squares" as it allows bounds on fitted parameters to be input
rhotry = rho # - rho0
H = np.abs(z).max()
if density_func=='double_tanh':
initguess = [rho0, 0.01, 0.01, 1., 2., H/10., H/10.] # double tanh guess
#bounds = [(0,10.),(0,10.),(0,H),(0,H),(0,H/2),(0,H/2)]
bounds = [(rho0-5,0.,0.,0.,0.,H/20.,H/20.),(rho0+5,10.,10.,H,H,H/2,H/2)]
elif density_func=='double_tanh_new':
initguess = [rho0, 0.01, 1., 2., H/10., H/10.] # double tanh guess
#bounds = [(0,10.),(0,10.),(0,H),(0,H),(0,H/2),(0,H/2)]
bounds = [(rho0-5,0.,0.,0.,H/20.,H/20.),(rho0+5,10.,H,H,H/2,H/2)]
elif density_func=='single_tanh':
initguess = [rho0, 1e-3, 40., 100.] # single stratification function
bounds = [(rho0-5,0.,0.,0.),(rho0+5,10.,2*H,2*H)]
#else: User must set bounds
soln =\
least_squares(fdiff, initguess, args=(rhotry, z, density_func), \
bounds=bounds,\
xtol=1e-10,
ftol=1e-10,
loss='cauchy', f_scale=0.1, # Robust
verbose=0,
)
f0 = soln['x']
#soln = leastsq(fdiff, initguess, args=(rhotry, z), \
# full_output=True)
#f0 = soln[0]
# This could be changed to pass a function directly...
if density_func=='double_tanh':
rhofit = double_tanh_rho(z, *f0)# + rho0
elif density_func=='double_tanh_new':
rhofit = double_tanh_rho_new(z, *f0)# + rho0
elif density_func=='single_tanh':
rhofit = single_tanh_rho(z, *f0)
else:
rhofit = density_func(z, f0)
err = np.linalg.norm(rhofit - rhotry)
if err > errmax:
print('Warning in density fit -- large error: %f'%err)
status = -1
#raise Exception('maximum fitting error exceeded')
return rhofit, f0, status
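# Illustrative-only usage sketch (not part of the original module): fit a noisy
# single-tanh profile with fit_rho. The depths, density values and noise level
# below are made-up example numbers, not values from any real dataset.
def _example_fit_rho():
    z = np.linspace(-100., 0., 50)  # depth, negative down, 0 at the surface
    rho_true = single_tanh_rho(z, 1024., 1.5, 40., 10.)
    rho_obs = rho_true + 0.05*np.random.randn(z.size)  # add synthetic noise
    rhofit, f0, status = fit_rho(rho_obs, z, density_func='single_tanh')
    return rhofit, f0, status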
class FitDensity(object):
"""
Interpolate by fitting an analytical profile first
"""
density_func = 'single_tanh'
bounds = None
initguess = None
def __init__(self, rho, z, **kwargs):
self.__dict__.update(**kwargs)
self.rho0 = rho.min()
rhofit, self.f0, self.status = fit_rho(rho, z, density_func=self.density_func,
bounds=self.bounds, initguess=self.initguess)
def __call__(self, Z):
f0 = self.f0
if self.density_func=='double_tanh':
return double_tanh_rho(Z, *f0)# + self.rho0
elif self.density_func=='double_tanh_new':
return double_tanh_rho_new(Z, *f0)# + self.rho0
elif self.density_func=='single_tanh':
return single_tanh_rho(Z, *f0)
else:
return self.density_func(Z, f0)
class InterpDensity(object):
"""
Wrapper class for pchip function
"""
density_func = None
def __init__(self, rho ,z, **kwargs):
self.__dict__.update(**kwargs)
self.Fi = PchipInterpolator(z, rho, axis=0, extrapolate=True)
#self.Fi = CubicSpline(z, rho, axis=0, bc_type='natural')
def __call__(self, Z):
return self.Fi(Z)
class ChebyFitDensity(object):
"""
Wrapper class for Chebyshev Polynomial fit
"""
order=None
def __init__(self, rho ,z, **kwargs):
self.__dict__.update(**kwargs)
nz = z.size
if self.order is None:
self.order = int(max(3,nz -2))
self.f0 = coefs = np.polynomial.chebyshev.chebfit(z, rho, self.order)
def __call__(self, Z):
return np.polynomial.chebyshev.chebval(Z, self.f0)
|
py | 1a54b4b17dd80029a954b2dc6c323d71be8f8290 |
import os
# check whether the file exists
if os.path.isfile("/tmp/abc.txt"):
    print("exists")
else:
    print("does not exist")
# list a directory and check the type of each entry
dl = os.listdir("/tmp/")
print(dl)
for f in dl:
    path = os.path.join("/tmp", f)  # listdir returns bare names, so build the full path before checking
    if os.path.isfile(path):
        print("\"" + f + "\" is a file")
    elif os.path.isdir(path):
        print("\"" + f + "\" is a directory")
    else:
        print("\"" + f + "\" is something else")
# can also walk recursively through subdirectories
for currDir, dirs, files in os.walk('/tmp'):
    print("subdirectories of \"" + currDir + "\":")
    for d in dirs:
        print("  " + os.path.join(currDir, d))
    print("files in \"" + currDir + "\":")
    for f in files:
        print("  " + os.path.join(currDir, f))
# rename a file
os.rename("/tmp/a.txt", "/tmp/b.txt")
# delete a file
os.remove("/tmp/b.txt")
|
py | 1a54b4b9d7ef3580b31e62693083fa59aae2c3e5 | # -*- coding: utf-8 -*-
'''
Initialization entry point dedicated to the wapi application
'''
'''
Unified request interception and unified error handling
'''
from api.interceptors.Auth import *
from api.interceptors.ErrorHandler import *
'''
Blueprint setup: apply blueprint configuration to all URLs
'''
from api.controllers.route import *
|
py | 1a54b4cb29a262e674555f2cbbc5af659809474c | import math
import json
import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import Sampler
class DistributedClassAwareSampler(Sampler):
def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None, seed=1, sample_weight_path=None):
_rank, _num_replicas = get_dist_info()
if num_replicas is None:
num_replicas = _num_replicas
if rank is None:
rank = _rank
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.seed = seed if seed is not None else 1
assert hasattr(self.dataset, 'flag')
assert len(self.dataset.flag) == len(self.dataset)
self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)
with open(sample_weight_path, "r") as f:
sample_weight = json.load(f)
self.sample_weights = torch.tensor(
list(sample_weight.values()), dtype=torch.float)
self.indices = None
self.set_epoch(-1)
def __iter__(self):
return iter(self.indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.multinomial(
self.sample_weights, len(self.dataset), generator=g, replacement=True
).numpy()
self.flag = self.dataset.flag[indices]
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, j in enumerate(self.group_sizes):
self.num_samples += math.ceil(self.group_sizes[i] / self.samples_per_gpu /
self.num_replicas) * self.samples_per_gpu
self.total_size = self.num_samples * self.num_replicas
indices_group = []
for i, size in enumerate(self.group_sizes):
if size > 0:
flag_i_indice = np.where(self.flag == i)[0]
assert len(flag_i_indice) == size
indice = indices[flag_i_indice].tolist()
extra = math.ceil(
size / self.samples_per_gpu / self.num_replicas
) * self.samples_per_gpu * self.num_replicas - len(indice)
tmp = indice.copy()
for _ in range(extra // size):
indice.extend(tmp)
indice.extend(tmp[:extra % size])
indices_group.extend(indice)
assert len(indices_group) == self.total_size
indices_group = [
indices_group[j] for i in list(
torch.randperm(
len(indices_group) // self.samples_per_gpu, generator=g))
for j in range(i * self.samples_per_gpu, (i + 1) *
self.samples_per_gpu)
]
offset = self.num_samples * self.rank
indices_group = indices_group[offset:offset + self.num_samples]
assert len(indices_group) == self.num_samples
self.indices = indices_group
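# Illustrative-only usage sketch (not part of the original module): the toy
# dataset and the temporary weights file below are assumptions made purely to
# show the expected inputs -- a dataset exposing a per-sample `flag` array and
# a JSON file mapping sample index -> sampling weight.
def _example_class_aware_sampler(weight_path='/tmp/sample_weights.json'):
    class _ToyDataset:
        def __init__(self, n):
            self.flag = np.zeros(n, dtype=np.uint8)  # single aspect-ratio group
        def __len__(self):
            return len(self.flag)
    dataset = _ToyDataset(8)
    with open(weight_path, 'w') as f:
        json.dump({str(i): 1.0 for i in range(len(dataset))}, f)
    sampler = DistributedClassAwareSampler(
        dataset, samples_per_gpu=2, num_replicas=1, rank=0, seed=0,
        sample_weight_path=weight_path)
    sampler.set_epoch(0)
    return list(iter(sampler))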
|
py | 1a54b55514c4b875944c99f0218ae615d45aaa78 | #!/usr/bin/env python
""" get_result.py """
import click
from urllib.parse import parse_qsl, urljoin, urlparse
import requests
from bs4 import BeautifulSoup
DAUM_DICT_HOST = "https://dic.daum.net/"
LANG = 'eng'
COMMAND_SET = {
'a': 'antonym',
'e': 'example sentences',
's': 'synonym',
'q': 'quit'
}
COMMANDS = "more: " + ' | '.join(
[f'{COMMAND_SET[key]}({key})' for key in COMMAND_SET]
)
def example_url(wordid: str, page: int = 1):
example_host = f'{DAUM_DICT_HOST}/word/view_example_more.do'
qsl = f'?wordid={wordid}&summaryid=etc&page={page}'
return urljoin(example_host, qsl)
def parse(html: str):
bs = BeautifulSoup(html, 'html.parser')
content = bs.findAll('meta', attrs={'property': 'og:description'})[0]\
.get('content')
if not content:
return 'No results found.', ''
try:
redir_url = bs.findAll('meta', attrs={'http-equiv': 'Refresh'})[0]\
.get('content').split('URL=')[1]
except IndexError:
# the result comes with polysemic words
redir_url = bs.findAll('a', attrs={'txt_cleansch'})[0].attrs['href']
dic_query = urlparse(redir_url).query
wordid = dict(parse_qsl(dic_query))['wordid']
return content, wordid
def parse_detail(html: str, wordid: str, category: str):
""" parse once more to get the detailed view """
bs = BeautifulSoup(html, 'html.parser')
id_set = {
'antonym': 'OPPOSITE_WORD',
'synonym': 'SIMILAR_WORD'
}
if category not in id_set.keys():
pass
else:
words = bs.find(id=id_set[category])
if not words:
# there's no antonym of this keyword
return 'No results found.'
tags = words.findAll('li')
result = [
f"{tag.find('a').text}: {tag.find('span').text}" for tag in tags
]
return '\n'.join(result)
def parse_example(url: str):
""" extract the example sentences """
html = requests.get(url).text
bs = BeautifulSoup(html, 'html.parser')
list_ = bs.findAll('li')
sentences = []
for l in list_:
eng_phrase = l.find('span', attrs={'txt_example'}).text.split('\n')[0]
mean_phrase = l.find('span', attrs={'mean_example'}).text
phrase_set = f'{eng_phrase}\n -> {mean_phrase}\n\n'
sentences.append(phrase_set)
return ''.join(sentences)
@click.command()
@click.argument('keyword', metavar='<keyword>')
def main(keyword):
""" Use DAUM Dictionary via terminal """
click.echo('Searching...')
url = f'{DAUM_DICT_HOST}search.do?q={keyword}&dic={LANG}'
response = requests.get(url)
meanings, wordid = parse(response.text)
detailed_url = f'https://dic.daum.net/word/view.do?wordid={wordid}'
detailed_text = None
click.echo(meanings)
if meanings == 'No results found.' and wordid == '':
return
    while True:
value = click.prompt(click.style(COMMANDS, fg='white', bg='blue'))
try:
command = COMMAND_SET[value]
except KeyError:
click.echo("Sorry, I don't understand.")
continue
if value != 'q':
if value == 'e':
result = parse_example(example_url(wordid))
click.echo(result)
else:
# a / s
if detailed_text is None:
detailed_text = requests.get(detailed_url).text
result = parse_detail(detailed_text, wordid, command)
click.secho(command, fg='green')
click.echo(result)
else:
break
if __name__ == "__main__":
main()
|
py | 1a54b56d49a816dc1a46cbed02e4eb1928ff02be | import numpy as np
import torch
import torch.nn.functional as F
def restore_bn(kernel, bn, conv_bias):
gamma = bn.weight
std = (bn.running_var + bn.eps).sqrt()
bias = -bn.running_mean
new_bias = (conv_bias - bn.bias) / gamma * std - bias
new_weight = kernel * (std / gamma).reshape(-1, 1, 1, 1)
return new_weight, new_bias
def transI_fusebn(kernel, bn, conv_bias):
gamma = bn.weight
std = (bn.running_var + bn.eps).sqrt()
bias = -bn.running_mean
if conv_bias is not None:
bias += conv_bias
return kernel * (
(gamma / std).reshape(-1, 1, 1, 1)), bn.bias + bias * gamma / std
def transII_addbranch(kernels, biases):
return torch.sum(kernels, dim=0), torch.sum(biases, dim=0)
def transIII_1x1_kxk(k1, b1, k2, b2, groups=1):
if groups == 1:
k = F.conv2d(k2, k1.permute(1, 0, 2, 3))
b_hat = (k2 * b1.reshape(1, -1, 1, 1)).sum((1, 2, 3))
else:
k_slices = []
b_slices = []
k1_T = k1.permute(1, 0, 2, 3)
k1_group_width = k1.size(0) // groups
k2_group_width = k2.size(0) // groups
for g in range(groups):
k1_T_slice = k1_T[:, g * k1_group_width:(g + 1) *
k1_group_width, :, :]
k2_slice = k2[g * k2_group_width:(g + 1) * k2_group_width, :, :, :]
k_slices.append(F.conv2d(k2_slice, k1_T_slice))
b_slices.append(
(k2_slice *
b1[g * k1_group_width:(g + 1) * k1_group_width].reshape(
1, -1, 1, 1)).sum((1, 2, 3)))
k, b_hat = transIV_depthconcat(k_slices, b_slices)
return k, b_hat + b2
def transIV_depthconcat(kernels, biases):
return torch.cat(kernels), torch.cat(biases)
def transV_avg(channels, kernel_size, groups):
input_dim = channels // groups
k = torch.zeros((channels, input_dim, kernel_size, kernel_size))
k[np.arange(channels).tolist(),
np.tile(np.arange(input_dim), groups).tolist(
), :, :] = 1.0 / kernel_size**2
return k
def transVI_multiscale(kernel, target_kernel_size):
"""
NOTE: This has not been tested with non-square kernels
(kernel.size(2) != kernel.size(3)) nor even-size kernels
"""
W_pixels_to_pad = (target_kernel_size - kernel.size(2)) // 2
H_pixels_to_pad = (target_kernel_size - kernel.size(3)) // 2
return F.pad(
kernel,
[H_pixels_to_pad, H_pixels_to_pad, W_pixels_to_pad, W_pixels_to_pad])
def transVII_kxk_1x1(k1, b1, k2, b2):
return F.conv2d(k1.permute(1, 0, 2, 3),
k2).permute(1, 0, 2,
3), (k2 * b1.reshape(-1, 1, 1, 1)).sum(
(1, 2, 3)) + b2
def transIIX_kxk_kxk(k1, b1, k2, b2, groups=1):
k1 = torch.from_numpy(
np.flip(np.flip(np.array(k1), axis=3), axis=2).copy())
k_size = k1.size(2)
padding = k_size // 2 + 1
if groups == 1:
k = F.conv2d(k2, k1.permute(1, 0, 2, 3), padding=padding)
b_hat = (k2 * b1.reshape(1, -1, 1, 1)).sum((1, 2, 3))
else:
k_slices = []
b_slices = []
k1_T = k1.permute(1, 0, 2, 3)
k1_group_width = k1.size(0) // groups
k2_group_width = k2.size(0) // groups
for g in range(groups):
k1_T_slice = k1_T[:, g * k1_group_width:(g + 1) *
k1_group_width, :, :]
k2_slice = k2[g * k2_group_width:(g + 1) * k2_group_width, :, :, :]
k_slices.append(F.conv2d(k2_slice, k1_T_slice, padding=padding))
b_slices.append(
(k2_slice *
b1[g * k1_group_width:(g + 1) * k1_group_width].reshape(
1, -1, 1, 1)).sum((1, 2, 3)))
k, b_hat = transIV_depthconcat(k_slices, b_slices)
return k, b_hat + b2
def transIX_bn_to_1x1(bn, in_channels, groups=1):
input_dim = in_channels // groups
kernel_value = np.zeros((in_channels, input_dim, 3, 3), dtype=np.float32)
for i in range(in_channels):
kernel_value[i, i % input_dim, 1, 1] = 1
id_tensor = torch.from_numpy(kernel_value).to(bn.weight.device)
kernel = id_tensor
running_mean = bn.running_mean
running_var = bn.running_var
gamma = bn.weight
beta = bn.bias
eps = bn.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
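# Illustrative-only sanity check (not part of the original module): fuse a
# Conv2d + BatchNorm2d pair with transI_fusebn and verify that a single conv
# using the fused kernel/bias reproduces the two-layer output. Layer sizes and
# the tolerance are arbitrary example choices.
def _example_check_transI_fusebn():
    torch.manual_seed(0)
    conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
    bn = torch.nn.BatchNorm2d(8)
    bn.eval()  # use running statistics, as at deploy time
    bn.running_mean.uniform_(-1., 1.)   # give BN non-trivial statistics
    bn.running_var.uniform_(0.5, 1.5)
    x = torch.randn(2, 3, 16, 16)
    with torch.no_grad():
        ref = bn(conv(x))
        kernel, bias = transI_fusebn(conv.weight, bn, None)
        fused = F.conv2d(x, kernel, bias, padding=1)
    return torch.allclose(ref, fused, atol=1e-5)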
|
py | 1a54b7b6238dc9cd51b4d99c566445f37a2c25e8 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
"""
image = mpimg.imread('./images/waymo_car.jpg')
#Image Dimensions
print("Image Dimensions: ", image.shape)
"""
"""
Starting with B & W
"""
"""
#Covert to GrayScale
convGrayScale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
print("Converted Image Dimensions: ", convGrayScale.shape)
plt.imshow(convGrayScale, cmap='gray')
# Print the value at the centre of the image
x = convGrayScale.shape[1]//2
y = convGrayScale.shape[0]//2
print(convGrayScale[y,x])
# Finds the maximum and minimum grayscale values in this image
max_val = np.amax(convGrayScale)
min_val = np.amin(convGrayScale)
print('Max: ', max_val)
print('Min: ', min_val)
"""
"""
With Colour Images
"""
"""
#image = mpimg.imread('images/wa_state_highway.jpg')
plt.imshow(image)
# Copying RGB Channels into separate arrays
red = image[:,:,0]
green = image[:,:,1]
blue = image[:,:,2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
ax1.set_title('Red channel')
ax1.imshow(red, cmap='gray')
ax2.set_title('Green channel')
ax2.imshow(green, cmap='gray')
ax3.set_title('Blue channel')
ax3.imshow(blue, cmap='gray')
"""
"""
Creating Blue Screen
"""
"""
pizzaImage = cv2.imread("./images/pizza_bluescreen.jpg")
print("This image is a(n)", type(pizzaImage))
#Please remember that the image dimensions are displayed as Height x Width x Colour Components
print("Image Dimensions", pizzaImage.shape)
#We need to make a copy and convert the image to RGB
pizzaCopy = np.copy(pizzaImage)
pizzaCopy = cv2.cvtColor(pizzaCopy, cv2.COLOR_BGR2RGB)
plt.imshow(pizzaCopy)
#Identifying Colour thresholds for Blue
lowerBlue = np.array([0,0,210])
upperBlue = np.array([70,70,255])
#Creating masks for Blue area
mask = cv2.inRange(pizzaCopy, lowerBlue, upperBlue)
#Visualize the mask - Black area means that the mask isn't effective there
plt.imshow(mask, cmap='gray')
maskedImage = np.copy(pizzaCopy)
maskedImage[mask != 0] = [0, 0, 0]
plt.imshow(maskedImage, cmap='gray')
#Adding the background
backgroundImage = cv2.imread('./images/space_background.jpg')
backgroundImage = cv2.cvtColor(backgroundImage, cv2.COLOR_BGR2RGB)
croppedImage = backgroundImage[0:514, 0:816]
croppedImage[mask == 0] = [0,0,0]
plt.imshow(croppedImage)
completeImage = croppedImage + maskedImage
plt.imshow(completeImage)
"""
"""
Coding for Green Screen
"""
"""
carImage = cv2.imread("./images/car_green_screen.jpg")
print("This image is a(n)", type(carImage))
#Please remember that the image dimensions are displayed as Height x Width x Colour Components
print("Image Dimensions", carImage.shape)
#We need to make a copy and convert the image to RGB
carCopy = np.copy(carImage)
carCopy = cv2.cvtColor(carCopy, cv2.COLOR_BGR2RGB)
plt.imshow(carCopy)
#Identifying Colour thresholds for Green
lowerGreen = np.array([36, 25, 25])
upperGreen = np.array([70, 255, 255])
#Creating masks for Green area
mask = cv2.inRange(carCopy, lowerGreen, upperGreen)
#Visualize the mask - Black area means that the mask isn't effective there
plt.imshow(mask, cmap='gray')
maskedImage = np.copy(carCopy)
maskedImage[mask != 0] = [0, 0, 0]
plt.imshow(maskedImage, cmap='gray')
#Adding the background
backgroundImage = cv2.imread('./images/space_background.jpg')
backgroundImage = cv2.cvtColor(backgroundImage, cv2.COLOR_BGR2RGB)
plt.imshow(backgroundImage, cmap='gray')
croppedImage = backgroundImage[0:450, 0:660]
croppedImage[mask == 0] = [0,0,0]
plt.imshow(croppedImage)
completeImage = croppedImage + maskedImage
plt.imshow(completeImage)
"""
"""
Converting to HSV format
"""
image = cv2.imread('images/water_balloons.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
# RGB channels
r = image[:,:,0]
g = image[:,:,1]
b = image[:,:,2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
ax1.set_title('Red')
ax1.imshow(r, cmap='gray')
ax2.set_title('Green')
ax2.imshow(g, cmap='gray')
ax3.set_title('Blue')
ax3.imshow(b, cmap='gray')
# Convert from RGB to HSV
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
# HSV channels
h = hsv[:,:,0]
s = hsv[:,:,1]
v = hsv[:,:,2]
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
ax1.set_title('Hue')
ax1.imshow(h, cmap='gray')
ax2.set_title('Saturation')
ax2.imshow(s, cmap='gray')
ax3.set_title('Value')
ax3.imshow(v, cmap='gray')
# Define our color selection criteria in HSV values
lower_hue = np.array([160,0,0])
upper_hue = np.array([180,255,255])
# Define our color selection criteria in RGB values
lower_pink = np.array([180,0,100])
upper_pink = np.array([255,255,230])
# Define the masked area in RGB space
mask_rgb = cv2.inRange(image, lower_pink, upper_pink)
# mask the image
masked_image = np.copy(image)
masked_image[mask_rgb==0] = [0,0,0]
# Vizualize the mask
plt.imshow(masked_image)
# Now try HSV!
# Define the masked area in HSV space
mask_hsv = cv2.inRange(hsv, lower_hue, upper_hue)
# mask the image
masked_image = np.copy(image)
masked_image[mask_hsv==0] = [0,0,0]
# Vizualize the mask
plt.imshow(masked_image)
|
py | 1a54b7d425f4736f0ad6b06ae7dd31ffe4a98bed | version_info = (0, 12, 17, 'final', 0)
_specifier_ = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'final': ''}
__version__ = '%s.%s.%s%s'%(version_info[0], version_info[1], version_info[2],
'' if version_info[3]=='final' else _specifier_[version_info[3]]+str(version_info[4]))
__frontend_version__ = '^0.5.17'
|
py | 1a54b899ede7700d0e27202ae96eeebe0068786a | """
The DRAGON plugin.
This is a namespace package for running DRAGON.
"""
from .plugin import DragonPlugin
|
py | 1a54b8cb92618966f6945e9cf463c20d23d33325 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_machine_learning_compute import *
from .get_machine_learning_service import *
from .get_private_endpoint_connection import *
from .get_workspace import *
from .get_workspace_connection import *
from .list_machine_learning_compute_keys import *
from .list_machine_learning_compute_nodes import *
from .list_notebook_keys import *
from .list_storage_account_keys import *
from .list_workspace_keys import *
from .list_workspace_notebook_access_token import *
from .machine_learning_compute import *
from .machine_learning_service import *
from .private_endpoint_connection import *
from .workspace import *
from .workspace_connection import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:machinelearningservices/v20210101:MachineLearningCompute":
return MachineLearningCompute(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:machinelearningservices/v20210101:MachineLearningService":
return MachineLearningService(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:machinelearningservices/v20210101:PrivateEndpointConnection":
return PrivateEndpointConnection(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:machinelearningservices/v20210101:Workspace":
return Workspace(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:machinelearningservices/v20210101:WorkspaceConnection":
return WorkspaceConnection(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "machinelearningservices/v20210101", _module_instance)
_register_module()
|
py | 1a54b918c75b8f6851f014b049727a99f83a5fe1 | ############# constants
TITLE = "Cheese Maze"
DEVELOPER = "Jack Gartner"
HISTORY = "A mouse wants to eat his cheese. Make it to the hashtag (#) to win; watch out for plus signs (+). $ is a teleport, P is a power-up. Obtain the key (K) in order to unlock the door (D)."
INSTRUCTIONS = "left arrow key\t\t\tto move left\nright arrow key\t\t\tto move right\nup arrow key\t\t\tto move up\ndown arrow key\t\t\tto move down\npress q\t\t\t\t\tto quit"
############# functions
def displayTitle():
print(TITLE)
print("By " + DEVELOPER)
print()
print(HISTORY)
print()
print(INSTRUCTIONS)
print()
def displayBoard():
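    # \033[XXm sequences below are ANSI color escape codes:
    # 31 red, 32 green, 33 yellow, 34 blue, 36 cyan, 37 white (used here to switch back to white)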
print("-----------------")
print("| +\033[36mK\033[37m + \033[33mP\033[37m|")
print("|\033[32m#\033[37m \033[31mD\033[37m + |")
print("|++++ ++++++ |")
print("| + |")
print("| ++++++ +++++|")
print("| \033[34m$\033[37m|")
print("-----------------")
|
py | 1a54b9b79cf4ce739e9b7e3f439af4601538de99 | # (C) Copyright 2005- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
#
# Python implementation: bufr_clone
#
# Description: how to create a new BUFR message by cloning
# an existing message.
#
from __future__ import absolute_import
import traceback
import sys
from eccodes import *
INPUT = '../../data/bufr/syno_1.bufr'
OUTPUT = 'bufr_clone_test_p.clone.bufr'
VERBOSE = 1 # verbose error reporting
def example():
# open BUFR file
fin = open(INPUT, 'rb')
# open output BUFR file
fout = open(OUTPUT, 'wb')
# get handle for message
bufr = codes_bufr_new_from_file(fin)
# create several clones of this message and alter them
# in different ways
for centre in range(0, 3):
# clone the message
clone_id = codes_clone(bufr)
# this is the place where you may wish to modify the clone
codes_set(clone_id, 'bufrHeaderCentre', centre)
# write the cloned message to a file
codes_write(clone_id, fout)
# release the clone's handle
codes_release(clone_id)
# release the source's handle
codes_release(bufr)
fin.close()
fout.close()
def main():
try:
example()
except CodesInternalError as err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
sys.stderr.write(err.msg + '\n')
return 1
if __name__ == "__main__":
sys.exit(main())
|
py | 1a54badbeb2ee6fcab9e5d3f258f903941cb00ce | import datetime as dt
import functools
import itertools
import logging
from logging.handlers import RotatingFileHandler
import os
import sys
import threading
import traceback
# pylint: disable=redefined-builtin
from codecs import open
from collections import namedtuple
from time import time
from cli_helpers.tabular_output import TabularOutputFormatter
from cli_helpers.tabular_output.preprocessors import (align_decimals,
format_numbers)
import humanize
import click
from prompt_toolkit.shortcuts import PromptSession, CompleteStyle
from prompt_toolkit.completion import DynamicCompleter, ThreadedCompleter
from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
from prompt_toolkit.document import Document
from prompt_toolkit.filters import HasFocus, IsDone
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.layout.processors import (ConditionalProcessor,
HighlightMatchingBracketProcessor,
TabsProcessor)
from prompt_toolkit.history import FileHistory
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from pygments.lexers.sql import PostgresLexer
from mssqlcli.config import (
get_casing_file,
config_location,
ensure_dir_exists,
get_config,
)
from mssqlcli.completion_refresher import CompletionRefresher
from mssqlcli.__init__ import __version__
from mssqlcli.encodingutils import text_type
from mssqlcli.key_bindings import mssqlcli_bindings
from mssqlcli.mssqlcliclient import MssqlCliClient
from mssqlcli.mssqlcompleter import MssqlCompleter
from mssqlcli.mssqlstyle import style_factory, style_factory_output
from mssqlcli.mssqltoolbar import create_toolbar_tokens_func
from mssqlcli.sqltoolsclient import SqlToolsClient
from mssqlcli.packages import special
from mssqlcli.mssqlbuffer import mssql_is_multiline
from mssqlcli.util import is_command_valid
import mssqlcli.localized_strings as localized
# Query tuples are used for maintaining history
MetaQuery = namedtuple(
'Query',
[
'query', # The entire text of the command
'successful', # True If all subqueries were successful
'total_time', # Time elapsed executing the query
'meta_changed', # True if any subquery executed create/alter/drop
'db_changed', # True if any subquery changed the database
'path_changed', # True if any subquery changed the search path
'mutated', # True if any subquery executed insert/update/delete
'contains_secure_statement', # True if any subquery contains the security statement
])
MetaQuery.__new__.__defaults__ = ('', False, 0, False, False, False, False, False)
OutputSettings = namedtuple(
'OutputSettings',
'table_format dcmlfmt floatfmt missingval expanded max_width case_function'
)
OutputSettings.__new__.__defaults__ = (
None, None, None, '<null>', False, None, lambda x: x
)
security_keywords = ['password', 'secret', 'encrypted_value']
def security_words_found_in(query):
try:
tokens = query.lower()
return any([keyword for keyword in security_keywords if keyword in tokens])
except AttributeError:
return False
class MssqlFileHistory(FileHistory):
def append_string(self, string):
if security_words_found_in(string):
return
super(MssqlFileHistory, self).append_string(string)
class MssqlCli(object):
# pylint: disable=too-many-instance-attributes, useless-object-inheritance
max_len_prompt = 30
default_prompt = '\\d> '
def set_default_pager(self, config):
configured_pager = config['main'].get('pager')
os_environ_pager = os.environ.get('PAGER')
is_less_installed = is_command_valid(['less', '--version'])
default_pager = configured_pager or os_environ_pager or \
('less -SRXF' if is_less_installed else False) or None
if configured_pager:
self.logger.info(
'Default pager found in config file: "%s"', configured_pager)
elif os_environ_pager:
self.logger.info('Default pager found in PAGER environment variable: "%s"',
os_environ_pager)
elif is_less_installed:
self.logger.info('Default pager set to Less')
else:
self.logger.info(
'No default pager found in environment. Using os default pager')
# Set default set of less recommended options, if they are not already set.
# They are ignored if pager is different than less.
if not os.environ.get('LESS'):
os.environ['LESS'] = '-SRXF'
if default_pager is not None:
os.environ['PAGER'] = default_pager
return default_pager
def __init__(self, options):
# Load config.
c = self.config = get_config(options.mssqlclirc_file)
self.initialize_logging()
self.logger = logging.getLogger(u'mssqlcli.main')
self.interactive_mode = options.interactive_mode
self.table_format = c['main']['table_format']
self.decimal_format = c['data_formats']['decimal']
self.float_format = c['data_formats']['float']
self.null_string = c['main'].get('null_string', '<null>')
self.expanded_output = c['main']['expand'] == 'always'
self.integrated_auth = options.integrated_auth
self.less_chatty = bool(
options.less_chatty) or c['main'].as_bool('less_chatty') or self.interactive_mode
keyword_casing = c['main']['keyword_casing']
self.settings = {
'casing_file': get_casing_file(c),
'generate_casing_file': c['main'].as_bool('generate_casing_file'),
'generate_aliases': c['main'].as_bool('generate_aliases'),
'asterisk_column_order': c['main']['asterisk_column_order'],
'qualify_columns': c['main']['qualify_columns'],
'case_column_headers': c['main'].as_bool('case_column_headers'),
'search_path_filter': c['main'].as_bool('search_path_filter'),
'single_connection': False,
'less_chatty': self.less_chatty,
'keyword_casing': keyword_casing,
}
if self.interactive_mode:
pager = self.set_default_pager(c)
self.prompt_session = None
# set auto_expand to false if less is detected with auto expand
self.auto_expand = options.auto_vertical_output \
or (c['main']['expand'] == 'auto' and pager != 'less -SRXF')
self.multiline = c['main'].as_bool('multi_line')
self.multiline_mode = c['main'].get('multi_line_mode', 'tsql')
self.vi_mode = c['main'].as_bool('vi')
self.prompt_format = options.prompt or c['main'].get('prompt', self.default_prompt)
self.row_limit = options.row_limit
self.min_num_menu_lines = c['main'].as_int('min_num_menu_lines')
self.multiline_continuation_char = c['main']['multiline_continuation_char']
self.syntax_style = c['main']['syntax_style']
self.cli_style = c['colors']
self.output_style = style_factory_output(self.syntax_style, self.cli_style)
self.wider_completion_menu = c['main'].as_bool('wider_completion_menu')
self.on_error = c['main']['on_error'].upper()
self.now = dt.datetime.today()
self.completion_refresher = CompletionRefresher()
self.query_history = []
# Initialize completer
smart_completion = c['main'].get('smart_completion', 'True').lower() == 'true'
self.completer = MssqlCompleter(smart_completion=smart_completion,
settings=self.settings)
self._completer_lock = threading.Lock()
# input and output file are for non-interactive mode
self.input_file = options.input_file
self.output_file = options.output_file
self.query = options.query
self.sqltoolsclient = SqlToolsClient(enable_logging=options.enable_sqltoolsservice_logging)
self.mssqlcliclient_main = MssqlCliClient(options, self.sqltoolsclient)
# exit and return error if user enters interactive mode with -i or -o arguments enabled
if self.interactive_mode and (self.input_file or self.output_file):
raise ValueError("Invalid arguments: -i and -o can only be used in non-interactive "
"mode.")
# exit and return error if both query text and an input file are specified
if self.query and self.input_file:
raise ValueError("Invalid arguments: either query [-Q] or input file [-i] may be "
"specified.")
def __del__(self):
# Shut-down sqltoolsservice
if self.sqltoolsclient:
self.sqltoolsclient.shutdown()
# TODO: possibly use at a later date for expanded output file functionality
# def write_to_file(self, pattern, **_):
# if not pattern:
# self.output_file = None
# message = 'File output disabled'
# return [(None, None, None, message, '', True)]
# filename = os.path.abspath(os.path.expanduser(pattern))
# if not os.path.isfile(filename):
# try:
# open(filename, 'w').close()
# except IOError as e:
# self.output_file = None
# message = str(e) + '\nFile output disabled'
# return [(None, None, None, message, '', False)]
# self.output_file = filename
# message = 'Writing to file "%s"' % self.output_file
# return [(None, None, None, message, '', True)]
def initialize_logging(self):
log_file = self.config['main']['log_file']
if log_file == 'default':
log_file = config_location() + 'mssqlcli.log'
ensure_dir_exists(log_file)
log_level = self.config['main']['log_level']
# Disable logging if value is NONE by switching to a no-op handler.
# Set log level to a high value so it doesn't even waste cycles getting
# called.
if log_level.upper() == 'NONE':
handler = logging.NullHandler()
else:
# creates a log buffer with max size of 20 MB and 5 backup files
handler = RotatingFileHandler(os.path.expanduser(log_file), encoding='utf-8',
maxBytes=1024*1024*20, backupCount=5)
level_map = {'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NONE': logging.CRITICAL
}
log_level = level_map[log_level.upper()]
formatter = logging.Formatter(
'%(asctime)s (%(process)d/%(threadName)s) '
'%(name)s %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root_logger = logging.getLogger('mssqlcli')
root_logger.addHandler(handler)
root_logger.setLevel(log_level)
root_logger.info('Initializing mssqlcli logging.')
root_logger.debug('Log file %r.', log_file)
def set_main_mssqlcli_client(self, mssqlcli_client):
self.mssqlcliclient_main = mssqlcli_client
def connect_to_database(self):
owner_uri, error_messages = self.mssqlcliclient_main.connect_to_database()
if not owner_uri and error_messages:
click.secho('\n'.join(error_messages),
err=True,
fg='yellow')
self.logger.debug('Database connection failed: %r.', error_messages)
sys.exit(1)
def handle_editor_command(self, text):
r"""
Editor command is any query that is prefixed or suffixed
by a '\e'. The reason for a while loop is because a user
might edit a query multiple times.
For eg:
"select * from \e"<enter> to edit it in vim, then come
back to the prompt with the edited query "select * from
blah where q = 'abc'\e" to edit it again.
:param text: Document
:return: Document
"""
# FIXME: using application.pre_run_callables like this here is not the best solution.
# It's internal api of prompt_toolkit that may change. This was added to fix #668.
# We may find a better way to do it in the future.
# pylint: disable=no-member
editor_command = special.editor_command(text)
while editor_command:
filename = special.get_filename(text)
query = (special.get_editor_query(text) or
self.get_last_query())
sql, message = special.open_external_editor(filename, sql=query)
if message:
# Something went wrong. Raise an exception and bail.
raise RuntimeError(message)
while True:
try:
text = self.prompt_session.prompt(default=sql)
break
except KeyboardInterrupt:
sql = ""
editor_command = special.editor_command(text)
return text
def _execute_interactive_command(self, text):
""" Runs commands in the interactive CLI mode. """
logger = self.logger
# Initialize default metaquery in case execution fails
query = MetaQuery(query=text, successful=False)
try:
output, query = self._evaluate_command(text)
except KeyboardInterrupt:
# Issue where Ctrl+C propagates to sql tools service process and kills it,
# so that query/cancel request can't be sent.
# Right now the sql_tools_service process is killed and we restart
# it with a new connection.
click.secho(u'Cancelling query...', err=True, fg='red')
self.reset()
logger.debug("cancelled query, sql: %r", text)
click.secho("Query cancelled.", err=True, fg='red')
except NotImplementedError:
click.secho('Not Yet Implemented.', fg="yellow")
else:
if query.total_time > 1:
# pylint: disable=no-member
print('Time: %0.03fs (%s)' % (query.total_time,
humanize.time.naturaldelta(query.total_time)))
else:
print('Time: %0.03fs' % query.total_time)
# Check if we need to update completions, in order of most
# to least drastic changes
if query.db_changed:
with self._completer_lock:
self.completer.reset_completions()
self.refresh_completions(persist_priorities='keywords')
elif query.meta_changed:
self.refresh_completions(persist_priorities='all')
if not query.contains_secure_statement:
# Allow MssqlCompleter to learn user's preferred keywords, etc.
with self._completer_lock:
self.completer.extend_query_history(text)
self.query_history.append(query)
return output
def execute_query(self, text):
""" Processes a query string and outputs to file or terminal """
if self.interactive_mode:
output = self._execute_interactive_command(text)
else:
# non-interactive mode
output, _ = self._evaluate_command(text)
self._output_query(output)
return output
def _output_query(self, output):
""" Specifies how query output is handled """
if self.interactive_mode:
click.echo_via_pager('\n'.join(output))
else:
if self.output_file:
try:
with open(self.output_file, 'w', encoding='utf-8') as f:
click.echo('\n'.join(output), file=f)
except IOError as e:
click.secho(str(e), err=True, fg='red')
sys.exit(1)
else:
click.echo('\n'.join(output))
def run(self):
""" Spins up CLI. """
# raise error if interactive mode is set to false here
if not self.interactive_mode:
raise ValueError("Invalid arguments: 'run' must be used in interactive mode! Please set "
"interactive_mode to True.")
# exit and return error if user enters interactive mode with -o argument enabled
if self.output_file:
raise ValueError("Invalid arguments: -o must be used with interactive mode set to "
"false.")
history_file = self.config['main']['history_file']
if history_file == 'default':
history_file = config_location() + 'history'
history = MssqlFileHistory(os.path.expanduser(history_file))
self.refresh_completions(history=history,
persist_priorities='none')
self.prompt_session = self._build_cli(history)
if not self.less_chatty:
print('Version: {}'.format(__version__))
print('Mail: [email protected]')
print('Home: http://github.com/dbcli/mssql-cli')
try:
while True:
try:
text = self.prompt_session.prompt()
except KeyboardInterrupt:
continue
# The reason we check here instead of inside the mssqlcliclient is
# because we want to raise the Exit exception which will be
# caught by the try/except block that wraps the mssqlcliclient execute
# statement.
if self.quit_command(text):
raise EOFError
try:
text = self.handle_editor_command(text)
except RuntimeError as e:
self.logger.error("sql: %r, error: %r", text, e)
self.logger.error("traceback: %r", traceback.format_exc())
click.secho(str(e), err=True, fg='red')
continue
self.execute_query(text)
self.now = dt.datetime.today()
except EOFError:
self.mssqlcliclient_main.shutdown()
if not self.less_chatty:
print(localized.goodbye())
def _build_cli(self, history):
"""
Builds prompt session.
NOTE: PROMPT-SESSION USES THIS AS DEPENDENCY.
"""
def get_message():
prompt = self.get_prompt(self.prompt_format)
return [(u'class:prompt', prompt)]
def get_continuation(width, line_number, is_soft_wrap):
"""
NOTE: updating parameters will cause prompt session to crash.
"""
# pylint: disable=unused-argument
continuation = self.multiline_continuation_char * (width - 1) + ' '
return [(u'class:continuation', continuation)]
get_toolbar_tokens = create_toolbar_tokens_func(self)
if self.wider_completion_menu:
complete_style = CompleteStyle.MULTI_COLUMN
else:
complete_style = CompleteStyle.COLUMN
with self._completer_lock:
self.prompt_session = PromptSession(
message=get_message,
style=style_factory(self.syntax_style, self.cli_style),
# Layout options.
lexer=PygmentsLexer(PostgresLexer),
prompt_continuation=get_continuation,
bottom_toolbar=get_toolbar_tokens,
complete_style=complete_style,
input_processors=[
ConditionalProcessor(
processor=HighlightMatchingBracketProcessor(
chars='[](){}'),
#pylint: disable=invalid-unary-operand-type
filter=HasFocus(DEFAULT_BUFFER) & ~IsDone()),
# Render \t as 4 spaces instead of "^I"
TabsProcessor(char1=u' ', char2=u' ')],
reserve_space_for_menu=self.min_num_menu_lines,
# Buffer options.
multiline=mssql_is_multiline(self),
completer=ThreadedCompleter(
DynamicCompleter(lambda: self.completer)),
history=history, auto_suggest=AutoSuggestFromHistory(),
complete_while_typing=True,
# Key bindings.
enable_system_prompt=True,
enable_open_in_editor=True,
# Other options.
key_bindings=mssqlcli_bindings(self),
editing_mode=EditingMode.VI if self.vi_mode else EditingMode.EMACS,
search_ignore_case=True)
return self.prompt_session
def _should_show_limit_prompt(self, status, rows):
"""
Returns True if limit prompt should be shown, False otherwise.
NOTE: updating parameters will cause prompt session to crash.
"""
# pylint: disable=unused-argument
if not rows:
return False
return self.interactive_mode and self.row_limit > 0 and len(rows) > self.row_limit
def _evaluate_command(self, text):
"""
Used to run a command entered by the user during CLI operation
(Puts the E in REPL)
returns (results, MetaQuery)
"""
# pylint: disable=too-many-locals
all_success = True
meta_changed = False # CREATE, ALTER, DROP, etc
mutated = False # INSERT, DELETE, etc
db_changed = False
contains_secure_statement = False
path_changed = False
output = []
total = 0
# Run the query.
start = time()
# mssql-cli
if not self.mssqlcliclient_main.connect_to_database():
click.secho(u'No connection to server. Exiting.')
sys.exit(1)
for rows, columns, status, sql, is_error in \
self.mssqlcliclient_main.execute_query(text):
total = time() - start
if self._should_show_limit_prompt(status, rows):
click.secho('The result set has more than %s rows.'
% self.row_limit, fg='red')
if not click.confirm('Do you want to continue?'):
click.secho("Aborted!", err=True, fg='red')
break
contains_secure_statement = security_words_found_in(sql)
if is_error:
output.append(status)
all_success = False
continue
if self.interactive_mode and self.auto_expand and self.prompt_session:
max_width = self.prompt_session.output.get_size().columns
else:
max_width = None
settings = OutputSettings(
table_format=self.table_format,
dcmlfmt=self.decimal_format,
floatfmt=self.float_format,
missingval=self.null_string,
expanded=self.expanded_output,
max_width=max_width,
case_function=(
self.completer.case if self.interactive_mode and
self.settings['case_column_headers']
else
lambda x: x
)
)
formatted = self.format_output(None, rows, columns, status, settings)
output.extend(formatted)
db_changed, new_db_name = self.has_change_db_cmd(sql)
if new_db_name:
self.logger.info('Database context changed.')
self.mssqlcliclient_main.connected_database = new_db_name
if all_success:
meta_changed = meta_changed or self.has_meta_cmd(text)
return output, MetaQuery(
text, all_success, total, meta_changed, db_changed, path_changed, mutated,
contains_secure_statement)
def _handle_server_closed_connection(self):
"""Used during CLI execution"""
reconnect = click.prompt(
'Connection reset. Reconnect (Y/n)',
show_default=False, type=bool, default=True)
if reconnect:
self.reset()
click.secho('Reconnected!\nTry the command again.', fg='green')
def shutdown(self):
""" API for shutting down client """
self.mssqlcliclient_main.shutdown()
def reset(self):
"""
Reset mssqlcli client with a new sql tools service and connection.
"""
self.sqltoolsclient.shutdown()
self.sqltoolsclient = SqlToolsClient()
self.mssqlcliclient_main = self.mssqlcliclient_main.clone(self.sqltoolsclient)
database_response = self.mssqlcliclient_main.connect_to_database()
if not database_response:
            click.secho('Unable to reconnect to server %s; database %s.' % (
self.mssqlcliclient_main.server_name,
self.mssqlcliclient_main.connected_database),
err=True, fg='yellow')
self.logger.info(u'Unable to reset connection to server %s; database %s',
self.mssqlcliclient_main.server_name,
self.mssqlcliclient_main.connected_database)
sys.exit(1)
else:
owner_uri, error_messages = database_response
if not owner_uri and error_messages:
# can occur if database credentials change during reset
self.logger.error(u'Error in reset : %s', error_messages)
raise ConnectionResetError(error_messages)
def refresh_completions(self, history=None, persist_priorities='all'):
# Clone mssqlcliclient to create a new connection with a new owner Uri.
        mssqlcliclient_completion_refresher = self.mssqlcliclient_main.clone()
callback = functools.partial(self._on_completions_refreshed,
persist_priorities=persist_priorities)
        self.completion_refresher.refresh(mssqcliclient=mssqlcliclient_completion_refresher,
callbacks=callback,
history=history,
settings=self.settings)
return [(None, None, None,
'Auto-completion refresh started in the background.')]
def _on_completions_refreshed(self, new_completer, persist_priorities):
self._swap_completer_objects(new_completer, persist_priorities)
if self.prompt_session:
# After refreshing, redraw the CLI to clear the statusbar
# "Refreshing completions..." indicator
self.prompt_session.app.invalidate()
def _swap_completer_objects(self, new_completer, persist_priorities):
"""Swap the completer object with the newly created completer.
persist_priorities is a string specifying how the old completer's
learned prioritizer should be transferred to the new completer.
'none' - The new prioritizer is left in a new/clean state
'all' - The new prioritizer is updated to exactly reflect
the old one
'keywords' - The new prioritizer is updated with old keyword
priorities, but not any other.
"""
with self._completer_lock:
old_completer = self.completer
self.completer = new_completer
if persist_priorities == 'all':
# Just swap over the entire prioritizer
new_completer.prioritizer = old_completer.prioritizer
elif persist_priorities == 'keywords':
# Swap over the entire prioritizer, but clear name priorities,
# leaving learned keyword priorities alone
new_completer.prioritizer = old_completer.prioritizer
new_completer.prioritizer.clear_names()
elif persist_priorities == 'none':
# Leave the new prioritizer as is
pass
# When mssql-cli is first launched we call refresh_completions before
# instantiating the cli object. So it is necessary to check if cli
        # exists before trying to replace the completer object in cli.
self.completer = new_completer
def get_completions(self, text, cursor_position):
with self._completer_lock:
return self.completer.get_completions(
Document(text=text, cursor_position=cursor_position), None)
def get_prompt(self, string):
string = string.replace('\\t', self.now.strftime('%x %X'))
string = string.replace('\\u', self.mssqlcliclient_main.user_name or '(none)')
string = string.replace('\\h', self.mssqlcliclient_main.prompt_host or '(none)')
string = string.replace('\\d', self.mssqlcliclient_main.connected_database or '(none)')
string = string.replace('\\p', str(self.mssqlcliclient_main.prompt_port) or '(none)')
string = string.replace('\\n', "\n")
return string
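    # Illustrative sketch (assumed values, not part of the original source): with
    # user 'sa', host 'myserver' and database 'master', a prompt template of
    # '\u@\h:\d> ' would render roughly as 'sa@myserver:master> '.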
def get_last_query(self):
"""Get the last query executed or None."""
return self.query_history[-1][0] if self.query_history else None
@staticmethod
def has_meta_cmd(query):
"""Determines if the completion needs a refresh by checking if the sql
        statement is an alter, create or drop."""
if query and isinstance(query, str):
first_token = query.split()[0]
if first_token.lower() in ('alter', 'create', 'drop'):
return True
return False
@staticmethod
def has_change_db_cmd(query):
"""Determines if the statement is a database switch such as 'use' or '\\c'
Returns (True, DBName) or (False, None)
"""
if query and isinstance(query, str):
first_token = query.split()[0]
if first_token.lower() in ('use', '\\c', '\\connect'):
return True, query.split()[1].strip('"')
return False, None
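    # Illustrative sketch (assumed inputs, not part of the original source):
    #   has_change_db_cmd('USE AdventureWorks') -> (True, 'AdventureWorks')
    #   has_change_db_cmd('SELECT 1')           -> (False, None)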
@staticmethod
def quit_command(sql):
return (sql.strip().lower() == 'exit' or
sql.strip().lower() == 'quit' or
sql.strip() == r'\q' or
sql.strip() == ':q')
@staticmethod
def format_output(title, cur, headers, status, settings):
# pylint: disable=too-many-locals
output = []
expanded = (settings.expanded or settings.table_format == 'vertical')
table_format = ('vertical' if settings.expanded else
settings.table_format)
max_width = settings.max_width
case_function = settings.case_function
formatter = TabularOutputFormatter(format_name=table_format)
def format_array(val):
if val is None:
return settings.missingval
if not isinstance(val, list):
return val
return '{' + ','.join(text_type(format_array(e)) for e in val) + '}'
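        # Illustrative sketch (assumed values, not part of the original source):
        # with missingval='NULL', format_array([1, [2, None]]) renders as
        # '{1,{2,NULL}}'.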
def format_arrays(data, headers, **_):
data = list(data)
for row in data:
row[:] = [
format_array(val) if isinstance(val, list) else val
for val in row
]
return data, headers
output_kwargs = {
'sep_title': 'RECORD {n}',
'sep_character': '-',
'sep_length': (1, 25),
'missing_value': settings.missingval,
'integer_format': settings.dcmlfmt,
'float_format': settings.floatfmt,
'preprocessors': (format_numbers, format_arrays),
'disable_numparse': True,
'preserve_whitespace': True
}
if not settings.floatfmt:
output_kwargs['preprocessors'] = (align_decimals, )
if title:
output.append(title)
if cur:
headers = [case_function(x) for x in headers]
if max_width is not None:
cur = list(cur)
formatted = formatter.format_output(cur, headers, **output_kwargs)
if isinstance(formatted, text_type):
formatted = iter(formatted.splitlines())
first_line = next(formatted)
formatted = itertools.chain([first_line], formatted)
if (not expanded and max_width and len(
first_line) > max_width and headers):
formatted = formatter.format_output(
cur, headers, format_name='vertical', column_types=None, **output_kwargs)
if isinstance(formatted, text_type):
formatted = iter(formatted.splitlines())
output = itertools.chain(output, formatted)
if status: # Only print the status if it's not None.
output = itertools.chain(output, [status])
return output
|
py | 1a54bb8934b5db7cf7bbcbd831d4cc1646341821 | import copy
import logging
import os
import time
from collections import Counter
from statistics import mean
import numpy as np
import pandas as pd
from .fold_fitting_strategy import AbstractFoldFittingStrategy, SequentialLocalFoldFittingStrategy
from ..abstract.abstract_model import AbstractModel
from ...constants import MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE, REFIT_FULL_SUFFIX
from ...utils.exceptions import TimeLimitExceeded
from ...utils.loaders import load_pkl
from ...utils.savers import save_pkl
from ...utils.utils import CVSplitter, _compute_fi_with_stddev
logger = logging.getLogger(__name__)
# TODO: Add metadata object with info like score on each model, train time on each model, etc.
class BaggedEnsembleModel(AbstractModel):
"""
Bagged ensemble meta-model which fits a given model multiple times across different splits of the training data.
For certain child models such as KNN, this may only train a single model and instead rely on the child model to generate out-of-fold predictions.
"""
_oof_filename = 'oof.pkl'
def __init__(self, model_base: AbstractModel, random_state=0, **kwargs):
self.model_base = model_base
self._child_type = type(self.model_base)
self.models = []
self._oof_pred_proba = None
self._oof_pred_model_repeats = None
self._n_repeats = 0 # Number of n_repeats with at least 1 model fit, if kfold=5 and 8 models have been fit, _n_repeats is 2
self._n_repeats_finished = 0 # Number of n_repeats finished, if kfold=5 and 8 models have been fit, _n_repeats_finished is 1
self._k_fold_end = 0 # Number of models fit in current n_repeat (0 if completed), if kfold=5 and 8 models have been fit, _k_fold_end is 3
self._k = None # k models per n_repeat, equivalent to kfold value
self._k_per_n_repeat = [] # k-fold used for each n_repeat. == [5, 10, 3] if first kfold was 5, second was 10, and third was 3
self._random_state = random_state
self.low_memory = True
self._bagged_mode = None
# _child_oof currently is only set to True for KNN models, that are capable of LOO prediction generation to avoid needing bagging.
# TODO: Consider moving `_child_oof` logic to a separate class / refactor OOF logic.
# FIXME: Avoid unnecessary refit during refit_full on `_child_oof=True` models, just re-use the original model.
self._child_oof = False # Whether the OOF preds were taken from a single child model (Assumes child can produce OOF preds without bagging).
self._cv_splitters = [] # Keeps track of the CV splitter used for each bagged repeat.
super().__init__(problem_type=self.model_base.problem_type, eval_metric=self.model_base.eval_metric, **kwargs)
def _set_default_params(self):
default_params = {
# 'use_child_oof': False, # [Advanced] Whether to defer to child model for OOF preds and only train a single child.
'save_bag_folds': True,
# 'refit_folds': False, # [Advanced, Experimental] Whether to refit bags immediately to a refit_full model in a single .fit call.
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
super()._set_default_params()
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
drop_unique=False, # TODO: Get the value from child instead
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
def is_valid(self):
return self.is_fit() and (self._n_repeats == self._n_repeats_finished)
def can_infer(self):
return self.is_fit() and self.params.get('save_bag_folds', True)
def is_stratified(self):
if self.problem_type in [REGRESSION, QUANTILE, SOFTCLASS]:
return False
else:
return True
def is_fit(self):
return len(self.models) != 0
def can_fit(self) -> bool:
return not self.is_fit() or self._bagged_mode
def is_valid_oof(self):
return self.is_fit() and (self._child_oof or self._bagged_mode)
def get_oof_pred_proba(self, **kwargs):
# TODO: Require is_valid == True (add option param to ignore is_valid)
return self._oof_pred_proba_func(self._oof_pred_proba, self._oof_pred_model_repeats)
@staticmethod
def _oof_pred_proba_func(oof_pred_proba, oof_pred_model_repeats):
oof_pred_model_repeats_without_0 = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats)
if oof_pred_proba.ndim == 2:
oof_pred_model_repeats_without_0 = oof_pred_model_repeats_without_0[:, None]
return oof_pred_proba / oof_pred_model_repeats_without_0
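    # Illustrative sketch (assumed values, not part of the original source): with
    #   oof_pred_proba         = np.array([0.9, 0.0, 1.2])
    #   oof_pred_model_repeats = np.array([1,   0,   2  ])
    # rows never covered by a fold (repeat count 0) are divided by 1 instead of 0,
    # so the averaged OOF predictions become np.array([0.9, 0.0, 0.6]).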
def _init_misc(self, **kwargs):
child = self._get_model_base().convert_to_template()
child.initialize(**kwargs)
self.eval_metric = child.eval_metric
self.stopping_metric = child.stopping_metric
self.quantile_levels = child.quantile_levels
self.normalize_pred_probas = child.normalize_pred_probas
def preprocess(self, X, preprocess_nonadaptive=True, model=None, **kwargs):
if preprocess_nonadaptive:
if model is None:
if not self.models:
return X
model = self.models[0]
model = self.load_child(model)
return model.preprocess(X, preprocess_stateful=False)
else:
return X
def _get_cv_splitter(self, n_splits, n_repeats, groups=None):
return CVSplitter(n_splits=n_splits, n_repeats=n_repeats, groups=groups, stratified=self.is_stratified(), random_state=self._random_state)
def _fit(self,
X,
y,
X_val=None,
y_val=None,
X_pseudo=None,
y_pseudo=None,
k_fold=None,
k_fold_start=0,
k_fold_end=None,
n_repeats=1,
n_repeat_start=0,
groups=None,
**kwargs):
use_child_oof = self.params.get('use_child_oof', False)
if use_child_oof:
if self.is_fit():
# TODO: We may want to throw an exception instead and avoid calling fit more than once
return self
k_fold = 1
k_fold_end = None
groups = None
if k_fold is None and groups is None:
k_fold = 5
if k_fold is not None and k_fold < 1:
k_fold = 1
if k_fold is None or k_fold > 1:
k_fold = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups).n_splits
self._validate_bag_kwargs(
k_fold=k_fold,
k_fold_start=k_fold_start,
k_fold_end=k_fold_end,
n_repeats=n_repeats,
n_repeat_start=n_repeat_start,
groups=groups,
)
if k_fold_end is None:
k_fold_end = k_fold
model_base = self._get_model_base()
model_base.rename(name='')
kwargs['feature_metadata'] = self.feature_metadata
kwargs['num_classes'] = self.num_classes # TODO: maybe don't pass num_classes to children
if self.model_base is not None:
self.save_model_base(self.model_base)
self.model_base = None
if self._oof_pred_proba is None and self.is_fit():
self._load_oof()
save_bag_folds = self.params.get('save_bag_folds', True)
if k_fold == 1:
self._fit_single(X=X, y=y, model_base=model_base, use_child_oof=use_child_oof, **kwargs)
return self
else:
refit_folds = self.params.get('refit_folds', False)
if refit_folds:
save_bag_folds = False
if kwargs.get('time_limit', None) is not None:
fold_start = n_repeat_start * k_fold + k_fold_start
fold_end = (n_repeats - 1) * k_fold + k_fold_end
folds_to_fit = fold_end - fold_start
# Reserve time for final refit model
kwargs['time_limit'] = kwargs['time_limit'] * folds_to_fit / (folds_to_fit + 1.2)
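                # Illustrative sketch (assumed numbers, not part of the original source):
                # with time_limit=1000s and 8 folds left to fit, the folds share
                # 1000 * 8 / 9.2 ~= 870s, reserving roughly 130s (~13% of the budget)
                # for the final refit model.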
self._fit_folds(X=X, y=y, model_base=model_base, X_pseudo=X_pseudo, y_pseudo=y_pseudo,
k_fold=k_fold, k_fold_start=k_fold_start, k_fold_end=k_fold_end,
n_repeats=n_repeats, n_repeat_start=n_repeat_start, save_folds=save_bag_folds, groups=groups, **kwargs)
# FIXME: Don't save folds except for refit
# FIXME: Cleanup self
# FIXME: Don't add `_FULL` to name
if refit_folds:
refit_template = self.convert_to_refit_full_template()
refit_template.params['use_child_oof'] = False
kwargs['time_limit'] = None
refit_template.fit(X=X, y=y, k_fold=1, **kwargs)
refit_template._oof_pred_proba = self._oof_pred_proba
refit_template._oof_pred_model_repeats = self._oof_pred_model_repeats
refit_template._child_oof = True
refit_template.fit_time += self.fit_time + self.predict_time
return refit_template
else:
return self
def _validate_bag_kwargs(self, *,
k_fold,
k_fold_start,
k_fold_end,
n_repeats,
n_repeat_start,
groups):
if groups is not None:
if self._n_repeats_finished != 0:
raise AssertionError('Bagged models cannot call fit with `groups` specified when a full k-fold set has already been fit.')
if n_repeats > 1:
raise AssertionError('Cannot perform repeated bagging with `groups` specified.')
return
if k_fold_end is None:
k_fold_end = k_fold
if k_fold is None:
raise ValueError('k_fold cannot be None.')
if k_fold < 1:
raise ValueError(f'k_fold must be equal or greater than 1, value: ({k_fold})')
if n_repeat_start != self._n_repeats_finished:
raise ValueError(f'n_repeat_start must equal self._n_repeats_finished, values: ({n_repeat_start}, {self._n_repeats_finished})')
if n_repeats <= n_repeat_start:
raise ValueError(f'n_repeats must be greater than n_repeat_start, values: ({n_repeats}, {n_repeat_start})')
if k_fold_start != self._k_fold_end:
raise ValueError(f'k_fold_start must equal previous k_fold_end, values: ({k_fold_start}, {self._k_fold_end})')
if k_fold_start >= k_fold_end:
# TODO: Remove this limitation if n_repeats > 1
raise ValueError(f'k_fold_end must be greater than k_fold_start, values: ({k_fold_end}, {k_fold_start})')
if (n_repeats - n_repeat_start) > 1 and k_fold_end != k_fold:
# TODO: Remove this limitation
raise ValueError(f'k_fold_end must equal k_fold when (n_repeats - n_repeat_start) > 1, values: ({k_fold_end}, {k_fold})')
if self._k is not None and self._k != k_fold:
            raise ValueError(f'k_fold must equal previously fit k_fold value for the current n_repeat, values: ({k_fold}, {self._k})')
def predict_proba(self, X, normalize=None, **kwargs):
model = self.load_child(self.models[0])
X = self.preprocess(X, model=model, **kwargs)
pred_proba = model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)
for model in self.models[1:]:
model = self.load_child(model)
pred_proba += model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)
pred_proba = pred_proba / len(self.models)
if self.temperature_scalar is not None:
pred_proba = self._apply_temperature_scaling(pred_proba)
return pred_proba
def _predict_proba(self, X, normalize=False, **kwargs):
return self.predict_proba(X=X, normalize=normalize, **kwargs)
def score_with_oof(self, y, sample_weight=None):
self._load_oof()
valid_indices = self._oof_pred_model_repeats > 0
y = y[valid_indices]
y_pred_proba = self.get_oof_pred_proba()[valid_indices]
if sample_weight is not None:
sample_weight = sample_weight[valid_indices]
return self.score_with_y_pred_proba(y=y, y_pred_proba=y_pred_proba, sample_weight=sample_weight)
def _fit_single(self, X, y, model_base, use_child_oof, time_limit=None, **kwargs):
if self.is_fit():
raise AssertionError('Model is already fit.')
if self._n_repeats != 0:
raise ValueError(f'n_repeats must equal 0 when fitting a single model with k_fold == 1, value: {self._n_repeats}')
model_base.name = f'{model_base.name}S1F1'
model_base.set_contexts(path_context=self.path + model_base.name + os.path.sep)
time_start_fit = time.time()
model_base.fit(X=X, y=y, time_limit=time_limit, **kwargs)
model_base.fit_time = time.time() - time_start_fit
model_base.predict_time = None
X_len = len(X)
# Check if pred_proba is going to take too long
if time_limit is not None and X_len >= 10000:
max_allowed_time = time_limit * 1.3 # allow some buffer
time_left = max(
max_allowed_time - model_base.fit_time,
time_limit * 0.1, # At least 10% of time_limit
10, # At least 10 seconds
)
# Sample at most 500 rows to estimate prediction time of all rows
# TODO: Consider moving this into end of abstract model fit for all models.
            # Currently this only fixes the problem when in bagged mode; if not bagging, inference could still be problematic
n_sample = min(500, round(X_len * 0.1))
frac = n_sample / X_len
X_sample = X.sample(n=n_sample)
time_start_predict = time.time()
model_base.predict_proba(X_sample)
time_predict_frac = time.time() - time_start_predict
time_predict_estimate = time_predict_frac / frac
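            # Illustrative sketch (assumed numbers, not part of the original source):
            # with X_len=100000, n_sample = min(500, 10000) = 500 and frac = 0.005;
            # if predicting the 500-row sample takes 0.2s, the estimate for the full
            # data is 0.2 / 0.005 = 40s.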
logger.log(15, f'\t{round(time_predict_estimate, 2)}s\t= Estimated out-of-fold prediction time...')
if time_predict_estimate > time_left:
logger.warning(f'\tNot enough time to generate out-of-fold predictions for model. Estimated time required was {round(time_predict_estimate, 2)}s compared to {round(time_left, 2)}s of available time.')
raise TimeLimitExceeded
if use_child_oof:
logger.log(15, '\t`use_child_oof` was specified for this model. It will function similarly to a bagged model, but will only fit one child model.')
time_start_predict = time.time()
if model_base._get_tags().get('valid_oof', False):
self._oof_pred_proba = model_base.get_oof_pred_proba(X=X, y=y)
else:
logger.warning('\tWARNING: `use_child_oof` was specified but child model does not have a dedicated `get_oof_pred_proba` method. This model may have heavily overfit validation scores.')
self._oof_pred_proba = model_base.predict_proba(X=X)
self._child_oof = True
model_base.predict_time = time.time() - time_start_predict
model_base.val_score = model_base.score_with_y_pred_proba(y=y, y_pred_proba=self._oof_pred_proba)
else:
self._oof_pred_proba = model_base.predict_proba(X=X) # TODO: Cheater value, will be overfit to valid set
self._oof_pred_model_repeats = np.ones(shape=len(X), dtype=np.uint8)
self._n_repeats = 1
self._n_repeats_finished = 1
self._k_per_n_repeat = [1]
self._bagged_mode = False
model_base.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)
if not self.params.get('save_bag_folds', True):
model_base.model = None
if self.low_memory:
self.save_child(model_base, verbose=False)
self.models = [model_base.name]
else:
self.models = [model_base]
self._add_child_times_to_bag(model=model_base)
def _fit_folds(self,
X,
y,
model_base,
X_pseudo=None,
y_pseudo=None,
k_fold=None,
k_fold_start=0,
k_fold_end=None,
n_repeats=1,
n_repeat_start=0,
time_limit=None,
sample_weight=None,
save_folds=True,
groups=None,
**kwargs):
fold_fitting_strategy = self.params.get('fold_fitting_strategy', SequentialLocalFoldFittingStrategy)
# TODO: Preprocess data here instead of repeatedly
# FIXME: Raise exception if multiclass/binary and a single val fold contains all instances of a class. (Can happen if custom groups is specified)
time_start = time.time()
if k_fold_start != 0:
cv_splitter = self._cv_splitters[n_repeat_start]
else:
cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups)
if k_fold != cv_splitter.n_splits:
k_fold = cv_splitter.n_splits
if k_fold_end is None:
k_fold_end = k_fold
if cv_splitter.n_repeats < n_repeats:
# If current cv_splitter doesn't have enough n_repeats for all folds, then create a new one.
cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=n_repeats, groups=groups)
fold_fit_args_list, n_repeats_started, n_repeats_finished = self._generate_fold_configs(
X=X,
y=y,
cv_splitter=cv_splitter,
k_fold_start=k_fold_start,
k_fold_end=k_fold_end,
n_repeat_start=n_repeat_start,
n_repeat_end=n_repeats,
)
fold_fit_args_list = [dict(model_base=model_base, fold_ctx=fold_ctx, kwargs=kwargs) for fold_ctx in fold_fit_args_list]
logger.log(20, f'\tFitting {len(fold_fit_args_list)} child models '
f'({fold_fit_args_list[0]["fold_ctx"]["model_name_suffix"]} - {fold_fit_args_list[-1]["fold_ctx"]["model_name_suffix"]})')
oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y)
models = []
# noinspection PyCallingNonCallable
fold_fitting_strategy: AbstractFoldFittingStrategy = fold_fitting_strategy(
bagged_ensemble_model=self, X=X, y=y, X_pseudo=X_pseudo, y_pseudo=y_pseudo, sample_weight=sample_weight,
time_limit=time_limit, time_start=time_start, models=models,
oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats,
save_folds=save_folds)
for fold_fit_args in fold_fit_args_list:
fold_fitting_strategy.schedule_fold_model_fit(**fold_fit_args)
fold_fitting_strategy.after_all_folds_scheduled()
self.models += models
self._bagged_mode = True
if self._oof_pred_proba is None:
self._oof_pred_proba = oof_pred_proba
self._oof_pred_model_repeats = oof_pred_model_repeats
else:
self._oof_pred_proba += oof_pred_proba
self._oof_pred_model_repeats += oof_pred_model_repeats
self._cv_splitters += [cv_splitter for _ in range(n_repeats_started)]
self._k_per_n_repeat += [k_fold for _ in range(n_repeats_finished)]
self._n_repeats = n_repeats
if k_fold == k_fold_end:
self._k = None
self._k_fold_end = 0
self._n_repeats_finished = self._n_repeats
else:
self._k = k_fold
self._k_fold_end = k_fold_end
self._n_repeats_finished = self._n_repeats - 1
@staticmethod
def _generate_fold_configs(*,
X,
y,
cv_splitter,
k_fold_start,
k_fold_end,
n_repeat_start,
n_repeat_end) -> (list, int, int):
"""
Generates fold configs given a cv_splitter, k_fold start-end and n_repeat start-end.
Fold configs are used by inheritors of AbstractFoldFittingStrategy when fitting fold models.
Returns a list of fold configs, the number of started repeats, and the number of finished repeats.
"""
k_fold = cv_splitter.n_splits
kfolds = cv_splitter.split(X=X, y=y)
fold_start = n_repeat_start * k_fold + k_fold_start
fold_end = (n_repeat_end - 1) * k_fold + k_fold_end
folds_to_fit = fold_end - fold_start
fold_fit_args_list = []
n_repeats_started = 0
n_repeats_finished = 0
for repeat in range(n_repeat_start, n_repeat_end): # For each repeat
is_first_set = repeat == n_repeat_start
is_last_set = repeat == (n_repeat_end - 1)
if (not is_first_set) or (k_fold_start == 0):
n_repeats_started += 1
fold_in_set_start = k_fold_start if repeat == n_repeat_start else 0
fold_in_set_end = k_fold_end if is_last_set else k_fold
for fold_in_set in range(fold_in_set_start, fold_in_set_end): # For each fold
fold = fold_in_set + (repeat * k_fold)
fold_ctx = dict(
model_name_suffix=f'S{repeat + 1}F{fold_in_set + 1}', # S5F3 = 3rd fold of the 5th repeat set
fold=kfolds[fold],
is_last_fold=fold == (fold_end - 1),
folds_to_fit=folds_to_fit,
folds_finished=fold - fold_start,
folds_left=fold_end - fold,
)
fold_fit_args_list.append(fold_ctx)
if fold_in_set_end == k_fold:
n_repeats_finished += 1
assert len(fold_fit_args_list) == folds_to_fit, "fold_fit_args_list is not the expected length!"
return fold_fit_args_list, n_repeats_started, n_repeats_finished
# TODO: Augment to generate OOF after shuffling each column in X (Batching), this is the fastest way.
# TODO: Reduce logging clutter during OOF importance calculation (Currently logs separately for each child)
# Generates OOF predictions from pre-trained bagged models, assuming X and y are in the same row order as used in .fit(X, y)
def compute_feature_importance(self,
X,
y,
features=None,
silent=False,
time_limit=None,
is_oof=False,
**kwargs) -> pd.DataFrame:
if features is None:
# FIXME: use FULL features (children can have different features)
features = self.load_child(model=self.models[0]).features
if not is_oof:
return super().compute_feature_importance(X, y, features=features, time_limit=time_limit, silent=silent, **kwargs)
fi_fold_list = []
model_index = 0
num_children = len(self.models)
if time_limit is not None:
time_limit_per_child = time_limit / num_children
else:
time_limit_per_child = None
if not silent:
logging_message = f'Computing feature importance via permutation shuffling for {len(features)} features using out-of-fold (OOF) data aggregated across {num_children} child models...'
if time_limit is not None:
logging_message = f'{logging_message} Time limit: {time_limit}s...'
logger.log(20, logging_message)
time_start = time.time()
early_stop = False
children_completed = 0
log_final_suffix = ''
for n_repeat, k in enumerate(self._k_per_n_repeat):
if is_oof:
if self._child_oof or not self._bagged_mode:
raise AssertionError('Model trained with no validation data cannot get feature importances on training data, please specify new test data to compute feature importances (model=%s)' % self.name)
kfolds = self._cv_splitters[n_repeat].split(X=X, y=y)
cur_kfolds = kfolds[n_repeat * k:(n_repeat + 1) * k]
else:
cur_kfolds = [(None, list(range(len(X))))] * k
for i, fold in enumerate(cur_kfolds):
_, test_index = fold
model = self.load_child(self.models[model_index + i])
fi_fold = model.compute_feature_importance(X=X.iloc[test_index, :], y=y.iloc[test_index], features=features, time_limit=time_limit_per_child,
silent=silent, log_prefix='\t', importance_as_list=True, **kwargs)
fi_fold_list.append(fi_fold)
children_completed += 1
if time_limit is not None and children_completed != num_children:
time_now = time.time()
time_left = time_limit - (time_now - time_start)
time_child_average = (time_now - time_start) / children_completed
if time_left < (time_child_average * 1.1):
log_final_suffix = f' (Early stopping due to lack of time...)'
early_stop = True
break
if early_stop:
break
model_index += k
# TODO: DON'T THROW AWAY SAMPLES! USE LARGER N
fi_list_dict = dict()
for val in fi_fold_list:
val = val['importance'].to_dict() # TODO: Don't throw away stddev information of children
for key in val:
if key not in fi_list_dict:
fi_list_dict[key] = []
fi_list_dict[key] += val[key]
fi_df = _compute_fi_with_stddev(fi_list_dict)
if not silent:
logger.log(20, f'\t{round(time.time() - time_start, 2)}s\t= Actual runtime (Completed {children_completed} of {num_children} children){log_final_suffix}')
return fi_df
def get_features(self):
assert self.is_fit(), "The model must be fit before calling the get_features method."
return self.load_child(self.models[0]).get_features()
def load_child(self, model, verbose=False) -> AbstractModel:
if isinstance(model, str):
child_path = self.create_contexts(self.path + model + os.path.sep)
return self._child_type.load(path=child_path, verbose=verbose)
else:
return model
def save_child(self, model, verbose=False):
child = self.load_child(model)
child.set_contexts(self.path + child.name + os.path.sep)
child.save(verbose=verbose)
# TODO: Multiply epochs/n_iterations by some value (such as 1.1) to account for having more training data than bagged models
def convert_to_refit_full_template(self):
init_args = self.get_params()
init_args['hyperparameters']['save_bag_folds'] = True # refit full models must save folds
init_args['model_base'] = self.convert_to_refit_full_template_child()
init_args['name'] = init_args['name'] + REFIT_FULL_SUFFIX
model_full_template = self.__class__(**init_args)
return model_full_template
def convert_to_refit_full_template_child(self):
refit_params_trained = self._get_compressed_params_trained()
refit_params = copy.deepcopy(self._get_model_base().get_params())
refit_params['hyperparameters'].update(refit_params_trained)
refit_child_template = self._child_type(**refit_params)
return refit_child_template
def get_params(self):
init_args = dict(
model_base=self._get_model_base(),
random_state=self._random_state,
)
init_args.update(super().get_params())
init_args.pop('eval_metric')
init_args.pop('problem_type')
return init_args
def convert_to_template_child(self):
return self._get_model_base().convert_to_template()
def _get_compressed_params(self, model_params_list=None):
if model_params_list is None:
model_params_list = [
self.load_child(child).get_trained_params()
for child in self.models
]
model_params_compressed = dict()
for param in model_params_list[0].keys():
model_param_vals = [model_params[param] for model_params in model_params_list]
if all(isinstance(val, bool) for val in model_param_vals):
counter = Counter(model_param_vals)
compressed_val = counter.most_common(1)[0][0]
elif all(isinstance(val, int) for val in model_param_vals):
compressed_val = round(mean(model_param_vals))
elif all(isinstance(val, float) for val in model_param_vals):
compressed_val = mean(model_param_vals)
else:
try:
counter = Counter(model_param_vals)
compressed_val = counter.most_common(1)[0][0]
except TypeError:
compressed_val = model_param_vals[0]
model_params_compressed[param] = compressed_val
return model_params_compressed
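    # Illustrative sketch (assumed child parameters, not part of the original source):
    #   model_params_list = [{'max_depth': 6, 'use_bias': True},
    #                        {'max_depth': 8, 'use_bias': True},
    #                        {'max_depth': 7, 'use_bias': False}]
    # compresses to {'max_depth': 7, 'use_bias': True}: rounded mean for ints,
    # most common value for bools and other non-numeric params.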
def _get_compressed_params_trained(self):
model_params_list = [
self.load_child(child).params_trained
for child in self.models
]
return self._get_compressed_params(model_params_list=model_params_list)
def _get_model_base(self):
if self.model_base is None:
return self.load_model_base()
else:
return self.model_base
def _add_child_times_to_bag(self, model):
if self.fit_time is None:
self.fit_time = model.fit_time
else:
self.fit_time += model.fit_time
if self.predict_time is None:
self.predict_time = model.predict_time
else:
self.predict_time += model.predict_time
@classmethod
def load(cls, path: str, reset_paths=True, low_memory=True, load_oof=False, verbose=True):
model = super().load(path=path, reset_paths=reset_paths, verbose=verbose)
if not low_memory:
model.persist_child_models(reset_paths=reset_paths)
if load_oof:
model._load_oof()
return model
@classmethod
def load_oof(cls, path, verbose=True):
try:
oof = load_pkl.load(path=path + 'utils' + os.path.sep + cls._oof_filename, verbose=verbose)
oof_pred_proba = oof['_oof_pred_proba']
oof_pred_model_repeats = oof['_oof_pred_model_repeats']
except FileNotFoundError:
model = cls.load(path=path, reset_paths=True, verbose=verbose)
model._load_oof()
oof_pred_proba = model._oof_pred_proba
oof_pred_model_repeats = model._oof_pred_model_repeats
return cls._oof_pred_proba_func(oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats)
def _load_oof(self):
if self._oof_pred_proba is not None:
pass
else:
oof = load_pkl.load(path=self.path + 'utils' + os.path.sep + self._oof_filename)
self._oof_pred_proba = oof['_oof_pred_proba']
self._oof_pred_model_repeats = oof['_oof_pred_model_repeats']
def persist_child_models(self, reset_paths=True):
for i, model_name in enumerate(self.models):
if isinstance(model_name, str):
child_path = self.create_contexts(self.path + model_name + os.path.sep)
child_model = self._child_type.load(path=child_path, reset_paths=reset_paths, verbose=True)
self.models[i] = child_model
def load_model_base(self):
return load_pkl.load(path=self.path + 'utils' + os.path.sep + 'model_template.pkl')
def save_model_base(self, model_base):
save_pkl.save(path=self.path + 'utils' + os.path.sep + 'model_template.pkl', object=model_base)
def save(self, path=None, verbose=True, save_oof=True, save_children=False) -> str:
if path is None:
path = self.path
if save_children:
model_names = []
for child in self.models:
child = self.load_child(child)
child.set_contexts(path + child.name + os.path.sep)
child.save(verbose=False)
model_names.append(child.name)
self.models = model_names
if save_oof and self._oof_pred_proba is not None:
save_pkl.save(path=path + 'utils' + os.path.sep + self._oof_filename, object={
'_oof_pred_proba': self._oof_pred_proba,
'_oof_pred_model_repeats': self._oof_pred_model_repeats,
})
self._oof_pred_proba = None
self._oof_pred_model_repeats = None
return super().save(path=path, verbose=verbose)
# If `remove_fit_stack=True`, variables will be removed that are required to fit more folds and to fit new stacker models which use this model as a base model.
# This includes OOF variables.
def reduce_memory_size(self, remove_fit_stack=False, remove_fit=True, remove_info=False, requires_save=True, reduce_children=False, **kwargs):
super().reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)
if remove_fit_stack:
try:
os.remove(self.path + 'utils' + os.path.sep + self._oof_filename)
except FileNotFoundError:
pass
if requires_save:
self._oof_pred_proba = None
self._oof_pred_model_repeats = None
try:
os.remove(self.path + 'utils' + os.path.sep + 'model_template.pkl')
except FileNotFoundError:
pass
if requires_save:
self.model_base = None
try:
os.rmdir(self.path + 'utils')
except OSError:
pass
if reduce_children:
for model in self.models:
model = self.load_child(model)
model.reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)
if requires_save and self.low_memory:
self.save_child(model=model)
def _get_model_names(self):
model_names = []
for model in self.models:
if isinstance(model, str):
model_names.append(model)
else:
model_names.append(model.name)
return model_names
def get_info(self):
info = super().get_info()
children_info = self._get_child_info()
child_memory_sizes = [child['memory_size'] for child in children_info.values()]
sum_memory_size_child = sum(child_memory_sizes)
if child_memory_sizes:
max_memory_size_child = max(child_memory_sizes)
else:
max_memory_size_child = 0
if self.low_memory:
max_memory_size = info['memory_size'] + sum_memory_size_child
min_memory_size = info['memory_size'] + max_memory_size_child
else:
max_memory_size = info['memory_size']
min_memory_size = info['memory_size'] - sum_memory_size_child + max_memory_size_child
# Necessary if save_space is used as save_space deletes model_base.
if len(self.models) > 0:
child_model = self.load_child(self.models[0])
else:
child_model = self._get_model_base()
child_hyperparameters = child_model.params
child_ag_args_fit = child_model.params_aux
bagged_info = dict(
child_model_type=self._child_type.__name__,
num_child_models=len(self.models),
child_model_names=self._get_model_names(),
_n_repeats=self._n_repeats,
# _n_repeats_finished=self._n_repeats_finished, # commented out because these are too technical
# _k_fold_end=self._k_fold_end,
# _k=self._k,
_k_per_n_repeat=self._k_per_n_repeat,
_random_state=self._random_state,
low_memory=self.low_memory, # If True, then model will attempt to use at most min_memory_size memory by having at most one child in memory. If False, model will use max_memory_size memory.
bagged_mode=self._bagged_mode,
max_memory_size=max_memory_size, # Memory used when all children are loaded into memory at once.
min_memory_size=min_memory_size, # Memory used when only the largest child is loaded into memory.
child_hyperparameters=child_hyperparameters,
child_hyperparameters_fit=self._get_compressed_params_trained(),
child_ag_args_fit=child_ag_args_fit,
)
info['bagged_info'] = bagged_info
info['children_info'] = children_info
child_features_full = list(set().union(*[child['features'] for child in children_info.values()]))
info['features'] = child_features_full
info['num_features'] = len(child_features_full)
return info
def get_memory_size(self):
models = self.models
self.models = None
memory_size = super().get_memory_size()
self.models = models
return memory_size
def _get_child_info(self):
child_info_dict = dict()
for model in self.models:
if isinstance(model, str):
child_path = self.create_contexts(self.path + model + os.path.sep)
child_info_dict[model] = self._child_type.load_info(child_path)
else:
child_info_dict[model.name] = model.get_info()
return child_info_dict
def _construct_empty_oof(self, X, y):
if self.problem_type == MULTICLASS:
oof_pred_proba = np.zeros(shape=(len(X), len(y.unique())), dtype=np.float32)
elif self.problem_type == SOFTCLASS:
oof_pred_proba = np.zeros(shape=y.shape, dtype=np.float32)
elif self.problem_type == QUANTILE:
oof_pred_proba = np.zeros(shape=(len(X), len(self.quantile_levels)), dtype=np.float32)
else:
oof_pred_proba = np.zeros(shape=len(X), dtype=np.float32)
oof_pred_model_repeats = np.zeros(shape=len(X), dtype=np.uint8)
return oof_pred_proba, oof_pred_model_repeats
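    # Illustrative sketch (assumed problem setup, not part of the original source):
    # for a 3-class MULTICLASS task with len(X) == 100, this returns a (100, 3)
    # float32 zero matrix and a length-100 uint8 zero vector of per-row model counts.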
def _preprocess_fit_resources(self, silent=False, **kwargs):
"""Pass along to child models to avoid altering up-front"""
return kwargs
# TODO: Currently double disk usage, saving model in HPO and also saving model in bag
# FIXME: with use_bag_holdout=True, the fold-1 scores that are logged are of the inner validation score, not the holdout score.
# Fix this by passing X_val, y_val into this method
def _hyperparameter_tune(self, X, y, k_fold, scheduler_options, preprocess_kwargs=None, groups=None, **kwargs):
if len(self.models) != 0:
raise ValueError('self.models must be empty to call hyperparameter_tune, value: %s' % self.models)
kwargs['feature_metadata'] = self.feature_metadata
kwargs['num_classes'] = self.num_classes # TODO: maybe don't pass num_classes to children
self.model_base.set_contexts(self.path + 'hpo' + os.path.sep)
# TODO: Preprocess data here instead of repeatedly
if preprocess_kwargs is None:
preprocess_kwargs = dict()
use_child_oof = self.params.get('use_child_oof', False)
X = self.preprocess(X=X, preprocess=False, fit=True, **preprocess_kwargs)
if use_child_oof:
k_fold = 1
X_fold = X
y_fold = y
X_val_fold = None
y_val_fold = None
train_index = list(range(len(X)))
test_index = train_index
cv_splitter = None
else:
cv_splitter = self._get_cv_splitter(n_splits=k_fold, n_repeats=1, groups=groups)
if k_fold != cv_splitter.n_splits:
k_fold = cv_splitter.n_splits
kfolds = cv_splitter.split(X=X, y=y)
train_index, test_index = kfolds[0]
X_fold, X_val_fold = X.iloc[train_index, :], X.iloc[test_index, :]
y_fold, y_val_fold = y.iloc[train_index], y.iloc[test_index]
orig_time = scheduler_options[1]['time_out']
if orig_time:
scheduler_options[1]['time_out'] = orig_time * 0.8 # TODO: Scheduler doesn't early stop on final model, this is a safety net. Scheduler should be updated to early stop
hpo_models, hpo_model_performances, hpo_results = self.model_base.hyperparameter_tune(X=X_fold, y=y_fold, X_val=X_val_fold, y_val=y_val_fold, scheduler_options=scheduler_options, **kwargs)
scheduler_options[1]['time_out'] = orig_time
bags = {}
bags_performance = {}
for i, (model_name, model_path) in enumerate(hpo_models.items()):
child: AbstractModel = self._child_type.load(path=model_path)
# TODO: Create new Ensemble Here
bag = copy.deepcopy(self)
bag.rename(f"{bag.name}{os.path.sep}T{i}")
bag.set_contexts(self.path_root + bag.name + os.path.sep)
oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y)
if child._get_tags().get('valid_oof', False):
y_pred_proba = child.get_oof_pred_proba(X=X, y=y)
bag._n_repeats_finished = 1
bag._k_per_n_repeat = [1]
bag._bagged_mode = False
bag._child_oof = True # TODO: Consider a separate tag for refit_folds vs efficient OOF
else:
y_pred_proba = child.predict_proba(X_val_fold)
oof_pred_proba[test_index] += y_pred_proba
oof_pred_model_repeats[test_index] += 1
bag.model_base = None
child.rename('')
child.set_contexts(bag.path + child.name + os.path.sep)
bag.save_model_base(child.convert_to_template())
bag._k = k_fold
bag._k_fold_end = 1
bag._n_repeats = 1
bag._oof_pred_proba = oof_pred_proba
bag._oof_pred_model_repeats = oof_pred_model_repeats
child.rename('S1F1')
child.set_contexts(bag.path + child.name + os.path.sep)
if not self.params.get('save_bag_folds', True):
child.model = None
if bag.low_memory:
bag.save_child(child, verbose=False)
bag.models.append(child.name)
else:
bag.models.append(child)
bag.val_score = child.val_score
bag._add_child_times_to_bag(model=child)
if cv_splitter is not None:
bag._cv_splitters = [cv_splitter]
bag.save()
bags[bag.name] = bag.path
bags_performance[bag.name] = bag.val_score
# TODO: hpo_results likely not correct because no renames
return bags, bags_performance, hpo_results
def _more_tags(self):
return {'valid_oof': True}
|
py | 1a54bbc131e4b24632b0c0806326e53844174796 | """
Account (OOC) commands. These are stored on the Account object
and self.caller is thus always an Account, not an Object/Character.
These commands go in the AccountCmdset and are accessible also
when puppeting a Character (although with lower priority)
These commands use the account_caller property which tells the command
parent (MuxCommand, usually) to setup caller correctly. They use
self.account to make sure to always use the account object rather than
self.caller (which change depending on the level you are calling from)
The property self.character can be used to access the character when
these commands are triggered with a connected character (such as the
case of the `ooc` command), it is None if we are OOC.
Note that under MULTISESSION_MODE > 2, Account commands should use
self.msg() and similar methods to reroute returns to the correct
session. Otherwise all text will be returned to all connected sessions.
"""
from builtins import range
import time
from codecs import lookup as codecs_lookup
from django.conf import settings
from evennia.server.sessionhandler import SESSIONS
from evennia.utils import utils, create, logger, search
COMMAND_DEFAULT_CLASS = utils.class_from_module(settings.COMMAND_DEFAULT_CLASS)
_MAX_NR_CHARACTERS = settings.MAX_NR_CHARACTERS
_MULTISESSION_MODE = settings.MULTISESSION_MODE
# limit symbol import for API
__all__ = ("CmdOOCLook", "CmdIC", "CmdOOC", "CmdPassword", "CmdQuit",
"CmdCharCreate", "CmdOption", "CmdSessions", "CmdWho",
"CmdColorTest", "CmdQuell")
class MuxAccountLookCommand(COMMAND_DEFAULT_CLASS):
"""
Custom parent (only) parsing for OOC looking, sets a "playable"
property on the command based on the parsing.
"""
def parse(self):
"""Custom parsing"""
super().parse()
if _MULTISESSION_MODE < 2:
# only one character allowed - not used in this mode
self.playable = None
return
playable = self.account.db._playable_characters
if playable is not None:
# clean up list if character object was deleted in between
if None in playable:
playable = [character for character in playable if character]
self.account.db._playable_characters = playable
# store playable property
if self.args:
self.playable = dict((utils.to_str(char.key.lower()), char)
for char in playable).get(self.args.lower(), None)
else:
self.playable = playable
# Obs - these are all intended to be stored on the Account, and as such,
# use self.account instead of self.caller, just to be sure. Also self.msg()
# is used to make sure returns go to the right session
# note that this is inheriting from MuxAccountLookCommand,
# and has the .playable property.
class CmdOOCLook(MuxAccountLookCommand):
"""
look while out-of-character
Usage:
look
Look in the ooc state.
"""
# This is an OOC version of the look command. Since a
# Account doesn't have an in-game existence, there is no
# concept of location or "self". If we are controlling
# a character, pass control over to normal look.
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
help_category = "General"
# this is used by the parent
account_caller = True
def func(self):
"""implement the ooc look command"""
if _MULTISESSION_MODE < 2:
# only one character allowed
self.msg("You are out-of-character (OOC).\nUse |wic|n to get back into the game.")
return
# call on-account look helper method
self.msg(self.account.at_look(target=self.playable, session=self.session))
class CmdCharCreate(COMMAND_DEFAULT_CLASS):
"""
create a new character
Usage:
charcreate <charname> [= desc]
Create a new character, optionally giving it a description. You
may use upper-case letters in the name - you will nevertheless
always be able to access your character using lower-case letters
if you want.
"""
key = "charcreate"
locks = "cmd:pperm(Player)"
help_category = "General"
# this is used by the parent
account_caller = True
def func(self):
"""create the new character"""
account = self.account
if not self.args:
self.msg("Usage: charcreate <charname> [= description]")
return
key = self.lhs
desc = self.rhs
charmax = _MAX_NR_CHARACTERS
if not account.is_superuser and \
(account.db._playable_characters and
len(account.db._playable_characters) >= charmax):
self.msg("You may only create a maximum of %i characters." % charmax)
return
from evennia.objects.models import ObjectDB
typeclass = settings.BASE_CHARACTER_TYPECLASS
if ObjectDB.objects.filter(db_typeclass_path=typeclass, db_key__iexact=key):
# check if this Character already exists. Note that we are only
# searching the base character typeclass here, not any child
# classes.
self.msg("|rA character named '|w%s|r' already exists.|n" % key)
return
# create the character
start_location = ObjectDB.objects.get_id(settings.START_LOCATION)
default_home = ObjectDB.objects.get_id(settings.DEFAULT_HOME)
permissions = settings.PERMISSION_ACCOUNT_DEFAULT
new_character = create.create_object(typeclass, key=key,
location=start_location,
home=default_home,
permissions=permissions)
# only allow creator (and developers) to puppet this char
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Developer) or pperm(Developer);delete:id(%i) or perm(Admin)" %
(new_character.id, account.id, account.id))
account.db._playable_characters.append(new_character)
if desc:
new_character.db.desc = desc
elif not new_character.db.desc:
new_character.db.desc = "This is a character."
self.msg("Created new character %s. Use |wic %s|n to enter the game as this character."
% (new_character.key, new_character.key))
logger.log_sec('Character Created: %s (Caller: %s, IP: %s).' % (new_character, account, self.session.address))
class CmdCharDelete(COMMAND_DEFAULT_CLASS):
"""
delete a character - this cannot be undone!
Usage:
chardelete <charname>
Permanently deletes one of your characters.
"""
key = "chardelete"
locks = "cmd:pperm(Player)"
help_category = "General"
def func(self):
"""delete the character"""
account = self.account
if not self.args:
self.msg("Usage: chardelete <charactername>")
return
# use the playable_characters list to search
match = [char for char in utils.make_iter(account.db._playable_characters)
if char.key.lower() == self.args.lower()]
if not match:
self.msg("You have no such character to delete.")
return
elif len(match) > 1:
self.msg("Aborting - there are two characters with the same name. Ask an admin to delete the right one.")
return
else: # one match
from evennia.utils.evmenu import get_input
def _callback(caller, callback_prompt, result):
if result.lower() == "yes":
# only take action
delobj = caller.ndb._char_to_delete
key = delobj.key
caller.db._playable_characters = [pc for pc in caller.db._playable_characters if pc != delobj]
delobj.delete()
self.msg("Character '%s' was permanently deleted." % key)
logger.log_sec('Character Deleted: %s (Caller: %s, IP: %s).' % (key, account, self.session.address))
else:
self.msg("Deletion was aborted.")
del caller.ndb._char_to_delete
match = match[0]
account.ndb._char_to_delete = match
# Return if caller has no permission to delete this
if not match.access(account, 'delete'):
self.msg("You do not have permission to delete this character.")
return
prompt = "|rThis will permanently destroy '%s'. This cannot be undone.|n Continue yes/[no]?"
get_input(account, prompt % match.key, _callback)
class CmdIC(COMMAND_DEFAULT_CLASS):
"""
control an object you have permission to puppet
Usage:
ic <character>
Go in-character (IC) as a given Character.
This will attempt to "become" a different object assuming you have
    the right to do so. Note that it's the ACCOUNT that puppets
characters/objects and which needs to have the correct permission!
You cannot become an object that is already controlled by another
account. In principle <character> can be any in-game object as long
as you the account have access right to puppet it.
"""
key = "ic"
# lock must be all() for different puppeted objects to access it.
locks = "cmd:all()"
aliases = "puppet"
help_category = "General"
# this is used by the parent
account_caller = True
def func(self):
"""
Main puppet method
"""
account = self.account
session = self.session
new_character = None
if not self.args:
new_character = account.db._last_puppet
if not new_character:
self.msg("Usage: ic <character>")
return
if not new_character:
# search for a matching character
new_character = [char for char in search.object_search(self.args) if char.access(account, "puppet")]
if not new_character:
self.msg("That is not a valid character choice.")
return
if len(new_character) > 1:
self.msg("Multiple targets with the same name:\n %s"
% ", ".join("%s(#%s)" % (obj.key, obj.id) for obj in new_character))
return
else:
new_character = new_character[0]
try:
account.puppet_object(session, new_character)
account.db._last_puppet = new_character
logger.log_sec('Puppet Success: (Caller: %s, Target: %s, IP: %s).' % (account, new_character, self.session.address))
except RuntimeError as exc:
self.msg("|rYou cannot become |C%s|n: %s" % (new_character.name, exc))
logger.log_sec('Puppet Failed: %s (Caller: %s, Target: %s, IP: %s).' % (exc, account, new_character, self.session.address))
# note that this is inheriting from MuxAccountLookCommand,
# and as such has the .playable property.
class CmdOOC(MuxAccountLookCommand):
"""
stop puppeting and go ooc
Usage:
ooc
Go out-of-character (OOC).
    This will leave your current character and put you in an incorporeal OOC state.
"""
key = "ooc"
locks = "cmd:pperm(Player)"
aliases = "unpuppet"
help_category = "General"
# this is used by the parent
account_caller = True
def func(self):
"""Implement function"""
account = self.account
session = self.session
old_char = account.get_puppet(session)
if not old_char:
string = "You are already OOC."
self.msg(string)
return
account.db._last_puppet = old_char
# disconnect
try:
account.unpuppet_object(session)
self.msg("\n|GYou go OOC.|n\n")
if _MULTISESSION_MODE < 2:
# only one character allowed
self.msg("You are out-of-character (OOC).\nUse |wic|n to get back into the game.")
return
self.msg(account.at_look(target=self.playable, session=session))
except RuntimeError as exc:
self.msg("|rCould not unpuppet from |c%s|n: %s" % (old_char, exc))
class CmdSessions(COMMAND_DEFAULT_CLASS):
"""
check your connected session(s)
Usage:
sessions
Lists the sessions currently connected to your account.
"""
key = "sessions"
locks = "cmd:all()"
help_category = "General"
# this is used by the parent
account_caller = True
def func(self):
"""Implement function"""
account = self.account
sessions = account.sessions.all()
table = self.styled_table("|wsessid",
"|wprotocol",
"|whost",
"|wpuppet/character",
"|wlocation")
for sess in sorted(sessions, key=lambda x: x.sessid):
char = account.get_puppet(sess)
table.add_row(str(sess.sessid), str(sess.protocol_key),
isinstance(sess.address, tuple) and sess.address[0] or sess.address,
char and str(char) or "None",
char and str(char.location) or "N/A")
self.msg("|wYour current session(s):|n\n%s" % table)
class CmdWho(COMMAND_DEFAULT_CLASS):
"""
list who is currently online
Usage:
who
doing
    Shows who is currently online. The 'doing' alias limits the info shown,
    even for users with all permissions.
"""
key = "who"
aliases = "doing"
locks = "cmd:all()"
# this is used by the parent
account_caller = True
def func(self):
"""
Get all connected accounts by polling session.
"""
account = self.account
session_list = SESSIONS.get_sessions()
session_list = sorted(session_list, key=lambda o: o.account.key)
if self.cmdstring == "doing":
show_session_data = False
else:
show_session_data = account.check_permstring("Developer") or account.check_permstring("Admins")
naccounts = SESSIONS.account_count()
if show_session_data:
# privileged info
table = self.styled_table("|wAccount Name",
"|wOn for",
"|wIdle",
"|wPuppeting",
"|wRoom",
"|wCmds",
"|wProtocol",
"|wHost")
for session in session_list:
if not session.logged_in:
continue
delta_cmd = time.time() - session.cmd_last_visible
delta_conn = time.time() - session.conn_time
account = session.get_account()
puppet = session.get_puppet()
location = puppet.location.key if puppet and puppet.location else "None"
table.add_row(utils.crop(account.get_display_name(account), width=25),
utils.time_format(delta_conn, 0),
utils.time_format(delta_cmd, 1),
utils.crop(puppet.get_display_name(account) if puppet else "None", width=25),
utils.crop(location, width=25),
session.cmd_total,
session.protocol_key,
isinstance(session.address, tuple) and session.address[0] or session.address)
else:
# unprivileged
table = self.styled_table("|wAccount name", "|wOn for", "|wIdle")
for session in session_list:
if not session.logged_in:
continue
delta_cmd = time.time() - session.cmd_last_visible
delta_conn = time.time() - session.conn_time
account = session.get_account()
table.add_row(utils.crop(account.get_display_name(account), width=25),
utils.time_format(delta_conn, 0),
utils.time_format(delta_cmd, 1))
is_one = naccounts == 1
self.msg("|wAccounts:|n\n%s\n%s unique account%s logged in."
% (table, "One" if is_one else naccounts, "" if is_one else "s"))
class CmdOption(COMMAND_DEFAULT_CLASS):
"""
Set an account option
Usage:
option[/save] [name = value]
Switches:
save - Save the current option settings for future logins.
clear - Clear the saved options.
This command allows for viewing and setting client interface
    settings. Note that saved options may not be usable if you later
    connect with a client with different capabilities.
"""
key = "option"
aliases = "options"
switch_options = ("save", "clear")
locks = "cmd:all()"
# this is used by the parent
account_caller = True
def func(self):
"""
Implements the command
"""
if self.session is None:
return
flags = self.session.protocol_flags
# Display current options
if not self.args:
# list the option settings
if "save" in self.switches:
# save all options
self.caller.db._saved_protocol_flags = flags
self.msg("|gSaved all options. Use option/clear to remove.|n")
if "clear" in self.switches:
# clear all saves
self.caller.db._saved_protocol_flags = {}
self.msg("|gCleared all saved options.")
options = dict(flags) # make a copy of the flag dict
saved_options = dict(self.caller.attributes.get("_saved_protocol_flags", default={}))
if "SCREENWIDTH" in options:
if len(options["SCREENWIDTH"]) == 1:
options["SCREENWIDTH"] = options["SCREENWIDTH"][0]
else:
options["SCREENWIDTH"] = " \n".join("%s : %s" % (screenid, size)
for screenid, size in options["SCREENWIDTH"].items())
if "SCREENHEIGHT" in options:
if len(options["SCREENHEIGHT"]) == 1:
options["SCREENHEIGHT"] = options["SCREENHEIGHT"][0]
else:
options["SCREENHEIGHT"] = " \n".join("%s : %s" % (screenid, size)
for screenid, size in options["SCREENHEIGHT"].items())
options.pop("TTYPE", None)
header = ("Name", "Value", "Saved") if saved_options else ("Name", "Value")
table = self.styled_table(*header)
for key in sorted(options):
row = [key, options[key]]
if saved_options:
saved = " |YYes|n" if key in saved_options else ""
changed = "|y*|n" if key in saved_options and flags[key] != saved_options[key] else ""
row.append("%s%s" % (saved, changed))
table.add_row(*row)
self.msg("|wClient settings (%s):|n\n%s|n" % (self.session.protocol_key, table))
return
if not self.rhs:
self.msg("Usage: option [name = [value]]")
return
# Try to assign new values
def validate_encoding(new_encoding):
# helper: change encoding
try:
codecs_lookup(new_encoding)
except LookupError:
raise RuntimeError("The encoding '|w%s|n' is invalid. " % new_encoding)
            return new_encoding
def validate_size(new_size):
return {0: int(new_size)}
def validate_bool(new_bool):
return True if new_bool.lower() in ("true", "on", "1") else False
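        # Illustrative sketch (assumed inputs, not part of the original source):
        #   validate_bool('On') -> True        validate_bool('0') -> False
        #   validate_size('80') -> {0: 80}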
def update(new_name, new_val, validator):
# helper: update property and report errors
try:
old_val = flags.get(new_name, False)
new_val = validator(new_val)
if old_val == new_val:
self.msg("Option |w%s|n was kept as '|w%s|n'." % (new_name, old_val))
else:
flags[new_name] = new_val
self.msg("Option |w%s|n was changed from '|w%s|n' to '|w%s|n'." % (new_name, old_val, new_val))
return {new_name: new_val}
except Exception as err:
self.msg("|rCould not set option |w%s|r:|n %s" % (new_name, err))
return False
validators = {"ANSI": validate_bool,
"CLIENTNAME": utils.to_str,
"ENCODING": validate_encoding,
"MCCP": validate_bool,
"NOGOAHEAD": validate_bool,
"MXP": validate_bool,
"NOCOLOR": validate_bool,
"NOPKEEPALIVE": validate_bool,
"OOB": validate_bool,
"RAW": validate_bool,
"SCREENHEIGHT": validate_size,
"SCREENWIDTH": validate_size,
"SCREENREADER": validate_bool,
"TERM": utils.to_str,
"UTF-8": validate_bool,
"XTERM256": validate_bool,
"INPUTDEBUG": validate_bool,
"FORCEDENDLINE": validate_bool}
name = self.lhs.upper()
val = self.rhs.strip()
optiondict = False
if val and name in validators:
optiondict = update(name, val, validators[name])
else:
self.msg("|rNo option named '|w%s|r'." % name)
if optiondict:
# a valid setting
if "save" in self.switches:
# save this option only
saved_options = self.account.attributes.get("_saved_protocol_flags", default={})
saved_options.update(optiondict)
self.account.attributes.add("_saved_protocol_flags", saved_options)
for key in optiondict:
self.msg("|gSaved option %s.|n" % key)
if "clear" in self.switches:
# clear this save
for key in optiondict:
self.account.attributes.get("_saved_protocol_flags", {}).pop(key, None)
self.msg("|gCleared saved %s." % key)
self.session.update_flags(**optiondict)
class CmdPassword(COMMAND_DEFAULT_CLASS):
"""
change your password
Usage:
password <old password> = <new password>
Changes your password. Make sure to pick a safe one.
"""
key = "password"
locks = "cmd:pperm(Player)"
# this is used by the parent
account_caller = True
def func(self):
"""hook function."""
account = self.account
if not self.rhs:
self.msg("Usage: password <oldpass> = <newpass>")
return
oldpass = self.lhslist[0] # Both of these are
newpass = self.rhslist[0] # already stripped by parse()
# Validate password
validated, error = account.validate_password(newpass)
if not account.check_password(oldpass):
self.msg("The specified old password isn't correct.")
elif not validated:
            errors = list(error.messages)
string = "\n".join(errors)
self.msg(string)
else:
account.set_password(newpass)
account.save()
self.msg("Password changed.")
logger.log_sec('Password Changed: %s (Caller: %s, IP: %s).' % (account, account, self.session.address))
class CmdQuit(COMMAND_DEFAULT_CLASS):
"""
quit the game
Usage:
quit
Switch:
all - disconnect all connected sessions
Gracefully disconnect your current session from the
game. Use the /all switch to disconnect from all sessions.
"""
key = "quit"
switch_options = ("all",)
locks = "cmd:all()"
# this is used by the parent
account_caller = True
def func(self):
"""hook function"""
account = self.account
if 'all' in self.switches:
account.msg("|RQuitting|n all sessions. Hope to see you soon again.", session=self.session)
reason = "quit/all"
for session in account.sessions.all():
account.disconnect_session_from_account(session, reason)
else:
nsess = len(account.sessions.all())
reason = "quit"
if nsess == 2:
account.msg("|RQuitting|n. One session is still connected.", session=self.session)
elif nsess > 2:
account.msg("|RQuitting|n. %i sessions are still connected." % (nsess - 1), session=self.session)
else:
# we are quitting the last available session
account.msg("|RQuitting|n. Hope to see you again, soon.", session=self.session)
account.disconnect_session_from_account(self.session, reason)
class CmdColorTest(COMMAND_DEFAULT_CLASS):
"""
    testing which colors your client supports
Usage:
color ansi||xterm256
Prints a color map along with in-mud color codes to use to produce
them. It also tests what is supported in your client. Choices are
16-color ansi (supported in most muds) or the 256-color xterm256
    standard. No checking is done to determine whether your client actually
    supports color - if it does not, you will see rubbish appear.
"""
key = "color"
locks = "cmd:all()"
help_category = "General"
# this is used by the parent
account_caller = True
# the slices of the ANSI_PARSER lists to use for retrieving the
# relevant color tags to display. Replace if using another schema.
# This command can only show one set of markup.
slice_bright_fg = slice(7, 15) # from ANSI_PARSER.ansi_map
slice_dark_fg = slice(15, 23) # from ANSI_PARSER.ansi_map
slice_dark_bg = slice(-8, None) # from ANSI_PARSER.ansi_map
slice_bright_bg = slice(None, None) # from ANSI_PARSER.ansi_xterm256_bright_bg_map
def table_format(self, table):
"""
Helper method to format the ansi/xterm256 tables.
        Takes a table of columns [[val,val,...],[val,val,...],...] and
        returns rows of left-justified strings, each padded to its column's
        maximum width plus one extra space.
"""
if not table:
return [[]]
extra_space = 1
max_widths = [max([len(str(val)) for val in col]) for col in table]
ftable = []
for irow in range(len(table[0])):
ftable.append([str(col[irow]).ljust(max_widths[icol]) + " " *
extra_space for icol, col in enumerate(table)])
return ftable
def func(self):
"""Show color tables"""
if self.args.startswith("a"):
# show ansi 16-color table
from evennia.utils import ansi
ap = ansi.ANSI_PARSER
# ansi colors
# show all ansi color-related codes
bright_fg = ["%s%s|n" % (code, code.replace("|", "||"))
for code, _ in ap.ansi_map[self.slice_bright_fg]]
dark_fg = ["%s%s|n" % (code, code.replace("|", "||"))
for code, _ in ap.ansi_map[self.slice_dark_fg]]
dark_bg = ["%s%s|n" % (code.replace("\\", ""), code.replace("|", "||").replace("\\", ""))
for code, _ in ap.ansi_map[self.slice_dark_bg]]
bright_bg = ["%s%s|n" % (code.replace("\\", ""), code.replace("|", "||").replace("\\", ""))
for code, _ in ap.ansi_xterm256_bright_bg_map[self.slice_bright_bg]]
dark_fg.extend(["" for _ in range(len(bright_fg) - len(dark_fg))])
table = utils.format_table([bright_fg, dark_fg, bright_bg, dark_bg])
string = "ANSI colors:"
for row in table:
string += "\n " + " ".join(row)
self.msg(string)
self.msg("||X : black. ||/ : return, ||- : tab, ||_ : space, ||* : invert, ||u : underline\n"
"To combine background and foreground, add background marker last, e.g. ||r||[B.\n"
"Note: bright backgrounds like ||[r requires your client handling Xterm256 colors.")
elif self.args.startswith("x"):
# show xterm256 table
table = [[], [], [], [], [], [], [], [], [], [], [], []]
for ir in range(6):
for ig in range(6):
for ib in range(6):
# foreground table
table[ir].append("|%i%i%i%s|n" % (ir, ig, ib, "||%i%i%i" % (ir, ig, ib)))
# background table
table[6 + ir].append("|%i%i%i|[%i%i%i%s|n"
% (5 - ir, 5 - ig, 5 - ib, ir, ig, ib, "||[%i%i%i" % (ir, ig, ib)))
table = self.table_format(table)
string = "Xterm256 colors (if not all hues show, your client might not report that it can handle xterm256):"
string += "\n" + "\n".join("".join(row) for row in table)
table = [[], [], [], [], [], [], [], [], [], [], [], []]
for ibatch in range(4):
for igray in range(6):
letter = chr(97 + (ibatch * 6 + igray))
inverse = chr(122 - (ibatch * 6 + igray))
table[0 + igray].append("|=%s%s |n" % (letter, "||=%s" % letter))
table[6 + igray].append("|=%s|[=%s%s |n" % (inverse, letter, "||[=%s" % letter))
for igray in range(6):
# the last row (y, z) has empty columns
if igray < 2:
letter = chr(121 + igray)
inverse = chr(98 - igray)
fg = "|=%s%s |n" % (letter, "||=%s" % letter)
bg = "|=%s|[=%s%s |n" % (inverse, letter, "||[=%s" % letter)
else:
fg, bg = " ", " "
table[0 + igray].append(fg)
table[6 + igray].append(bg)
table = self.table_format(table)
string += "\n" + "\n".join("".join(row) for row in table)
self.msg(string)
else:
# malformed input
self.msg("Usage: color ansi||xterm256")
class CmdQuell(COMMAND_DEFAULT_CLASS):
"""
use character's permissions instead of account's
Usage:
quell
unquell
Normally the permission level of the Account is used when puppeting a
Character/Object to determine access. This command will switch the lock
system to make use of the puppeted Object's permissions instead. This is
useful mainly for testing.
    Hierarchical permission quelling only works downwards; an Account cannot
    use a higher-permission Character to escalate its permission level.
Use the unquell command to revert back to normal operation.
"""
key = "quell"
aliases = ["unquell"]
locks = "cmd:pperm(Player)"
help_category = "General"
# this is used by the parent
account_caller = True
def _recache_locks(self, account):
"""Helper method to reset the lockhandler on an already puppeted object"""
if self.session:
char = self.session.puppet
if char:
# we are already puppeting an object. We need to reset
# the lock caches (otherwise the superuser status change
# won't be visible until repuppet)
char.locks.reset()
account.locks.reset()
def func(self):
"""Perform the command"""
account = self.account
permstr = account.is_superuser and " (superuser)" or "(%s)" % (", ".join(account.permissions.all()))
        if self.cmdstring == 'unquell':
if not account.attributes.get('_quell'):
self.msg("Already using normal Account permissions %s." % permstr)
else:
account.attributes.remove('_quell')
self.msg("Account permissions %s restored." % permstr)
else:
if account.attributes.get('_quell'):
self.msg("Already quelling Account %s permissions." % permstr)
return
account.attributes.add('_quell', True)
puppet = self.session.puppet
if puppet:
cpermstr = "(%s)" % ", ".join(puppet.permissions.all())
cpermstr = "Quelling to current puppet's permissions %s." % cpermstr
cpermstr += "\n(Note: If this is higher than Account permissions %s," \
" the lowest of the two will be used.)" % permstr
cpermstr += "\nUse unquell to return to normal permission usage."
self.msg(cpermstr)
else:
self.msg("Quelling Account permissions%s. Use unquell to get them back." % permstr)
self._recache_locks(account)
class CmdStyle(COMMAND_DEFAULT_CLASS):
"""
In-game style options
Usage:
style
style <option> = <value>
Configure stylings for in-game display elements like table borders, help
    entries etc. Use without arguments to see all available options.
"""
key = "style"
switch_options = ['clear']
def func(self):
if not self.args:
self.list_styles()
return
self.set()
def list_styles(self):
table = self.styled_table('Option', 'Description', 'Type', 'Value', width=78)
for op_key in self.account.options.options_dict.keys():
op_found = self.account.options.get(op_key, return_obj=True)
table.add_row(op_key, op_found.description,
op_found.__class__.__name__, op_found.display())
self.msg(str(table))
def set(self):
try:
result = self.account.options.set(self.lhs, self.rhs)
except ValueError as e:
self.msg(str(e))
return
self.msg('Style %s set to %s' % (self.lhs, result))
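# ---------------------------------------------------------------------------
# Note (illustrative, not part of this module): commands defined above are
# normally exposed by adding them to an account-level command set in a game
# dir. A hypothetical sketch, assuming the standard Evennia cmdset hooks:
#
#     from evennia import default_cmds
#
#     class MyAccountCmdSet(default_cmds.AccountCmdSet):
#         def at_cmdset_creation(self):
#             super(MyAccountCmdSet, self).at_cmdset_creation()
#             self.add(CmdStyle())
# ---------------------------------------------------------------------------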
|
py | 1a54bca3f906f2a2547cb23106d3d842753567d9 | """ Task object to generate / manage assessors and cluster."""
from datetime import date
import errno
import logging
import os
import shutil
import time
from . import cluster
from .cluster import PBS
from .errors import (NeedInputsException, NoDataException,
ClusterLaunchException)
from .dax_settings import DAX_Settings, DEFAULT_DATATYPE, DEFAULT_FS_DATATYPE
from . import assessor_utils
__copyright__ = 'Copyright 2013 Vanderbilt University. All Rights Reserved'
__all__ = ['Task', 'ClusterTask', 'XnatTask']
DAX_SETTINGS = DAX_Settings()
# Logger to print logs
LOGGER = logging.getLogger('dax')
# Job Statuses
# assessor that doesn't have data to run (for session assessor)
# E.G: dtiqa multi but no dti present.
NO_DATA = 'NO_DATA'
# assessor that is ready to be launch on the cluster.
# All the input data for the process to run are there.
NEED_TO_RUN = 'NEED_TO_RUN'
# assessor where input data are missing from a scan,
# multiple scans or other assessor.
NEED_INPUTS = 'NEED_INPUTS'
# the job has been submitted on the cluster and is running right now.
JOB_RUNNING = 'JOB_RUNNING'
# the job failed on the cluster.
JOB_FAILED = 'JOB_FAILED'
# Job done, waiting for the Spider to upload the results
READY_TO_UPLOAD = 'READY_TO_UPLOAD'
# in the process of uploading the resources on XNAT.
UPLOADING = 'UPLOADING'
# the assessors contains all the files. The upload and the job are done.
COMPLETE = 'COMPLETE'
# the job finished and upload is complete
READY_TO_COMPLETE = 'READY_TO_COMPLETE'
DOES_NOT_EXIST = 'DOES_NOT_EXIST'
OPEN_STATUS_LIST = [NEED_TO_RUN, UPLOADING, JOB_RUNNING, READY_TO_COMPLETE,
JOB_FAILED]
JOB_BUILT = 'JOB_BUILT'
# QC Statuses
# job is still running, not ready for QA yet
JOB_PENDING = 'Job Pending'
# job ready to be QA
NEEDS_QA = 'Needs QA'
# QC status set by the Image Analyst after looking at the results.
GOOD = 'Good'
# QC status set by the Image Analyst after looking at the results.
PASSED_QA = 'Passed'
# QC status set by the Image Analyst after looking at the results.
FAILED = 'Failed'
# QC status set by the Image Analyst after looking at the results.
BAD = 'Bad'
# QC status set by the Image Analyst after looking at the results.
POOR = 'Poor'
# will cause spider to delete results and rerun the processing
RERUN = 'Rerun'
# will cause spider to zip the current results and put in OLD,
# and then rerun the processing
REPROC = 'Reproc'
# Do not run this assessor anymore
DONOTRUN = 'Do Not Run'
FAILED_NEEDS_REPROC = 'Failed-needs reprocessing' # FS
PASSED_EDITED_QA = 'Passed with edits' # FS
OPEN_QA_LIST = [RERUN, REPROC]
BAD_QA_STATUS = [FAILED, BAD, POOR, DONOTRUN]
# Other Constants
DEFAULT_EMAIL_OPTS = DAX_SETTINGS.get_email_opts()
JOB_EXTENSION_FILE = DAX_SETTINGS.get_job_extension_file()
READY_TO_UPLOAD_FLAG_FILENAME = 'READY_TO_UPLOAD.txt'
OLD_RESOURCE = 'OLD'
EDITS_RESOURCE = 'EDITS'
REPROC_RES_SKIP_LIST = [OLD_RESOURCE, EDITS_RESOURCE]
INPUTS_DIRNAME = 'INPUTS'
BATCH_DIRNAME = 'BATCH'
OUTLOG_DIRNAME = 'OUTLOG'
PBS_DIRNAME = 'PBS'
# Status and QC status supported by DAX
SUPPORTED_STATUS = [NO_DATA, NEED_TO_RUN, NEED_INPUTS, JOB_RUNNING, JOB_FAILED,
READY_TO_UPLOAD, UPLOADING, READY_TO_COMPLETE, COMPLETE,
JOB_BUILT]
SUPPORTED_QC_STATUS = [JOB_PENDING, NEEDS_QA, GOOD, PASSED_QA, FAILED, BAD,
POOR, RERUN, REPROC, DONOTRUN, FAILED_NEEDS_REPROC,
PASSED_EDITED_QA]
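# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dax API): the constants above define
# the full vocabulary of job and QC statuses. A hypothetical helper like the
# one below could be used to sanity-check values read from disk or XNAT
# before acting on them.
# ---------------------------------------------------------------------------
def _example_is_known_status(procstatus, qcstatus):
    """Return True if both statuses are known to dax (sketch only)."""
    return procstatus in SUPPORTED_STATUS and qcstatus in SUPPORTED_QC_STATUS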
def mkdirp(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def create_flag(flag_path):
open(flag_path, 'w').close()
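# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dax API): mkdirp() and create_flag()
# are typically combined to drop an empty marker file into an assessor's
# upload folder, for example the READY_TO_UPLOAD.txt flag that
# ready_flag_exists() below looks for. Arguments here are placeholders.
# ---------------------------------------------------------------------------
def _example_mark_ready_to_upload(upload_dir, assessor_label):
    """Create <upload_dir>/<assessor_label>/READY_TO_UPLOAD.txt (sketch)."""
    assr_dir = os.path.join(upload_dir, assessor_label)
    mkdirp(assr_dir)
    create_flag(os.path.join(assr_dir, READY_TO_UPLOAD_FLAG_FILENAME))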
class Task(object):
""" Class Task to generate/manage the assessor with the cluster """
def __init__(self, processor, assessor, upload_dir):
"""
Init of class Task
:param processor: processor used
:param assessor: pyxnat assessor object
:param upload_dir: upload directory to copy data after job finished.
:return: None
"""
self.processor = processor
self.assessor = assessor
self.upload_dir = upload_dir
self.atype = processor.xsitype.lower()
# Cache for convenience
self.assessor_label = assessor_utils.full_label_from_assessor(assessor)
def get_processor_name(self):
"""
Get the name of the Processor for the Task.
:return: String of the Processor name.
"""
return self.processor.name
def get_processor_version(self):
"""
Get the version of the Processor.
:return: String of the Processor version.
"""
return self.processor.version
def is_open(self):
"""
Check to see if a task is still in "Open" status as defined in
OPEN_STATUS_LIST.
:return: True if the Task is open. False if it is not open
"""
astatus = self.get_status()
return astatus in OPEN_STATUS_LIST
def get_job_usage(self):
"""
Get the amount of memory used, the amount of walltime used, the jobid
of the process, the node the process ran on, and when it started
from the scheduler.
:return: List of strings. Memory used, walltime used, jobid, node used,
and start date
"""
atype = self.atype
mgets = self.assessor.attrs.mget([
'%s/memused' % atype,
'%s/walltimeused' % atype,
'%s/jobid' % atype,
'%s/jobnode' % atype,
'%s/jobstartdate' % atype
])
return [mgets[0].strip(),
mgets[1].strip(),
mgets[2].strip(),
mgets[3].strip(),
mgets[4].strip()]
def check_job_usage(self):
"""
The task has now finished, get the amount of memory used, the amount of
walltime used, the jobid of the process, the node the process ran on,
and when it started from the scheduler. Set these values on XNAT
:return: None
"""
[memused, walltime, jobid, jobnode, jobstrdate] = self.get_job_usage()
if walltime:
if memused and jobnode:
LOGGER.debug('memused and walltime already set, skipping')
else:
if memused == '':
self.set_memused('NotFound')
if jobnode == '':
self.set_jobnode('NotFound')
return
# We can't get info from cluster if job too old
if not cluster.is_traceable_date(jobstrdate):
self.set_walltime('NotFound')
self.set_memused('NotFound')
self.set_jobnode('NotFound')
return
# Get usage with tracejob
jobinfo = cluster.tracejob_info(jobid, jobstrdate)
if jobinfo['mem_used'].strip():
self.set_memused(jobinfo['mem_used'])
else:
self.set_memused('NotFound')
if jobinfo['walltime_used'].strip():
self.set_walltime(jobinfo['walltime_used'])
else:
self.set_walltime('NotFound')
if jobinfo['jobnode'].strip():
self.set_jobnode(jobinfo['jobnode'])
else:
self.set_jobnode('NotFound')
def get_memused(self):
"""
Get the amount of memory used for a process
:return: String of how much memory was used
"""
memused = self.assessor.attrs.get('%s/memused' % self.atype)
return memused.strip()
def set_memused(self, memused):
"""
Set the amount of memory used for a process
:param memused: String denoting the amount of memory used
:return: None
"""
self.assessor.attrs.set('%s/memused' % self.atype, memused)
def get_walltime(self):
"""
Get the amount of walltime used for a process
:return: String of how much walltime was used for a process
"""
walltime = self.assessor.attrs.get('%s/walltimeused' % self.atype)
return walltime.strip()
def set_walltime(self, walltime):
"""
Set the value of walltime used for an assessor on XNAT
:param walltime: String denoting how much time was used running
the process.
:return: None
"""
self.assessor.attrs.set('%s/walltimeused' % self.atype, walltime)
def get_jobnode(self):
"""
Gets the node that a process ran on
:return: String identifying the node that a job ran on
"""
jobnode = self.assessor.attrs.get('%s/jobnode' % self.atype)
if jobnode is None:
jobnode = 'NotFound'
return jobnode.strip()
def set_jobnode(self, jobnode):
"""
        Set the node that the process ran on (on the grid)
:param jobnode: String identifying the node the job ran on
:return: None
"""
self.assessor.attrs.set('%s/jobnode' % self.atype, jobnode)
def undo_processing(self):
"""
Unset the job ID, memory used, walltime, and jobnode information
for the assessor on XNAT
:except: pyxnat.core.errors.DatabaseError when attempting to
delete a resource
:return: None
"""
from pyxnat.core.errors import DatabaseError
self.set_qcstatus(JOB_PENDING)
self.set_jobid(' ')
self.set_memused(' ')
self.set_walltime(' ')
self.set_jobnode(' ')
out_resource_list = self.assessor.out_resources()
for out_resource in out_resource_list:
if out_resource.label() not in REPROC_RES_SKIP_LIST:
LOGGER.info(' Removing %s' % out_resource.label())
try:
out_resource.delete()
except DatabaseError:
LOGGER.error(' ERROR:deleting resource.')
def reproc_processing(self):
"""
If the procstatus of an assessor is REPROC on XNAT, rerun the assessor.
:return: None
"""
curtime = time.strftime("%Y%m%d-%H%M%S")
local_dir = '%s_%s' % (self.assessor_label, curtime)
local_zip = '%s.zip' % local_dir
xml_filename = os.path.join(self.upload_dir, local_dir,
'%s.xml' % self.assessor_label)
# Make the temp dir
mkdirp(os.path.join(self.upload_dir, local_dir))
# Download the current resources
out_resource_list = self.assessor.out_resources()
for out_resource in out_resource_list:
olabel = out_resource.label()
if olabel not in REPROC_RES_SKIP_LIST and \
len(out_resource.files().get()) > 0:
LOGGER.info(' Downloading: %s' % olabel)
out_res = self.assessor.out_resource(olabel)
out_res.get(os.path.join(self.upload_dir, local_dir),
extract=True)
# Download xml of assessor
xml = self.assessor.get()
with open(xml_filename, 'w') as f_xml:
f_xml.write('%s\n' % xml)
# Zip it all up
cmd = 'cd %s && zip -qr %s %s/' % (self.upload_dir, local_zip,
local_dir)
LOGGER.debug('running cmd: %s' % cmd)
os.system(cmd)
# Upload it to Archive
self.assessor.out_resource(OLD_RESOURCE)\
.file(local_zip)\
.put(os.path.join(self.upload_dir, local_zip))
# Run undo
self.undo_processing()
# Delete the local copies
os.remove(os.path.join(self.upload_dir, local_zip))
shutil.rmtree(os.path.join(self.upload_dir, local_dir))
def update_status(self):
"""
        Update the status of a Task object.
:return: the "new" status (updated) of the Task.
"""
old_status, qcstatus, jobid = self.get_statuses()
new_status = old_status
if old_status == COMPLETE or old_status == JOB_FAILED:
if qcstatus == REPROC:
LOGGER.info(' * qcstatus=REPROC, running \
reproc_processing...')
self.reproc_processing()
new_status = NEED_TO_RUN
elif qcstatus == RERUN:
LOGGER.info(' * qcstatus=RERUN, running \
undo_processing...')
self.undo_processing()
new_status = NEED_TO_RUN
else:
# self.check_date()
pass
elif old_status == NEED_TO_RUN:
# TODO: anything, not yet???
pass
elif old_status == READY_TO_COMPLETE:
self.check_job_usage()
new_status = COMPLETE
elif old_status == NEED_INPUTS:
# This is now handled by dax_build
pass
elif old_status == JOB_RUNNING:
LOGGER.debug('calling check_running')
new_status = self.check_running(jobid)
elif old_status == READY_TO_UPLOAD:
# TODO: let upload spider handle it???
# self.check_date()
pass
elif old_status == UPLOADING:
# TODO: can we see if it's really uploading???
pass
elif old_status == NO_DATA:
pass
else:
LOGGER.warn(' * unknown status for %s: %s'
% (self.assessor_label, old_status))
LOGGER.debug('new_status='+new_status)
if new_status != old_status:
LOGGER.info(' * changing status from %s to %s'
% (old_status, new_status))
# Update QC Status
if new_status == COMPLETE:
self.set_proc_and_qc_status(new_status, NEEDS_QA)
else:
self.set_status(new_status)
return new_status
def get_jobid(self):
"""
Get the jobid of an assessor as stored on XNAT
:return: string of the jobid
"""
jobid = self.assessor.attrs.get('%s/jobid' % self.atype)
if jobid is None:
jobid = 'NotFound'
return jobid.strip()
def get_job_status(self, jobid=None):
"""
Get the status of a job given its jobid as assigned by the scheduler
:param jobid: job id assigned by the scheduler
:return: string from call to cluster.job_status or UNKNOWN.
"""
jobstatus = 'UNKNOWN'
if jobid is None:
jobid = self.get_jobid()
if jobid != '' and jobid != '0':
jobstatus = cluster.job_status(jobid)
LOGGER.debug('jobid,jobstatus='+str(jobid)+','+str(jobstatus))
return jobstatus
def launch(self, jobdir, job_email=None,
job_email_options=DAX_SETTINGS.get_email_opts(),
               job_rungroup=None, xnat_host=None, writeonly=False, pbsdir=None,
force_no_qsub=False):
"""
Method to launch a job on the grid
:param jobdir: absolute path where the data will be stored on the node
:param job_email: who to email if the job fails
:param job_email_options: grid-specific job email options (e.g.,
fails, starts, exits etc)
:param job_rungroup: grid-specific group to run the job under
:param xnat_host: set the XNAT_HOST in the PBS job
:param writeonly: write the job files without submitting them
:param pbsdir: folder to store the pbs file
:param force_no_qsub: run the job locally on the computer (serial mode)
:raises: cluster.ClusterLaunchException if the jobid is 0 or empty
as returned by pbs.submit() method
        :return: True if the job was written or submitted successfully
"""
cmds = self.commands(jobdir)
pbsfile = self.pbs_path(writeonly, pbsdir)
outlog = self.outlog_path()
outlog_dir = os.path.dirname(outlog)
mkdirp(outlog_dir)
pbs = PBS(pbsfile,
outlog,
cmds,
self.processor.walltime_str,
self.processor.memreq_mb,
self.processor.ppn,
self.processor.env,
job_email,
job_email_options,
job_rungroup,
xnat_host,
self.processor.job_template)
pbs.write()
if writeonly:
mes_format = """ filepath: {path}"""
LOGGER.info(mes_format.format(path=pbsfile))
return True
else:
jobid, job_failed = pbs.submit(outlog=outlog,
force_no_qsub=force_no_qsub)
if jobid == '' or jobid == '0':
# TODO: check to be sure it didn't really launch, then
# try one more time
LOGGER.error('failed to launch job on cluster')
raise ClusterLaunchException
else:
self.set_launch(jobid)
if force_no_qsub or \
not cluster.command_found(DAX_SETTINGS.get_cmd_submit()):
if job_failed:
LOGGER.info(' * changing status to %s'
% JOB_FAILED)
self.set_status(JOB_FAILED)
else:
LOGGER.info(' * changing status to %s'
% READY_TO_UPLOAD)
# Status already set in the spider
return True
def check_date(self):
"""
Sets the job created date if the assessor was not made through
dax_build
        :return: None. Returns early if get_createdate() is not empty,
            otherwise sets the create date to the job start date
"""
if self.get_createdate() != '':
return
jobstartdate = self.get_jobstartdate()
if jobstartdate != '':
self.set_createdate(jobstartdate)
def get_jobstartdate(self):
"""
Get the date that the job started
:return: String of the date that the job started in "%Y-%m-%d" format
"""
return self.assessor.attrs.get('%s/jobstartdate' % self.atype)
def set_jobstartdate_today(self):
"""
Set the date that the job started on the grid to today
:return: call to set_jobstartdate with today's date
"""
today_str = str(date.today())
return self.set_jobstartdate(today_str)
def set_jobstartdate(self, date_str):
"""
Set the date that the job started on the grid based on user passed
value
:param date_str: Datestring in the format "%Y-%m-%d" to set the job
            start date to
:return: None
"""
self.assessor.attrs.set('%s/jobstartdate' % self.atype, date_str)
def get_createdate(self):
"""
Get the date an assessor was created
:return: String of the date the assessor was created in "%Y-%m-%d"
format
"""
return self.assessor.attrs.get('%s/date' % self.atype)
def set_createdate(self, date_str):
"""
Set the date of the assessor creation to user passed value
:param date_str: String of the date in "%Y-%m-%d" format
:return: String of today's date in "%Y-%m-%d" format
"""
self.assessor.attrs.set('%s/date' % self.atype, date_str)
return date_str
def set_createdate_today(self):
"""
Set the date of the assessor creation to today
        :return: String of today's date in "%Y-%m-%d" format
"""
today_str = str(date.today())
self.set_createdate(today_str)
return today_str
def get_status(self):
"""
Get the procstatus of an assessor
:return: The string of the procstatus of the assessor.
DOES_NOT_EXIST if the assessor does not exist
"""
if not self.assessor.exists():
xnat_status = DOES_NOT_EXIST
elif self.atype.lower() in [DEFAULT_DATATYPE.lower(),
DEFAULT_FS_DATATYPE.lower()]:
xnat_status = self.assessor.attrs.get('%s/procstatus'
% self.atype.lower())
else:
xnat_status = 'UNKNOWN_xsiType: %s' % self.atype
return xnat_status
def get_statuses(self, cached_sessions=None):
"""
Get the procstatus, qcstatus, and job id of an assessor
:return: Serially ordered strings of the assessor procstatus,
qcstatus, then jobid.
"""
if cached_sessions:
for csess in cached_sessions:
for cassr in csess.assessors():
if cassr.label() == self.assessor_label:
pstatus = cassr.info()['procstatus']
qstatus = cassr.info()['qcstatus']
jobid = cassr.info()['jobid']
return pstatus, qstatus, jobid
if not self.assessor.exists():
xnat_status = DOES_NOT_EXIST
qcstatus = DOES_NOT_EXIST
jobid = ''
elif self.atype.lower() in [DEFAULT_DATATYPE.lower(),
DEFAULT_FS_DATATYPE.lower()]:
xnat_status, qcstatus, jobid = self.assessor.attrs.mget([
'%s/procstatus' % self.atype,
'%s/validation/status' % self.atype,
'%s/jobid' % self.atype
])
else:
xnat_status = 'UNKNOWN_xsiType: %s' % self.atype
qcstatus = 'UNKNOWN_xsiType: %s' % self.atype
jobid = ''
return xnat_status, qcstatus, jobid
def set_status(self, status):
"""
Set the procstatus of an assessor on XNAT
:param status: String to set the procstatus of the assessor to
:return: None
"""
self.assessor.attrs.set('%s/procstatus' % self.atype, status)
def get_qcstatus(self):
"""
Get the qcstatus of the assessor
:return: A string of the qcstatus for the assessor if it exists.
If it does not, it returns DOES_NOT_EXIST.
The else case returns an UNKNOWN xsiType with the xsiType of the
assessor as stored on XNAT.
"""
qcstatus = ''
if not self.assessor.exists():
qcstatus = DOES_NOT_EXIST
elif self.atype.lower() in [DEFAULT_DATATYPE.lower(),
DEFAULT_FS_DATATYPE.lower()]:
qcstatus = self.assessor.attrs.get('%s/validation/status'
% self.atype)
else:
qcstatus = 'UNKNOWN_xsiType: %s' % self.atype
return qcstatus
def set_qcstatus(self, qcstatus):
"""
Set the qcstatus of the assessor
:param qcstatus: String to set the qcstatus to
:return: None
"""
self.assessor.attrs.mset({
'%s/validation/status' % self.atype: qcstatus,
'%s/validation/validated_by' % self.atype: 'NULL',
'%s/validation/date' % self.atype: 'NULL',
'%s/validation/notes' % self.atype: 'NULL',
'%s/validation/method' % self.atype: 'NULL',
})
def set_proc_and_qc_status(self, procstatus, qcstatus):
"""
Set the procstatus and qcstatus of the assessor
:param procstatus: String to set the procstatus of the assessor to
:param qcstatus: String to set the qcstatus of the assessor to
:return: None
"""
self.assessor.attrs.mset({
'%s/procstatus' % self.atype: procstatus,
'%s/validation/status' % self.atype: qcstatus,
})
def set_jobid(self, jobid):
"""
Set the job ID of the assessor on XNAT
:param jobid: The ID of the process assigned by the grid scheduler
:return: None
"""
self.assessor.attrs.set('%s/jobid' % self.atype, jobid)
def set_launch(self, jobid):
"""
Set the date that the job started and its associated ID on XNAT.
Additionally, set the procstatus to JOB_RUNNING
:param jobid: The ID of the process assigned by the grid scheduler
:return: None
"""
today_str = str(date.today())
self.assessor.attrs.mset({
'%s/jobstartdate' % self.atype.lower(): today_str,
'%s/jobid' % self.atype.lower(): jobid,
'%s/procstatus' % self.atype.lower(): JOB_RUNNING,
})
def commands(self, jobdir):
"""
Call the get_cmds method of the class Processor.
:param jobdir: Fully qualified path where the job will run on the node.
Note that this is likely to start with /tmp on most grids.
:return: A string that makes a command line call to a spider with all
args.
"""
assr_dir = os.path.join(jobdir, self.assessor_label)
return self.processor.get_cmds(self.assessor, assr_dir)
def pbs_path(self, writeonly=False, pbsdir=None):
"""
Method to return the path of the PBS file for the job
        :param writeonly: write the job files to TRASH without submitting them
:param pbsdir: folder to store the pbs file
:return: A string that is the absolute path to the PBS file that will
be submitted to the scheduler for execution.
"""
res_dir = self.upload_dir
j_ext = DAX_SETTINGS.get_job_extension_file()
assessor_label = assessor_utils.full_label_from_assessor(
self.assessor)
filename = '%s%s' % (assessor_label, j_ext)
if writeonly:
if pbsdir and os.path.isdir(pbsdir):
return os.path.join(pbsdir, filename)
else:
return os.path.join(os.path.join(res_dir, 'TRASH'), filename)
else:
if pbsdir:
return os.path.join(pbsdir, filename)
else:
return os.path.join(os.path.join(res_dir, 'PBS'), filename)
def outlog_path(self):
"""
Method to return the path of outlog file for the job
:return: A string that is the absolute path to the OUTLOG file.
"""
assr_fout = '%s.output' % self.assessor_label
return os.path.join(self.upload_dir, OUTLOG_DIRNAME, assr_fout)
def ready_flag_exists(self):
"""
Method to see if the flag file
<UPLOAD_DIR>/<ASSESSOR_LABEL>/READY_TO_UPLOAD.txt exists
:return: True if the file exists. False if the file does not exist.
"""
flagfile = os.path.join(self.upload_dir, self.assessor_label,
READY_TO_UPLOAD_FLAG_FILENAME)
return os.path.isfile(flagfile)
def check_running(self, jobid=None):
"""
Check to see if a job specified by the scheduler ID is still running
:param jobid: The ID of the job in question assigned by the scheduler.
:return: A String of JOB_RUNNING if the job is running or enqueued and
        JOB_FAILED if the ready flag (see ready_flag_exists) does not exist
in the assessor label folder in the upload directory.
"""
# Check status on cluster
jobstatus = self.get_job_status(jobid)
LOGGER.debug('jobstatus='+str(jobstatus))
if not jobstatus or jobstatus in ['R', 'Q']:
# Still running
return JOB_RUNNING
elif not self.ready_flag_exists():
# Check for a flag file created upon completion,
# if it's not there then the job failed
return JOB_FAILED
else:
# Let Upload Spider handle the upload
return JOB_RUNNING
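# ---------------------------------------------------------------------------
# Note: the Task class above reads and writes all of its bookkeeping
# (procstatus, QC status, jobid, memused, walltime, ...) directly as XNAT
# assessor attributes. ClusterTask below keeps the same bookkeeping but
# stores it in local files under its DISKQ directory, so job status can be
# tracked and updated without an XNAT connection.
# ---------------------------------------------------------------------------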
class ClusterTask(Task):
""" Class Task to generate/manage the assessor with the cluster """
def __init__(self, assr_label, upload_dir, diskq):
"""
Init of class ClusterTask
:return: None
"""
self.assessor_label = assr_label
self.processor = None
self.assessor = None
self.atype = None
self.assessor_id = None
self.diskq = diskq
self.upload_dir = upload_dir
def get_processor_name(self):
"""
Get the name of the Processor for the Task.
:return: String of the Processor name.
"""
raise NotImplementedError()
def get_processor_version(self):
"""
Get the version of the Processor.
:return: String of the Processor version.
"""
raise NotImplementedError()
def is_open(self):
"""
Check to see if a task is still in "Open" status as defined in
OPEN_STATUS_LIST.
:return: True if the Task is open. False if it is not open
"""
astatus = self.get_status()
return astatus in OPEN_STATUS_LIST
def get_job_usage(self):
"""
Get the amount of memory used, the amount of walltime used, the jobid
of the process, the node the process ran on, and when it started
from the scheduler.
:return: List of strings. Memory used, walltime used, jobid, node used,
and start date
"""
memused = self.get_attr('memused')
walltime = self.get_attr('walltimeused')
jobid = self.get_attr('jobid')
jobnode = self.get_attr('jobnode')
jobstartdate = self.get_attr('jobstartdate')
return [memused, walltime, jobid, jobnode, jobstartdate]
def check_job_usage(self):
"""
The task has now finished, get the amount of memory used, the amount of
walltime used, the jobid of the process, the node the process ran on,
and when it started from the scheduler. Set these values locally
:return: None
"""
[memused, walltime, jobid, jobnode, jobstrdate] = self.get_job_usage()
if walltime:
if memused and jobnode:
LOGGER.debug('memused and walltime already set, skipping')
else:
if memused == '':
self.set_memused('NotFound')
if jobnode == '':
self.set_jobnode('NotFound')
return
# We can't get info from cluster if job too old
if not cluster.is_traceable_date(jobstrdate):
self.set_walltime('NotFound')
self.set_memused('NotFound')
self.set_jobnode('NotFound')
return
# Get usage with tracejob
jobinfo = cluster.tracejob_info(jobid, jobstrdate)
if jobinfo['mem_used'].strip():
self.set_memused(jobinfo['mem_used'])
else:
self.set_memused('NotFound')
if jobinfo['walltime_used'].strip():
self.set_walltime(jobinfo['walltime_used'])
else:
self.set_walltime('NotFound')
if jobinfo['jobnode'].strip():
self.set_jobnode(jobinfo['jobnode'])
else:
self.set_jobnode('NotFound')
def get_memused(self):
"""
Get the amount of memory used for a process
:return: String of how much memory was used
"""
memused = self.get_attr('memused')
if memused is None:
memused = 'NotFound'
return memused
def set_memused(self, memused):
"""
Set the amount of memory used for a process
:param memused: String denoting the amount of memory used
:return: None
"""
self.set_attr('memused', memused)
def get_walltime(self):
"""
Get the amount of walltime used for a process
:return: String of how much walltime was used for a process
"""
walltime = self.get_attr('walltimeused')
if walltime is None:
walltime = 'NotFound'
return walltime
def set_walltime(self, walltime):
"""
Set the value of walltime used for an assessor
:param walltime: String denoting how much time was used running
the process.
:return: None
"""
self.set_attr('walltimeused', walltime)
def get_jobnode(self):
"""
Gets the node that a process ran on
:return: String identifying the node that a job ran on
"""
jobnode = self.get_attr('jobnode')
return jobnode
def set_jobnode(self, jobnode):
"""
        Set the node that the process ran on (on the grid)
:param jobnode: String identifying the node the job ran on
:return: None
"""
self.set_attr('jobnode', jobnode)
def undo_processing(self):
raise NotImplementedError()
def reproc_processing(self):
"""
:raises: NotImplementedError
:return: None
"""
raise NotImplementedError()
def update_status(self):
"""
Update the status of a Cluster Task object.
:return: the "new" status (updated) of the Task.
"""
old_status = self.get_status()
new_status = old_status
LOGGER.debug('old_status='+old_status)
if old_status == JOB_RUNNING:
new_status = self.check_running()
if new_status == READY_TO_UPLOAD:
new_status = self.complete_task()
elif new_status == JOB_FAILED:
new_status = self.fail_task()
else:
# still running
pass
elif old_status in [COMPLETE, JOB_FAILED, NEED_TO_RUN,
NEED_INPUTS, READY_TO_UPLOAD, UPLOADING, NO_DATA]:
pass
else:
LOGGER.warn(' * unknown status for %s: %s'
% (self.assessor_label, old_status))
if new_status != old_status:
LOGGER.info(' * changing status from %s to %s'
% (old_status, new_status))
self.set_status(new_status)
return new_status
def get_jobid(self):
"""
Get the jobid of an assessor as stored in local cache
:return: string of the jobid
"""
jobid = self.get_attr('jobid')
return jobid
def get_job_status(self):
"""
        Get the status of the job from the scheduler, using the stored jobid
:return: string from call to cluster.job_status or UNKNOWN.
"""
jobstatus = 'UNKNOWN'
jobid = self.get_jobid()
if jobid and jobid != '0':
jobstatus = cluster.job_status(jobid)
LOGGER.debug('jobid,jobstatus='+str(jobid)+','+str(jobstatus))
return jobstatus
def launch(self, force_no_qsub=False):
"""
Method to launch a job on the grid
:raises: cluster.ClusterLaunchException if the jobid is 0 or empty
as returned by pbs.submit() method
        :return: True if the job was submitted successfully
"""
batch_path = self.batch_path()
outlog = self.outlog_path()
jobid, job_failed = cluster.submit_job(batch_path, outlog=outlog,
force_no_qsub=force_no_qsub)
if jobid == '' or jobid == '0':
LOGGER.error('failed to launch job on cluster')
raise ClusterLaunchException
else:
self.set_launch(jobid)
cmd = DAX_SETTINGS.get_cmd_submit()
if (force_no_qsub or not cluster.command_found(cmd)) and \
job_failed:
self.set_status(JOB_FAILED)
return True
def check_date(self):
"""
Sets the job created date if the assessor was not made via dax_build
"""
raise NotImplementedError()
def get_jobstartdate(self):
"""
Get the date that the job started
:return: String of the date that the job started in "%Y-%m-%d" format
"""
jobstartdate = self.get_attr('jobstartdate')
if jobstartdate is None:
jobstartdate = 'NULL'
return jobstartdate
def set_jobstartdate(self, date_str):
"""
Set the date that the job started on the grid based on user passed
value
:param date_str: Datestring in the format "%Y-%m-%d" to set the job
            start date to
:return: None
"""
self.set_attr('jobstartdate', date_str)
def get_createdate(self):
"""
Get the date an assessor was created
:return: String of the date the assessor was created in "%Y-%m-%d"
format
"""
raise NotImplementedError()
def set_createdate(self, date_str):
"""
Set the date of the assessor creation to user passed value
:param date_str: String of the date in "%Y-%m-%d" format
:return: String of today's date in "%Y-%m-%d" format
"""
raise NotImplementedError()
def set_createdate_today(self):
"""
Set the date of the assessor creation to today
        :return: String of today's date in "%Y-%m-%d" format
"""
raise NotImplementedError()
def get_status(self):
"""
Get the procstatus
:return: The string of the procstatus
"""
procstatus = self.get_attr('procstatus')
if not procstatus:
procstatus = NEED_TO_RUN
return procstatus
def get_statuses(self):
"""
Get the procstatus, qcstatus, and job id of an assessor
"""
raise NotImplementedError()
def set_status(self, status):
"""
Set the procstatus of an assessor on XNAT
:param status: String to set the procstatus of the assessor to
:return: None
"""
self.set_attr('procstatus', status)
def get_qcstatus(self):
"""
Get the qcstatus
"""
raise NotImplementedError()
def set_qcstatus(self, qcstatus):
"""
Set the qcstatus of the assessor
:param qcstatus: String to set the qcstatus to
:return: None
"""
raise NotImplementedError()
def set_proc_and_qc_status(self, procstatus, qcstatus):
"""
Set the procstatus and qcstatus of the assessor
"""
raise NotImplementedError()
def set_jobid(self, jobid):
"""
Set the job ID of the assessor
:param jobid: The ID of the process assigned by the grid scheduler
:return: None
"""
self.set_attr('jobid', jobid)
def set_launch(self, jobid):
"""
Set the date that the job started and its associated ID.
Additionally, set the procstatus to JOB_RUNNING
:param jobid: The ID of the process assigned by the grid scheduler
:return: None
"""
today_str = str(date.today())
self.set_attr('jobstartdate', today_str)
self.set_attr('jobid', jobid)
self.set_attr('procstatus', JOB_RUNNING)
def commands(self, jobdir):
"""
Call the get_cmds method of the class Processor.
:param jobdir: Fully qualified path where the job will run on the node.
Note that this is likely to start with /tmp on most grids.
:return: A string that makes a command line call to a spider with all
args.
"""
raise NotImplementedError()
def batch_path(self):
"""
Method to return the path of the PBS file for the job
:return: A string that is the absolute path to the PBS file that will
be submitted to the scheduler for execution.
"""
label = self.assessor_label
return os.path.join(self.diskq, BATCH_DIRNAME,
'%s%s' % (label, JOB_EXTENSION_FILE))
def outlog_path(self):
"""
Method to return the path of outlog file for the job
:return: A string that is the absolute path to the OUTLOG file.
"""
f_out = '%s.txt' % self.assessor_label
return os.path.join(self.diskq, OUTLOG_DIRNAME, f_out)
def processor_spec_path(self):
"""
Method to return the path of processor file for the job
:return: A string that is the absolute path to the file.
"""
return os.path.join(self.diskq, 'processor', self.assessor_label)
def upload_pbs_dir(self):
"""
Method to return the path of dir for the PBS
:return: A string that is the directory path for the PBS dir
"""
label = self.assessor_label
return os.path.join(self.upload_dir, label, PBS_DIRNAME)
def upload_outlog_dir(self):
"""
Method to return the path of outlog file for the job
:return: A string that is the absolute path to the OUTLOG file.
"""
label = self.assessor_label
return os.path.join(self.upload_dir, label, OUTLOG_DIRNAME)
def check_running(self):
"""
        Check to see if the job assigned by the scheduler is still running,
        using the stored jobid.
:return: A String of JOB_RUNNING if the job is running or enqueued and
        JOB_FAILED if the ready flag (see ready_flag_exists) does not exist
in the assessor label folder in the upload directory.
"""
if self.ready_flag_exists():
return READY_TO_UPLOAD
# Check status on cluster
jobstatus = self.get_job_status()
LOGGER.debug('jobstatus='+str(jobstatus))
if not jobstatus or jobstatus == 'R' or jobstatus == 'Q':
# Still running
return JOB_RUNNING
else:
return JOB_FAILED
def build_task(self):
"""
Method to build a job
"""
raise NotImplementedError()
def build_commands(self):
"""
Call the get_cmds method of the class Processor.
:param jobdir: Fully qualified path where the job will run on the node.
Note that this is likely to start with /tmp on most grids.
:return: A string that makes a command line call to a spider with all
args.
"""
raise NotImplementedError()
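    # -----------------------------------------------------------------------
    # Local attribute storage: each bookkeeping value is kept in its own
    # small text file, e.g. (hypothetical paths)
    #   <diskq>/procstatus/<assessor_label>
    #   <diskq>/jobid/<assessor_label>
    # The helpers below read and write the stripped file contents, so the
    # queue can be inspected with plain shell tools.
    # -----------------------------------------------------------------------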
def get_attr(self, name):
apath = self.attr_path(name)
if not os.path.exists(apath):
return None
with open(apath, 'r') as f:
return f.read().strip()
def set_attr(self, name, value):
attr_path = self.attr_path(name)
attr_dir = os.path.dirname(attr_path)
mkdirp(attr_dir)
        with open(attr_path, 'w') as f:
f.write(str(value) + '\n')
def attr_path(self, attr):
return os.path.join(self.diskq, attr, self.assessor_label)
def complete_task(self):
self.check_job_usage()
# Copy batch file, note we don't move so dax_upload knows the
# task origin
src = self.batch_path()
dst = self.upload_pbs_dir()
mkdirp(dst)
LOGGER.debug('copying batch file from %s to %s' % (src, dst))
shutil.copy(src, dst)
# Move output file
src = self.outlog_path()
dst = self.upload_outlog_dir()
if os.path.exists(src):
mkdirp(dst)
LOGGER.debug('moving outlog file from %s to %s' % (src, dst))
shutil.move(src, dst)
# Touch file for dax_upload to check
res_dir = self.upload_dir
create_flag(os.path.join(res_dir, self.assessor_label,
'%s.txt' % READY_TO_COMPLETE))
return COMPLETE
def fail_task(self):
self.check_job_usage()
# Copy batch file, note we don't move so dax_upload knows the
# task origin
src = self.batch_path()
dst = self.upload_pbs_dir()
mkdirp(dst)
LOGGER.debug('copying batch file from %s to %s' % (src, dst))
shutil.copy(src, dst)
# Move output file
src = self.outlog_path()
dst = self.upload_outlog_dir()
mkdirp(dst)
LOGGER.debug('moving outlog file from %s to %s' % (src, dst))
shutil.move(src, dst)
# Touch file for dax_upload that job failed
res_dir = self.upload_dir
create_flag(os.path.join(res_dir, self.assessor_label,
'%s.txt' % JOB_FAILED))
# Touch file for dax_upload to check
create_flag(os.path.join(res_dir, self.assessor_label,
'%s.txt' % READY_TO_COMPLETE))
return JOB_FAILED
def delete_attr(self, attr):
try:
os.remove(self.attr_path(attr))
except OSError:
pass
def delete_batch(self):
# Delete batch file
try:
os.remove(self.batch_path())
except OSError:
pass
def delete(self):
# Delete attributes
attr_list = ['jobid', 'jobnode', 'procstatus', 'walltimeused',
'memused', 'jobstartdate', 'processor']
for attr in attr_list:
self.delete_attr(attr)
self.delete_batch()
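# ---------------------------------------------------------------------------
# Note: XnatTask below covers the build side of the DISKQ workflow: it reads
# statuses from XNAT, writes the batch script and processor spec under
# <diskq>, and updates the assessor's proc/QC status on XNAT. Launching and
# polling the submitted job are left to ClusterTask (launch(),
# check_running(), etc. raise NotImplementedError here).
# ---------------------------------------------------------------------------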
class XnatTask(Task):
""" Class Task to generate/manage the assessor with the cluster """
def __init__(self, processor, assessor, upload_dir, diskq):
"""
        Init of class XnatTask
        :param processor: processor used
        :param assessor: pyxnat assessor object
        :param upload_dir: upload directory to copy data after job finished.
        :param diskq: local DISKQ directory where the job files are written
:return: None
"""
super(XnatTask, self).__init__(processor, assessor, upload_dir)
self.diskq = diskq
def check_job_usage(self):
"""
The task has now finished, get the amount of memory used, the amount of
walltime used, the jobid of the process, the node the process ran on,
and when it started from the scheduler. Set these values on XNAT
:return: None
"""
raise NotImplementedError()
def update_status(self):
"""
        Update the status of an XNAT Task object.
:return: the "new" status (updated) of the Task.
"""
old_status, qcstatus, jobid = self.get_statuses()
new_status = old_status
if old_status == COMPLETE or old_status == JOB_FAILED:
if qcstatus == REPROC:
LOGGER.info(' * qcstatus=REPROC, running \
reproc_processing...')
self.reproc_processing()
new_status = NEED_TO_RUN
elif qcstatus == RERUN:
LOGGER.info(' * qcstatus=RERUN, running \
undo_processing...')
self.undo_processing()
new_status = NEED_TO_RUN
else:
pass
elif old_status in [NEED_TO_RUN, READY_TO_COMPLETE, NEED_INPUTS,
JOB_RUNNING, READY_TO_UPLOAD, UPLOADING, NO_DATA,
JOB_BUILT]:
pass
else:
LOGGER.warn(' * unknown status for %s: %s'
% (self.assessor_label, old_status))
if new_status != old_status:
LOGGER.info(' * changing status from %s to %s'
% (old_status, new_status))
self.set_status(new_status)
return new_status
def get_job_status(self):
raise NotImplementedError()
def launch(self):
"""
Method to launch a job on the grid
"""
raise NotImplementedError()
def set_launch(self, jobid):
"""
Set the date that the job started and its associated ID on XNAT.
Additionally, set the procstatus to JOB_RUNNING
:param jobid: The ID of the process assigned by the grid scheduler
:return: None
"""
raise NotImplementedError()
def batch_path(self):
"""
Method to return the path of the PBS file for the job
:return: A string that is the absolute path to the PBS file that will
be submitted to the scheduler for execution.
"""
f_pbs = '%s%s' % (self.assessor_label, JOB_EXTENSION_FILE)
return os.path.join(self.diskq, BATCH_DIRNAME, f_pbs)
def outlog_path(self):
"""
Method to return the path of outlog file for the job
:return: A string that is the absolute path to the OUTLOG file.
"""
f_txt = '%s.txt' % self.assessor_label
return os.path.join(self.diskq, OUTLOG_DIRNAME, f_txt)
def processor_spec_path(self):
"""
Method to return the path of processor file for the job
:return: A string that is the absolute path to the file.
"""
return os.path.join(self.diskq, 'processor', self.assessor_label)
def check_running(self):
"""
Check to see if a job specified by the scheduler ID is still running
:param jobid: The ID of the job in question assigned by the scheduler.
:return: A String of JOB_RUNNING if the job is running or enqueued and
        JOB_FAILED if the ready flag (see ready_flag_exists) does not exist
in the assessor label folder in the upload directory.
"""
raise NotImplementedError()
def write_processor_spec(self):
filename = self.processor_spec_path()
mkdirp(os.path.dirname(filename))
self.processor.write_processor_spec(filename)
def build_task(self, assr, sessions,
jobdir, job_email=None,
job_email_options=DEFAULT_EMAIL_OPTS,
job_rungroup=None,
xnat_host=None):
"""
Method to build a job
"""
(old_proc_status, old_qc_status, _) = self.get_statuses(sessions)
try:
resdir = self.upload_dir
cmds = self.build_commands(assr, sessions, jobdir, resdir)
batch_file = self.batch_path()
outlog = self.outlog_path()
batch = PBS(batch_file,
outlog,
cmds,
self.processor.walltime_str,
self.processor.memreq_mb,
self.processor.ppn,
self.processor.env,
job_email,
job_email_options,
job_rungroup,
xnat_host,
self.processor.job_template)
LOGGER.info('writing:' + batch_file)
batch.write()
# Set new statuses to be updated
new_proc_status = JOB_RUNNING
new_qc_status = JOB_PENDING
# Write processor spec file for version 3
try:
if (self.processor.procyamlversion == '3.0.0-dev.0'):
# write processor spec file
LOGGER.debug('writing processor spec file')
self.write_processor_spec()
except AttributeError as err:
# older processor does not have version
                LOGGER.debug('procyamlversion not found: {}'.format(err))
except NeedInputsException as e:
new_proc_status = NEED_INPUTS
new_qc_status = e.value
except NoDataException as e:
new_proc_status = NO_DATA
new_qc_status = e.value
if new_proc_status != old_proc_status or \
new_qc_status != old_qc_status:
self.set_proc_and_qc_status(new_proc_status, new_qc_status)
return (new_proc_status, new_qc_status)
def build_commands(self, assr, sessions, jobdir, resdir):
"""
Call the build_cmds method of the class Processor.
:param jobdir: Fully qualified path where the job will run on the node.
Note that this is likely to start with /tmp on most grids.
:return: A string that makes a command line call to a spider with all
args.
"""
return self.processor.build_cmds(
assr,
self.assessor_label,
sessions,
jobdir,
resdir)
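# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dax API): a minimal way to poll a
# single DISKQ task by hand. The label and directories are placeholders; in
# practice the dax command-line tools (dax_build, dax_upload, etc.) drive
# these classes.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    _task = ClusterTask('Proj-x-Subj-x-Sess-x-Proc_v1', '/tmp/dax_upload',
                        '/tmp/dax_diskq')
    print('status: %s' % _task.update_status())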
|
py | 1a54bcc805cfb85ab55550de668e096041c316a0 | import _init_paths
import utils.zl_utils as zl
import h5py
import zl_config as C
import numpy as np
def merge_pickled_files():
import os
    h5f = h5py.File(C.coco_eb_h5_path, 'w')
cnt = 0
zl.tick()
for path, subdirs, files in os.walk(C.coco_eb_dir):
for name in files:
cnt += 1
            if cnt % 1000 == 0:
                print cnt, zl.tock()
                zl.tick()
            fpath = os.path.join(path, name)
            fid = name.replace('.eb', '')
            bbs = np.array(zl.load(fpath)).astype(np.float16)
            h5f[fid] = bbs
    # close to make sure everything is flushed to disk
    h5f.close()
merge_pickled_files()
|