filename | text
---|---|
the-stack_0_21647 |
from overrides import overrides
from pytorch_transformers import BertModel
import torch.nn as nn
from claf.data.data_handler import CachePath
from claf.decorator import register
from claf.model.base import ModelWithoutTokenEmbedder
from claf.model.token_classification.mixin import TokenClassification
from claf.model import cls_utils
@register("model:bert_for_tok_cls")
class BertForTokCls(TokenClassification, ModelWithoutTokenEmbedder):
"""
Implementation of Single Sentence Tagging model presented in
BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
(https://arxiv.org/abs/1810.04805)
* Args:
token_makers: used to embed the sequence
num_tags: number of classified tags
ignore_tag_idx: index of the tag to ignore when calculating loss (tag pad value)
* Kwargs:
pretrained_model_name: the name of a pre-trained model
dropout: classification layer dropout
"""
def __init__(
self, token_makers, num_tags, ignore_tag_idx, pretrained_model_name=None, dropout=0.2
):
super(BertForTokCls, self).__init__(token_makers)
self.use_pytorch_transformers = True # for optimizer's model parameters
self.ignore_tag_idx = ignore_tag_idx
self.num_tags = num_tags
self._model = BertModel.from_pretrained(
pretrained_model_name, cache_dir=str(CachePath.ROOT)
)
self.classifier = nn.Sequential(
nn.Dropout(dropout), nn.Linear(self._model.config.hidden_size, num_tags)
)
self.classifier.apply(self._model.init_weights)
self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_tag_idx)
@overrides
def forward(self, features, labels=None):
"""
* Args:
features: feature dictionary like below.
{
"bert_input": {
"feature": [
[100, 576, 21, 45, 7, 91, 101, 0, 0, ...],
...,
]
}
"token_type": {
"feature": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, ...],
...,
]
},
"tagged_sub_token_idxs": {
[
[1, 3, 4, 0, 0, 0, 0, 0, 0, ...],
...,
]
}
}
* Kwargs:
labels: label dictionary like below.
{
"tag_idxs": [[2, 1, 0, 4, 5, ...], ...]
"data_idx": [2, 4, 5, 7, 2, 1, ...]
}
Do not calculate loss when there is no label. (inference/predict mode)
* Returns: output_dict (dict) consisting of
- sequence_embed: embedding vector of the sequence
- tag_logits: representing unnormalized log probabilities of the tags.
- tag_idxs: target class idx
- data_idx: data idx
- loss: a scalar loss to be optimized
"""
bert_inputs = features["bert_input"]["feature"]
token_type_ids = features["token_type"]["feature"]
tagged_sub_token_idxs = features["tagged_sub_token_idxs"]["feature"]
num_tokens = features["num_tokens"]["feature"]
attention_mask = (bert_inputs > 0).long()
outputs = self._model(
bert_inputs, token_type_ids=token_type_ids, attention_mask=attention_mask
)
token_encodings = outputs[0]
pooled_output = outputs[1]
tag_logits = self.classifier(token_encodings) # [B, L, num_tags]
# gather the logits of the tagged token positions.
gather_token_pos_idxs = tagged_sub_token_idxs.unsqueeze(-1).repeat(1, 1, self.num_tags)
token_tag_logits = tag_logits.gather(1, gather_token_pos_idxs) # [B, num_tokens, num_tags]
sliced_token_tag_logits = [token_tag_logits[idx, :n, :] for idx, n in enumerate(num_tokens)]
output_dict = {"sequence_embed": pooled_output, "tag_logits": sliced_token_tag_logits}
if labels:
tag_idxs = labels["tag_idxs"]
data_idx = labels["data_idx"]
output_dict["tag_idxs"] = tag_idxs
output_dict["data_idx"] = data_idx
# Loss
loss = self.criterion(token_tag_logits.view(-1, self.num_tags), tag_idxs.view(-1))
output_dict["loss"] = loss.unsqueeze(0) # NOTE: DataParallel concat Error
return output_dict
@overrides
def print_examples(self, index, inputs, predictions):
"""
Print evaluation examples
* Args:
index: data index
inputs: mini-batch inputs
predictions: prediction dictionary consisting of
- key: 'id' (sequence id)
- value: dictionary consisting of
- tag_idxs
* Returns:
print(Sequence, Sequence Tokens, Target Tags, Target Slots, Predicted Tags, Predicted Slots)
"""
data_idx = inputs["labels"]["data_idx"][index].item()
data_id = self._dataset.get_id(data_idx)
helper = self._dataset.helper
sequence = helper["examples"][data_id]["sequence"]
target_tag_texts = helper["examples"][data_id]["tag_texts"]
pred_tag_idxs = predictions[data_id]["tag_idxs"]
pred_tag_texts = self._dataset.get_tag_texts_with_idxs(pred_tag_idxs)
sequence_tokens = helper["examples"][data_id]["sequence_sub_tokens"]
print()
print("- Sequence:", sequence)
print("- Sequence Tokens:", sequence_tokens)
print("- Target:")
print(" Tags:", target_tag_texts)
print(" (Slots)", cls_utils.get_tag_dict(sequence, target_tag_texts))
print("- Predict:")
print(" Tags:", pred_tag_texts)
print(" (Slots)", cls_utils.get_tag_dict(sequence, pred_tag_texts))
print()
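# --- Editor-added sketch (not part of the original file) ---
# A minimal, self-contained illustration of the sub-token gathering step used in
# ``forward`` above: per-sub-token logits are reduced to per-token logits by
# selecting, for each original token, the logits at its first sub-token index.
# All shapes and values below are invented for demonstration purposes only.
if __name__ == "__main__":
    import torch
    batch_size, seq_len, num_tags, num_tokens = 2, 8, 5, 3
    tag_logits = torch.randn(batch_size, seq_len, num_tags)  # [B, L, num_tags]
    tagged_sub_token_idxs = torch.randint(0, seq_len, (batch_size, num_tokens))  # [B, num_tokens]
    gather_idxs = tagged_sub_token_idxs.unsqueeze(-1).repeat(1, 1, num_tags)  # [B, num_tokens, num_tags]
    token_tag_logits = tag_logits.gather(1, gather_idxs)  # [B, num_tokens, num_tags]
    print(token_tag_logits.shape)  # torch.Size([2, 3, 5])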
|
the-stack_0_21650 | # Machine Learning Keras Suite
#
# A Python helper file: option helper for parameters.
#
# Author: Björn Hempel <[email protected]>
# Date: 03.10.2019
# Web: https://github.com/bjoern-hempel/machine-learning-keras-suite
#
# LICENSE
#
# MIT License
#
# Copyright (c) 2019 Björn Hempel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import click
class OptionHelper(click.Option):
"""A class that can different default options for different commands."""
command_path = {}
parameters = {}
def __init__(self, *args, **kwargs):
# add option type
self.option_type = kwargs.pop('option_type', 'default')
# check option type
if self.option_type not in ['default', 'default_by_command', 'default_by_parameter', 'concat_parameters']:
raise AssertionError('Unknown option type "%s"' % self.option_type)
# call init function
getattr(self, 'init_' + self.option_type)(kwargs)
# call all parent option classes
super(OptionHelper, self).__init__(*args, **kwargs)
def init_default(self, kwargs):
pass
def init_default_by_command(self, kwargs):
self.default_options = kwargs.pop('default_options', self.get_default_dict(kwargs))
# check type of argument default_options
if not isinstance(self.default_options, dict):
raise AssertionError('Attribute default_options must be a dict object.')
def init_default_by_parameter(self, kwargs):
self.default_options = kwargs.pop('default_options', None)
self.dependent = kwargs.pop('dependent', None)
def init_concat_parameters(self, kwargs):
self.concat = kwargs.pop('concat', None)
def get_default(self, ctx):
processed_value = getattr(self, 'get_default_' + self.option_type)(ctx)
OptionHelper.parameters[self.name] = processed_value
return processed_value
def get_default_default_by_command(self, ctx):
if self.name not in OptionHelper.command_path:
OptionHelper.command_path[self.name] = ctx.info_name
else:
OptionHelper.command_path[self.name] += '_' + ctx.info_name.replace('-', '_')
command_path = OptionHelper.command_path[self.name]
if command_path not in self.default_options:
if 'default' in self.default_options:
return self.default_options['default']
else:
return None
return self.default_options[command_path]
def get_default_default_by_parameter(self, ctx):
# no choice given
if not isinstance(self.default_options, dict) or self.dependent is None:
OptionHelper.parameters[self.name] = self.default_options
return self.default_options
if self.dependent not in OptionHelper.parameters:
raise AssertionError('%s was not found' % self.dependent)
key = OptionHelper.parameters[self.dependent]
if key not in self.default_options:
if 'default' in self.default_options:
return self.default_options['default']
else:
return None
return self.default_options[key]
def get_default_concat_parameters(self, ctx):
return super(OptionHelper, self).get_default(ctx)
def process_value(self, ctx, value):
processed_value = getattr(self, 'process_value_' + self.option_type)(ctx, value)
OptionHelper.parameters[self.name] = processed_value
return processed_value
def process_value_default_by_command(self, ctx, value):
return super(OptionHelper, self).process_value(ctx, value)
def process_value_default_by_parameter(self, ctx, value):
if value is not None:
return_value = self.type_cast_value(ctx, value)
if self.dependent is None:
OptionHelper.parameters[self.name] = return_value
return return_value
def process_value_concat_parameters(self, ctx, value):
if value is not None:
return_value = self.type_cast_value(ctx, value)
if self.concat is None:
OptionHelper.parameters[self.name] = return_value
if self.concat in OptionHelper.parameters and OptionHelper.parameters[self.concat] is not None:
return OptionHelper.parameters[self.concat] + '/' + return_value
return return_value
@staticmethod
def get_default_dict(kwargs):
type_argument = kwargs['type']
# given type is an integer
if type_argument == int:
return {'default': 0}
# given type is a float
if type_argument == float:
return {'default': 0.0}
# given type is a string
if type_argument == str:
return {'default': ''}
return {'default': None}
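# --- Editor-added sketch (not part of the original file) ---
# A minimal illustration of wiring OptionHelper into a click command with
# option_type='default_by_command', assuming click 7.x-style Option signatures
# (the signatures overridden in this module). The command name, option name and
# default values are invented for demonstration only.
@click.command()
@click.option('--epochs', cls=OptionHelper, option_type='default_by_command',
              type=int, default_options={'train': 20, 'default': 10})
def train(epochs):
    """Hypothetical command using the per-command default defined above."""
    click.echo('epochs = {}'.format(epochs))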
|
the-stack_0_21652 | r"""
Orlik-Solomon Algebras
"""
#*****************************************************************************
# Copyright (C) 2015 William Slofstra
# Travis Scrimshaw <tscrimsh at umn.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.cachefunc import cached_method
from sage.combinat.free_module import CombinatorialFreeModule
from sage.categories.algebras import Algebras
from sage.sets.family import Family
class OrlikSolomonAlgebra(CombinatorialFreeModule):
r"""
An Orlik-Solomon algebra.
Let `R` be a commutative ring. Let `M` be a matroid with ground set
`X`. Let `C(M)` denote the set of circuits of `M`. Let `E` denote
the exterior algebra over `R` generated by `\{ e_x \mid x \in X \}`.
The *Orlik-Solomon ideal* `J(M)` is the ideal of `E` generated by
.. MATH::
\partial e_S := \sum_{i=1}^t (-1)^{i-1} e_{j_1} \wedge e_{j_2}
\wedge \cdots \wedge \widehat{e}_{j_i} \wedge \cdots \wedge e_{j_t}
for all `S = \left\{ j_1 < j_2 < \cdots < j_t \right\} \in C(M)`,
where `\widehat{e}_{j_i}` means that the term `e_{j_i}` is being
omitted. The notation `\partial e_S` is not a coincidence, as
`\partial e_S` is actually the image of
`e_S := e_{j_1} \wedge e_{j_2} \wedge \cdots \wedge e_{j_t}` under the
unique derivation `\partial` of `E` which sends all `e_x` to `1`.
It is easy to see that `\partial e_S \in J(M)` not only for circuits
`S`, but also for any dependent set `S` of `M`. Moreover, every
dependent set `S` of `M` satisfies `e_S \in J(M)`.
The *Orlik-Solomon algebra* `A(M)` is the quotient `E / J(M)`. This is
a graded finite-dimensional skew-commutative `R`-algebra. Fix
some ordering on `X`; then, the NBC sets of `M` (that is, the subsets
of `X` containing no broken circuit of `M`) form a basis of `A(M)`.
(Here, a *broken circuit* of `M` is defined to be the result of
removing the smallest element from a circuit of `M`.)
In the current implementation, the basis of `A(M)` is indexed by the
NBC sets, which are implemented as frozensets.
INPUT:
- ``R`` -- the base ring
- ``M`` -- the defining matroid
- ``ordering`` -- (optional) an ordering of the ground set
EXAMPLES:
We create the Orlik-Solomon algebra of the uniform matroid `U(3, 4)`
and do some basic computations::
sage: M = matroids.Uniform(3, 4)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.dimension()
14
sage: G = OS.algebra_generators()
sage: M.broken_circuits()
frozenset({frozenset({1, 2, 3})})
sage: G[1] * G[2] * G[3]
OS{0, 1, 2} - OS{0, 1, 3} + OS{0, 2, 3}
REFERENCES:
- :wikipedia:`Arrangement_of_hyperplanes#The_Orlik-Solomon_algebra`
- [CE2001]_
"""
@staticmethod
def __classcall_private__(cls, R, M, ordering=None):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: from sage.algebras.orlik_solomon import OrlikSolomonAlgebra
sage: OS1 = OrlikSolomonAlgebra(QQ, M)
sage: OS2 = OrlikSolomonAlgebra(QQ, M, ordering=(0,1,2,3,4,5))
sage: OS3 = OrlikSolomonAlgebra(QQ, M, ordering=[0,1,2,3,4,5])
sage: OS1 is OS2 and OS2 is OS3
True
"""
if ordering is None:
ordering = sorted(M.groundset())
return super(OrlikSolomonAlgebra, cls).__classcall__(cls, R, M, tuple(ordering))
def __init__(self, R, M, ordering=None):
"""
Initialize ``self``.
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: TestSuite(OS).run()
We check on the matroid associated to the graph with 3 vertices and
2 edges between each vertex::
sage: G = Graph([[1,2],[1,2],[2,3],[2,3],[1,3],[1,3]], multiedges=True)
sage: M = Matroid(G)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: elts = OS.some_elements() + list(OS.algebra_generators())
sage: TestSuite(OS).run(elements=elts)
"""
self._M = M
self._sorting = {x:i for i,x in enumerate(ordering)}
# set up the dictionary of broken circuits
self._broken_circuits = dict()
for c in self._M.circuits():
L = sorted(c, key=lambda x: self._sorting[x])
self._broken_circuits[frozenset(L[1:])] = L[0]
cat = Algebras(R).FiniteDimensional().WithBasis().Graded()
CombinatorialFreeModule.__init__(self, R, M.no_broken_circuits_sets(ordering),
prefix='OS', bracket='{',
sorting_key=self._sort_key,
category=cat)
def _sort_key(self, x):
"""
Return the key used to sort the terms.
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS._sort_key(frozenset({1, 2}))
(-2, [1, 2])
sage: OS._sort_key(frozenset({0, 1, 2}))
(-3, [0, 1, 2])
sage: OS._sort_key(frozenset({}))
(0, [])
"""
return (-len(x), sorted(x))
def _repr_term(self, m):
"""
Return a string representation of the basis element indexed by `m`.
EXAMPLES::
sage: M = matroids.Uniform(3, 4)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS._repr_term(frozenset([0]))
'OS{0}'
"""
return "OS{{{}}}".format(str(list(m))[1:-1])
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: M.orlik_solomon_algebra(QQ)
Orlik-Solomon algebra of Wheel(3): Regular matroid of rank 3
on 6 elements with 16 bases
"""
return "Orlik-Solomon algebra of {}".format(self._M)
@cached_method
def one_basis(self):
"""
Return the index of the basis element corresponding to `1`
in ``self``.
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.one_basis() == frozenset([])
True
"""
return frozenset({})
@cached_method
def algebra_generators(self):
"""
Return the algebra generators of ``self``.
These form a family indexed by the ground set `X` of `M`. For
each `x \in X`, the `x`-th element is `e_x`.
EXAMPLES::
sage: M = matroids.Uniform(2, 2)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.algebra_generators()
Finite family {0: OS{0}, 1: OS{1}}
sage: M = matroids.Uniform(1, 2)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.algebra_generators()
Finite family {0: OS{0}, 1: OS{0}}
sage: M = matroids.Uniform(1, 3)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.algebra_generators()
Finite family {0: OS{0}, 1: OS{0}, 2: OS{0}}
"""
return Family(sorted(self._M.groundset()),
lambda i: self.subset_image(frozenset([i])))
@cached_method
def product_on_basis(self, a, b):
"""
Return the product in ``self`` of the basis elements
indexed by ``a`` and ``b``.
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.product_on_basis(frozenset([2]), frozenset([3,4]))
OS{0, 1, 2} - OS{0, 1, 4} + OS{0, 2, 3} + OS{0, 3, 4}
::
sage: G = OS.algebra_generators()
sage: prod(G)
0
sage: G[2] * G[4]
-OS{1, 2} + OS{1, 4}
sage: G[3] * G[4] * G[2]
OS{0, 1, 2} - OS{0, 1, 4} + OS{0, 2, 3} + OS{0, 3, 4}
sage: G[2] * G[3] * G[4]
OS{0, 1, 2} - OS{0, 1, 4} + OS{0, 2, 3} + OS{0, 3, 4}
sage: G[3] * G[2] * G[4]
-OS{0, 1, 2} + OS{0, 1, 4} - OS{0, 2, 3} - OS{0, 3, 4}
TESTS:
Let us check that `e_{s_1} e_{s_2} \cdots e_{s_k} = e_S` for any
subset `S = \{ s_1 < s_2 < \cdots < s_k \}` of the ground set::
sage: G = Graph([[1,2],[1,2],[2,3],[3,4],[4,2]], multiedges=True)
sage: M = Matroid(G).regular_matroid()
sage: E = M.groundset_list()
sage: OS = M.orlik_solomon_algebra(ZZ)
sage: G = OS.algebra_generators()
sage: import itertools
sage: def test_prod(F):
....: LHS = OS.subset_image(frozenset(F))
....: RHS = OS.prod([G[i] for i in sorted(F)])
....: return LHS == RHS
sage: all( test_prod(F) for k in range(len(E)+1)
....: for F in itertools.combinations(E, k) )
True
"""
if not a:
return self.basis()[b]
if not b:
return self.basis()[a]
if not a.isdisjoint(b):
return self.zero()
R = self.base_ring()
# since a is disjoint from b, we can just multiply the generator
if len(a) == 1:
i = list(a)[0]
# insert i into nbc, keeping track of sign in coeff
ns = b.union({i})
ns_sorted = sorted(ns, key=lambda x: self._sorting[x])
coeff = (-1)**ns_sorted.index(i)
return R(coeff) * self.subset_image(ns)
# r is the accumulator
# we reverse a in the product, so add a sign
# note that len(a) >= 2 here
if len(a) % 4 < 2:
sign = R.one()
else:
sign = - R.one()
r = self._from_dict({b: sign}, remove_zeros=False)
# now do the multiplication generator by generator
G = self.algebra_generators()
for i in sorted(a, key=lambda x: self._sorting[x]):
r = G[i] * r
return r
@cached_method
def subset_image(self, S):
"""
Return the element `e_S` of `A(M)` (``== self``) corresponding to
a subset `S` of the ground set of `M`.
INPUT:
- ``S`` -- a frozenset which is a subset of the ground set of `M`
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: BC = sorted(M.broken_circuits(), key=sorted)
sage: for bc in BC: (sorted(bc), OS.subset_image(bc))
([1, 3], -OS{0, 1} + OS{0, 3})
([1, 4, 5], OS{0, 1, 4} - OS{0, 1, 5} - OS{0, 3, 4} + OS{0, 3, 5})
([2, 3, 4], OS{0, 1, 2} - OS{0, 1, 4} + OS{0, 2, 3} + OS{0, 3, 4})
([2, 3, 5], OS{0, 2, 3} + OS{0, 3, 5})
([2, 4], -OS{1, 2} + OS{1, 4})
([2, 5], -OS{0, 2} + OS{0, 5})
([4, 5], -OS{3, 4} + OS{3, 5})
sage: M4 = matroids.CompleteGraphic(4)
sage: OS = M4.orlik_solomon_algebra(QQ)
sage: OS.subset_image(frozenset({2,3,4}))
OS{0, 2, 3} + OS{0, 3, 4}
An example of a custom ordering::
sage: G = Graph([[3, 4], [4, 1], [1, 2], [2, 3], [3, 5], [5, 6], [6, 3]])
sage: M = Matroid(G)
sage: s = [(5, 6), (1, 2), (3, 5), (2, 3), (1, 4), (3, 6), (3, 4)]
sage: sorted([sorted(c) for c in M.circuits()])
[[(1, 2), (1, 4), (2, 3), (3, 4)],
[(3, 5), (3, 6), (5, 6)]]
sage: OS = M.orlik_solomon_algebra(QQ, ordering=s)
sage: OS.subset_image(frozenset([]))
OS{}
sage: OS.subset_image(frozenset([(1,2),(3,4),(1,4),(2,3)]))
0
sage: OS.subset_image(frozenset([(2,3),(1,2),(3,4)]))
OS{(1, 2), (3, 4), (2, 3)}
sage: OS.subset_image(frozenset([(1,4),(3,4),(2,3),(3,6),(5,6)]))
-OS{(1, 2), (5, 6), (2, 3), (1, 4), (3, 6)}
+ OS{(1, 2), (5, 6), (3, 4), (1, 4), (3, 6)}
- OS{(1, 2), (5, 6), (3, 4), (2, 3), (3, 6)}
sage: OS.subset_image(frozenset([(1,4),(3,4),(2,3),(3,6),(3,5)]))
OS{(1, 2), (5, 6), (2, 3), (1, 4), (3, 5)}
- OS{(1, 2), (5, 6), (2, 3), (1, 4), (3, 6)}
+ OS{(1, 2), (5, 6), (3, 4), (1, 4), (3, 5)}
+ OS{(1, 2), (5, 6), (3, 4), (1, 4), (3, 6)}
- OS{(1, 2), (5, 6), (3, 4), (2, 3), (3, 5)}
- OS{(1, 2), (5, 6), (3, 4), (2, 3), (3, 6)}
TESTS::
sage: G = Graph([[1,2],[1,2],[2,3],[2,3],[1,3],[1,3]], multiedges=True)
sage: M = Matroid(G)
sage: sorted([sorted(c) for c in M.circuits()])
[[0, 1], [0, 2, 4], [0, 2, 5], [0, 3, 4],
[0, 3, 5], [1, 2, 4], [1, 2, 5], [1, 3, 4],
[1, 3, 5], [2, 3], [4, 5]]
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.subset_image(frozenset([]))
OS{}
sage: OS.subset_image(frozenset([1, 2, 3]))
0
sage: OS.subset_image(frozenset([1, 3, 5]))
0
sage: OS.subset_image(frozenset([1, 2]))
OS{0, 2}
sage: OS.subset_image(frozenset([3, 4]))
-OS{0, 2} + OS{0, 4}
sage: OS.subset_image(frozenset([1, 5]))
OS{0, 4}
sage: G = Graph([[1,2],[1,2],[2,3],[3,4],[4,2]], multiedges=True)
sage: M = Matroid(G)
sage: sorted([sorted(c) for c in M.circuits()])
[[0, 1], [2, 3, 4]]
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.subset_image(frozenset([]))
OS{}
sage: OS.subset_image(frozenset([1, 3, 4]))
-OS{0, 2, 3} + OS{0, 2, 4}
We check on a non-standard ordering::
sage: M = matroids.Wheel(3)
sage: o = [5,4,3,2,1,0]
sage: OS = M.orlik_solomon_algebra(QQ, ordering=o)
sage: BC = sorted(M.broken_circuits(ordering=o), key=sorted)
sage: for bc in BC: (sorted(bc), OS.subset_image(bc))
([0, 1], OS{0, 3} - OS{1, 3})
([0, 1, 4], OS{0, 3, 5} - OS{0, 4, 5} - OS{1, 3, 5} + OS{1, 4, 5})
([0, 2], OS{0, 5} - OS{2, 5})
([0, 2, 3], -OS{0, 3, 5} + OS{2, 3, 5})
([1, 2], OS{1, 4} - OS{2, 4})
([1, 2, 3], -OS{1, 3, 5} + OS{1, 4, 5} + OS{2, 3, 5} - OS{2, 4, 5})
([3, 4], OS{3, 5} - OS{4, 5})
"""
if not isinstance(S, frozenset):
raise ValueError("S needs to be a frozenset")
for bc in self._broken_circuits:
if bc.issubset(S):
i = self._broken_circuits[bc]
if i in S:
# ``S`` contains not just a broken circuit, but an
# actual circuit; then `e_S = 0`.
return self.zero()
coeff = self.base_ring().one()
# Now, reduce ``S``, and build the result ``r``:
r = self.zero()
switch = False
Si = S.union({i})
Ss = sorted(Si, key=lambda x: self._sorting[x])
for j in Ss:
if j in bc:
r += coeff * self.subset_image(Si.difference({j}))
if switch:
coeff *= -1
if j == i:
switch = True
return r
else: # So ``S`` is an NBC set.
return self.monomial(S)
def degree_on_basis(self, m):
"""
Return the degree of the basis element indexed by ``m``.
EXAMPLES::
sage: M = matroids.Wheel(3)
sage: OS = M.orlik_solomon_algebra(QQ)
sage: OS.degree_on_basis(frozenset([1]))
1
sage: OS.degree_on_basis(frozenset([0, 2, 3]))
3
"""
return len(m)
|
the-stack_0_21653 | # Copyright (c) 2016 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line interface sub-commands related to replicas.
"""
import json
import os
from cliff import command
from cliff import lister
from cliff import show
from coriolisclient.cli import formatter
from coriolisclient.cli import replica_executions
class ReplicaFormatter(formatter.EntityFormatter):
columns = ("ID",
"Instances",
"Last tasks execution",
"Created",
)
def _get_sorted_list(self, obj_list):
return sorted(obj_list, key=lambda o: o.created_at)
def _format_last_execution(self, obj):
if obj.executions:
execution = sorted(obj.executions, key=lambda e: e.created_at)[-1]
return "%(id)s %(status)s" % execution.to_dict()
return ""
def _get_formatted_data(self, obj):
data = (obj.id,
"\n".join(obj.instances),
self._format_last_execution(obj),
obj.created_at,
)
return data
class ReplicaDetailFormatter(formatter.EntityFormatter):
def __init__(self, show_instances_data=False):
self.columns = [
"id",
"created",
"last_updated",
"instances",
"origin_endpoint_id",
"destination_endpoint_id",
"destination_environment",
"network_map",
"executions",
]
if show_instances_data:
self.columns.append("instances-data")
def _format_instances(self, obj):
return os.linesep.join(sorted(obj.instances))
def _format_destination_environment(self, obj):
if obj.destination_environment is not None:
return obj.destination_environment.to_dict()
else:
return ""
def _format_execution(self, execution):
return ("%(id)s %(status)s" % execution.to_dict())
def _format_executions(self, executions):
return ("%(ls)s" % {"ls": os.linesep}).join(
[self._format_execution(e) for e in
sorted(executions, key=lambda e: e.created_at)])
def _get_formatted_data(self, obj):
data = [obj.id,
obj.created_at,
obj.updated_at,
self._format_instances(obj),
obj.origin_endpoint_id,
obj.destination_endpoint_id,
self._format_destination_environment(obj),
obj.network_map,
self._format_executions(obj.executions),
]
if "instances-data" in self.columns:
data.append(obj.info)
return data
class CreateReplica(show.ShowOne):
"""Create a new replica"""
def get_parser(self, prog_name):
parser = super(CreateReplica, self).get_parser(prog_name)
parser.add_argument('--origin-endpoint', required=True,
help='The origin endpoint id')
parser.add_argument('--destination-endpoint', required=True,
help='The destination endpoint id')
parser.add_argument('--destination-environment',
help='JSON encoded data related to the '
'destination\'s environment')
parser.add_argument('--network-map', dest='network_map', required=True,
help='JSON mapping between identifiers of '
'networks on the source and identifiers of '
'networks on the destination.')
parser.add_argument('--instance', action='append', required=True,
dest="instances",
help='An instances to be migrated, can be '
'specified multiple times')
return parser
def take_action(self, args):
destination_environment = None
if args.destination_environment:
destination_environment = json.loads(args.destination_environment)
network_map = None
if args.network_map:
network_map = json.loads(args.network_map)
replica = self.app.client_manager.coriolis.replicas.create(
args.origin_endpoint,
args.destination_endpoint,
destination_environment,
args.instances,
network_map)
return ReplicaDetailFormatter().get_formatted_entity(replica)
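# Editor-added note: --destination-environment and --network-map are parsed with
# json.loads above, so both must be supplied as JSON strings on the command line,
# e.g. a network map of the (illustrative) form '{"source-net-id": "dest-net-id"}'.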
class ShowReplica(show.ShowOne):
"""Show a replica"""
def get_parser(self, prog_name):
parser = super(ShowReplica, self).get_parser(prog_name)
parser.add_argument('id', help='The replica\'s id')
parser.add_argument('--show-instances-data', action='store_true',
help='Includes the instances data used for tasks '
'execution, this is useful for troubleshooting',
default=False)
return parser
def take_action(self, args):
replica = self.app.client_manager.coriolis.replicas.get(args.id)
return ReplicaDetailFormatter(
args.show_instances_data).get_formatted_entity(replica)
class DeleteReplica(command.Command):
"""Delete a replica"""
def get_parser(self, prog_name):
parser = super(DeleteReplica, self).get_parser(prog_name)
parser.add_argument('id', help='The replica\'s id')
return parser
def take_action(self, args):
self.app.client_manager.coriolis.replicas.delete(args.id)
class DeleteReplicaDisks(show.ShowOne):
"""Delete replica target disks"""
def get_parser(self, prog_name):
parser = super(DeleteReplicaDisks, self).get_parser(prog_name)
parser.add_argument('id', help='The replica\'s id')
return parser
def take_action(self, args):
execution = self.app.client_manager.coriolis.replicas.delete_disks(
args.id)
return replica_executions.ReplicaExecutionDetailFormatter(
).get_formatted_entity(execution)
class ListReplica(lister.Lister):
"""List replicas"""
def get_parser(self, prog_name):
parser = super(ListReplica, self).get_parser(prog_name)
return parser
def take_action(self, args):
obj_list = self.app.client_manager.coriolis.replicas.list()
return ReplicaFormatter().list_objects(obj_list)
|
the-stack_0_21654 | # coding=utf-8
# Copyright (c) DIRECT Contributors
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Taken from Detectron 2, licensed under Apache 2.0.
# https://github.com/facebookresearch/detectron2/blob/60d7a1fd33cc48e58968659cd3301f3300b2786b/detectron2/solver/lr_scheduler.py
# Changes:
# - Docstring to match the rest of the library.
# - Calls to other subroutines which do not exist in DIRECT.
# - Stylistic changes.
import logging
import math
from bisect import bisect_right
from typing import List
import torch
# NOTE: PyTorch's LR scheduler interface uses names that assume the LR changes
# only on epoch boundaries. We typically use iteration based schedules instead.
# As a result, "epoch" (e.g., as in self.last_epoch) should be understood to mean
# "iteration" instead.
# FIXME: ideally this would be achieved with a CombinedLRScheduler, separating
# MultiStepLR with WarmupLR but the current LRScheduler design doesn't allow it.
class LRScheduler(torch.optim.lr_scheduler._LRScheduler): # pylint: disable=protected-access
def __init__(self, optimizer, last_epoch=-1, verbose=False):
super().__init__(optimizer, last_epoch, verbose)
self.logger = logging.getLogger(type(self).__name__)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which is not the optimizer or logger.
"""
state_dict = {key: value for key, value in self.__dict__.items() if key not in ["optimizer", "logger"]}
return state_dict
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): # pylint: disable=protected-access
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestones: List[int],
gamma: float = 0.1,
warmup_factor: float = 0.001,
warmup_iterations: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
if not list(milestones) == sorted(milestones):
raise ValueError(
"Milestones should be a list of" " increasing integers. Got {milestones}",
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iterations = warmup_iterations
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]: # type: ignore
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method,
self.last_epoch, # type: ignore
self.warmup_iterations,
self.warmup_factor,
)
return [
base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch) # type: ignore
for base_lr in self.base_lrs # type: ignore
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
class WarmupCosineLR(torch.optim.lr_scheduler._LRScheduler): # pylint: disable=protected-access
def __init__(
self,
optimizer: torch.optim.Optimizer,
max_iters: int,
warmup_factor: float = 0.001,
warmup_iterations: int = 1000,
warmup_method: str = "linear",
last_epoch: int = -1,
):
self.max_iters = max_iters
self.warmup_factor = warmup_factor
self.warmup_iterations = warmup_iterations
self.warmup_method = warmup_method
super().__init__(optimizer, last_epoch)
def get_lr(self) -> List[float]: # type: ignore
warmup_factor = _get_warmup_factor_at_iter(
self.warmup_method,
self.last_epoch, # type: ignore
self.warmup_iterations,
self.warmup_factor,
)
# Different definitions of half-cosine with warmup are possible. For
# simplicity we multiply the standard half-cosine schedule by the warmup
# factor. An alternative is to start the period of the cosine at warmup_iterations
# instead of at 0. In the case that warmup_iterations << max_iters the two are
# very close to each other.
return [
base_lr * warmup_factor * 0.5 * (1.0 + math.cos(math.pi * self.last_epoch / self.max_iters)) # type: ignore
for base_lr in self.base_lrs # type: ignore
]
def _compute_values(self) -> List[float]:
# The new interface
return self.get_lr()
def _get_warmup_factor_at_iter(method: str, curr_iter: int, warmup_iters: int, warmup_factor: float) -> float:
"""Return the learning rate warmup factor at a specific iteration.
Parameters
----------
method: str
Warmup method; either "constant" or "linear".
curr_iter: int
Iteration at which to calculate the warmup factor.
warmup_iters: int
The length of the warmup phases.
warmup_factor: float
The base warmup factor (the meaning changes according to the method used).
Returns
-------
float: The effective warmup factor at the given iteration.
"""
if curr_iter >= warmup_iters:
return 1.0
if method == "constant":
return warmup_factor
if method == "linear":
alpha = curr_iter / warmup_iters
return warmup_factor * (1 - alpha) + alpha
raise ValueError(f"Unknown warmup method: {method}")
|
the-stack_0_21655 | # returns roman numeral of number between 1 and 100
def to_numeral(num):
"int -> string"
ints = (100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
result = ""
for i in range(len(ints)):
count = int(num // ints[i])
result += nums[i] * count
num -= ints[i] * count
return result
# returns roman numeral of given integer if it is between 1 and 100
def r_numeral(num):
"int -> string"
if not str(num).isdigit():
return "Only numbers please."
elif int(num) < 1 or int(num) > 100:
return "Must be between 1 and 100."
else:
return to_numeral(int(num))
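# Editor-added sanity check (not part of the original exercise): the greedy
# decomposition above gives 94 = 90 + 4 -> "XC" + "IV" and 40 -> "XL".
assert to_numeral(94) == "XCIV"
assert to_numeral(40) == "XL"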
# py.test exercise_11_1_16.py --cov=exercise_11_1_16.py --cov-report=html
def test_r_numeral():
assert r_numeral(0) == "Must be between 1 and 100."
assert r_numeral(5) == "V"
assert r_numeral(101) == "Must be between 1 and 100."
assert r_numeral('dk4sj') == "Only numbers please."
if __name__ == "__main__":
print(r_numeral(input("Number: ")))
|
the-stack_0_21657 | import dbus.mainloop.glib, sys
from gi.repository import GLib
import can
from prototypes import text_encoder
import time
def sendToNav(text):
frames = text_encoder.encode(text, 0x2A)
for frame in frames:
can0.send(can.Message(arbitration_id=0x5E7, data=frame, extended_id=False))
time.sleep(0.05)
def on_property_changed(interface, changed, invalidated):
if interface != 'org.bluez.MediaPlayer1':
return
for prop, value in changed.items():
if prop == 'Status':
print('Playback Status: {}'.format(value))
elif prop == 'Track':
if value.get('Title', ''):
sendToNav('a\n{}: {}'.format(value.get('Artist', ''), value.get('Title', '')))
def on_playback_control(msg):
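# Editor-added note: the Play and Pause branches below compare against the same
# frame payload (0x20, 0x00), so the Pause branch is unreachable as written.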
if msg.data == bytearray([0x20,0x00]):
player_iface.Play()
elif msg.data == bytearray([0x20,0x00]):
player_iface.Pause()
elif msg.data == bytearray([0x10,0x00]):
player_iface.Next()
elif msg.data == bytearray([0x08,0x00]):
player_iface.Previous()
return True
can0 = can.Bus(channel='can0', bustype='socketcan_ctypes')
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
obj = bus.get_object('org.bluez', "/")
mgr = dbus.Interface(obj, 'org.freedesktop.DBus.ObjectManager')
for path, ifaces in mgr.GetManagedObjects().items():
adapter = ifaces.get('org.bluez.MediaPlayer1')
if not adapter:
continue
player = bus.get_object('org.bluez', path)
player_iface = dbus.Interface(player, dbus_interface='org.bluez.MediaPlayer1')
break
if not adapter:
sendToNav('AB\nCDD')
sys.exit('Error: Media Player not found.')
bus.add_signal_receiver(
on_property_changed,
bus_name='org.bluez',
signal_name='PropertiesChanged',
dbus_interface='org.freedesktop.DBus.Properties')
# listeners = [
# on_playback_control # Callback function
# ]
# Create Notifier with an explicit loop to use for scheduling of callbacks
# loop = asyncio.get_event_loop()
# notifier = can.Notifier(can0, listeners, loop=loop)
# loop.run_forever()
# GLib.io_add_watch(sys.stdin, GLib.IO_IN, on_playback_control)
GLib.MainLoop().run()
|
the-stack_0_21659 | OAUTH_CONFIG = {
"google": {
"scope": ["https://www.googleapis.com/auth/userinfo.profile"],
"auth_url": "https://accounts.google.com/o/oauth2/v2/auth",
"token_url": "https://oauth2.googleapis.com/token",
"user_url": "https://www.googleapis.com/oauth2/v1/userinfo?alt=json",
},
"github": {
"scope": ["user"],
"user_url": "https://api.github.com/user",
"auth_url": "https://github.com/login/oauth/authorize",
"token_url": "https://github.com/login/oauth/access_token",
},
"facebook": {
"scope": ["email", "public_profile"],
"auth_url": "https://www.facebook.com/v11.0/dialog/oauth",
"user_url": "",
"token_url": "https://graph.facebook.com/v11.0/oauth/access_token",
},
"gitlab": {
"scope": ["read_user"],
"sep": "+",
"user_url": "/api/v4/user",
"auth_url": "/oauth/authorize",
"token_url": "/oauth/token",
},
}
ALLOWED_TRUE_BOOLEANS = ["y", "t", "1"]
ARRAY_FIELDS = ["metadata.tags", "metadata.markers", "metadata.annotations"]
NUMERIC_FIELDS = [
"duration",
"start_time",
"summary.failures",
"summary.errors",
"summary.passes",
"summary.skips",
"summary.tests",
"summary.xfailures",
"summary.xpasses",
]
MAX_PAGE_SIZE = 500 # max page size API can return, page_sizes over this are sent to a worker
HEATMAP_MAX_BUILDS = 40 # max for number of builds that are possible to display in heatmap
BARCHART_MAX_BUILDS = 150 # max for number of builds possible to display in bar chart
COUNT_TIMEOUT = 0.5 # timeout for counting the number of documents [s]
COUNT_ESTIMATE_LIMIT = 1000 # if count estimate < COUNT_ESTIMATE_LIMIT, actually count
MAX_DOCUMENTS = 100000 # max documents for pagination, when apply_max=True
JJV_RUN_LIMIT = 8000 # max runs from which to aggregate Jenkins Jobs
HEATMAP_RUN_LIMIT = 3000 # max runs from which to determine recent Jenkins builds
SYNC_RUN_TIME = 3 * 60 * 60 # time for searching through aborted runs, 3 hrs in [s]
_ADDITIONAL_FILTERS_PARAM = {
"name": "additional_filters",
"description": "Comma-separated list of additional filters, cf. "
"https://docs.ibutsu-project.org/en/latest/user-guide/filter-help.html ",
"type": "string",
"required": False,
}
WIDGET_TYPES = {
"jenkins-heatmap": {
"id": "jenkins-heatmap",
"title": "Jenkins Pipeline Heatmap",
"description": "A heatmap of test runs from a Jenkins pipeline",
"params": [
{
"name": "job_name",
"description": "The Jenkins job name, "
"this is the value of the 'metadata.jenkins.job_name' key.",
"type": "string",
"required": True,
},
{
"name": "builds",
"description": "The number of Jenkins builds to analyze.",
"type": "integer",
"default": 5,
"required": True,
},
{
"name": "group_field",
"description": "The field in a result to group by, typically 'component'",
"type": "string",
"required": True,
"default": "component",
},
{
"name": "count_skips",
"description": "Count skips against the pass rate.",
"type": "boolean",
"required": False,
"default": True,
},
_ADDITIONAL_FILTERS_PARAM,
],
"type": "widget",
},
"run-aggregator": {
"id": "run-aggregator",
"title": "Run Aggregation",
"description": "An aggregation of recent run results",
"params": [
{
"name": "group_field",
"description": "Run data to order by, e.g. 'component' or 'env'",
"type": "string",
"required": True,
"default": "component",
},
{
"name": "weeks",
"description": "Aggregate test results from <weeks> weeks ago, e.g. 4",
"type": "integer",
"required": True,
"default": 4,
},
{
"name": "chart_type",
"description": "Type of chart with which to display results, e.g. 'bar' or 'line'",
"type": "string",
"required": False,
"default": "bar",
},
_ADDITIONAL_FILTERS_PARAM,
],
"type": "widget",
},
"result-summary": {
"id": "result-summary",
"title": "Result Summary",
"description": "A summary of the saved test results, optionally filtered",
"params": [
{
"name": "source",
"description": "Filter test results by a specific 'source'",
"type": "string",
"required": False,
},
{
"name": "env",
"description": "Filter test results by a specific 'env'",
"type": "string",
"required": False,
},
{
"name": "job_name",
"description": "Filter test results by a specific jenkins job",
"type": "string",
"required": False,
},
_ADDITIONAL_FILTERS_PARAM,
],
"type": "widget",
},
"result-aggregator": {
"id": "result-aggregator",
"title": "Result Aggregation",
"description": "A count of test results that fall into various categories",
"params": [
{
"name": "group_field",
"description": "Result data to group by, e.g. 'env', "
"'metadata.assignee', 'metadata.exception_name'",
"type": "string",
"required": True,
"default": "result",
},
{
"name": "days",
"description": "Aggregate test results from <days> days ago, e.g. 3",
"type": "float",
"required": False,
"default": 3,
},
{
"name": "run_id",
"description": "Aggregate results from a specific run",
"type": "string",
"required": False,
},
{
"name": "chart_type",
"description": "Type of chart with which to display results, e.g. 'pie' or 'bar'",
"type": "string",
"required": False,
"default": "pie",
},
_ADDITIONAL_FILTERS_PARAM,
],
"type": "widget",
},
"jenkins-job-view": {
"id": "jenkins-job-view",
"title": "Jenkins Job View",
"params": [
{"name": "filter", "description": "Filters for the Jenkins Jobs", "type": "list"},
{"name": "page", "description": "Desired page of builds to return.", "type": "integer"},
{
"name": "page_size",
"description": "Number of builds on each page",
"type": "integer",
},
{
"name": "run_limit",
"description": "Limit on runs from which to aggregate jenkins jobs",
"type": "integer",
},
],
"type": "view",
},
"jenkins-analysis-view": {
"id": "jenkins-analysis-view",
"title": "Jenkins Job Analysis",
"params": [
{"name": "job_name", "description": "The name of the Jenkins Job", "type": "string"},
{"name": "builds", "description": "The number of builds to fetch", "type": "integer"},
],
"type": "view",
},
"jenkins-bar-chart": {
"id": "jenkins-bar-chart",
"title": "Jenkins Bar Chart",
"description": "A bar chart to display aggregate test results "
"for a particular jenkins job over time",
"params": [
{
"name": "job_name",
"description": "The name of the Jenkins Job",
"type": "string",
"required": True,
},
{
"name": "builds",
"description": "The number of builds to fetch",
"type": "integer",
"required": True,
"default": 30,
},
],
"type": "widget",
},
"jenkins-line-chart": {
"id": "jenkins-line-chart",
"title": "Jenkins Line Chart",
"description": "A line chart to display Jenkins job run time for a particular jenkins job",
"params": [
{
"name": "job_name",
"description": "The name of the Jenkins Job",
"type": "string",
"required": True,
},
{
"name": "builds",
"description": "The number of builds to fetch",
"type": "integer",
"required": False,
"default": 30,
},
],
"type": "widget",
},
}
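# --- Editor-added sketch (not part of the original module) ---
# Illustration of how the WIDGET_TYPES registry above can be queried, for
# example to list the required parameters of a widget type.
if __name__ == "__main__":
    heatmap_params = WIDGET_TYPES["jenkins-heatmap"]["params"]
    required = [p["name"] for p in heatmap_params if p.get("required")]
    print(required)  # ['job_name', 'builds', 'group_field']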
|
the-stack_0_21660 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import flavors
from nova import db
from nova import exception
from nova import notifications
from nova.objects import base
from nova.objects import fields
from nova.objects import flavor as flavor_obj
from nova.objects import instance_fault
from nova.objects import instance_info_cache
from nova.objects import pci_device
from nova.objects import security_group
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups',
'pci_devices']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault']
# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups']
def _expected_cols(expected_attrs):
"""Return expected_attrs that are columns needing joining."""
if not expected_attrs:
return expected_attrs
return [attr for attr in expected_attrs
if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]
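# Editor-added note: _expected_cols() keeps only the attributes that correspond
# to DB joins, e.g. _expected_cols(['metadata', 'fault']) returns ['metadata'],
# since 'fault' is an optional attribute loaded separately in _from_db_object().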
class Instance(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added info_cache
# Version 1.2: Added security_groups
# Version 1.3: Added expected_vm_state and admin_state_reset to
# save()
# Version 1.4: Added locked_by and deprecated locked
# Version 1.5: Added cleaned
# Version 1.6: Added pci_devices
# Version 1.7: String attributes updated to support unicode
# Version 1.8: 'security_groups' and 'pci_devices' cannot be None
# Version 1.9: Make uuid a non-None real string
# Version 1.10: Added use_slave to refresh and get_by_uuid
# Version 1.11: Update instance from database during destroy
VERSION = '1.11'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'image_ref': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'hostname': fields.StringField(nullable=True),
'launch_index': fields.IntegerField(nullable=True),
'key_name': fields.StringField(nullable=True),
'key_data': fields.StringField(nullable=True),
'power_state': fields.IntegerField(nullable=True),
'vm_state': fields.StringField(nullable=True),
'task_state': fields.StringField(nullable=True),
'memory_mb': fields.IntegerField(nullable=True),
'vcpus': fields.IntegerField(nullable=True),
'root_gb': fields.IntegerField(nullable=True),
'ephemeral_gb': fields.IntegerField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'instance_type_id': fields.IntegerField(nullable=True),
'user_data': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'scheduled_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'launched_on': fields.StringField(nullable=True),
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
'locked': fields.BooleanField(default=False),
'locked_by': fields.StringField(nullable=True),
'os_type': fields.StringField(nullable=True),
'architecture': fields.StringField(nullable=True),
'vm_mode': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'root_device_name': fields.StringField(nullable=True),
'default_ephemeral_device': fields.StringField(nullable=True),
'default_swap_device': fields.StringField(nullable=True),
'config_drive': fields.StringField(nullable=True),
'access_ip_v4': fields.IPV4AddressField(nullable=True),
'access_ip_v6': fields.IPV6AddressField(nullable=True),
'auto_disk_config': fields.BooleanField(default=False),
'progress': fields.IntegerField(nullable=True),
'shutdown_terminate': fields.BooleanField(default=False),
'disable_terminate': fields.BooleanField(default=False),
'cell_name': fields.StringField(nullable=True),
'metadata': fields.DictOfStringsField(),
'system_metadata': fields.DictOfNullableStringsField(),
'info_cache': fields.ObjectField('InstanceInfoCache',
nullable=True),
'security_groups': fields.ObjectField('SecurityGroupList'),
'fault': fields.ObjectField('InstanceFault', nullable=True),
'cleaned': fields.BooleanField(default=False),
'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
}
obj_extra_fields = ['name']
def __init__(self, *args, **kwargs):
super(Instance, self).__init__(*args, **kwargs)
self._reset_metadata_tracking()
def _reset_metadata_tracking(self):
self._orig_system_metadata = (dict(self.system_metadata) if
'system_metadata' in self else {})
self._orig_metadata = (dict(self.metadata) if
'metadata' in self else {})
def obj_reset_changes(self, fields=None):
super(Instance, self).obj_reset_changes(fields)
self._reset_metadata_tracking()
def obj_what_changed(self):
changes = super(Instance, self).obj_what_changed()
if 'metadata' in self and self.metadata != self._orig_metadata:
changes.add('metadata')
if 'system_metadata' in self and (self.system_metadata !=
self._orig_system_metadata):
changes.add('system_metadata')
return changes
def obj_make_compatible(self, primitive, target_version):
target_version = (int(target_version.split('.')[0]),
int(target_version.split('.')[1]))
unicode_attributes = ['user_id', 'project_id', 'image_ref',
'kernel_id', 'ramdisk_id', 'hostname',
'key_name', 'key_data', 'host', 'node',
'user_data', 'availability_zone',
'display_name', 'display_description',
'launched_on', 'locked_by', 'os_type',
'architecture', 'vm_mode', 'root_device_name',
'default_ephemeral_device',
'default_swap_device', 'config_drive',
'cell_name']
if target_version < (1, 10) and 'info_cache' in primitive:
# NOTE(danms): Instance <= 1.9 (havana) had info_cache 1.4
self.info_cache.obj_make_compatible(primitive['info_cache'],
'1.4')
primitive['info_cache']['nova_object.version'] = '1.4'
if target_version < (1, 7):
# NOTE(danms): Before 1.7, we couldn't handle unicode in
# string fields, so squash it here
for field in [x for x in unicode_attributes if x in primitive
and primitive[x] is not None]:
primitive[field] = primitive[field].encode('ascii', 'replace')
if target_version < (1, 6):
# NOTE(danms): Before 1.6 there was no pci_devices list
if 'pci_devices' in primitive:
del primitive['pci_devices']
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for key in self.fields:
if key == 'name':
# NOTE(danms): prevent recursion
continue
elif not self.obj_attr_is_set(key):
# NOTE(danms): Don't trigger lazy-loads
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
@staticmethod
def _from_db_object(context, instance, db_inst, expected_attrs=None):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
if expected_attrs is None:
expected_attrs = []
# Most of the field names match right now, so be quick
for field in instance.fields:
if field in INSTANCE_OPTIONAL_ATTRS:
continue
elif field == 'deleted':
instance.deleted = db_inst['deleted'] == db_inst['id']
elif field == 'cleaned':
instance.cleaned = db_inst['cleaned'] == 1
else:
instance[field] = db_inst[field]
if 'metadata' in expected_attrs:
instance['metadata'] = utils.instance_meta(db_inst)
if 'system_metadata' in expected_attrs:
instance['system_metadata'] = utils.instance_sys_meta(db_inst)
if 'fault' in expected_attrs:
instance['fault'] = (
instance_fault.InstanceFault.get_latest_for_instance(
context, instance.uuid))
if 'pci_devices' in expected_attrs:
pci_devices = pci_device._make_pci_list(
context, pci_device.PciDeviceList(),
db_inst['pci_devices'])
instance['pci_devices'] = pci_devices
if 'info_cache' in expected_attrs:
if db_inst['info_cache'] is None:
instance.info_cache = None
elif not instance.obj_attr_is_set('info_cache'):
# TODO(danms): If this ever happens on a backlevel instance
# passed to us by a backlevel service, things will break
instance.info_cache = instance_info_cache.InstanceInfoCache()
if instance.info_cache is not None:
instance_info_cache.InstanceInfoCache._from_db_object(
context, instance.info_cache, db_inst['info_cache'])
if 'security_groups' in expected_attrs:
sec_groups = security_group._make_secgroup_list(
context, security_group.SecurityGroupList(),
db_inst['security_groups'])
instance['security_groups'] = sec_groups
instance._context = context
instance.obj_reset_changes()
return instance
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable_classmethod
def get_by_id(cls, context, inst_id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get(context, inst_id,
columns_to_join=columns_to_join)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable
def create(self, context):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
updates.pop('id', None)
expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
if attr in updates]
if 'security_groups' in updates:
updates['security_groups'] = [x.name for x in
updates['security_groups']]
if 'info_cache' in updates:
updates['info_cache'] = {
'network_info': updates['info_cache'].network_info.json()
}
db_inst = db.instance_create(context, updates)
Instance._from_db_object(context, self, db_inst, expected_attrs)
@base.remotable
def destroy(self, context):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
if not self.obj_attr_is_set('uuid'):
raise exception.ObjectActionError(action='destroy',
reason='no uuid')
if not self.obj_attr_is_set('host') or not self.host:
# NOTE(danms): If our host is not set, avoid a race
constraint = db.constraint(host=db.equal_any(None))
else:
constraint = None
try:
db_inst = db.instance_destroy(context, self.uuid,
constraint=constraint)
Instance._from_db_object(context, self, db_inst)
except exception.ConstraintNotMet:
raise exception.ObjectActionError(action='destroy',
reason='host changed')
delattr(self, base.get_attrname('id'))
def _save_info_cache(self, context):
self.info_cache.save(context)
def _save_security_groups(self, context):
for secgroup in self.security_groups:
secgroup.save(context)
def _save_fault(self, context):
# NOTE(danms): I don't think we need to worry about this, do we?
pass
def _save_pci_devices(self, context):
# NOTE(yjiang5): All devices are held by the PCI tracker; only the PCI tracker
# is permitted to update the DB. All changes to devices made here will
# be dropped.
pass
@base.remotable
def save(self, context, expected_vm_state=None,
expected_task_state=None, admin_state_reset=False):
"""Save updates to this instance
Column-wise updates will be made based on the result of
self.what_changed(). If expected_task_state is provided,
it will be checked against the in-database copy of the
instance before updates are made.
:param context: Security context
:param expected_task_state: Optional tuple of valid task states
for the instance to be in.
:param expected_vm_state: Optional tuple of valid vm states
for the instance to be in.
:param admin_state_reset: True if admin API is forcing setting
of task_state/vm_state.
"""
cell_type = cells_opts.get_cell_type()
if cell_type == 'api' and self.cell_name:
# NOTE(comstud): We need to stash a copy of ourselves
# before any updates are applied. When we call the save
# methods on nested objects, we will lose any changes to
# them. But we need to make sure child cells can tell
# what is changed.
#
# We also need to nuke any updates to vm_state and task_state
# unless admin_state_reset is True. compute cells are
# authoritative for their view of vm_state and task_state.
stale_instance = self.obj_clone()
def _handle_cell_update_from_api():
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_from_api(context, stale_instance,
expected_vm_state,
expected_task_state,
admin_state_reset)
else:
stale_instance = None
updates = {}
changes = self.obj_what_changed()
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(self[field], base.NovaObject)):
try:
getattr(self, '_save_%s' % field)(context)
except AttributeError:
LOG.exception(_('No save handler for %s') % field,
instance=self)
elif field in changes:
updates[field] = self[field]
if not updates:
if stale_instance:
_handle_cell_update_from_api()
return
# Cleaned needs to be turned back into an int here
if 'cleaned' in updates:
if updates['cleaned']:
updates['cleaned'] = 1
else:
updates['cleaned'] = 0
if expected_task_state is not None:
updates['expected_task_state'] = expected_task_state
if expected_vm_state is not None:
updates['expected_vm_state'] = expected_vm_state
expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
if self.obj_attr_is_set(attr)]
# NOTE(alaski): We need to pull system_metadata for the
# notification.send_update() below. If we don't there's a KeyError
# when it tries to extract the flavor.
if 'system_metadata' not in expected_attrs:
expected_attrs.append('system_metadata')
old_ref, inst_ref = db.instance_update_and_get_original(
context, self.uuid, updates, update_cells=False,
columns_to_join=_expected_cols(expected_attrs))
if stale_instance:
_handle_cell_update_from_api()
elif cell_type == 'compute':
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_at_top(context, inst_ref)
self._from_db_object(context, self, inst_ref, expected_attrs)
notifications.send_update(context, old_ref, inst_ref)
self.obj_reset_changes()
@base.remotable
def refresh(self, context, use_slave=False):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(context, uuid=self.uuid,
expected_attrs=extra,
use_slave=use_slave)
# NOTE(danms): We orphan the instance copy so we do not unexpectedly
# trigger a lazy-load (which would mean we failed to calculate the
# expected_attrs properly)
current._context = None
for field in self.fields:
if self.obj_attr_is_set(field) and self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def obj_load_attr(self, attrname):
if attrname not in INSTANCE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug(_("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s"),
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
# FIXME(comstud): This should be optimized to only load the attr.
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
# NOTE(danms): Never allow us to recursively-load
if instance.obj_attr_is_set(attrname):
self[attrname] = instance[attrname]
else:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='loading %s requires recursion' % attrname)
def get_flavor(self, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
db_flavor = flavors.extract_flavor(self, prefix)
flavor = flavor_obj.Flavor()
for key in flavors.system_metadata_flavor_props:
flavor[key] = db_flavor[key]
return flavor
def set_flavor(self, flavor, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
self.system_metadata = flavors.save_flavor_info(
self.system_metadata, flavor, prefix)
self.save()
def delete_flavor(self, namespace):
self.system_metadata = flavors.delete_flavor_info(
self.system_metadata, "%s_" % namespace)
self.save()
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
inst_faults = {}
if get_fault:
# Build an instance_uuid:latest-fault mapping
expected_attrs.remove('fault')
instance_uuids = [inst['uuid'] for inst in db_inst_list]
faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
context, instance_uuids)
for fault in faults:
if fault.instance_uuid not in inst_faults:
inst_faults[fault.instance_uuid] = fault
inst_list.objects = []
for db_inst in db_inst_list:
inst_obj = Instance._from_db_object(context, Instance(), db_inst,
expected_attrs=expected_attrs)
if get_fault:
inst_obj.fault = inst_faults.get(inst_obj.uuid, None)
inst_list.objects.append(inst_obj)
inst_list.obj_reset_changes()
return inst_list
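# Illustrative call only (hypothetical variable names): given raw DB rows,
# _make_instance_list() hydrates them into Instance objects and, when 'fault'
# is requested, attaches each instance's latest fault from a single batch query:
#     insts = _make_instance_list(ctxt, InstanceList(), db_rows,
#                                 expected_attrs=['fault', 'info_cache'])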
class InstanceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added use_slave to get_by_host
# Instance <= version 1.9
# Version 1.2: Instance <= version 1.11
VERSION = '1.2'
fields = {
'objects': fields.ListOfObjectsField('Instance'),
}
child_versions = {
'1.1': '1.9',
# NOTE(danms): Instance was at 1.9 before we added this
'1.2': '1.11',
}
@base.remotable_classmethod
def get_by_filters(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None):
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
db_inst_list = db.instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_node(
context, host, node)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_not_type(cls, context, host, type_id=None,
expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_not_type(
context, host, type_id=type_id)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_hung_in_rebooting(cls, context, reboot_window,
expected_attrs=None):
db_inst_list = db.instance_get_all_hung_in_rebooting(context,
reboot_window)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_security_group_id(cls, context, security_group_id):
db_secgroup = db.security_group_get(
context, security_group_id,
columns_to_join=['instances.info_cache',
'instances.system_metadata'])
return _make_instance_list(context, cls(), db_secgroup['instances'],
['info_cache', 'system_metadata'])
@classmethod
def get_by_security_group(cls, context, security_group):
return cls.get_by_security_group_id(context, security_group.id)
def fill_faults(self):
"""Batch query the database for our instances' faults.
:returns: A list of instance uuids for which faults were found.
"""
uuids = [inst.uuid for inst in self]
faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
self._context, uuids)
faults_by_uuid = {}
for fault in faults:
if fault.instance_uuid not in faults_by_uuid:
faults_by_uuid[fault.instance_uuid] = fault
for instance in self:
if instance.uuid in faults_by_uuid:
instance.fault = faults_by_uuid[instance.uuid]
else:
# NOTE(danms): Otherwise the caller will cause a lazy-load
# when checking it, and we know there are none
instance.fault = None
instance.obj_reset_changes(['fault'])
return faults_by_uuid.keys()
|
the-stack_0_21662 | """
peeringdb sync backend to use for pdb_load_data
command
"""
import re
from collections import defaultdict
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.db.models import OneToOneRel
from django_peeringdb.client_adaptor.backend import Backend as BaseBackend
from django_peeringdb.client_adaptor.backend import reftag_to_cls
import peeringdb_server.models as models
from peeringdb import resource
__version__ = "1.0"
class Backend(BaseBackend):
"""
Custom tailored peeringdb_server backend for the
peeringdb client we can use to sync data from
another peeringdb server instance.
We can inherit most of the official django-peeringdb
Backend, however we need bind resources to the peeringdb
models and fix some issues with validation and relationships.
"""
# map peeringdb_server models to peeringdb client resources
RESOURCE_MAP = {
resource.Facility: models.Facility,
resource.InternetExchange: models.InternetExchange,
resource.InternetExchangeFacility: models.InternetExchangeFacility,
resource.InternetExchangeLan: models.IXLan,
resource.InternetExchangeLanPrefix: models.IXLanPrefix,
resource.Network: models.Network,
resource.NetworkContact: models.NetworkContact,
resource.NetworkFacility: models.NetworkFacility,
resource.NetworkIXLan: models.NetworkIXLan,
resource.Organization: models.Organization,
}
def get_resource(self, cls):
"""
        Override this so it doesn't hard-fail on a non-existing
        resource, since sync will try to obtain resources
        for relationships in peeringdb_server that aren't
        really resources (sponsorships, partnerships, etc.).
"""
return self.CONCRETE_MAP.get(cls)
@reftag_to_cls
def get_fields(self, concrete):
"""
        Sync currently doesn't support OneToOne relationships,
        and none of the ones that exist in peeringdb_server
        are relevant to the data we want to sync.
        However, they still get processed, causing errors.
        Here we make sure not to process OneToOneRel relationships.
"""
_fields = super().get_fields(concrete)
fields = []
for field in _fields:
if isinstance(field, OneToOneRel):
continue
fields.append(field)
return fields
def set_relation_many_to_many(self, obj, field_name, objs):
"""
        Sync will try to process sponsorship_set off of `org`
        and run into an error, so we make sure to ignore it
        when handling many-to-many relationships during sync.
"""
if field_name in ["sponsorship_set"]:
return
return super().set_relation_many_to_many(obj, field_name, objs)
def clean(self, obj):
"""
We override the object validation here to handle
common validation issues that exist in the official production
        db, where validators are set, but data has not yet been
fixed retroactively.
These instances are:
- info_prefixes4 on networks (adjust data)
- info_prefixes6 on networks (adjust data)
- overlapping prefixes on ixlan prefixes (skip validation)
- invalid prefix length on ixlan prefixes (skip validation)
- ipaddr4 out of prefix address space on netixlans (skip validation)
- ipaddr6 out of prefix address space on netixlans (skip validation)
"""
obj.updated = (
obj._meta.get_field("updated")
.to_python(obj.updated)
.replace(tzinfo=models.UTC())
)
obj.created = (
obj._meta.get_field("created")
.to_python(obj.created)
.replace(tzinfo=models.UTC())
)
def save(self, obj):
# make sure all datetime values have their timezone set
for field in obj._meta.get_fields():
if field.get_internal_type() == "DateTimeField":
value = getattr(obj, field.name)
if not value:
continue
if isinstance(value, str):
value = field.to_python(value)
value = value.replace(tzinfo=models.UTC())
setattr(obj, field.name, value)
if obj.HandleRef.tag == "ix":
obj.save(create_ixlan=False)
else:
obj.save()
def detect_uniqueness_error(self, exc):
"""
        Parse the error and, if it describes any violation of a uniqueness
        constraint, return the corresponding fields, else None.
"""
pattern = r"(\w+) with this (\w+) already exists"
fields = []
if isinstance(exc, IntegrityError):
return self._detect_integrity_error(exc)
assert isinstance(exc, ValidationError), TypeError
error_dict = getattr(exc, "error_dict", getattr(exc, "message_dict", {}))
for name, err in error_dict.items():
if re.search(pattern, str(err)):
fields.append(name)
return fields or None
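    # Illustrative only (made-up error content): a ValidationError whose
    # error_dict looks like {'asn': ['Network with this Asn already exists.']}
    # makes detect_uniqueness_error() return ['asn'], telling the caller the
    # conflict is a uniqueness clash rather than bad data.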
def detect_missing_relations(self, obj, exc):
"""
Parse error messages and collect the missing-relationship errors
as a dict of Resource -> {id set}
"""
missing = defaultdict(set)
error_dict = getattr(exc, "error_dict", getattr(exc, "message_dict", {}))
for name, err in error_dict.items():
            # check if it was a relationship that doesn't exist locally
pattern = r".+ with id (\d+) does not exist.+"
m = re.match(pattern, str(err))
if m:
field = obj._meta.get_field(name)
res = self.get_resource(field.related_model)
missing[res].add(int(m.group(1)))
return missing
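    # Illustrative only (made-up id): an error_dict entry such as
    #     {'org': ['Organization with id 42 does not exist ...']}
    # yields {resource.Organization: {42}}, so the caller can fetch the missing
    # parent objects first and then retry saving this object.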
|
the-stack_0_21663 | from __future__ import absolute_import
__author__ = 'breddels'
import numpy as np
import logging
import threading
import uuid
import time
import ast
from .dataframe import DataFrame, default_shape
from .utils import _issequence
from .tasks import Task
from .legacy import Subspace
import vaex.promise
import vaex.settings
import vaex.utils
from tornado.httpclient import AsyncHTTPClient, HTTPClient
import tornado.httputil
import tornado.websocket
from tornado.concurrent import Future
from tornado import gen
import tornado.ioloop
import json
import astropy.units
from vaex.utils import _ensure_strings_from_expressions, _ensure_string_from_expression
try:
import __builtin__
except ImportError:
import builtins as __builtin__
try:
import Cookie # py2
except ImportError:
import http.cookies as Cookie # py3
logger = logging.getLogger("vaex.remote")
DEFAULT_REQUEST_TIMEOUT = 60 * 5 # 5 minutes
forwarded_method_names = [] # all method names on DataFrame that get forwarded to the server
def wrap_future_with_promise(future):
if isinstance(future, vaex.promise.Promise): # TODO: not so nice, sometimes we pass a promise
return future
promise = vaex.promise.Promise()
def callback(future):
# print(("callback", future, future.result()))
e = future.exception()
if e:
# print(("reject", e))
promise.reject(e)
else:
promise.fulfill(future.result())
future.add_done_callback(callback)
return promise
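# Minimal usage sketch (hypothetical URL, not part of this module): wrap a
# tornado Future so downstream code can chain vaex promises instead:
#     future = tornado.websocket.websocket_connect("ws://localhost:9000/websocket")
#     promise = wrap_future_with_promise(future)
#     promise.then(lambda ws: logger.debug("connected: %r", ws),
#                  lambda err: logger.error("connect failed: %r", err))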
def listify(value):
# TODO: listify is a bad name, can we use a common serialization function?
if isinstance(value, vaex.expression.Expression):
return str(value)
elif isinstance(value, list):
value = list([listify(item) for item in value])
elif hasattr(value, "tolist"):
value = value.tolist()
return value
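# Illustrative only (df below is a hypothetical DataFrame): listify() turns
# expressions and numpy values into JSON-serializable primitives before they
# are sent to the server:
#     listify(np.arange(3))             -> [0, 1, 2]
#     listify([df.x, np.float64(1.5)])  -> ['x', 1.5]   # Expression -> str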
try:
from urllib.request import urlopen
from urllib.parse import urlparse, urlencode
except ImportError:
from urlparse import urlparse
from urllib import urlopen, urlencode
def _check_error(object):
if "error" in object:
raise RuntimeError("Server responded with error: %r" % object["error"])
class ServerRest(object):
def __init__(self, hostname, port=5000, base_path="/", background=False, thread_mover=None, websocket=True, token=None, token_trusted=None):
self.hostname = hostname
self.port = port
self.base_path = base_path if base_path.endswith("/") else (base_path + "/")
self.token = token
self.token_trusted = token_trusted
# if delay:
event = threading.Event()
self.thread_mover = thread_mover or (lambda fn, *args, **kwargs: fn(*args, **kwargs))
logger.debug("thread mover: %r", self.thread_mover)
# jobs maps from uid to tasks
self.jobs = {}
def ioloop_threaded():
logger.debug("creating tornado io_loop")
self.io_loop = tornado.ioloop.IOLoop().current()
event.set()
logger.debug("started tornado io_loop...")
self.io_loop.start()
self.io_loop.close()
logger.debug("stopped tornado io_loop")
io_loop = tornado.ioloop.IOLoop.current(instance=False)
if True: # io_loop:# is None:
logger.debug("no current io loop, starting it in thread")
thread = threading.Thread(target=ioloop_threaded)
thread.setDaemon(True)
thread.start()
event.wait()
else:
logger.debug("using current io loop")
self.io_loop = io_loop
self.io_loop.make_current()
# if async:
self.http_client_async = AsyncHTTPClient()
# self.http_client = HTTPClient()
self.user_id = vaex.settings.webclient.get("cookie.user_id")
self.use_websocket = websocket
self.websocket = None
self.submit = self.submit_http
if self.use_websocket:
logger.debug("connect via websocket")
self.submit = self.submit_websocket
self._websocket_connect()
logger.debug("websocket connected")
def close(self):
# self.http_client_async.
# self.http_client.close()
if self.use_websocket:
if self.websocket:
self.websocket.close()
self.io_loop.stop()
def _websocket_connect(self):
def connected(websocket):
logger.debug("connected to websocket: %s" % self._build_url(""))
def failed(reason):
logger.error("failed to connect to %s" % self._build_url(""))
self.websocket_connected = vaex.promise.Promise()
self.websocket_connected.then(connected, failed)
if 0:
connected = wrap_future_with_promise(tornado.websocket.websocket_connect(self._build_url("websocket"), on_message_callback=self._on_websocket_message))
connected.get()
self.websocket_connected.fulfill(connected)
def do():
try:
logger.debug("wrapping promise")
logger.debug("connecting to: %s", self._build_url("websocket"))
connected = wrap_future_with_promise(tornado.websocket.websocket_connect(self._build_url("websocket"), on_message_callback=self._on_websocket_message))
logger.debug("continue")
self.websocket_connected.fulfill(connected)
except:
logger.exception("error connecting")
# raise
logger.debug("add callback")
self.io_loop.add_callback(do)
logger.debug("added callback: ")
# self.io_loop.start()
# if self.port == 29345:
# import pdb
# pdb.set_trace()
logger.debug("waiting for connection")
result = self.websocket_connected.get()
logger.debug("websocket connected")
if self.websocket_connected.isRejected:
raise self.websocket.reason
def _on_websocket_message(self, msg):
try:
task = None
json_data, data = msg.split(b"\n", 1)
response = json.loads(json_data.decode("utf8"))
phase = response["job_phase"]
job_id = response.get("job_id")
task = self.jobs[job_id]
if data:
import zlib
data = zlib.decompress(data)
try:
numpy_array = np.frombuffer(data, dtype=np.dtype(response["dtype"])).reshape(ast.literal_eval(response["shape"]))
except:
logger.exception("error in decoding data: %r %r %r", data, response, task.task_queue)
finally:
response["result"] = numpy_array
import sys
if sys.getsizeof(msg) > 1024 * 4:
logger.debug("socket read message: <large amount of data>",)
else:
logger.debug("socket read message: %s", msg)
logger.debug("json response: %r", response)
# for the moment, job == task, in the future a job can be multiple tasks
except Exception as e:
if task:
task.reject(e)
logger.exception("unexpected decoding error")
return
if job_id:
try:
logger.debug("job update %r, phase=%r", job_id, phase)
if phase == "COMPLETED":
result = response["result"] # [0]
# logger.debug("completed job %r, result=%r", job_id, result)
logger.debug("completed job %r (delay=%r, thread_mover=%r)", job_id, task.delay, self.thread_mover)
processed_result = task.post_process(result)
if task.delay:
self.thread_mover(task.fulfill, processed_result)
else:
task.fulfill(processed_result)
elif phase == "EXCEPTION":
logger.error("exception happened at server side: %r", response)
class_name = response["exception"]["class"]
msg = response["exception"]["msg"]
exception = getattr(__builtin__, class_name)(msg)
logger.debug("error in job %r, exception=%r", job_id, exception)
task = self.jobs[job_id]
if task.delay:
self.thread_mover(task.reject, exception)
else:
task.reject(exception)
elif phase == "ERROR":
logger.error("error happened at server side: %r", response)
msg = response["error"]
exception = RuntimeError("error at server: %r" % msg)
task = self.jobs[job_id]
if task.delay:
self.thread_mover(task.reject, exception)
else:
task.reject(exception)
elif phase == "PENDING":
fraction = response["progress"]
logger.debug("pending?: %r", phase)
task = self.jobs[job_id]
if task.delay:
self.thread_mover(task.signal_progress.emit, fraction)
else:
task.signal_progress.emit(fraction)
except Exception as e:
logger.exception("error in handling job")
task = self.jobs[job_id]
if task.delay:
self.thread_mover(task.reject, e)
else:
task.reject(e)
def wait(self):
io_loop = tornado.ioloop.IOLoop.instance()
io_loop.start()
def submit_websocket(self, path, arguments, delay=False, progress=None, post_process=lambda x: x):
assert self.use_websocket
task = TaskServer(post_process=post_process, delay=delay)
progressbars = vaex.utils.progressbars(progress)
progressbars.add_task(task)
logger.debug("created task: %r, %r (delay=%r)" % (path, arguments, delay))
job_id = str(uuid.uuid4())
self.jobs[job_id] = task
arguments["job_id"] = job_id
arguments["path"] = path
arguments["user_id"] = self.user_id
if self.token:
arguments["token"] = self.token
if self.token_trusted:
arguments["token_trusted"] = self.token_trusted
# arguments = dict({key: (value.tolist() if hasattr(value, "tolist") else value) for key, value in arguments.items()})
arguments = dict({key: listify(value) for key, value in arguments.items()})
def do():
def write(socket):
try:
logger.debug("write to websocket: %r", arguments)
socket.write_message(json.dumps(arguments))
return
except:
import traceback
traceback.print_exc()
# return
logger.debug("will schedule a write to the websocket")
self.websocket_connected.then(write).end() # .then(task.fulfill)
self.io_loop.add_callback(do)
logger.debug("we can continue (main thread is %r)", threading.currentThread())
if delay:
return task
else:
return task.get()
def submit_http(self, path, arguments, post_process, delay, progress=None, **kwargs):
def pre_post_process(response):
cookie = Cookie.SimpleCookie()
for cookieset in response.headers.get_list("Set-Cookie"):
cookie.load(cookieset)
logger.debug("cookie load: %r", cookieset)
logger.debug("cookie: %r", cookie)
if "user_id" in cookie:
user_id = cookie["user_id"].value
logger.debug("user_id: %s", user_id)
if self.user_id != user_id:
self.user_id = user_id
vaex.settings.webclient.store("cookie.user_id", self.user_id)
data = response.body
is_json = False
logger.info("response is: %r", response.body)
logger.info("content_type is: %r", response.headers["Content-Type"])
if response.headers["Content-Type"] == "application/numpy-array":
shape, dtype, data = response.body.split(b"\n", 2)
shape = shape.decode("ascii")
dtype = dtype.decode("ascii")
import ast
numpy_array = np.fromstring(data, dtype=np.dtype(dtype)).reshape(ast.literal_eval(shape))
return post_process(numpy_array)
else:
try:
data = json.loads(response.body.decode("ascii"))
is_json = True
except Exception as e:
logger.info("couldn't convert to json (error is %s, assume it's raw data): %s", e, data)
# logger.info("couldn't convert to json (error is %s, assume it's raw data)", e)
if is_json:
self._check_exception(data)
return post_process(data["result"])
else:
return post_process(data)
arguments = {key: listify(value) for key, value in arguments.items()}
import pdb
arguments_json = {key: json.dumps(value) for key, value in arguments.items()}
headers = tornado.httputil.HTTPHeaders()
url = self._build_url(path + "?" + urlencode(arguments_json))
logger.debug("fetch %s, delay=%r", url, delay)
if self.user_id is not None:
headers.add("Cookie", "user_id=%s" % self.user_id)
logger.debug("adding user_id %s to request", self.user_id)
if delay:
task = TaskServer(pre_post_process, delay=delay)
            # tornado doesn't like that we call fetch while the ioloop is running in another thread, we should use ioloop.add_callback
def do():
self.thread_mover(task.signal_progress.emit, 0.5)
future = self.http_client_async.fetch(url, headers=headers, request_timeout=DEFAULT_REQUEST_TIMEOUT, **kwargs)
def thread_save_succes(value):
self.thread_mover(task.signal_progress.emit, 1.0)
self.thread_mover(task.fulfill, value)
def thread_save_failure(value):
self.thread_mover(task.reject, value)
wrap_future_with_promise(future).then(pre_post_process).then(thread_save_succes, thread_save_failure)
self.io_loop.add_callback(do)
return task # promise.then(self._move_to_thread)
else:
return pre_post_process(self.http_client.fetch(url, headers=headers, request_timeout=DEFAULT_REQUEST_TIMEOUT, **kwargs))
def datasets(self, as_dict=False, delay=False):
def post(result):
logger.debug("datasets result: %r", result)
def create(server, state):
dataset = DatasetRest(self, name=state['name'],
length_original=state['length_original'],
column_names=state['column_names'],
dtypes=state['dtypes'])
dataset.state_set(state['state'])
return dataset
datasets = [create(self, kwargs) for kwargs in result]
logger.debug("datasets: %r", datasets)
return datasets if not as_dict else dict([(ds.name, ds) for ds in datasets])
arguments = {}
if self.token:
arguments["token"] = self.token
if self.token_trusted:
arguments["token_trusted"] = self.token_trusted
return self.submit(path="datasets", arguments=arguments, post_process=post, delay=delay)
def _build_url(self, method):
protocol = "ws" if self.use_websocket else "http"
return "%s://%s:%d%s%s" % (protocol, self.hostname, self.port, self.base_path, method)
def _call_subspace(self, method_name, subspace, **kwargs):
def post_process(result):
if method_name == "histogram": # histogram is the exception..
# TODO: don't do binary transfer, just json, now we cannot handle exception
import base64
# logger.debug("result: %r", result)
# result = base64.b64decode(result) #.decode("base64")
# result = base64.
data = np.fromstring(result, dtype=np.float64)
shape = (kwargs["size"],) * subspace.dimension
data = data.reshape(shape)
return data
else:
try:
return np.array(result)
except ValueError:
return result
dataset_name = subspace.df.name
expressions = subspace.expressions
delay = subspace.delay
path = "datasets/%s/%s" % (dataset_name, method_name)
url = self._build_url(path)
arguments = dict(kwargs)
dataset_remote = subspace.df
arguments["selection"] = subspace.is_masked
arguments['state'] = dataset_remote.state_get()
arguments['auto_fraction'] = dataset_remote.get_auto_fraction()
arguments.update(dict(expressions=expressions))
return self.submit(path, arguments, post_process=post_process, delay=delay)
def _call_dataset(self, method_name, dataset_remote, delay, numpy=False, progress=None, **kwargs):
def post_process(result):
# result = self._check_exception(json.loads(result.body))["result"]
if numpy:
result = np.fromstring(result, dtype=np.float64)
return result
path = "datasets/%s/%s" % (dataset_remote.name, method_name)
arguments = dict(kwargs)
arguments['state'] = dataset_remote.state_get()
arguments['auto_fraction'] = dataset_remote.get_auto_fraction()
body = urlencode(arguments)
return self.submit(path, arguments, post_process=post_process, progress=progress, delay=delay)
    def _schedule_call(self, method_name, dataset_remote, delay, **kwargs):
        def post_process(result):
            # result = self._check_exception(json.loads(result.body))["result"]
            try:
                return np.array(result)
            except ValueError:
                return result
        # build the request path and arguments the same way _call_dataset does
        path = "datasets/%s/%s" % (dataset_remote.name, method_name)
        arguments = dict(kwargs)
        return self.submit(path, arguments, post_process=post_process, delay=delay)
def _check_exception(self, reply_json):
if "exception" in reply_json:
logger.error("exception happened at server side: %r", reply_json)
class_name = reply_json["exception"]["class"]
msg = reply_json["exception"]["msg"]
raise getattr(__builtin__, class_name)(msg)
if "error" in reply_json:
raise ValueError("unknown error occured at server")
else:
return reply_json
class SubspaceRemote(Subspace):
def toarray(self, list):
return np.array(list)
@property
def dimension(self):
return len(self.expressions)
def _task(self, promise):
"""Helper function for returning tasks results, result when immediate is True, otherwise the task itself, which is a promise"""
if self.delay:
return promise
else:
return promise
def sleep(self, seconds, delay=False):
return self.df.server.call("sleep", seconds, delay=delay)
def minmax(self):
return self._task(self.df.server._call_subspace("minmax", self))
# return self._task(task)
def histogram(self, limits, size=256, weight=None):
return self._task(self.df.server._call_subspace("histogram", self, size=size, limits=limits, weight=weight))
def nearest(self, point, metric=None):
point = vaex.utils.make_list(point)
result = self.df.server._call_subspace("nearest", self, point=point, metric=metric)
return self._task(result)
def mean(self):
return self.df.server._call_subspace("mean", self)
def correlation(self, means=None, vars=None):
return self.df.server._call_subspace("correlation", self, means=means, vars=vars)
def var(self, means=None):
return self.df.server._call_subspace("var", self, means=means)
def sum(self):
return self.df.server._call_subspace("sum", self)
def limits_sigma(self, sigmas=3, square=False):
return self.df.server._call_subspace("limits_sigma", self, sigmas=sigmas, square=square)
def mutual_information(self, limits=None, size=256):
return self.df.server._call_subspace("mutual_information", self, limits=limits, size=size)
class DataFrameRemote(DataFrame):
def __init__(self, name, server, column_names):
super(DataFrameRemote, self).__init__(name, column_names)
self.server = server
def forward(f=None, has_delay=False):
assert not has_delay
def decorator(method):
method_name = method.__name__
forwarded_method_names.append(method_name)
def wrapper(df, *args, **kwargs):
return df.server._call_dataset(method_name, df, delay=has_delay, *args, **kwargs)
return wrapper
if f is None:
return decorator
else:
return decorator(f)
class DatasetRest(DataFrameRemote):
def __init__(self, server, name, column_names, dtypes, length_original):
DataFrameRemote.__init__(self, name, server.hostname, column_names)
self.server = server
self.name = name
self.column_names = column_names
self._dtypes = {name: np.dtype(dtype) for name, dtype in dtypes.items()}
for column_name in self.get_column_names(virtual=True, strings=True):
self._save_assign_expression(column_name)
self._length_original = length_original
self._length_unfiltered = length_original
self._index_end = length_original
self.path = self.filename = self.server._build_url("%s" % name)
self.fraction = 1
self.executor = ServerExecutor()
def copy(self, column_names=None, virtual=True):
dtypes = {name: self.dtype(name) for name in self.get_column_names(strings=True, virtual=False)}
ds = DatasetRest(self.server, self.name, self.column_names, dtypes=dtypes, length_original=self._length_original)
state = self.state_get()
if not virtual:
state['virtual_columns'] = {}
ds.state_set(state, use_active_range=True)
return ds
def trim(self, inplace=False):
df = self if inplace else self.copy()
# can we get away with not trimming?
return df
def count(self, expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None):
return self._delay(delay, self.server._call_dataset("count", self, delay=True, progress=progress, expression=expression, binby=binby, limits=limits, shape=shape, selection=selection, edges=edges))
def mean(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
return self._delay(delay, self.server._call_dataset("mean", self, delay=True, progress=progress, expression=expression, binby=binby, limits=limits, shape=shape, selection=selection))
def sum(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
return self._delay(delay, self.server._call_dataset("sum", self, delay=True, progress=progress, expression=expression, binby=binby, limits=limits, shape=shape, selection=selection))
def var(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
return self._delay(delay, self.server._call_dataset("var", self, delay=True, progress=progress, expression=expression, binby=binby, limits=limits, shape=shape, selection=selection))
def minmax(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
return self._delay(delay, self.server._call_dataset("minmax", self, delay=True, progress=progress, expression=expression, binby=binby, limits=limits, shape=shape, selection=selection))
def min(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
return self._delay(delay, self.server._call_dataset("min", self, delay=True, progress=progress, expression=expression, binby=binby, limits=limits, shape=shape, selection=selection))
def max(self, expression, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
return self._delay(delay, self.server._call_dataset("max", self, delay=True, progress=progress, expression=expression, binby=binby, limits=limits, shape=shape, selection=selection))
# def count(self, expression=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False):
def cov(self, x, y=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
return self._delay(delay, self.server._call_dataset("cov", self, delay=True, progress=progress, x=x, y=y, binby=binby, limits=limits, shape=shape, selection=selection))
def correlation(self, x, y=None, binby=[], limits=None, shape=default_shape, sort=False, sort_key=np.abs, selection=False, delay=False, progress=None):
# TODO: sort and sort_key should be done locally
return self._delay(delay, self.server._call_dataset("correlation", self, delay=True, progress=progress, x=x, y=y, binby=binby, limits=limits, shape=shape, selection=selection))
def _delay(self, delay, task, progressbar=False):
if delay:
return task
else:
result = task.get()
logger.debug("result = %r", result)
return result
def dtype(self, expression):
if expression in self._dtypes:
return self._dtypes[expression]
else:
return np.zeros(1, dtype=np.float64).dtype
def is_local(self): return False
@forward()
def _head_and_tail_table(self, n=5, format='html'):
        raise NotImplementedError()
def __repr__(self):
name = self.__class__.__module__ + "." + self.__class__.__name__
return "<%s(server=%r, name=%r, column_names=%r, __len__=%r)> instance at 0x%x" % (name, self.server, self.name, self.column_names, len(self), id(self))
def __call__(self, *expressions, **kwargs):
return SubspaceRemote(self, expressions, kwargs.get("executor") or self.executor, delay=kwargs.get("delay", False))
    def evaluate(self, expression, i1=None, i2=None, out=None, selection=None, parallel=True, delay=False):
        """Basic support for evaluate at the server, at least enough to run some unittests; do not expect full support."""
        expression = _ensure_strings_from_expressions(expression)
result = self.server._call_dataset("evaluate", self, expression=expression, i1=i1, i2=i2, selection=selection, parallel=parallel, delay=delay)
# TODO: we ignore out
return result
def execute(self):
'''Execute all delayed jobs.'''
self.executor.execute()
# TODO: should be support _task_agg? If we do, we can use the base class' method
# self._task_aggs.clear()
# we may get rid of this when we group together tasks
class ServerExecutor(object):
def __init__(self):
self.signal_begin = vaex.events.Signal("begin")
self.signal_progress = vaex.events.Signal("progress")
self.signal_end = vaex.events.Signal("end")
self.signal_cancel = vaex.events.Signal("cancel")
def execute(self):
logger.debug("dummy execute")
class TaskServer(Task):
def __init__(self, post_process, delay):
Task.__init__(self, None, [])
self.post_process = post_process
self.delay = delay
self.task_queue = []
def schedule(self, task):
self.task_queue.append(task)
logger.info("task added, queue: %r", self.task_queue)
return task
def execute(self):
logger.debug("starting with execute")
if self._is_executing:
logger.debug("nested execute call")
# this situation may happen since in this methods, via a callback (to update a progressbar) we enter
# Qt's eventloop, which may execute code that will call execute again
# as long as that code is using delay tasks (i.e. promises) we can simple return here, since after
# the execute is almost finished, any new tasks added to the task_queue will get executing
return
        # a 'column' is uniquely identified by a tuple of (dataset, expression)
self._is_executing = True
try:
t0 = time.time()
task_queue_all = list(self.task_queue)
if not task_queue_all:
logger.info("only had cancelled tasks")
logger.info("clearing queue")
# self.task_queue = [] # Ok, this was stupid.. in the meantime there may have been new tasks, instead, remove the ones we copied
for task in task_queue_all:
logger.info("remove from queue: %r", task)
self.task_queue.remove(task)
logger.info("left in queue: %r", self.task_queue)
task_queue_all = [task for task in task_queue_all if not task.cancelled]
logger.debug("executing queue: %r" % (task_queue_all))
# for task in self.task_queue:
# $ print task, task.expressions_all
datasets = set(task.dataset for task in task_queue_all)
cancelled = [False]
def cancel():
logger.debug("cancelling")
self.signal_cancel.emit()
cancelled[0] = True
try:
# process tasks per dataset
self.signal_begin.emit()
for dataset in datasets:
task_queue = [task for task in task_queue_all if task.dataset == dataset]
expressions = list(set(expression for task in task_queue for expression in task.expressions_all))
for task in task_queue:
task._results = []
task.signal_progress.emit(0)
self.server.execute_queue(task_queue)
self._is_executing = False
except:
# on any error we flush the task queue
self.signal_cancel.emit()
logger.exception("error in task, flush task queue")
raise
logger.debug("executing took %r seconds" % (time.time() - t0))
# while processing the self.task_queue, new elements will be added to it, so copy it
logger.debug("cancelled: %r", cancelled)
if cancelled[0]:
logger.debug("execution aborted")
task_queue = task_queue_all
for task in task_queue:
# task._result = task.reduce(task._results)
# task.reject(UserAbort("cancelled"))
# remove references
task._result = None
task._results = None
else:
task_queue = task_queue_all
for task in task_queue:
logger.debug("fulfill task: %r", task)
if not task.cancelled:
task._result = task.reduce(task._results)
task.fulfill(task._result)
# remove references
task._result = None
task._results = None
self.signal_end.emit()
# if new tasks were added as a result of this, execute them immediately
# TODO: we may want to include infinite recursion protection
self._is_executing = False
if len(self.task_queue) > 0:
logger.debug("task queue not empty.. start over!")
self.execute()
finally:
self._is_executing = False
if __name__ == "__main__":
import vaex
import sys
vaex.set_log_level_debug()
server = vaex.server(sys.argv[1], port=int(sys.argv[2]))
datasets = server.datasets()
print(datasets)
dataset = datasets[0]
dataset = vaex.example()
print(dataset("x").minmax())
dataset.select("x < 0")
print(dataset.selected_length(), len(dataset))
print(dataset("x").selected().is_masked)
print(dataset("x").selected().minmax())
|
the-stack_0_21664 | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
from examples.tf_profiler_model import neural_net
tf.reset_default_graph()
learning_rate = 0.1
num_steps = 500
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder('float', [None, num_input])
Y = tf.placeholder('float', [None, num_classes])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
logits = neural_net(X, weights, biases)
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
with tf.Session() as sess:
# Restore variables from disk.
saver.restore(sess, './models/model.ckpt')
print('Model restored.')
for _ in range(1000):
batch_x, batch_y = mnist.test.next_batch(batch_size)
acc = sess.run(accuracy, feed_dict={X: batch_x,
Y: batch_y,
})
print(acc)
|
the-stack_0_21665 | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Encode configuration for model Cisco-IOS-XR-ip-domain-cfg.
usage: cd-encode-xr-ip-domain-cfg-31-ydk.py [-h] [-v]
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from ydk.services import CodecService
from ydk.providers import CodecServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_ip_domain_cfg \
as xr_ip_domain_cfg
import logging
def config_ip_domain(ip_domain):
"""Add config data to ip_domain object."""
vrf = ip_domain.vrfs.Vrf()
vrf.vrf_name = "RED"
# host name "ruby"
ipv6_host = vrf.ipv6_hosts.Ipv6Host()
ipv6_host.host_name = "ruby"
ipv6_host.address.append("2001:db8:a::1")
vrf.ipv6_hosts.ipv6_host.append(ipv6_host)
# host name "flame"
ipv6_host = vrf.ipv6_hosts.Ipv6Host()
ipv6_host.host_name = "flame"
ipv6_host.address.append("2001:db8:a::2")
vrf.ipv6_hosts.ipv6_host.append(ipv6_host)
# host name "crimson"
ipv6_host = vrf.ipv6_hosts.Ipv6Host()
ipv6_host.host_name = "crimson"
ipv6_host.address.append("2001:db8:a::3")
ipv6_host.address.append("2001:db8:a::4")
vrf.ipv6_hosts.ipv6_host.append(ipv6_host)
# host name "raspberry"
ipv6_host = vrf.ipv6_hosts.Ipv6Host()
ipv6_host.host_name = "raspberry"
ipv6_host.address.append("2001:db8:a::5")
ipv6_host.address.append("2001:db8:a::6")
vrf.ipv6_hosts.ipv6_host.append(ipv6_host)
ip_domain.vrfs.vrf.append(vrf)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
args = parser.parse_args()
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create codec provider
provider = CodecServiceProvider(type="xml")
# create codec service
codec = CodecService()
ip_domain = xr_ip_domain_cfg.IpDomain() # create object
config_ip_domain(ip_domain) # add object configuration
# encode and print object
print(codec.encode(provider, ip_domain))
exit()
# End of script
|
the-stack_0_21669 | from tkinter import READABLE
from modules.countdown.const.Values import Responses, responses
import pyttsx3 as tts
import time
import random
class S_Speaker:
def __init__(self):
engine=tts.init()
voices=engine.getProperty("voices")
engine.setProperty("voice",voices[1].id)
self.engine=engine
def say(self,text):
self.engine.say(text)
self.engine.runAndWait()
def countDown(self, totalTime):
self.say(responses[Responses.WAIT_COUNTING])
count=3
while count:
# time.sleep(0.5)
self.say(count)
count=count-1
i = random.randint(0, len(responses[Responses.START_COUNTING]) - 1)
self.say(responses[Responses.START_COUNTING][i])
count=totalTime
while count:
time.sleep(1)
count=count-1
if count==6: self.say("5 seconds left!")
i = random.randint(0, len(responses[Responses.TIME_UP]) - 1)
self.say(responses[Responses.TIME_UP][i])
|
the-stack_0_21670 | from django.shortcuts import render, redirect
from django import forms
from django.http import HttpResponse, Http404
import datetime as dt
from cloudinary.forms import cl_init_js_callbacks
from .models import mygalleria_image, Category,Location
from .forms import PhotoForm
def index(request):
photo = mygalleria_image.display_photo()
return render(request, 'upload.html', {"photo": photo})
def photo_today(request):
date = dt.date.today()
photo = mygalleria_image.display_photo()
return render(request, 'gallery/todays_photos.html', {"date": date, "photo": photo})
def upload(request):
context = dict(backend_form=PhotoForm())
if request.method == 'POST':
form = PhotoForm(request.POST, request.FILES)
context['posted'] = form.instance
if form.is_valid():
form.save()
return render(request, 'upload.html', context)
def convert_dates(dates):
day_number = dt.date.weekday(dates)
days = ['Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday', "Sunday"]
# Returning the actual day of the week
day = days[day_number]
return day
# View Function to present news from the past days
def past_days_photos(request, past_date):
try:
# Convert data from the string Url
date = dt.datetime.strptime(past_date, '%Y-%m-%d').date()
except ValueError:
# Raise 404 error when ValueError is thrown
raise Http404()
if date == dt.date.today():
return redirect(photo_today)
photo = mygalleria_image.days_photo(date)
return render(request, 'gallery/past_photos.html', {"date": date, "photo": photo})
def search_results(request):
if 'category' in request.GET and request.GET["category"]:
search_term = request.GET.get("category")
searched_category = mygalleria_image.search_by_category(search_term)
message = f"{search_term}"
return render(request, 'gallery/search.html', {"message": message, "category": searched_category})
else:
message = "You haven't searched for any category"
return render(request, 'gallery/search.html', {"message": message})
def category(request, category_id):
try:
category = Category.objects.get(id=category_id)
    except Category.DoesNotExist:
raise Http404()
return render(request, "gallery/category.html", {"category": category})
|
the-stack_0_21672 | # -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.29.18'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import RateLimitExceeded
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, TRUNCATE, ROUND, ROUND_UP, ROUND_DOWN
from ccxt.base.decimal_to_precision import number_to_string
# -----------------------------------------------------------------------------
# rsa jwt signing
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
# -----------------------------------------------------------------------------
# ecdsa signing
from ccxt.static_dependencies import ecdsa
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
# Python 2 & 3
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
from time import mktime
from wsgiref.handlers import format_date_time
# -----------------------------------------------------------------------------
try:
basestring # basestring was removed in Python 3
except NameError:
basestring = str
try:
long # long integer was removed in Python 3
except NameError:
long = int
# -----------------------------------------------------------------------------
try:
import urllib.parse as _urlencode # Python 3
except ImportError:
import urllib as _urlencode # Python 2
# -----------------------------------------------------------------------------
# web3/0x imports
try:
from web3 import Web3, HTTPProvider
except ImportError:
Web3 = HTTPProvider = None # web3/0x not supported in Python 2
# -----------------------------------------------------------------------------
class Exchange(object):
"""Base exchange class"""
id = None
version = None
certified = False
pro = False
# rate limiter settings
enableRateLimit = False
rateLimit = 2000 # milliseconds = seconds * 1000
timeout = 10000 # milliseconds = seconds * 1000
asyncio_loop = None
aiohttp_proxy = None
aiohttp_trust_env = False
session = None # Session () by default
verify = True # SSL verification
logger = None # logging.getLogger(__name__) by default
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
verbose = False
markets = None
symbols = None
timeframes = None
fees = {
'trading': {
'percentage': True, # subclasses should rarely have to redefine this
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
loaded_fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
ids = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*' # CORS origin
proxies = None
hostname = None # in case of inaccessibility of the "main" domain
apiKey = ''
secret = ''
password = ''
uid = ''
privateKey = '' # a "0x"-prefixed hexstring private key for a wallet
walletAddress = '' # the wallet address "0x"-prefixed hexstring
token = '' # reserved for HTTP auth in some cases
twofa = None
marketsById = None
markets_by_id = None
currencies_by_id = None
precision = None
exceptions = None
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
httpExceptions = {
'422': ExchangeError,
'418': DDoSProtection,
'429': RateLimitExceeded,
'404': ExchangeNotAvailable,
'409': ExchangeNotAvailable,
'410': ExchangeNotAvailable,
'500': ExchangeNotAvailable,
'501': ExchangeNotAvailable,
'502': ExchangeNotAvailable,
'520': ExchangeNotAvailable,
'521': ExchangeNotAvailable,
'522': ExchangeNotAvailable,
'525': ExchangeNotAvailable,
'526': ExchangeNotAvailable,
'400': ExchangeNotAvailable,
'403': ExchangeNotAvailable,
'405': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'530': ExchangeNotAvailable,
'408': RequestTimeout,
'504': RequestTimeout,
'401': AuthenticationError,
'511': AuthenticationError,
}
headers = None
balance = None
orderbooks = None
orders = None
myTrades = None
trades = None
transactions = None
ohlcvs = None
tickers = None
base_currencies = None
quote_currencies = None
currencies = None
options = None # Python does not allow to define properties in run-time with setattr
accounts = None
status = {
'status': 'ok',
'updated': None,
'eta': None,
'url': None,
}
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False, # 2-factor authentication (one-time password key)
'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet
'walletAddress': False, # the wallet address "0x"-prefixed hexstring
'token': False, # reserved for HTTP auth in some cases
}
# API method metainfo
has = {
'loadMarkets': True,
'cancelAllOrders': False,
'cancelOrder': True,
'cancelOrders': False,
'CORS': False,
'createDepositAddress': False,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'deposit': False,
'editOrder': 'emulated',
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': False,
'fetchDepositAddress': False,
'fetchDeposits': False,
'fetchL2OrderBook': True,
'fetchLedger': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': 'emulated',
'fetchOpenOrders': False,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrderBooks': False,
'fetchOrders': False,
'fetchOrderTrades': False,
'fetchStatus': 'emulated',
'fetchTicker': True,
'fetchTickers': False,
'fetchTime': False,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchFundingFee': False,
'fetchFundingFees': False,
'fetchTradingLimits': False,
'fetchTransactions': False,
'fetchWithdrawals': False,
'privateAPI': True,
'publicAPI': True,
'withdraw': False,
}
precisionMode = DECIMAL_PLACES
minFundingAddressLength = 1 # used in check_address
substituteCommonCurrencyCodes = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
enableLastHttpResponse = True
enableLastJsonResponse = True
enableLastResponseHeaders = True
last_http_response = None
last_json_response = None
last_response_headers = None
requiresWeb3 = False
web3 = None
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
}
def __init__(self, config={}):
self.precision = dict() if self.precision is None else self.precision
self.limits = dict() if self.limits is None else self.limits
self.exceptions = dict() if self.exceptions is None else self.exceptions
self.headers = dict() if self.headers is None else self.headers
self.balance = dict() if self.balance is None else self.balance
self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
self.orders = dict() if self.orders is None else self.orders
self.tickers = dict() if self.tickers is None else self.tickers
self.trades = dict() if self.trades is None else self.trades
self.transactions = dict() if self.transactions is None else self.transactions
self.ohlcvs = dict() if self.ohlcvs is None else self.ohlcvs
self.currencies = dict() if self.currencies is None else self.currencies
self.options = dict() if self.options is None else self.options # Python does not allow to define properties in run-time with setattr
self.decimal_to_precision = decimal_to_precision
self.number_to_string = number_to_string
# version = '.'.join(map(str, sys.version_info[:3]))
# self.userAgent = {
# 'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version
# }
self.origin = self.uuid()
self.userAgent = default_user_agent()
settings = self.deep_extend(self.describe(), config)
for key in settings:
if hasattr(self, key) and isinstance(getattr(self, key), dict):
setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
else:
setattr(self, key, settings[key])
if self.api:
self.define_rest_api(self.api, 'request')
if self.markets:
self.set_markets(self.markets)
# convert all properties from underscore notation foo_bar to camelcase notation fooBar
cls = type(self)
for name in dir(self):
if name[0] != '_' and name[-1] != '_' and '_' in name:
parts = name.split('_')
# fetch_ohlcv → fetchOHLCV (not fetchOhlcv!)
exceptions = {'ohlcv': 'OHLCV', 'le': 'LE', 'be': 'BE'}
camelcase = parts[0] + ''.join(exceptions.get(i, self.capitalize(i)) for i in parts[1:])
attr = getattr(self, name)
if isinstance(attr, types.MethodType):
setattr(cls, camelcase, getattr(cls, name))
else:
setattr(self, camelcase, attr)
self.tokenBucket = self.extend({
'refillRate': 1.0 / self.rateLimit,
'delay': 0.001,
'capacity': 1.0,
'defaultCost': 1.0,
}, getattr(self, 'tokenBucket', {}))
self.session = self.session if self.session or self.asyncio_loop else Session()
self.logger = self.logger if self.logger else logging.getLogger(__name__)
if self.requiresWeb3 and Web3 and not self.web3:
self.web3 = Web3(HTTPProvider())
def __del__(self):
if self.session:
self.session.close()
def __repr__(self):
return 'ccxt.' + ('async_support.' if self.asyncio_loop else '') + self.id + '()'
def __str__(self):
return self.name
def describe(self):
return {}
def set_sandbox_mode(self, enabled):
if enabled:
if 'test' in self.urls:
self.urls['api_backup'] = self.urls['api']
self.urls['api'] = self.urls['test']
else:
raise NotSupported(self.id + ' does not have a sandbox URL')
elif 'api_backup' in self.urls:
self.urls['api'] = self.urls['api_backup']
del self.urls['api_backup']
@classmethod
def define_rest_api(cls, api, method_name, options={}):
delimiters = re.compile('[^a-zA-Z0-9]')
entry = getattr(cls, method_name) # returns a function (instead of a bound method)
for api_type, methods in api.items():
for http_method, urls in methods.items():
for url in urls:
url = url.strip()
split_path = delimiters.split(url)
uppercase_method = http_method.upper()
lowercase_method = http_method.lower()
camelcase_method = lowercase_method.capitalize()
camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
lowercase_path = [x.strip().lower() for x in split_path]
underscore_suffix = '_'.join([k for k in lowercase_path if len(k)])
camelcase = api_type + camelcase_method + Exchange.capitalize(camelcase_suffix)
underscore = api_type + '_' + lowercase_method + '_' + underscore_suffix.lower()
if 'suffixes' in options:
if 'camelcase' in options['suffixes']:
camelcase += options['suffixes']['camelcase']
if 'underscore' in options['suffixes']:
underscore += options['suffixes']['underscore']
def partialer():
outer_kwargs = {'path': url, 'api': api_type, 'method': uppercase_method}
@functools.wraps(entry)
def inner(_self, params=None):
"""
Inner is called when a generated method (publicGetX) is called.
_self is a reference to self created by function.__get__(exchange, type(exchange))
https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
"""
inner_kwargs = dict(outer_kwargs) # avoid mutation
if params is not None:
inner_kwargs['params'] = params
return entry(_self, **inner_kwargs)
return inner
to_bind = partialer()
setattr(cls, camelcase, to_bind)
setattr(cls, underscore, to_bind)
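# Illustrative note (hypothetical API map and endpoint names): given {'public': {'get': ['ticker/{symbol}']}},
# define_rest_api generates both a camelcase method exchange.publicGetTickerSymbol(params) and an underscore
# method exchange.public_get_ticker_symbol(params), each of which ends up calling
# request('ticker/{symbol}', api='public', method='GET', params=params) on the instance.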
def throttle(self):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
if elapsed < self.rateLimit:
delay = self.rateLimit - elapsed
time.sleep(delay / 1000.0)
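# Worked example (hypothetical values): with rateLimit = 2000 ms and 500 ms elapsed since the previous
# request, throttle() sleeps (2000 - 500) / 1000.0 = 1.5 seconds before the next call proceeds.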
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""Exchange.request is the entry point for all generated methods"""
return self.fetch2(path, api, method, params, headers, body)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
def throw_exactly_matched_exception(self, exact, string, message):
if string in exact:
raise exact[string](message)
def throw_broadly_matched_exception(self, broad, string, message):
broad_key = self.find_broadly_matched_key(broad, string)
if broad_key is not None:
raise broad[broad_key](message)
def find_broadly_matched_key(self, broad, string):
"""A helper method for matching error strings exactly vs broadly"""
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if string.find(key) >= 0:
return key
return None
def handle_errors(self, code, reason, url, method, headers, body, response, request_headers, request_body):
pass
def prepare_request_headers(self, headers=None):
headers = headers or {}
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return headers
def print(self, *args):
print(*args)
def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
self.print("\nRequest:", method, url, request_headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
request_body = body
if body:
body = body.encode()
self.session.cookies.clear()
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
response = self.session.request(
method,
url,
data=body,
headers=request_headers,
timeout=int(self.timeout / 1000),
proxies=self.proxies,
verify=self.verify
)
http_response = response.text
http_status_code = response.status_code
http_status_text = response.reason
json_response = self.parse_json(http_response)
headers = response.headers
# FIXME remove last_x_responses from subclasses
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.verbose:
self.print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
response.raise_for_status()
except Timeout as e:
raise RequestTimeout(method + ' ' + url)
except TooManyRedirects as e:
raise ExchangeError(method + ' ' + url)
except SSLError as e:
raise ExchangeError(method + ' ' + url)
except HTTPError as e:
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
raise ExchangeError(method + ' ' + url)
except RequestException as e: # base exception class
error_string = str(e)
if ('ECONNRESET' in error_string) or ('Connection aborted.' in error_string):
raise NetworkError(method + ' ' + url + ' ' + error_string)
else:
raise ExchangeError(method + ' ' + url + ' ' + error_string)
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_response(http_response, json_response, url, method)
if json_response is not None:
return json_response
if self.is_text_response(headers):
return http_response
return response.content
def handle_rest_errors(self, http_status_code, http_status_text, body, url, method):
error = None
string_code = str(http_status_code)
if string_code in self.httpExceptions:
error = self.httpExceptions[string_code]
if error == ExchangeNotAvailable:
if re.search('(cloudflare|incapsula|overload|ddos)', body, flags=re.IGNORECASE):
error = DDoSProtection
if error:
raise error(' '.join([method, url, string_code, http_status_text, body]))
def handle_rest_response(self, response, json_response, url, method):
if self.is_json_encoded_object(response) and json_response is None:
ddos_protection = re.search('(cloudflare|incapsula|overload|ddos)', response, flags=re.IGNORECASE)
exchange_not_available = re.search('(offline|busy|retry|wait|unavailable|maintain|maintenance|maintenancing)', response, flags=re.IGNORECASE)
if ddos_protection:
raise DDoSProtection(' '.join([method, url, response]))
if exchange_not_available:
message = response + ' exchange downtime, exchange closed for maintenance or offline, DDoS protection or rate-limiting in effect'
raise ExchangeNotAvailable(' '.join([method, url, response, message]))
raise ExchangeError(' '.join([method, url, response]))
def parse_json(self, http_response):
try:
if Exchange.is_json_encoded_object(http_response):
return json.loads(http_response)
except ValueError: # superclass of JsonDecodeError (python2)
pass
def is_text_response(self, headers):
content_type = headers.get('Content-Type', '')
return content_type.startswith('application/json') or content_type.startswith('text/')
@staticmethod
def key_exists(dictionary, key):
if dictionary is None or key is None:
return False
if isinstance(dictionary, list):
if isinstance(key, int) and 0 <= key < len(dictionary):
return dictionary[key] is not None
else:
return False
if key in dictionary:
return dictionary[key] is not None
return False
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if Exchange.key_exists(dictionary, key):
value = float(dictionary[key])
except ValueError as e:
value = default_value
return value
@staticmethod
def safe_string(dictionary, key, default_value=None):
return str(dictionary[key]) if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_string_lower(dictionary, key, default_value=None):
return str(dictionary[key]).lower() if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_string_upper(dictionary, key, default_value=None):
return str(dictionary[key]).upper() if Exchange.key_exists(dictionary, key) else default_value
@staticmethod
def safe_integer(dictionary, key, default_value=None):
if not Exchange.key_exists(dictionary, key):
return default_value
value = dictionary[key]
if isinstance(value, Number) or (isinstance(value, basestring) and value.isnumeric()):
return int(value)
return default_value
@staticmethod
def safe_integer_product(dictionary, key, factor, default_value=None):
if not Exchange.key_exists(dictionary, key):
return default_value
value = dictionary[key]
if isinstance(value, Number):
return int(value * factor)
elif isinstance(value, basestring):
try:
return int(float(value) * factor)
except ValueError:
pass
return default_value
@staticmethod
def safe_timestamp(dictionary, key, default_value=None):
return Exchange.safe_integer_product(dictionary, key, 1000, default_value)
@staticmethod
def safe_value(dictionary, key, default_value=None):
return dictionary[key] if Exchange.key_exists(dictionary, key) else default_value
# we're not using safe_floats with a list argument as we're trying to save some cycles here
# we're not using safe_float_3 either because those cases are too rare to deserve their own optimization
@staticmethod
def safe_float_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_lower_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string_lower, dictionary, key1, key2, default_value)
@staticmethod
def safe_string_upper_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_string_upper, dictionary, key1, key2, default_value)
@staticmethod
def safe_integer_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)
@staticmethod
def safe_integer_product_2(dictionary, key1, key2, factor, default_value=None):
value = Exchange.safe_integer_product(dictionary, key1, factor)
return value if value is not None else Exchange.safe_integer_product(dictionary, key2, factor, default_value)
@staticmethod
def safe_timestamp_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_integer_product_2(dictionary, key1, key2, 1000, default_value)
@staticmethod
def safe_value_2(dictionary, key1, key2, default_value=None):
return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)
@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
"""A helper-wrapper for the safe_value_2() family."""
value = method(dictionary, key1)
return value if value is not None else method(dictionary, key2, default_value)
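# Doctest-style sketch (made-up input): safe_either tries key1 first and only then falls back to key2.
# >>> Exchange.safe_float_2({'price': '1.25'}, 'last', 'price')
# 1.25
# Here 'last' is absent, so the value is taken from 'price' and converted to a float.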
@staticmethod
def truncate(num, precision=0):
"""Deprecated, use decimal_to_precision instead"""
if precision > 0:
decimal_precision = math.pow(10, precision)
return math.trunc(num * decimal_precision) / decimal_precision
return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
"""Deprecated, todo: remove references from subclasses"""
if precision > 0:
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
@staticmethod
def uuid():
return str(uuid.uuid4())
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
@staticmethod
def strip(string):
return string.strip()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
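# Doctest-style sketch (made-up input): nested dicts are merged key by key, while any non-dict
# value on the right-hand side simply overwrites.
# >>> Exchange.deep_extend({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
# {'a': {'x': 1, 'y': 2}, 'b': 3}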
@staticmethod
def filter_by(array, key, value=None):
array = Exchange.to_array(array)
return list(filter(lambda x: x[key] == value, array))
@staticmethod
def filterBy(array, key, value=None):
return Exchange.filter_by(array, key, value)
@staticmethod
def group_by(array, key):
result = {}
array = Exchange.to_array(array)
array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
for entry in array:
if entry[key] not in result:
result[entry[key]] = []
result[entry[key]].append(entry)
return result
@staticmethod
def groupBy(array, key):
return Exchange.group_by(array, key)
@staticmethod
def index_by(array, key):
result = {}
if type(array) is dict:
array = Exchange.keysort(array).values()
is_int_key = isinstance(key, int)
for element in array:
if ((is_int_key and (key < len(element))) or (key in element)) and (element[key] is not None):
k = element[key]
result[k] = element
return result
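# Doctest-style sketch (made-up inputs): group_by collects entries into lists per key value,
# index_by keeps only the last entry per key value.
# >>> Exchange.group_by([{'s': 'BTC'}, {'s': 'ETH'}, {'s': 'BTC'}], 's')
# {'BTC': [{'s': 'BTC'}, {'s': 'BTC'}], 'ETH': [{'s': 'ETH'}]}
# >>> Exchange.index_by([{'id': 'a'}, {'id': 'b'}], 'id')
# {'a': {'id': 'a'}, 'b': {'id': 'b'}}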
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def array_concat(a, b):
return a + b
@staticmethod
def in_array(needle, haystack):
return needle in haystack
@staticmethod
def is_empty(object):
return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
if isinstance(params, dict):
for key in params:
if not isinstance(params[key], list):
string = string.replace('{' + key + '}', str(params[key]))
return string
@staticmethod
def urlencode(params={}):
for key, value in params.items():
if isinstance(value, bool):
params[key] = 'true' if value else 'false'
return _urlencode.urlencode(params)
@staticmethod
def urlencode_with_array_repeat(params={}):
return re.sub(r'%5B\d*%5D', '', Exchange.urlencode(params))
@staticmethod
def rawencode(params={}):
return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri, safe="~()*!.'"):
return _urlencode.quote(uri, safe=safe)
@staticmethod
def omit(d, *args):
if isinstance(d, dict):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
return d
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
@staticmethod
def ordered(array):
return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume, *_] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
@staticmethod
def sec():
return Exchange.seconds()
@staticmethod
def msec():
return Exchange.milliseconds()
@staticmethod
def usec():
return Exchange.microseconds()
@staticmethod
def seconds():
return int(time.time())
@staticmethod
def milliseconds():
return int(time.time() * 1000)
@staticmethod
def microseconds():
return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, (int, long)):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
@staticmethod
def rfc2616(timestamp=None):  # staticmethod, so it must not take self
if timestamp is None:
ts = datetime.datetime.now()
else:
ts = timestamp
stamp = mktime(ts.timetuple())
return format_date_time(stamp)
@staticmethod
def dmy(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
@staticmethod
def ymd(timestamp, infix='-'):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y' + infix + '%m' + infix + '%d')
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, str):
return None
if 'GMT' in timestamp:
try:
string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
return calendar.timegm(dt.utctimetuple()) * 1000
except (TypeError, OverflowError, OSError):
return None
else:
return Exchange.parse8601(timestamp)
@staticmethod
def parse8601(timestamp=None):
if timestamp is None:
return timestamp
yyyy = '([0-9]{4})-?'
mm = '([0-9]{2})-?'
dd = '([0-9]{2})(?:T|[\\s])?'
h = '([0-9]{2}):?'
m = '([0-9]{2}):?'
s = '([0-9]{2})'
ms = '(\\.[0-9]{1,3})?'
tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
try:
match = re.search(regex, timestamp, re.IGNORECASE)
if match is None:
return None
yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
ms = ms or '.000'
ms = (ms + '00')[0:4]
msint = int(ms[1:])
sign = sign or ''
sign = int(sign + '1') * -1
hours = int(hours or 0) * sign
minutes = int(minutes or 0) * sign
offset = datetime.timedelta(hours=hours, minutes=minutes)
string = yyyy + mm + dd + h + m + s + ms + 'Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
dt = dt + offset
return calendar.timegm(dt.utctimetuple()) * 1000 + msint
except (TypeError, OverflowError, OSError, ValueError):
return None
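# Worked example: parse8601('2020-01-01T00:00:00.123Z') matches with no timezone offset, so the result is
# calendar.timegm(2020-01-01 00:00:00) * 1000 + 123 = 1577836800123 (milliseconds since the epoch).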
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
h = hashlib.new(algorithm, request)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
h = hmac.new(secret, request, algorithm)
if digest == 'hex':
return h.hexdigest()
elif digest == 'base64':
return base64.b64encode(h.digest())
return h.digest()
@staticmethod
def binary_concat(*args):
result = bytes()
for arg in args:
result = result + arg
return result
@staticmethod
def binary_concat_array(array):
result = bytes()
for element in array:
result = result + element
return result
@staticmethod
def base64urlencode(s):
return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
@staticmethod
def binary_to_base64(s):
return Exchange.decode(base64.standard_b64encode(s))
@staticmethod
def jwt(request, secret, alg='HS256'):
algos = {
'HS256': hashlib.sha256,
'HS384': hashlib.sha384,
'HS512': hashlib.sha512,
}
header = Exchange.encode(Exchange.json({
'alg': alg,
'typ': 'JWT',
}))
encoded_header = Exchange.base64urlencode(header)
encoded_data = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
token = encoded_header + '.' + encoded_data
if alg[:2] == 'RS':
signature = Exchange.rsa(token, secret, alg)
else:
algorithm = algos[alg]
signature = Exchange.hmac(Exchange.encode(token), secret, algorithm, 'binary')
return token + '.' + Exchange.base64urlencode(signature)
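# Structure note: the returned token is base64url(header) + '.' + base64url(payload) + '.' + base64url(signature),
# where the signature is an HMAC of 'header.payload' with the secret for HS* algorithms and an
# RSA PKCS#1 v1.5 signature for RS* algorithms, as implemented above.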
@staticmethod
def rsa(request, secret, alg='RS256'):
algorithms = {
"RS256": hashes.SHA256(),
"RS384": hashes.SHA384(),
"RS512": hashes.SHA512(),
}
algorithm = algorithms[alg]
priv_key = load_pem_private_key(secret, None, backends.default_backend())
return priv_key.sign(Exchange.encode(request), padding.PKCS1v15(), algorithm)
@staticmethod
def ecdsa(request, secret, algorithm='p256', hash=None, fixed_length=False):
# you're welcome - frosty00
algorithms = {
'p192': [ecdsa.NIST192p, 'sha256'],
'p224': [ecdsa.NIST224p, 'sha256'],
'p256': [ecdsa.NIST256p, 'sha256'],
'p384': [ecdsa.NIST384p, 'sha384'],
'p521': [ecdsa.NIST521p, 'sha512'],
'secp256k1': [ecdsa.SECP256k1, 'sha256'],
}
if algorithm not in algorithms:
raise ArgumentsRequired(algorithm + ' is not a supported algorithm')
curve_info = algorithms[algorithm]
hash_function = getattr(hashlib, curve_info[1])
encoded_request = Exchange.encode(request)
if hash is not None:
digest = Exchange.hash(encoded_request, hash, 'binary')
else:
digest = base64.b16decode(encoded_request, casefold=True)
key = ecdsa.SigningKey.from_string(base64.b16decode(Exchange.encode(secret),
casefold=True), curve=curve_info[0])
r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
sigencode=ecdsa.util.sigencode_strings_canonize)
r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
counter = 0
minimum_size = (1 << (8 * 31)) - 1
half_order = key.privkey.order // 2  # integer division avoids float precision loss on the 256-bit order
while fixed_length and (r_int > half_order or r_int <= minimum_size or s_int <= minimum_size):
r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
sigencode=ecdsa.util.sigencode_strings_canonize,
extra_entropy=Exchange.number_to_le(counter, 32))
r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
counter += 1
r, s = Exchange.decode(base64.b16encode(r_binary)).lower(), Exchange.decode(base64.b16encode(s_binary)).lower()
return {
'r': r,
's': s,
'v': v,
}
@staticmethod
def unjson(input):
return json.loads(input)
@staticmethod
def json(data, params=None):
return json.dumps(data, separators=(',', ':'))
@staticmethod
def is_json_encoded_object(input):
return (isinstance(input, basestring) and
(len(input) >= 2) and
((input[0] == '{') or (input[0] == '[')))
@staticmethod
def encode(string):
return string.encode()
@staticmethod
def decode(string):
return string.decode()
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
def nonce(self):
return Exchange.seconds()
def check_required_credentials(self, error=True):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
if error:
raise AuthenticationError('requires `' + key + '`')
else:
return error
return True
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence"""
if address is None:
raise InvalidAddress('address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
raise InvalidAddress('address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': None,
'used': None,
'total': None,
}
def common_currency_code(self, currency):
if not self.substituteCommonCurrencyCodes:
return currency
return self.safe_string(self.commonCurrencies, currency, currency)
def currency_id(self, commonCode):
if self.currencies:
if commonCode in self.currencies:
return self.currencies[commonCode]['id']
currencyIds = {v: k for k, v in self.commonCurrencies.items()}
return self.safe_string(currencyIds, commonCode, commonCode)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def price_to_precision(self, symbol, price):
return self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def amount_to_precision(self, symbol, amount):
return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], self.precisionMode)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, ROUND, self.currencies[currency]['precision'], self.precisionMode)
def set_markets(self, markets, currencies=None):
values = list(markets.values()) if type(markets) is dict else markets
for i in range(0, len(values)):
values[i] = self.extend(
self.fees['trading'],
{'precision': self.precision, 'limits': self.limits},
values[i]
)
self.markets = self.index_by(values, 'symbol')
self.markets_by_id = self.index_by(values, 'id')
self.marketsById = self.markets_by_id
self.symbols = sorted(list(self.markets.keys()))
self.ids = sorted(list(self.markets_by_id.keys()))
if currencies:
self.currencies = self.deep_extend(currencies, self.currencies)
else:
base_currencies = [{
'id': market['baseId'] if (('baseId' in market) and (market['baseId'] is not None)) else market['base'],
'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
'code': market['base'],
'precision': (
market['precision']['base'] if 'base' in market['precision'] else (
market['precision']['amount'] if 'amount' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'base' in market]
quote_currencies = [{
'id': market['quoteId'] if (('quoteId' in market) and (market['quoteId'] is not None)) else market['quote'],
'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
'code': market['quote'],
'precision': (
market['precision']['quote'] if 'quote' in market['precision'] else (
market['precision']['price'] if 'price' in market['precision'] else None
)
) if 'precision' in market else 8,
} for market in values if 'quote' in market]
base_currencies = self.sort_by(base_currencies, 'code')
quote_currencies = self.sort_by(quote_currencies, 'code')
self.base_currencies = self.index_by(base_currencies, 'code')
self.quote_currencies = self.index_by(quote_currencies, 'code')
currencies = self.sort_by(base_currencies + quote_currencies, 'code')
self.currencies = self.deep_extend(self.index_by(currencies, 'code'), self.currencies)
self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
return self.markets
def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = self.fetch_currencies()
markets = self.fetch_markets(params)
return self.set_markets(markets, currencies)
def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
return self.loaded_fees
def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
raise NotSupported('create_order() not supported yet')
def cancel_order(self, id, symbol=None, params={}):
raise NotSupported('cancel_order() not supported yet')
def cancel_unified_order(self, order, params={}):
return self.cancel_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
def fetch_bids_asks(self, symbols=None, params={}):
raise NotSupported('API does not allow fetching all prices at once with a single call to fetch_bids_asks() for now')
def fetch_ticker(self, symbol, params={}):
raise NotSupported('fetch_ticker() not supported yet')
def fetch_tickers(self, symbols=None, params={}):
raise NotSupported('API does not allow fetching all tickers at once with a single call to fetch_tickers() for now')
def fetch_order_status(self, id, symbol=None, params={}):
order = self.fetch_order(id, symbol, params)
return order['status']
def purge_cached_orders(self, before):
orders = self.to_array(self.orders)
orders = [order for order in orders if (order['status'] == 'open') or (order['timestamp'] >= before)]
self.orders = self.index_by(orders, 'id')
return self.orders
def fetch_order(self, id, symbol=None, params={}):
raise NotSupported('fetch_order() is not supported yet')
def fetch_unified_order(self, order, params={}):
return self.fetch_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_orders() is not supported yet')
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_open_orders() is not supported yet')
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_closed_orders() is not supported yet')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_my_trades() is not supported yet')
def fetch_order_trades(self, id, symbol=None, params={}):
raise NotSupported('fetch_order_trades() is not supported yet')
def fetch_transactions(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_transactions() is not supported yet')
def fetch_deposits(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_deposits() is not supported yet')
def fetch_withdrawals(self, symbol=None, since=None, limit=None, params={}):
raise NotSupported('fetch_withdrawals() is not supported yet')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return ohlcv[0:6] if isinstance(ohlcv, list) else ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
ohlcvs = self.to_array(ohlcvs)
num_ohlcvs = len(ohlcvs)
result = []
i = 0
while i < num_ohlcvs:
if limit and (len(result) >= limit):
break
ohlcv = self.parse_ohlcv(ohlcvs[i], market, timeframe, since, limit)
i = i + 1
if since and (ohlcv[0] < since):
continue
result.append(ohlcv)
return self.sort_by(result, 0)
def parse_bid_ask(self, bidask, price_key=0, amount_key=0):
return [float(bidask[price_key]), float(bidask[amount_key])]
def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
result = []
if len(bidasks):
if type(bidasks[0]) is list:
for bidask in bidasks:
if bidask[price_key] and bidask[amount_key]:
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
elif type(bidasks[0]) is dict:
for bidask in bidasks:
if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
result.append(self.parse_bid_ask(bidask, price_key, amount_key))
else:
raise ExchangeError('unrecognized bidask format: ' + str(bidasks[0]))
return result
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
def parse_balance(self, balance):
currencies = self.omit(balance, ['info', 'free', 'used', 'total']).keys()
balance['free'] = {}
balance['used'] = {}
balance['total'] = {}
for currency in currencies:
if balance[currency].get('total') is None:
if balance[currency].get('free') is not None and balance[currency].get('used') is not None:
balance[currency]['total'] = self.sum(balance[currency].get('free'), balance[currency].get('used'))
if balance[currency].get('free') is None:
if balance[currency].get('total') is not None and balance[currency].get('used') is not None:
balance[currency]['free'] = self.sum(balance[currency]['total'], -balance[currency]['used'])
if balance[currency].get('used') is None:
if balance[currency].get('total') is not None and balance[currency].get('free') is not None:
balance[currency]['used'] = self.sum(balance[currency]['total'], -balance[currency]['free'])
balance['free'][currency] = balance[currency]['free']
balance['used'][currency] = balance[currency]['used']
balance['total'][currency] = balance[currency]['total']
return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
def fetch_free_balance(self, params={}):
return self.fetch_partial_balance('free', params)
def fetch_used_balance(self, params={}):
return self.fetch_partial_balance('used', params)
def fetch_total_balance(self, params={}):
return self.fetch_partial_balance('total', params)
def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return self.fetch_trading_fees(params)
def fetch_funding_fees(self, params={}):
raise NotSupported('fetch_funding_fees() not supported yet')
def fetch_funding_fee(self, code, params={}):
if not self.has['fetchFundingFees']:
raise NotSupported('fetch_funding_fee() not supported yet')
return self.fetch_funding_fees(params)
def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not supported yet')
self.load_markets()
trades = self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = self.fetch_time(params)
self.status['updated'] = updated
return self.status
def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
result = self.convert_trading_view_to_ohlcv(ohlcvs)
return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False): # noqa E741
result = []
for i in range(0, len(ohlcvs[t])):
result.append([
ohlcvs[t][i] if ms else (ohlcvs[t][i] * 1000),
ohlcvs[o][i],
ohlcvs[h][i],
ohlcvs[l][i],
ohlcvs[c][i],
ohlcvs[v][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False): # noqa E741
result = {}
result[t] = []
result[o] = []
result[h] = []
result[l] = []
result[c] = []
result[v] = []
for i in range(0, len(ohlcvs)):
result[t].append(ohlcvs[i][0] if ms else int(ohlcvs[i][0] / 1000))
result[o].append(ohlcvs[i][1])
result[h].append(ohlcvs[i][2])
result[l].append(ohlcvs[i][3])
result[c].append(ohlcvs[i][4])
result[v].append(ohlcvs[i][5])
return result
def build_ohlcv(self, trades, timeframe='1m', since=None, limit=None):
ms = self.parse_timeframe(timeframe) * 1000
ohlcvs = []
(high, low, close, volume) = (2, 3, 4, 5)
num_trades = len(trades)
oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
for i in range(0, oldest):
trade = trades[i]
if (since is not None) and (trade['timestamp'] < since):
continue
opening_time = int(math.floor(trade['timestamp'] / ms) * ms) # Shift the edge of the m/h/d (but not M)
j = len(ohlcvs)
if (j == 0) or opening_time >= ohlcvs[j - 1][0] + ms:
# moved to a new timeframe -> create a new candle from opening trade
ohlcvs.append([
opening_time,
trade['price'],
trade['price'],
trade['price'],
trade['price'],
trade['amount'],
])
else:
# still processing the same timeframe -> update opening trade
ohlcvs[j - 1][high] = max(ohlcvs[j - 1][high], trade['price'])
ohlcvs[j - 1][low] = min(ohlcvs[j - 1][low], trade['price'])
ohlcvs[j - 1][close] = trade['price']
ohlcvs[j - 1][volume] += trade['amount']
return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' == unit:
scale = 60 * 60 * 24 * 365
elif 'M' == unit:
scale = 60 * 60 * 24 * 30
elif 'w' == unit:
scale = 60 * 60 * 24 * 7
elif 'd' == unit:
scale = 60 * 60 * 24
elif 'h' == unit:
scale = 60 * 60
elif 'm' == unit:
scale = 60
elif 's' == unit:
scale = 1
else:
raise NotSupported('timeframe unit {} is not supported'.format(unit))
return amount * scale
@staticmethod
def round_timeframe(timeframe, timestamp, direction=ROUND_DOWN):
ms = Exchange.parse_timeframe(timeframe) * 1000
# Get offset based on timeframe in milliseconds
offset = timestamp % ms
return timestamp - offset + (ms if direction == ROUND_UP else 0)
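# Worked example (hypothetical timestamp): parse_timeframe('15m') == 15 * 60 == 900, so a 15m candle spans
# 900000 ms; round_timeframe('1h', 1577840461000) removes the 61000 ms elapsed past the hour and returns
# 1577840400000, i.e. 2020-01-01T01:00:00Z.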
def parse_trades(self, trades, market=None, since=None, limit=None, params={}):
array = self.to_array(trades)
array = [self.extend(self.parse_trade(trade, market), params) for trade in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def parse_ledger(self, data, currency=None, since=None, limit=None, params={}):
array = self.to_array(data)
result = []
for item in array:
entry = self.parse_ledger_entry(item, currency)
if isinstance(entry, list):
result += [self.extend(i, params) for i in entry]
else:
result.append(self.extend(entry, params))
result = self.sort_by(result, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(result, code, since, limit)
def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
array = self.to_array(transactions)
array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
return self.filter_by_currency_since_limit(array, code, since, limit)
def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
array = self.to_array(orders)
array = [self.extend(self.parse_order(order, market), params) for order in array]
array = self.sort_by(array, 'timestamp')
symbol = market['symbol'] if market else None
return self.filter_by_symbol_since_limit(array, symbol, since, limit)
def safe_currency_code(self, currency_id, currency=None):
code = None
if currency_id is not None:
currency_id = str(currency_id)
if self.currencies_by_id is not None and currency_id in self.currencies_by_id:
code = self.currencies_by_id[currency_id]['code']
else:
code = self.common_currency_code(currency_id.upper())
if code is None and currency is not None:
code = currency['code']
return code
def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None, key='timestamp', tail=False):
array = self.to_array(array)
if value is not None:
array = [entry for entry in array if entry[field] == value]
if since is not None:
array = [entry for entry in array if entry[key] >= since]
if limit is not None:
array = array[-limit:] if tail and (since is None) else array[:limit]
return array
def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None):
return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit)
def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None):
return self.filter_by_value_since_limit(array, 'currency', code, since, limit)
def filter_by_since_limit(self, array, since=None, limit=None, key='timestamp', tail=False):
array = self.to_array(array)
if since is not None:
array = [entry for entry in array if entry[key] >= since]
if limit is not None:
array = array[-limit:] if tail and (since is None) else array[:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
def currency(self, code):
if not self.currencies:
raise ExchangeError('Currencies not loaded')
if isinstance(code, basestring) and (code in self.currencies):
return self.currencies[code]
raise ExchangeError('Does not have currency code ' + str(code))
def market(self, symbol):
if not self.markets:
raise ExchangeError('Markets not loaded')
if isinstance(symbol, basestring) and (symbol in self.markets):
return self.markets[symbol]
raise BadSymbol('{} does not have market symbol {}'.format(self.id, symbol))
def market_ids(self, symbols):
return [self.market_id(symbol) for symbol in symbols]
def market_id(self, symbol):
market = self.market(symbol)
return market['id'] if type(market) is dict else symbol
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
rate = market[takerOrMaker]
cost = float(self.cost_to_precision(symbol, amount * price))
return {
'rate': rate,
'type': takerOrMaker,
'currency': market['quote'],
'cost': float(self.fee_to_precision(symbol, rate * cost)),
}
def edit_limit_buy_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'buy', *args)
def edit_limit_sell_order(self, id, symbol, *args):
return self.edit_limit_order(id, symbol, 'sell', *args)
def edit_limit_order(self, id, symbol, *args):
return self.edit_order(id, symbol, 'limit', *args)
def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('edit_order() requires enableRateLimit = true')
self.cancel_order(id, symbol)
return self.create_order(symbol, *args)
def create_limit_order(self, symbol, *args):
return self.create_order(symbol, 'limit', *args)
def create_market_order(self, symbol, *args):
return self.create_order(symbol, 'market', *args)
def create_limit_buy_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'buy', *args)
def create_limit_sell_order(self, symbol, *args):
return self.create_order(symbol, 'limit', 'sell', *args)
def create_market_buy_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'buy', amount, None, params)
def create_market_sell_order(self, symbol, amount, params={}):
return self.create_order(symbol, 'market', 'sell', amount, None, params)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
# -------------------------------------------------------------------------
# web3 / 0x methods
@staticmethod
def has_web3():
return Web3 is not None
def check_required_dependencies(self):
if not Exchange.has_web3():
raise NotSupported("Web3 functionality requires Python3 and web3 package installed: https://github.com/ethereum/web3.py")
@staticmethod
def from_wei(amount, decimals=18):
amount_float = float(amount)
exponential = '{:.14e}'.format(amount_float)
n, exponent = exponential.split('e')
new_exponent = int(exponent) - decimals
return float(n + 'e' + str(new_exponent))
@staticmethod
def to_wei(amount, decimals=18):
amount_float = float(amount)
exponential = '{:.14e}'.format(amount_float)
n, exponent = exponential.split('e')
new_exponent = int(exponent) + decimals
return number_to_string(n + 'e' + str(new_exponent))
def privateKeyToAddress(self, privateKey):
private_key_bytes = base64.b16decode(Exchange.encode(privateKey), True)
public_key_bytes = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1).verifying_key.to_string()
public_key_hash = self.web3.sha3(public_key_bytes)
return '0x' + Exchange.decode(base64.b16encode(public_key_hash))[-40:].lower()
def soliditySha3(self, array):
values = self.solidityValues(array)
types = self.solidityTypes(values)
return self.web3.soliditySha3(types, values).hex()
def solidityTypes(self, array):
return ['address' if self.web3.isAddress(value) else 'uint256' for value in array]
def solidityValues(self, array):
return [self.web3.toChecksumAddress(value) if self.web3.isAddress(value) else (int(value, 16) if str(value)[:2] == '0x' else int(value)) for value in array]
def getZeroExOrderHash2(self, order):
return self.soliditySha3([
order['exchangeContractAddress'], # address
order['maker'], # address
order['taker'], # address
order['makerTokenAddress'], # address
order['takerTokenAddress'], # address
order['feeRecipient'], # address
order['makerTokenAmount'], # uint256
order['takerTokenAmount'], # uint256
order['makerFee'], # uint256
order['takerFee'], # uint256
order['expirationUnixTimestampSec'], # uint256
order['salt'], # uint256
])
def getZeroExOrderHash(self, order):
unpacked = [
self.web3.toChecksumAddress(order['exchangeContractAddress']), # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['maker']), # { value: order.maker, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['taker']), # { value: order.taker, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['makerTokenAddress']), # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['takerTokenAddress']), # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address },
self.web3.toChecksumAddress(order['feeRecipient']), # { value: order.feeRecipient, type: types_1.SolidityTypes.Address },
int(order['makerTokenAmount']), # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, },
int(order['takerTokenAmount']), # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, },
int(order['makerFee']), # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, },
int(order['takerFee']), # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, },
int(order['expirationUnixTimestampSec']), # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, },
int(order['salt']), # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 },
]
types = [
'address', # { value: order.exchangeContractAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.maker, type: types_1.SolidityTypes.Address },
'address', # { value: order.taker, type: types_1.SolidityTypes.Address },
'address', # { value: order.makerTokenAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.takerTokenAddress, type: types_1.SolidityTypes.Address },
'address', # { value: order.feeRecipient, type: types_1.SolidityTypes.Address },
'uint256', # { value: bigNumberToBN(order.makerTokenAmount), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.takerTokenAmount), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.makerFee), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.takerFee), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.expirationUnixTimestampSec), type: types_1.SolidityTypes.Uint256, },
'uint256', # { value: bigNumberToBN(order.salt), type: types_1.SolidityTypes.Uint256 },
]
return self.web3.soliditySha3(types, unpacked).hex()
@staticmethod
def remove_0x_prefix(value):
if value[:2] == '0x':
return value[2:]
return value
def getZeroExOrderHashV2(self, order):
# https://github.com/0xProject/0x-monorepo/blob/development/python-packages/order_utils/src/zero_ex/order_utils/__init__.py
def pad_20_bytes_to_32(twenty_bytes):
return bytes(12) + twenty_bytes
def int_to_32_big_endian_bytes(i):
return i.to_bytes(32, byteorder="big")
def to_bytes(value):
if not isinstance(value, str):
raise TypeError("Value must be an instance of str")
if len(value) % 2:
value = "0x0" + self.remove_0x_prefix(value)
return base64.b16decode(self.remove_0x_prefix(value), casefold=True)
domain_struct_header = b"\x91\xab=\x17\xe3\xa5\n\x9d\x89\xe6?\xd3\x0b\x92\xbe\x7fS6\xb0;({\xb9Fxz\x83\xa9\xd6*'f\xf0\xf2F\x18\xf4\xc4\xbe\x1eb\xe0&\xfb\x03\x9a \xef\x96\xf4IR\x94\x81}\x10'\xff\xaam\x1fp\xe6\x1e\xad|[\xef\x02x\x16\xa8\x00\xda\x176DO\xb5\x8a\x80~\xf4\xc9`;xHg?~:h\xeb\x14\xa5"
order_schema_hash = b'w\x05\x01\xf8\x8a&\xed\xe5\xc0J \xef\x87yi\xe9a\xeb\x11\xfc\x13\xb7\x8a\xafAKc=\xa0\xd4\xf8o'
header = b"\x19\x01"
domain_struct_hash = self.web3.sha3(
domain_struct_header +
pad_20_bytes_to_32(to_bytes(order["exchangeAddress"]))
)
order_struct_hash = self.web3.sha3(
order_schema_hash +
pad_20_bytes_to_32(to_bytes(order["makerAddress"])) +
pad_20_bytes_to_32(to_bytes(order["takerAddress"])) +
pad_20_bytes_to_32(to_bytes(order["feeRecipientAddress"])) +
pad_20_bytes_to_32(to_bytes(order["senderAddress"])) +
int_to_32_big_endian_bytes(int(order["makerAssetAmount"])) +
int_to_32_big_endian_bytes(int(order["takerAssetAmount"])) +
int_to_32_big_endian_bytes(int(order["makerFee"])) +
int_to_32_big_endian_bytes(int(order["takerFee"])) +
int_to_32_big_endian_bytes(int(order["expirationTimeSeconds"])) +
int_to_32_big_endian_bytes(int(order["salt"])) +
self.web3.sha3(to_bytes(order["makerAssetData"])) +
self.web3.sha3(to_bytes(order["takerAssetData"]))
)
sha3 = self.web3.sha3(
header +
domain_struct_hash +
order_struct_hash
)
return '0x' + base64.b16encode(sha3).decode('ascii').lower()
def signZeroExOrderV2(self, order, privateKey):
orderHash = self.getZeroExOrderHashV2(order)
signature = self.signMessage(orderHash[-64:], privateKey)
return self.extend(order, {
'orderHash': orderHash,
'signature': self._convertECSignatureToSignatureHex(signature),
})
def _convertECSignatureToSignatureHex(self, signature):
# https://github.com/0xProject/0x-monorepo/blob/development/packages/order-utils/src/signature_utils.ts
v = signature["v"]
if v != 27 and v != 28:
v = v + 27
return (
hex(v) +
signature["r"][-64:] +
signature["s"][-64:] +
"03"
)
def hashMessage(self, message):
message_bytes = base64.b16decode(Exchange.encode(Exchange.remove_0x_prefix(message)), True)
hash_bytes = self.web3.sha3(b"\x19Ethereum Signed Message:\n" + Exchange.encode(str(len(message_bytes))) + message_bytes)
return '0x' + Exchange.decode(base64.b16encode(hash_bytes)).lower()
@staticmethod
def signHash(hash, privateKey):
signature = Exchange.ecdsa(hash[-64:], privateKey, 'secp256k1', None)
return {
'r': '0x' + signature['r'],
's': '0x' + signature['s'],
'v': 27 + signature['v'],
}
def signMessage(self, message, privateKey):
#
# The following comment is related to MetaMask; we use the first (upper) type of signature prefix shown below:
#
# z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
# '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
# prefixType: 'ETH_SIGN',
# shouldAddPrefixBeforeCallingEthSign: true
# }).then ((e, r) => console.log (e,r))
#
# { ↓
# v: 28,
# r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2",
# s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf"
# }
#
# --------------------------------------------------------------------
#
# z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
# '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
# prefixType: 'NONE',
# shouldAddPrefixBeforeCallingEthSign: true
# }).then ((e, r) => console.log (e,r))
#
# { ↓
# v: 27,
# r: "0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6",
# s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394"
# }
#
message_hash = self.hashMessage(message)
signature = self.signHash(message_hash[-64:], privateKey[-64:])
return signature
def oath(self):
if self.twofa is not None:
return self.totp(self.twofa)
else:
raise ExchangeError(self.id + ' set .twofa to use this feature')
@staticmethod
def decimal_to_bytes(n, endian='big'):
"""int.from_bytes and int.to_bytes don't work in python2"""
if n > 0:
next_byte = Exchange.decimal_to_bytes(n // 0x100, endian)
remainder = bytes([n % 0x100])
return next_byte + remainder if endian == 'big' else remainder + next_byte
else:
return b''
@staticmethod
def totp(key):
def hex_to_dec(n):
return int(n, base=16)
def base32_to_bytes(n):
missing_padding = len(n) % 8
padding = 8 - missing_padding if missing_padding > 0 else 0
padded = n.upper() + ('=' * padding)
return base64.b32decode(padded) # throws an error if the key is invalid
epoch = int(time.time()) // 30
hmac_res = Exchange.hmac(Exchange.decimal_to_bytes(epoch, 'big'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
offset = hex_to_dec(hmac_res[-1]) * 2
otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
return otp[-6:]
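# Usage note: totp() follows the RFC 6238 scheme (HMAC-SHA1, 30-second steps, 6 digits, though leading zeros
# are not padded here), so for an example base32 secret such as 'JBSWY3DPEHPK3PXP' it should produce the same
# code an authenticator app shows at the same moment.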
@staticmethod
def number_to_le(n, size):
return Exchange.decimal_to_bytes(int(n), 'little').ljust(size, b'\x00')
@staticmethod
def number_to_be(n, size):
return Exchange.decimal_to_bytes(int(n), 'big').rjust(size, b'\x00')
@staticmethod
def base16_to_binary(s):
return base64.b16decode(s, True)
# python supports arbitrarily big integers
@staticmethod
def integer_divide(a, b):
return int(a) // int(b)
@staticmethod
def integer_pow(a, b):
return int(a) ** int(b)
@staticmethod
def integer_modulo(a, b):
return int(a) % int(b)
def sleep(self, milliseconds):
return time.sleep(milliseconds / 1000)
|
the-stack_0_21673 | from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
din = data_layer(name='data', size=100)
enc = din
for i in range(32):
enc = addto_layer([enc, enc])
pred = fc_layer(
input=fc_layer(
input=enc, size=32, act=ReluActivation()),
size=10,
act=SoftmaxActivation())
outputs(pred)
|
the-stack_0_21676 | # This script is licensed as public domain.
bl_info = {
"name": "Export Inter-Quake Model (.iqm/.iqe)",
"author": "Lee Salzman",
"version": (2019, 4, 24),
"blender": (2, 80, 0),
"location": "File > Export > Inter-Quake Model",
"description": "Export to the Inter-Quake Model format (.iqm/.iqe)",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export"}
import os, struct, math
import mathutils
import bpy
import bpy_extras.io_utils
IQM_POSITION = 0
IQM_TEXCOORD = 1
IQM_NORMAL = 2
IQM_TANGENT = 3
IQM_BLENDINDEXES = 4
IQM_BLENDWEIGHTS = 5
IQM_COLOR = 6
IQM_CUSTOM = 0x10
IQM_BYTE = 0
IQM_UBYTE = 1
IQM_SHORT = 2
IQM_USHORT = 3
IQM_INT = 4
IQM_UINT = 5
IQM_HALF = 6
IQM_FLOAT = 7
IQM_DOUBLE = 8
IQM_LOOP = 1
IQM_HEADER = struct.Struct('<16s27I')
IQM_MESH = struct.Struct('<6I')
IQM_TRIANGLE = struct.Struct('<3I')
IQM_JOINT = struct.Struct('<Ii10f')
IQM_POSE = struct.Struct('<iI20f')
IQM_ANIMATION = struct.Struct('<3IfI')
IQM_VERTEXARRAY = struct.Struct('<5I')
IQM_BOUNDS = struct.Struct('<8f')
MAXVCACHE = 32
class Vertex:
def __init__(self, index, coord, normal, uv, weights, color):
self.index = index
self.coord = coord
self.normal = normal
self.uv = uv
self.weights = weights
self.color = color
def normalizeWeights(self):
# renormalizes all weights such that they add up to 255
# the list is chopped/padded to exactly 4 weights if necessary
if not self.weights:
self.weights = [ (0, 0), (0, 0), (0, 0), (0, 0) ]
return
self.weights.sort(key = lambda weight: weight[0], reverse=True)
if len(self.weights) > 4:
del self.weights[4:]
totalweight = sum([ weight for (weight, bone) in self.weights])
if totalweight > 0:
self.weights = [ (int(round(weight * 255.0 / totalweight)), bone) for (weight, bone) in self.weights]
while len(self.weights) > 1 and self.weights[-1][0] <= 0:
self.weights.pop()
else:
totalweight = len(self.weights)
self.weights = [ (int(round(255.0 / totalweight)), bone) for (weight, bone) in self.weights]
totalweight = sum([ weight for (weight, bone) in self.weights])
while totalweight != 255:
for i, (weight, bone) in enumerate(self.weights):
if totalweight > 255 and weight > 0:
self.weights[i] = (weight - 1, bone)
totalweight -= 1
elif totalweight < 255 and weight < 255:
self.weights[i] = (weight + 1, bone)
totalweight += 1
while len(self.weights) < 4:
self.weights.append((0, self.weights[-1][1]))
def calcScore(self):
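        # Forsyth scoring: vertices referenced by few remaining triangles score higher
        # (valence penalty), and vertices already in the simulated cache get a bonus that
        # decays toward the back of the cache (the three most recent get a flat 0.75)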
if self.uses:
self.score = 2.0 * pow(len(self.uses), -0.5)
if self.cacherank >= 3:
self.score += pow(1.0 - float(self.cacherank - 3)/MAXVCACHE, 1.5)
elif self.cacherank >= 0:
self.score += 0.75
else:
self.score = -1.0
def neighborKey(self, other):
if self.coord < other.coord:
return (self.coord.x, self.coord.y, self.coord.z, other.coord.x, other.coord.y, other.coord.z, tuple(self.weights), tuple(other.weights))
else:
return (other.coord.x, other.coord.y, other.coord.z, self.coord.x, self.coord.y, self.coord.z, tuple(other.weights), tuple(self.weights))
def __hash__(self):
return self.index
def __eq__(self, v):
return self.coord == v.coord and self.normal == v.normal and self.uv == v.uv and self.weights == v.weights and self.color == v.color
class Mesh:
def __init__(self, name, material, verts):
self.name = name
self.material = material
self.verts = [ None for v in verts ]
self.vertmap = {}
self.tris = []
def calcTangents(self):
# See "Tangent Space Calculation" at http://www.terathon.com/code/tangent.html
for v in self.verts:
v.tangent = mathutils.Vector((0.0, 0.0, 0.0))
v.bitangent = mathutils.Vector((0.0, 0.0, 0.0))
for (v0, v1, v2) in self.tris:
dco1 = v1.coord - v0.coord
dco2 = v2.coord - v0.coord
duv1 = v1.uv - v0.uv
duv2 = v2.uv - v0.uv
tangent = dco2*duv1.y - dco1*duv2.y
bitangent = dco2*duv1.x - dco1*duv2.x
if dco2.cross(dco1).dot(bitangent.cross(tangent)) < 0:
tangent.negate()
bitangent.negate()
v0.tangent += tangent
v1.tangent += tangent
v2.tangent += tangent
v0.bitangent += bitangent
v1.bitangent += bitangent
v2.bitangent += bitangent
for v in self.verts:
v.tangent = v.tangent - v.normal*v.tangent.dot(v.normal)
v.tangent.normalize()
if v.normal.cross(v.tangent).dot(v.bitangent) < 0:
v.bitangent = -1.0
else:
v.bitangent = 1.0
def optimize(self):
# Linear-speed vertex cache optimization algorithm by Tom Forsyth
for v in self.verts:
if v:
v.index = -1
v.uses = []
v.cacherank = -1
for i, (v0, v1, v2) in enumerate(self.tris):
v0.uses.append(i)
v1.uses.append(i)
v2.uses.append(i)
for v in self.verts:
if v:
v.calcScore()
besttri = -1
bestscore = -42.0
scores = []
for i, (v0, v1, v2) in enumerate(self.tris):
scores.append(v0.score + v1.score + v2.score)
if scores[i] > bestscore:
besttri = i
bestscore = scores[i]
vertloads = 0 # debug info
vertschedule = []
trischedule = []
vcache = []
while besttri >= 0:
tri = self.tris[besttri]
scores[besttri] = -666.0
trischedule.append(tri)
for v in tri:
if v.cacherank < 0: # debug info
vertloads += 1 # debug info
if v.index < 0:
v.index = len(vertschedule)
vertschedule.append(v)
v.uses.remove(besttri)
v.cacherank = -1
v.score = -1.0
vcache = [ v for v in tri if v.uses ] + [ v for v in vcache if v.cacherank >= 0 ]
for i, v in enumerate(vcache):
v.cacherank = i
v.calcScore()
besttri = -1
bestscore = -42.0
for v in vcache:
for i in v.uses:
v0, v1, v2 = self.tris[i]
scores[i] = v0.score + v1.score + v2.score
if scores[i] > bestscore:
besttri = i
bestscore = scores[i]
while len(vcache) > MAXVCACHE:
vcache.pop().cacherank = -1
if besttri < 0:
for i, score in enumerate(scores):
if score > bestscore:
besttri = i
bestscore = score
print('%s: %d verts optimized to %d/%d loads for %d entry LRU cache' % (self.name, len(self.verts), vertloads, len(vertschedule), MAXVCACHE))
#print('%s: %d verts scheduled to %d' % (self.name, len(self.verts), len(vertschedule)))
self.verts = vertschedule
# print('%s: %d tris scheduled to %d' % (self.name, len(self.tris), len(trischedule)))
self.tris = trischedule
def meshData(self, iqm):
return [ iqm.addText(self.name), iqm.addText(self.material), self.firstvert, len(self.verts), self.firsttri, len(self.tris) ]
class Bone:
def __init__(self, name, origname, index, parent, matrix):
self.name = name
self.origname = origname
self.index = index
self.parent = parent
self.matrix = matrix
self.localmatrix = matrix
if self.parent:
self.localmatrix = parent.matrix.inverted() @ self.localmatrix
self.numchannels = 0
self.channelmask = 0
self.channeloffsets = [ 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10, 1.0e10 ]
self.channelscales = [ -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10, -1.0e10 ]
def jointData(self, iqm):
if self.parent:
parent = self.parent.index
else:
parent = -1
pos = self.localmatrix.to_translation()
orient = self.localmatrix.to_quaternion()
orient.normalize()
if orient.w > 0:
orient.negate()
scale = self.localmatrix.to_scale()
scale.x = round(scale.x*0x10000)/0x10000
scale.y = round(scale.y*0x10000)/0x10000
scale.z = round(scale.z*0x10000)/0x10000
return [ iqm.addText(self.name), parent, pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z ]
def poseData(self, iqm):
if self.parent:
parent = self.parent.index
else:
parent = -1
return [ parent, self.channelmask ] + self.channeloffsets + self.channelscales
def calcChannelMask(self):
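        # turn the per-channel [min, max] ranges gathered from the animation into an
        # offset + scale so each animated channel quantizes to an unsigned 16-bit value;
        # channels with (near-)constant values are dropped from the mask entirely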
for i in range(0, 10):
self.channelscales[i] -= self.channeloffsets[i]
if self.channelscales[i] >= 1.0e-10:
self.numchannels += 1
self.channelmask |= 1 << i
self.channelscales[i] /= 0xFFFF
else:
self.channelscales[i] = 0.0
return self.numchannels
class Animation:
def __init__(self, name, frames, fps = 0.0, flags = 0):
self.name = name
self.frames = frames
self.fps = fps
self.flags = flags
def calcFrameLimits(self, bones):
for frame in self.frames:
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
bone.channeloffsets[0] = min(bone.channeloffsets[0], loc.x)
bone.channeloffsets[1] = min(bone.channeloffsets[1], loc.y)
bone.channeloffsets[2] = min(bone.channeloffsets[2], loc.z)
bone.channeloffsets[3] = min(bone.channeloffsets[3], quat.x)
bone.channeloffsets[4] = min(bone.channeloffsets[4], quat.y)
bone.channeloffsets[5] = min(bone.channeloffsets[5], quat.z)
bone.channeloffsets[6] = min(bone.channeloffsets[6], quat.w)
bone.channeloffsets[7] = min(bone.channeloffsets[7], scale.x)
bone.channeloffsets[8] = min(bone.channeloffsets[8], scale.y)
bone.channeloffsets[9] = min(bone.channeloffsets[9], scale.z)
bone.channelscales[0] = max(bone.channelscales[0], loc.x)
bone.channelscales[1] = max(bone.channelscales[1], loc.y)
bone.channelscales[2] = max(bone.channelscales[2], loc.z)
bone.channelscales[3] = max(bone.channelscales[3], quat.x)
bone.channelscales[4] = max(bone.channelscales[4], quat.y)
bone.channelscales[5] = max(bone.channelscales[5], quat.z)
bone.channelscales[6] = max(bone.channelscales[6], quat.w)
bone.channelscales[7] = max(bone.channelscales[7], scale.x)
bone.channelscales[8] = max(bone.channelscales[8], scale.y)
bone.channelscales[9] = max(bone.channelscales[9], scale.z)
def animData(self, iqm):
return [ iqm.addText(self.name), self.firstframe, len(self.frames), self.fps, self.flags ]
def frameData(self, bones):
data = b''
for frame in self.frames:
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
if (bone.channelmask&0x7F) == 0x7F:
lx = int(round((loc.x - bone.channeloffsets[0]) / bone.channelscales[0]))
ly = int(round((loc.y - bone.channeloffsets[1]) / bone.channelscales[1]))
lz = int(round((loc.z - bone.channeloffsets[2]) / bone.channelscales[2]))
qx = int(round((quat.x - bone.channeloffsets[3]) / bone.channelscales[3]))
qy = int(round((quat.y - bone.channeloffsets[4]) / bone.channelscales[4]))
qz = int(round((quat.z - bone.channeloffsets[5]) / bone.channelscales[5]))
qw = int(round((quat.w - bone.channeloffsets[6]) / bone.channelscales[6]))
data += struct.pack('<7H', lx, ly, lz, qx, qy, qz, qw)
else:
if bone.channelmask & 1:
data += struct.pack('<H', int(round((loc.x - bone.channeloffsets[0]) / bone.channelscales[0])))
if bone.channelmask & 2:
data += struct.pack('<H', int(round((loc.y - bone.channeloffsets[1]) / bone.channelscales[1])))
if bone.channelmask & 4:
data += struct.pack('<H', int(round((loc.z - bone.channeloffsets[2]) / bone.channelscales[2])))
if bone.channelmask & 8:
data += struct.pack('<H', int(round((quat.x - bone.channeloffsets[3]) / bone.channelscales[3])))
if bone.channelmask & 16:
data += struct.pack('<H', int(round((quat.y - bone.channeloffsets[4]) / bone.channelscales[4])))
if bone.channelmask & 32:
data += struct.pack('<H', int(round((quat.z - bone.channeloffsets[5]) / bone.channelscales[5])))
if bone.channelmask & 64:
data += struct.pack('<H', int(round((quat.w - bone.channeloffsets[6]) / bone.channelscales[6])))
if bone.channelmask & 128:
data += struct.pack('<H', int(round((scale.x - bone.channeloffsets[7]) / bone.channelscales[7])))
if bone.channelmask & 256:
data += struct.pack('<H', int(round((scale.y - bone.channeloffsets[8]) / bone.channelscales[8])))
if bone.channelmask & 512:
data += struct.pack('<H', int(round((scale.z - bone.channeloffsets[9]) / bone.channelscales[9])))
return data
def frameBoundsData(self, bones, meshes, frame, invbase):
bbmin = bbmax = None
xyradius = 0.0
radius = 0.0
transforms = []
for i, bone in enumerate(bones):
loc, quat, scale, mat = frame[i]
if bone.parent:
mat = transforms[bone.parent.index] @ mat
transforms.append(mat)
for i, mat in enumerate(transforms):
transforms[i] = mat @ invbase[i]
for mesh in meshes:
for v in mesh.verts:
pos = mathutils.Vector((0.0, 0.0, 0.0))
for (weight, bone) in v.weights:
if weight > 0:
pos += (transforms[bone] @ v.coord) * (weight / 255.0)
if bbmin:
bbmin.x = min(bbmin.x, pos.x)
bbmin.y = min(bbmin.y, pos.y)
bbmin.z = min(bbmin.z, pos.z)
bbmax.x = max(bbmax.x, pos.x)
bbmax.y = max(bbmax.y, pos.y)
bbmax.z = max(bbmax.z, pos.z)
else:
bbmin = pos.copy()
bbmax = pos.copy()
pradius = pos.x*pos.x + pos.y*pos.y
if pradius > xyradius:
xyradius = pradius
pradius += pos.z*pos.z
if pradius > radius:
radius = pradius
if bbmin:
xyradius = math.sqrt(xyradius)
radius = math.sqrt(radius)
else:
bbmin = bbmax = mathutils.Vector((0.0, 0.0, 0.0))
return IQM_BOUNDS.pack(bbmin.x, bbmin.y, bbmin.z, bbmax.x, bbmax.y, bbmax.z, xyradius, radius)
def boundsData(self, bones, meshes):
invbase = []
for bone in bones:
invbase.append(bone.matrix.inverted())
data = b''
for i, frame in enumerate(self.frames):
print('Calculating bounding box for %s:%d' % (self.name, i))
data += self.frameBoundsData(bones, meshes, frame, invbase)
return data
class IQMFile:
def __init__(self):
self.textoffsets = {}
self.textdata = b''
self.meshes = []
self.meshdata = []
self.numverts = 0
self.numtris = 0
self.joints = []
self.jointdata = []
self.numframes = 0
self.framesize = 0
self.anims = []
self.posedata = []
self.animdata = []
self.framedata = []
self.vertdata = []
def addText(self, str):
if not self.textdata:
self.textdata += b'\x00'
self.textoffsets[''] = 0
try:
return self.textoffsets[str]
except:
offset = len(self.textdata)
self.textoffsets[str] = offset
self.textdata += bytes(str, encoding="utf8") + b'\x00'
return offset
def addJoints(self, bones):
for bone in bones:
self.joints.append(bone)
if self.meshes:
self.jointdata.append(bone.jointData(self))
def addMeshes(self, meshes):
self.meshes += meshes
for mesh in meshes:
mesh.firstvert = self.numverts
mesh.firsttri = self.numtris
self.meshdata.append(mesh.meshData(self))
self.numverts += len(mesh.verts)
self.numtris += len(mesh.tris)
def addAnims(self, anims):
self.anims += anims
for anim in anims:
anim.firstframe = self.numframes
self.animdata.append(anim.animData(self))
self.numframes += len(anim.frames)
def calcFrameSize(self):
for anim in self.anims:
anim.calcFrameLimits(self.joints)
self.framesize = 0
for joint in self.joints:
self.framesize += joint.calcChannelMask()
for joint in self.joints:
if self.anims:
self.posedata.append(joint.poseData(self))
print('Exporting %d frames of size %d' % (self.numframes, self.framesize))
def writeVerts(self, file, offset):
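        # vertex attributes are written as separate planar blocks (all positions, then all
        # texcoords, normals, tangents, optional blend data and colors); each
        # IQM_VERTEXARRAY header records the running byte offset of its block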
if self.numverts <= 0:
return
file.write(IQM_VERTEXARRAY.pack(IQM_POSITION, 0, IQM_FLOAT, 3, offset))
offset += self.numverts * struct.calcsize('<3f')
file.write(IQM_VERTEXARRAY.pack(IQM_TEXCOORD, 0, IQM_FLOAT, 2, offset))
offset += self.numverts * struct.calcsize('<2f')
file.write(IQM_VERTEXARRAY.pack(IQM_NORMAL, 0, IQM_FLOAT, 3, offset))
offset += self.numverts * struct.calcsize('<3f')
file.write(IQM_VERTEXARRAY.pack(IQM_TANGENT, 0, IQM_FLOAT, 4, offset))
offset += self.numverts * struct.calcsize('<4f')
if self.joints:
file.write(IQM_VERTEXARRAY.pack(IQM_BLENDINDEXES, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
file.write(IQM_VERTEXARRAY.pack(IQM_BLENDWEIGHTS, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in self.meshes)
if hascolors:
file.write(IQM_VERTEXARRAY.pack(IQM_COLOR, 0, IQM_UBYTE, 4, offset))
offset += self.numverts * struct.calcsize('<4B')
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<3f', *v.coord))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<2f', *v.uv))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<3f', *v.normal))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4f', v.tangent.x, v.tangent.y, v.tangent.z, v.bitangent))
if self.joints:
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4B', v.weights[0][1], v.weights[1][1], v.weights[2][1], v.weights[3][1]))
for mesh in self.meshes:
for v in mesh.verts:
file.write(struct.pack('<4B', v.weights[0][0], v.weights[1][0], v.weights[2][0], v.weights[3][0]))
if hascolors:
for mesh in self.meshes:
for v in mesh.verts:
if v.color:
file.write(struct.pack('<4B', v.color[0], v.color[1], v.color[2], v.color[3]))
else:
file.write(struct.pack('<4B', 0, 0, 0, 255))
def calcNeighbors(self):
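        # build an undirected edge -> triangle-list map; a triangle edge shared by exactly
        # one other triangle yields that neighbor, unmatched or non-manifold edges yield -1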
edges = {}
for mesh in self.meshes:
for i, (v0, v1, v2) in enumerate(mesh.tris):
e0 = v0.neighborKey(v1)
e1 = v1.neighborKey(v2)
e2 = v2.neighborKey(v0)
tri = mesh.firsttri + i
try: edges[e0].append(tri)
except: edges[e0] = [tri]
try: edges[e1].append(tri)
except: edges[e1] = [tri]
try: edges[e2].append(tri)
except: edges[e2] = [tri]
neighbors = []
for mesh in self.meshes:
for i, (v0, v1, v2) in enumerate(mesh.tris):
e0 = edges[v0.neighborKey(v1)]
e1 = edges[v1.neighborKey(v2)]
e2 = edges[v2.neighborKey(v0)]
tri = mesh.firsttri + i
match0 = match1 = match2 = -1
if len(e0) == 2: match0 = e0[e0.index(tri)^1]
if len(e1) == 2: match1 = e1[e1.index(tri)^1]
if len(e2) == 2: match2 = e2[e2.index(tri)^1]
neighbors.append((match0, match1, match2))
self.neighbors = neighbors
def writeTris(self, file):
for mesh in self.meshes:
for (v0, v1, v2) in mesh.tris:
file.write(struct.pack('<3I', v0.index + mesh.firstvert, v1.index + mesh.firstvert, v2.index + mesh.firstvert))
for (n0, n1, n2) in self.neighbors:
if n0 < 0: n0 = 0xFFFFFFFF
if n1 < 0: n1 = 0xFFFFFFFF
if n2 < 0: n2 = 0xFFFFFFFF
file.write(struct.pack('<3I', n0, n1, n2))
def export(self, file, usebbox = True):
self.filesize = IQM_HEADER.size
if self.textdata:
while len(self.textdata) % 4:
self.textdata += b'\x00'
ofs_text = self.filesize
self.filesize += len(self.textdata)
else:
ofs_text = 0
if self.meshdata:
ofs_meshes = self.filesize
self.filesize += len(self.meshdata) * IQM_MESH.size
else:
ofs_meshes = 0
if self.numverts > 0:
ofs_vertexarrays = self.filesize
num_vertexarrays = 4
if self.joints:
num_vertexarrays += 2
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in self.meshes)
if hascolors:
num_vertexarrays += 1
self.filesize += num_vertexarrays * IQM_VERTEXARRAY.size
ofs_vdata = self.filesize
self.filesize += self.numverts * struct.calcsize('<3f2f3f4f')
if self.joints:
self.filesize += self.numverts * struct.calcsize('<4B4B')
if hascolors:
self.filesize += self.numverts * struct.calcsize('<4B')
else:
ofs_vertexarrays = 0
num_vertexarrays = 0
ofs_vdata = 0
if self.numtris > 0:
ofs_triangles = self.filesize
self.filesize += self.numtris * IQM_TRIANGLE.size
ofs_neighbors = self.filesize
self.filesize += self.numtris * IQM_TRIANGLE.size
else:
ofs_triangles = 0
ofs_neighbors = 0
if self.jointdata:
ofs_joints = self.filesize
self.filesize += len(self.jointdata) * IQM_JOINT.size
else:
ofs_joints = 0
if self.posedata:
ofs_poses = self.filesize
self.filesize += len(self.posedata) * IQM_POSE.size
else:
ofs_poses = 0
if self.animdata:
ofs_anims = self.filesize
self.filesize += len(self.animdata) * IQM_ANIMATION.size
else:
ofs_anims = 0
falign = 0
if self.framesize * self.numframes > 0:
ofs_frames = self.filesize
self.filesize += self.framesize * self.numframes * struct.calcsize('<H')
falign = (4 - (self.filesize % 4)) % 4
self.filesize += falign
else:
ofs_frames = 0
if usebbox and self.numverts > 0 and self.numframes > 0:
ofs_bounds = self.filesize
self.filesize += self.numframes * IQM_BOUNDS.size
else:
ofs_bounds = 0
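        # IQM v2 header: magic and version, total file size and flags, followed by
        # count/offset fields for the text, mesh, vertex-array, triangle/adjacency, joint,
        # pose, animation, frame and bounds sections (comment/extension fields stay zero)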
file.write(IQM_HEADER.pack('INTERQUAKEMODEL'.encode('ascii'), 2, self.filesize, 0, len(self.textdata), ofs_text, len(self.meshdata), ofs_meshes, num_vertexarrays, self.numverts, ofs_vertexarrays, self.numtris, ofs_triangles, ofs_neighbors, len(self.jointdata), ofs_joints, len(self.posedata), ofs_poses, len(self.animdata), ofs_anims, self.numframes, self.framesize, ofs_frames, ofs_bounds, 0, 0, 0, 0))
file.write(self.textdata)
for mesh in self.meshdata:
file.write(IQM_MESH.pack(*mesh))
self.writeVerts(file, ofs_vdata)
self.writeTris(file)
for joint in self.jointdata:
file.write(IQM_JOINT.pack(*joint))
for pose in self.posedata:
file.write(IQM_POSE.pack(*pose))
for anim in self.animdata:
file.write(IQM_ANIMATION.pack(*anim))
for anim in self.anims:
file.write(anim.frameData(self.joints))
file.write(b'\x00' * falign)
if usebbox and self.numverts > 0 and self.numframes > 0:
for anim in self.anims:
file.write(anim.boundsData(self.joints, self.meshes))
def findArmature(context):
armature = None
for obj in context.selected_objects:
if obj.type == 'ARMATURE':
armature = obj
break
if not armature:
for obj in context.selected_objects:
if obj.type == 'MESH':
armature = obj.find_armature()
if armature:
break
return armature
def poseArmature(context, armature, pose):
if armature:
armature.data.pose_position = pose
armature.data.update_tag()
context.scene.frame_set(context.scene.frame_current)
def derigifyBones(context, armature, scale):
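    # rigify rigs contain ORG- (original) and DEF- (deformation) bone chains; this maps
    # each DEF- bone back onto the ORG- hierarchy so only deformation bones are exported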
data = armature.data
defnames = []
orgbones = {}
defbones = {}
org2defs = {}
def2org = {}
defparent = {}
defchildren = {}
for bone in data.bones.values():
if bone.name.startswith('ORG-'):
orgbones[bone.name[4:]] = bone
org2defs[bone.name[4:]] = []
elif bone.name.startswith('DEF-'):
defnames.append(bone.name[4:])
defbones[bone.name[4:]] = bone
defchildren[bone.name[4:]] = []
for name, bone in defbones.items():
orgname = name
orgbone = orgbones.get(orgname)
splitname = -1
if not orgbone:
splitname = name.rfind('.')
suffix = ''
if splitname >= 0 and name[splitname+1:] in [ 'l', 'r', 'L', 'R' ]:
suffix = name[splitname:]
splitname = name.rfind('.', 0, splitname)
if splitname >= 0 and name[splitname+1:splitname+2].isdigit():
orgname = name[:splitname] + suffix
orgbone = orgbones.get(orgname)
org2defs[orgname].append(name)
def2org[name] = orgname
for defs in org2defs.values():
defs.sort()
for name in defnames:
bone = defbones[name]
orgname = def2org[name]
orgbone = orgbones.get(orgname)
defs = org2defs[orgname]
if orgbone:
i = defs.index(name)
if i == 0:
orgparent = orgbone.parent
if orgparent and orgparent.name.startswith('ORG-'):
orgpname = orgparent.name[4:]
defparent[name] = org2defs[orgpname][-1]
else:
defparent[name] = defs[i-1]
if name in defparent:
defchildren[defparent[name]].append(name)
bones = {}
worldmatrix = armature.matrix_world
worklist = [ bone for bone in defnames if bone not in defparent ]
for index, bname in enumerate(worklist):
bone = defbones[bname]
bonematrix = worldmatrix @ bone.matrix_local
if scale != 1.0:
bonematrix.translation *= scale
bones[bone.name] = Bone(bname, bone.name, index, bname in defparent and bones.get(defbones[defparent[bname]].name), bonematrix)
worklist.extend(defchildren[bname])
print('De-rigified %d bones' % len(worklist))
return bones
def collectBones(context, armature, scale):
data = armature.data
bones = {}
worldmatrix = armature.matrix_world
worklist = [ bone for bone in data.bones.values() if not bone.parent ]
for index, bone in enumerate(worklist):
bonematrix = worldmatrix @ bone.matrix_local
if scale != 1.0:
bonematrix.translation *= scale
bones[bone.name] = Bone(bone.name, bone.name, index, bone.parent and bones.get(bone.parent.name), bonematrix)
for child in bone.children:
if child not in worklist:
worklist.append(child)
print('Collected %d bones' % len(worklist))
return bones
def collectAnim(context, armature, scale, bones, action, startframe = None, endframe = None):
if not startframe or not endframe:
startframe, endframe = action.frame_range
startframe = int(startframe)
endframe = int(endframe)
print('Exporting action "%s" frames %d-%d' % (action.name, startframe, endframe))
scene = context.scene
worldmatrix = armature.matrix_world
armature.animation_data.action = action
outdata = []
for time in range(startframe, endframe+1):
scene.frame_set(time)
pose = armature.pose
outframe = []
for bone in bones:
posematrix = pose.bones[bone.origname].matrix
if bone.parent:
posematrix = pose.bones[bone.parent.origname].matrix.inverted() @ posematrix
else:
posematrix = worldmatrix @ posematrix
if scale != 1.0:
posematrix.translation *= scale
loc = posematrix.to_translation()
quat = posematrix.to_3x3().inverted().transposed().to_quaternion()
quat.normalize()
if quat.w > 0:
quat.negate()
pscale = posematrix.to_scale()
pscale.x = round(pscale.x*0x10000)/0x10000
pscale.y = round(pscale.y*0x10000)/0x10000
pscale.z = round(pscale.z*0x10000)/0x10000
outframe.append((loc, quat, pscale, posematrix))
outdata.append(outframe)
return outdata
def collectAnims(context, armature, scale, bones, animspecs):
if not armature.animation_data:
print('Armature has no animation data')
return []
actions = bpy.data.actions
animspecs = [ spec.strip() for spec in animspecs.split(',') ]
anims = []
scene = context.scene
oldaction = armature.animation_data.action
oldframe = scene.frame_current
for animspec in animspecs:
animspec = [ arg.strip() for arg in animspec.split(':') ]
animname = animspec[0]
if animname not in actions:
print('Action "%s" not found in current armature' % animname)
continue
try:
startframe = int(animspec[1])
except:
startframe = None
try:
endframe = int(animspec[2])
except:
endframe = None
try:
fps = float(animspec[3])
except:
fps = float(scene.render.fps)
try:
flags = int(animspec[4])
except:
flags = 0
framedata = collectAnim(context, armature, scale, bones, actions[animname], startframe, endframe)
anims.append(Animation(animname, framedata, fps, flags))
armature.animation_data.action = oldaction
scene.frame_set(oldframe)
return anims
def collectMeshes(context, bones, scale, matfun, useskel = True, usecol = False, usemods = False, filetype = 'IQM'):
vertwarn = []
objs = context.selected_objects #context.scene.objects
meshes = []
for obj in objs:
if obj.type == 'MESH':
dg = context.evaluated_depsgraph_get()
data = obj.evaluated_get(dg).to_mesh(preserve_all_data_layers=True, depsgraph=dg) if usemods else obj.original.to_mesh(preserve_all_data_layers=True, depsgraph=dg)
if not data.polygons:
continue
data.calc_normals_split()
coordmatrix = obj.matrix_world
normalmatrix = coordmatrix.inverted().transposed()
if scale != 1.0:
coordmatrix = mathutils.Matrix.Scale(scale, 4) @ coordmatrix
materials = {}
matnames = {}
groups = obj.vertex_groups
uvlayer = data.uv_layers.active and data.uv_layers.active.data
colors = None
alpha = None
if usecol:
if data.vertex_colors.active:
if data.vertex_colors.active.name.startswith('alpha'):
alpha = data.vertex_colors.active.data
else:
colors = data.vertex_colors.active.data
for layer in data.vertex_colors:
if layer.name.startswith('alpha'):
if not alpha:
alpha = layer.data
elif not colors:
colors = layer.data
if data.materials:
for idx, mat in enumerate(data.materials):
matprefix = mat.name or ''
matimage = ''
if mat.node_tree:
for n in mat.node_tree.nodes:
if n.type == 'TEX_IMAGE' and n.image:
matimage = os.path.basename(n.image.filepath)
break
matnames[idx] = matfun(matprefix, matimage)
for face in data.polygons:
if len(face.vertices) < 3:
continue
if all([ data.vertices[i].co == data.vertices[face.vertices[0]].co for i in face.vertices[1:] ]):
continue
matindex = face.material_index
try:
mesh = materials[obj.name, matindex]
except:
matname = matnames.get(matindex, '')
mesh = Mesh(obj.name, matname, data.vertices)
meshes.append(mesh)
materials[obj.name, matindex] = mesh
verts = mesh.verts
vertmap = mesh.vertmap
faceverts = []
for loopidx in face.loop_indices:
loop = data.loops[loopidx]
v = data.vertices[loop.vertex_index]
vertco = coordmatrix @ v.co
if not face.use_smooth:
vertno = mathutils.Vector(face.normal)
else:
vertno = mathutils.Vector(loop.normal)
vertno = normalmatrix @ vertno
vertno.normalize()
# flip V axis of texture space
if uvlayer:
uv = uvlayer[loopidx].uv
vertuv = mathutils.Vector((uv[0], 1.0 - uv[1]))
else:
vertuv = mathutils.Vector((0.0, 0.0))
if colors:
vertcol = colors[loopidx].color
vertcol = (int(round(vertcol[0] * 255.0)), int(round(vertcol[1] * 255.0)), int(round(vertcol[2] * 255.0)), 255)
else:
vertcol = None
if alpha:
vertalpha = alpha[loopidx].color
if vertcol:
vertcol = (vertcol[0], vertcol[1], vertcol[2], int(round(vertalpha[0] * 255.0)))
else:
vertcol = (255, 255, 255, int(round(vertalpha[0] * 255.0)))
vertweights = []
if useskel:
for g in v.groups:
try:
vertweights.append((g.weight, bones[groups[g.group].name].index))
except:
if (groups[g.group].name, mesh.name) not in vertwarn:
vertwarn.append((groups[g.group].name, mesh.name))
print('Vertex depends on non-existent bone: %s in mesh: %s' % (groups[g.group].name, mesh.name))
if not face.use_smooth:
vertindex = len(verts)
vertkey = Vertex(vertindex, vertco, vertno, vertuv, vertweights, vertcol)
if filetype == 'IQM':
vertkey.normalizeWeights()
mesh.verts.append(vertkey)
faceverts.append(vertkey)
continue
vertkey = Vertex(v.index, vertco, vertno, vertuv, vertweights, vertcol)
if filetype == 'IQM':
vertkey.normalizeWeights()
if not verts[v.index]:
verts[v.index] = vertkey
faceverts.append(vertkey)
elif verts[v.index] == vertkey:
faceverts.append(verts[v.index])
else:
try:
vertindex = vertmap[vertkey]
faceverts.append(verts[vertindex])
except:
vertindex = len(verts)
vertmap[vertkey] = vertindex
verts.append(vertkey)
faceverts.append(vertkey)
# Quake winding is reversed
for i in range(2, len(faceverts)):
mesh.tris.append((faceverts[0], faceverts[i], faceverts[i-1]))
for mesh in meshes:
mesh.optimize()
if filetype == 'IQM':
mesh.calcTangents()
print('%s %s: generated %d triangles' % (mesh.name, mesh.material, len(mesh.tris)))
return meshes
def exportIQE(file, meshes, bones, anims):
file.write('# Inter-Quake Export\n\n')
for bone in bones:
if bone.parent:
parent = bone.parent.index
else:
parent = -1
file.write('joint "%s" %d\n' % (bone.name, parent))
if meshes:
pos = bone.localmatrix.to_translation()
orient = bone.localmatrix.to_quaternion()
orient.normalize()
if orient.w > 0:
orient.negate()
scale = bone.localmatrix.to_scale()
scale.x = round(scale.x*0x10000)/0x10000
scale.y = round(scale.y*0x10000)/0x10000
scale.z = round(scale.z*0x10000)/0x10000
if scale.x == 1.0 and scale.y == 1.0 and scale.z == 1.0:
file.write('\tpq %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w))
else:
file.write('\tpq %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z))
hascolors = any(mesh.verts and mesh.verts[0].color for mesh in meshes)
for mesh in meshes:
file.write('\nmesh "%s"\n\tmaterial "%s"\n\n' % (mesh.name, mesh.material))
for v in mesh.verts:
file.write('vp %.8f %.8f %.8f\n\tvt %.8f %.8f\n\tvn %.8f %.8f %.8f\n' % (v.coord.x, v.coord.y, v.coord.z, v.uv.x, v.uv.y, v.normal.x, v.normal.y, v.normal.z))
if bones:
weights = '\tvb'
for weight in v.weights:
weights += ' %d %.8f' % (weight[1], weight[0])
file.write(weights + '\n')
if hascolors:
if v.color:
file.write('\tvc %.8f %.8f %.8f %.8f\n' % (v.color[0] / 255.0, v.color[1] / 255.0, v.color[2] / 255.0, v.color[3] / 255.0))
else:
file.write('\tvc 0 0 0 1\n')
file.write('\n')
for (v0, v1, v2) in mesh.tris:
file.write('fm %d %d %d\n' % (v0.index, v1.index, v2.index))
for anim in anims:
file.write('\nanimation "%s"\n\tframerate %.8f\n' % (anim.name, anim.fps))
if anim.flags&IQM_LOOP:
file.write('\tloop\n')
for frame in anim.frames:
file.write('\nframe\n')
for (pos, orient, scale, mat) in frame:
if scale.x == 1.0 and scale.y == 1.0 and scale.z == 1.0:
file.write('pq %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w))
else:
file.write('pq %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f %.8f\n' % (pos.x, pos.y, pos.z, orient.x, orient.y, orient.z, orient.w, scale.x, scale.y, scale.z))
file.write('\n')
def exportIQM(context, filename, usemesh = True, usemods = False, useskel = True, usebbox = True, usecol = False, scale = 1.0, animspecs = None, matfun = (lambda prefix, image: image), derigify = False, boneorder = None):
armature = findArmature(context)
if useskel and not armature:
print('No armature selected')
return
if filename.lower().endswith('.iqm'):
filetype = 'IQM'
elif filename.lower().endswith('.iqe'):
filetype = 'IQE'
else:
print('Unknown file type: %s' % filename)
return
if useskel:
if derigify:
bones = derigifyBones(context, armature, scale)
else:
bones = collectBones(context, armature, scale)
else:
bones = {}
if boneorder:
try:
f = open(bpy_extras.io_utils.path_reference(boneorder, os.path.dirname(bpy.data.filepath), os.path.dirname(filename)), "r", encoding = "utf-8")
names = [line.strip() for line in f.readlines()]
f.close()
names = [name for name in names if name in [bone.name for bone in bones.values()]]
if len(names) != len(bones):
print('Bone order (%d) does not match skeleton (%d)' % (len(names), len(bones)))
return
print('Reordering bones')
for bone in bones.values():
bone.index = names.index(bone.name)
except:
print('Failed opening bone order: %s' % boneorder)
return
if armature:
oldpose = armature.data.pose_position
poseArmature(context, armature, 'REST')
bonelist = sorted(bones.values(), key = lambda bone: bone.index)
if usemesh:
meshes = collectMeshes(context, bones, scale, matfun, useskel, usecol, usemods, filetype)
else:
meshes = []
if armature:
poseArmature(context, armature, oldpose)
if useskel and animspecs:
anims = collectAnims(context, armature, scale, bonelist, animspecs)
else:
anims = []
if filetype == 'IQM':
iqm = IQMFile()
iqm.addMeshes(meshes)
iqm.addJoints(bonelist)
iqm.addAnims(anims)
iqm.calcFrameSize()
iqm.calcNeighbors()
if filename:
try:
if filetype == 'IQM':
file = open(filename, 'wb')
else:
file = open(filename, 'w')
except:
print ('Failed writing to %s' % (filename))
return
if filetype == 'IQM':
iqm.export(file, usebbox)
elif filetype == 'IQE':
exportIQE(file, meshes, bonelist, anims)
file.close()
print('Saved %s file to %s' % (filetype, filename))
else:
print('No %s file was generated' % (filetype))
class ExportIQM(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
'''Export an Inter-Quake Model IQM or IQE file'''
bl_idname = "export.iqm"
bl_label = 'Export IQM'
filename_ext = ".iqm"
animspec = bpy.props.StringProperty(name="Animations", description="Animations to export", maxlen=1024, default="")
usemesh = bpy.props.BoolProperty(name="Meshes", description="Generate meshes", default=True)
usemods = bpy.props.BoolProperty(name="Modifiers", description="Apply modifiers", default=True)
useskel = bpy.props.BoolProperty(name="Skeleton", description="Generate skeleton", default=True)
usebbox = bpy.props.BoolProperty(name="Bounding boxes", description="Generate bounding boxes", default=True)
usecol = bpy.props.BoolProperty(name="Vertex colors", description="Export vertex colors", default=False)
usescale = bpy.props.FloatProperty(name="Scale", description="Scale of exported model", default=1.0, min=0.0, step=50, precision=2)
#usetrans = bpy.props.FloatVectorProperty(name="Translate", description="Translate position of exported model", step=50, precision=2, size=3)
matfmt = bpy.props.EnumProperty(name="Materials", description="Material name format", items=[("m+i-e", "material+image-ext", ""), ("m", "material", ""), ("i", "image", "")], default="m+i-e")
derigify = bpy.props.BoolProperty(name="De-rigify", description="Export only deformation bones from rigify", default=False)
boneorder = bpy.props.StringProperty(name="Bone order", description="Override ordering of bones", subtype="FILE_NAME", default="")
def execute(self, context):
if self.properties.matfmt == "m+i-e":
matfun = lambda prefix, image: prefix + os.path.splitext(image)[0]
elif self.properties.matfmt == "m":
matfun = lambda prefix, image: prefix
else:
matfun = lambda prefix, image: image
exportIQM(context, self.properties.filepath, self.properties.usemesh, self.properties.usemods, self.properties.useskel, self.properties.usebbox, self.properties.usecol, self.properties.usescale, self.properties.animspec, matfun, self.properties.derigify, self.properties.boneorder)
return {'FINISHED'}
def check(self, context):
filepath = bpy.path.ensure_ext(self.filepath, '.iqm')
filepathalt = bpy.path.ensure_ext(self.filepath, '.iqe')
if filepath != self.filepath and filepathalt != self.filepath:
self.filepath = filepath
return True
return False
def menu_func(self, context):
self.layout.operator(ExportIQM.bl_idname, text="Inter-Quake Model (.iqm, .iqe)")
def register():
bpy.utils.register_class(ExportIQM)
bpy.types.TOPBAR_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_class(ExportIQM)
bpy.types.TOPBAR_MT_file_export.remove(menu_func)
if __name__ == "__main__":
register()
|
the-stack_0_21678 | # -*- coding: utf-8 -*-
"""
Created on Wed May 22 14:19:34 2019
@author: sdran
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
# from matplotlib import rc
# rc('text', usetex=True)
plt.rcParams.update({'font.size': 16})
snr_plot = [10,15]
nplot = len(snr_plot)
plt.figure(figsize=(10,5))
plt.rcParams.update({'font.size': 12})
for iplot, snr in enumerate(snr_plot):
keras_fn = ('saved_expts/adam_snr%d.pkl' % snr)
with open(keras_fn,'rb') as fp:
mse_ts_keras,mse_tr,ntr_keras,snr_keras = pickle.load(fp)
vamp_fn = ('saved_expts/ml_mat_vamp_snr%d.pkl'% snr)
with open(vamp_fn,'rb') as fp:
mse_ts_vamp,ntr_vamp,nin,snr_vamp,se_test = pickle.load(fp)
vamp_se_fn = ('saved_expts/ml_mat_vamp_se_snr%d.pkl' % snr)
with open(vamp_se_fn,'rb') as fp:
mse_ts_se,ntr_se,nin,snr_vamp,se_test = pickle.load(fp)
mse_avg_se = np.median(mse_ts_se[-1,:,:],axis=0)
mse_avg_vamp = np.median(mse_ts_vamp[-1,:,:],axis=0)
mse_avg_keras = np.median(mse_ts_keras[-1,:,:],axis=0)
plt.subplot(1,nplot,iplot+1)
plt.plot(ntr_keras, mse_avg_keras, 'o-', fillstyle='none', lw=2, ms=10)
plt.plot(ntr_vamp, mse_avg_vamp, 's-', fillstyle='none', lw=2, ms=10)
plt.plot(ntr_se, mse_avg_se, '-', lw=2)
plt.grid()
plt.title('SNR=%d dB' % int(snr))
plt.ylim((1,2.5))
plt.xlabel('Num training samples')
plt.ylabel('Normalized test MSE')
plt.legend(['ADAM-MAP', 'ML-Mat-VAMP', 'ML-Mat-VAMP (SE)'])
plt.tight_layout()
fig_name = 'mse_vs_ntr.png'
plt.savefig(fig_name) |
the-stack_0_21679 | """
Copyright 2020 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import setuptools
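# read_metadata parses simple dunder assignments (e.g. __title__ = 'name') from a
# package __init__.py into a dict keyed by the dunder names used in setup() below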
def read_metadata(pkg_file):
metadata = dict()
with open(pkg_file, 'r') as init_file:
for line in init_file.readlines():
if line.startswith('__'):
line = line.replace("'", '')
line = line.replace('\n', '')
key, value = line.split(' = ')
metadata[key] = value
return metadata
metadata = read_metadata('mgw_dc/__init__.py')
setuptools.setup(
name=metadata.get('__title__'),
version=metadata.get('__version__'),
author=metadata.get('__author__'),
description=metadata.get('__description__'),
license=metadata.get('__license__'),
url=metadata.get('__url__'),
copyright=metadata.get('__copyright__'),
packages=setuptools.find_packages(),
python_requires='~=3.0',
classifiers=(
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'Operating System :: Unix',
'Natural Language :: English',
),
)
|
the-stack_0_21680 | """Functions for downloading CAN data."""
import pandas as pd
RENAME_COLS = {
"dt": "timestamp",
"location": "fips",
}
def load_data(path: str) -> pd.DataFrame:
"""
Load CAN's data from a local or online parquet file.
Some important columns are:
- provider: Source of the data
- location_type: State or county level data
- variable_name: Name of available metrics, like pcr_tests_*
This function also formats and renames the geo and time columns to follow our conventions.
Parameters
----------
path: str
A local path or URL to CAN's parquet file to load from
Returns
-------
pd.DataFrame
CAN's data in long format
"""
df_pq = (pd
.read_parquet(path)
.rename(columns=RENAME_COLS)
)
# Format fips
df_pq["fips"] = df_pq["fips"].astype(str).str.zfill(5)
return df_pq
def extract_testing_metrics(df: pd.DataFrame) -> pd.DataFrame:
"""
Extract just the county-level testing metrics from CAN's data.
Specifically picks the CDC-sourced metrics only as they are confirmed to be PCR-specimen-based.
Also converts from long to wide format for easier aggregations later on.
Note that the CDC's metrics are already smoothed (7-day rolling averaged).
Parameters
----------
df: pd.DataFrame
CAN's data in long format
Returns
-------
pd.DataFrame
CAN's / CDC's testing data in wide format
Columns: fips, timestamp, pcr_positivity_rate, pcr_tests_positive, pcr_tests_total
"""
# Filter to PCR-specimen rows from CDC and convert from long to wide format
df_tests = (
df
.query(
"""
age == 'all' and ethnicity == 'all' and sex == 'all' and \
location_type == 'county' and provider == 'cdc' and \
variable_name.str.startswith('pcr_tests_')
""")
.pivot(index=["fips", "timestamp"], columns="variable_name", values="value")
.reset_index()
# Filter off rows with 0 sample_size
.query("pcr_tests_total > 0")
# pcr_tests_positive from the CDC is actually positivity rate (percentage)
.rename(columns={"pcr_tests_positive": "pcr_positivity_rate"})
)
df_tests["pcr_positivity_rate"] /= 100
df_tests["pcr_tests_positive"] = df_tests.pcr_positivity_rate * df_tests.pcr_tests_total
return df_tests
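# Example usage (sketch; the parquet path here is hypothetical):
#   df = load_data("can_scraped_data.parquet")
#   tests = extract_testing_metrics(df)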
|
the-stack_0_21683 | """"Vendoring script, python 3.5 needed"""
from pathlib import Path
import os
import re
import shutil
import invoke
TASK_NAME = 'update'
FILE_WHITE_LIST = (
'Makefile',
'vendor.txt',
'__init__.py',
'README.rst',
)
def drop_dir(path, **kwargs):
shutil.rmtree(str(path), **kwargs)
def remove_all(paths):
for path in paths:
if path.is_dir():
drop_dir(path)
else:
path.unlink()
def log(msg):
print('[vendoring.%s] %s' % (TASK_NAME, msg))
def _get_vendor_dir(ctx):
git_root = ctx.run('git rev-parse --show-toplevel', hide=True).stdout
return Path(git_root.strip()) / 'src' / 'pip' / '_vendor'
def clean_vendor(ctx, vendor_dir):
# Old _vendor cleanup
remove_all(vendor_dir.glob('*.pyc'))
log('Cleaning %s' % vendor_dir)
for item in vendor_dir.iterdir():
if item.is_dir():
shutil.rmtree(str(item))
elif item.name not in FILE_WHITE_LIST:
item.unlink()
else:
log('Skipping %s' % item)
def detect_vendored_libs(vendor_dir):
retval = []
for item in vendor_dir.iterdir():
if item.is_dir():
retval.append(item.name)
elif item.name.endswith(".pyi"):
continue
elif item.name not in FILE_WHITE_LIST:
retval.append(item.name[:-3])
return retval
def rewrite_imports(package_dir, vendored_libs):
for item in package_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name.endswith('.py'):
rewrite_file_imports(item, vendored_libs)
def rewrite_file_imports(item, vendored_libs):
"""Rewrite 'import xxx' and 'from xxx import' for vendored_libs"""
text = item.read_text(encoding='utf-8')
# Revendor pkg_resources.extern first
text = re.sub(r'pkg_resources.extern', r'pip._vendor', text)
for lib in vendored_libs:
text = re.sub(
r'(\n\s*)import %s(\n\s*)' % lib,
r'\1from pip._vendor import %s\2' % lib,
text,
)
text = re.sub(
r'(\n\s*)from %s(\.|\s+)' % lib,
r'\1from pip._vendor.%s\2' % lib,
text,
)
item.write_text(text, encoding='utf-8')
def apply_patch(ctx, patch_file_path):
log('Applying patch %s' % patch_file_path.name)
ctx.run('git apply --verbose %s' % patch_file_path)
def vendor(ctx, vendor_dir):
log('Reinstalling vendored libraries')
    # We use --no-deps because we want to ensure that all of our dependencies
    # are added to vendor.txt; this includes all dependencies recursively up
    # the chain.
ctx.run(
'pip install -t {0} -r {0}/vendor.txt --no-compile --no-deps'.format(
str(vendor_dir),
)
)
remove_all(vendor_dir.glob('*.dist-info'))
remove_all(vendor_dir.glob('*.egg-info'))
# Cleanup setuptools unneeded parts
(vendor_dir / 'easy_install.py').unlink()
drop_dir(vendor_dir / 'setuptools')
drop_dir(vendor_dir / 'pkg_resources' / '_vendor')
drop_dir(vendor_dir / 'pkg_resources' / 'extern')
# Drop the bin directory (contains easy_install, distro, chardetect etc.)
# Might not appear on all OSes, so ignoring errors
drop_dir(vendor_dir / 'bin', ignore_errors=True)
# Drop interpreter and OS specific msgpack libs.
# Pip will rely on the python-only fallback instead.
remove_all(vendor_dir.glob('msgpack/*.so'))
# Detect the vendored packages/modules
vendored_libs = detect_vendored_libs(vendor_dir)
log("Detected vendored libraries: %s" % ", ".join(vendored_libs))
# Global import rewrites
log("Rewriting all imports related to vendored libs")
for item in vendor_dir.iterdir():
if item.is_dir():
rewrite_imports(item, vendored_libs)
elif item.name not in FILE_WHITE_LIST:
rewrite_file_imports(item, vendored_libs)
# Special cases: apply stored patches
log("Apply patches")
patch_dir = Path(__file__).parent / 'patches'
for patch in patch_dir.glob('*.patch'):
apply_patch(ctx, patch)
@invoke.task
def update_stubs(ctx):
vendor_dir = _get_vendor_dir(ctx)
vendored_libs = detect_vendored_libs(vendor_dir)
print("[vendoring.update_stubs] Add mypy stubs")
extra_stubs_needed = {
# Some projects need stubs other than a simple <name>.pyi
"six": [
"six.__init__",
"six.moves.__init__",
"six.moves.configparser",
],
        # Some projects should not have stubs because they're single-file modules
"appdirs": [],
}
for lib in vendored_libs:
if lib not in extra_stubs_needed:
(vendor_dir / (lib + ".pyi")).write_text("from %s import *" % lib)
continue
for selector in extra_stubs_needed[lib]:
fname = selector.replace(".", os.sep) + ".pyi"
if selector.endswith(".__init__"):
selector = selector[:-9]
f_path = vendor_dir / fname
if not f_path.parent.exists():
f_path.parent.mkdir()
f_path.write_text("from %s import *" % selector)
@invoke.task(name=TASK_NAME, post=[update_stubs])
def main(ctx):
vendor_dir = _get_vendor_dir(ctx)
log('Using vendor dir: %s' % vendor_dir)
clean_vendor(ctx, vendor_dir)
vendor(ctx, vendor_dir)
log('Revendoring complete')
|
the-stack_0_21685 | import pytest
import inspect
from CybORG import CybORG
from CybORG.Agents import B_lineAgent
from CybORG.Shared.Enums import TrinaryEnum
from CybORG.Shared.Actions import Monitor, DiscoverRemoteSystems
@pytest.fixture
def cyborg(agents = {'Red':B_lineAgent},seed = 1):
path = str(inspect.getfile(CybORG))
path = path[:-10] + '/Shared/Scenarios/Scenario1b.yaml'
cyborg = CybORG(path, 'sim', agents=agents)
cyborg.set_seed(seed)
return cyborg
def test_get_observation(cyborg):
for i in range(10):
results = cyborg.step(agent='Blue',action=Monitor(session=0,agent='Blue'))
step_obs = results.observation
blue_obs = cyborg.get_observation('Blue')
assert blue_obs == step_obs
red_obs = cyborg.get_observation('Red')
assert 'success' in red_obs
assert len(red_obs.keys()) > 1
def test_get_agent_state(cyborg):
cyborg.step(agent='Blue',action=Monitor(session=0,agent='Blue'))
for agent in ('Red','Blue','True'):
obs = cyborg.get_agent_state(agent)
assert type(obs) == dict
for hostid in obs:
if hostid == 'success':
continue
host = obs[hostid]
assert type(host) == dict
attributes = set(['Interface','Processes','Sessions','System info','User Info'])
attributes.remove('User Info') if agent == 'Red' else None
assert set(host.keys()) == attributes
def test_get_action_space(cyborg):
for agent in ('Red','Blue'):
action_space = cyborg.get_action_space(agent)
assert type(action_space) == dict
assert list(action_space.keys()) == ['action', 'subnet', 'ip_address', 'session', 'username',
'password', 'process', 'port', 'target_session', 'agent', 'hostname']
def test_get_last_action(cyborg):
cyborg.reset()
red_action = cyborg.get_last_action('Red')
blue_action = cyborg.get_last_action('Blue')
assert red_action == None
assert blue_action == None
cyborg.step(agent='Blue',action=Monitor(session=0,agent='Blue'))
red_action = cyborg.get_last_action('Red')
assert type(red_action) == DiscoverRemoteSystems
blue_action = cyborg.get_last_action('Blue')
assert type(blue_action) == Monitor
def test_get_ip_map(cyborg):
ip_map = cyborg.get_ip_map()
assert type(ip_map) == dict
assert list(ip_map.keys()) == ['Enterprise0', 'Enterprise1', 'Enterprise2', 'Defender', 'Op_Server0', 'Op_Host0', 'Op_Host1', 'Op_Host2', 'User0', 'User1', 'User2', 'User3', 'User4']
def test_get_rewards(cyborg):
cyborg.step(agent='Blue',action=Monitor(session=0,agent='Blue'))
rewards = cyborg.get_rewards()
assert type(rewards) == dict
assert set(rewards.keys()) == set(['Red','Blue','Green'])
def test_get_attr(cyborg):
for attribute in ['get_observation','get_action_space','get_last_action','get_ip_map',
'get_rewards', 'get_agent_state']:
method_output = cyborg.get_attr(attribute)
class_output = cyborg.__getattribute__(attribute)
assert method_output == class_output
|
the-stack_0_21686 | from functools import partial
from typing import Callable
import jax
import jax.numpy as jnp
import jax.random
from jax import ops
from jax.tree_util import tree_map
from numpyro import handlers
from numpyro.contrib.funsor import enum, config_enumerate
from numpyro.distributions import Distribution
from numpyro.distributions.transforms import IdentityTransform
from numpyro.infer import NUTS, MCMC, VI
from numpyro.infer.guide import ReinitGuide
from numpyro.infer.kernels import SteinKernel
from numpyro.infer.util import transform_fn, get_parameter_transform, _guess_max_plate_nesting
from numpyro.util import ravel_pytree
# TODO
# Fix MCMC updates to work reasonably with optimizer
# Much of this code is based on the SVI interface; the shared parts should be refactored
class Stein(VI):
def __init__(self, model, guide: ReinitGuide, optim, loss, kernel_fn: SteinKernel, num_particles: int = 10,
loss_temperature: float = 1.0, repulsion_temperature: float = 1.0,
classic_guide_params_fn: Callable[[str], bool] = lambda name: False,
enum=True, sp_mcmc_crit='infl',
sp_mode='local', num_mcmc_particles: int = 0, num_mcmc_warmup: int = 100, num_mcmc_updates: int = 10,
sampler_fn=NUTS, sampler_kwargs=None, mcmc_kwargs=None, **static_kwargs):
"""
Stein Variational Gradient Descent for Non-parametric Inference.
:param model: Python callable with Pyro primitives for the model.
:param guide: Python callable with Pyro primitives for the guide
(recognition network).
:param optim: an instance of :class:`~numpyro.optim._NumpyroOptim`.
:param loss: ELBO loss, i.e. negative Evidence Lower Bound, to minimize.
:param kernel_fn: Function that produces a logarithm of the statistical kernel to use with Stein inference
:param num_particles: number of particles for Stein inference.
(More particles capture more of the posterior distribution)
:param loss_temperature: scaling of loss factor
:param repulsion_temperature: scaling of repulsive forces (Non-linear Stein)
:param enum: whether to apply automatic marginalization of discrete variables
        :param classic_guide_params_fn: predicate on names of parameters in guide which should be optimized classically without Stein (e.g., parameters for large neural networks or other transformations)
:param sp_mcmc_crit: Stein Point MCMC update selection criterion, either 'infl' for most influential or 'rand' for random (EXPERIMENTAL)
:param sp_mode: Stein Point MCMC mode for calculating Kernelized Stein Discrepancy. Either 'local' for only the updated MCMC particles or 'global' for all particles. (EXPERIMENTAL)
:param num_mcmc_particles: Number of particles that should be updated with Stein Point MCMC (should be a subset of number of Stein particles) (EXPERIMENTAL)
:param num_mcmc_warmup: Number of warmup steps for the MCMC sampler (EXPERIMENTAL)
:param num_mcmc_updates: Number of MCMC update steps at each iteration (EXPERIMENTAL)
:param sampler_fn: The MCMC sampling kernel used for the Stein Point MCMC updates (EXPERIMENTAL)
:param sampler_kwargs: Keyword arguments provided to the MCMC sampling kernel (EXPERIMENTAL)
:param mcmc_kwargs: Keyword arguments provided to the MCMC interface (EXPERIMENTAL)
:param static_kwargs: Static keyword arguments for the model / guide, i.e. arguments
that remain constant during fitting.
"""
super().__init__(model, guide, optim, loss, name='Stein', **static_kwargs)
assert sp_mcmc_crit == 'infl' or sp_mcmc_crit == 'rand'
assert sp_mode == 'local' or sp_mode == 'global'
assert 0 <= num_mcmc_particles <= num_particles
self._inference_model = model
self.model = model
self.guide = guide
self.optim = optim
self.loss = loss
self.kernel_fn = kernel_fn
self.static_kwargs = static_kwargs
self.num_particles = num_particles
self.loss_temperature = loss_temperature
self.repulsion_temperature = repulsion_temperature
self.enum = enum
self.classic_guide_params_fn = classic_guide_params_fn
self.sp_mcmc_crit = sp_mcmc_crit
self.sp_mode = sp_mode
self.num_mcmc_particles = num_mcmc_particles
self.num_mcmc_warmup = num_mcmc_warmup
self.num_mcmc_updates = num_mcmc_updates
self.sampler_fn = sampler_fn
self.sampler_kwargs = sampler_kwargs or dict()
self.mcmc_kwargs = mcmc_kwargs or dict()
self.mcmc: MCMC = None
self.guide_param_names = None
self.constrain_fn = None
self.uconstrain_fn = None
self.particle_transform_fn = None
def _apply_kernel(self, kernel, x, y, v):
if self.kernel_fn.mode == 'norm' or self.kernel_fn.mode == 'vector':
return kernel(x, y) * v
else:
return kernel(x, y) @ v
def _kernel_grad(self, kernel, x, y):
if self.kernel_fn.mode == 'norm':
return jax.grad(lambda x: kernel(x, y))(x)
elif self.kernel_fn.mode == 'vector':
return jax.vmap(lambda i: jax.grad(lambda x: kernel(x, y)[i])(x)[i])(jnp.arange(x.shape[0]))
else:
return jax.vmap(lambda l: jnp.sum(jax.vmap(lambda m: jax.grad(lambda x: kernel(x, y)[l, m])(x)[m])
(jnp.arange(x.shape[0]))))(jnp.arange(x.shape[0]))
def _param_size(self, param):
if isinstance(param, tuple) or isinstance(param, list):
return sum(map(self._param_size, param))
return param.size
def _calc_particle_info(self, uparams, num_particles):
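        # map each (sorted) parameter name to the (start, end) slice it occupies
        # within a single flattened particle vector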
uparam_keys = list(uparams.keys())
uparam_keys.sort()
start_index = 0
res = {}
for k in uparam_keys:
end_index = start_index + self._param_size(uparams[k]) // num_particles
res[k] = (start_index, end_index)
start_index = end_index
return res
def _svgd_loss_and_grads(self, rng_key, unconstr_params, *args, **kwargs):
# 0. Separate model and guide parameters, since only guide parameters are updated using Stein
classic_uparams = {p: v for p, v in unconstr_params.items() if
p not in self.guide_param_names or self.classic_guide_params_fn(p)}
stein_uparams = {p: v for p, v in unconstr_params.items() if p not in classic_uparams}
# 1. Collect each guide parameter into monolithic particles that capture correlations
# between parameter values across each individual particle
stein_particles, unravel_pytree = ravel_pytree(stein_uparams, batch_dims=1)
unravel_pytree_batched = jax.vmap(unravel_pytree)
particle_info = self._calc_particle_info(stein_uparams, stein_particles.shape[0])
# 2. Calculate loss and gradients for each parameter
def scaled_loss(rng_key, classic_params, stein_params):
params = {**classic_params, **stein_params}
loss_val = self.loss.loss(rng_key, params, handlers.scale(self._inference_model, self.loss_temperature),
self.guide, *args, **kwargs)
return - loss_val
def kernel_particle_loss_fn(ps):
return scaled_loss(rng_key, self.constrain_fn(classic_uparams),
self.constrain_fn(unravel_pytree(ps)))
def particle_transform_fn(particle):
params = unravel_pytree(particle)
tparams = self.particle_transform_fn(params)
tparticle, _ = ravel_pytree(tparams)
return tparticle
tstein_particles = jax.vmap(particle_transform_fn)(stein_particles)
loss, particle_ljp_grads = jax.vmap(jax.value_and_grad(kernel_particle_loss_fn))(tstein_particles)
classic_param_grads = jax.vmap(lambda ps: jax.grad(lambda cps:
scaled_loss(rng_key, self.constrain_fn(cps),
self.constrain_fn(unravel_pytree(ps))))(
classic_uparams))(stein_particles)
classic_param_grads = tree_map(partial(jnp.mean, axis=0), classic_param_grads)
# 3. Calculate kernel on monolithic particle
kernel = self.kernel_fn.compute(stein_particles, particle_info, kernel_particle_loss_fn)
# 4. Calculate the attractive force and repulsive force on the monolithic particles
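        # SVGD direction for particle y: (1/n) * sum_x [ k(x, y) * grad_x log p(x) + grad_x k(x, y) ];
        # the second, repulsive term keeps the particles from collapsing onto a single mode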
attractive_force = jax.vmap(lambda y: jnp.sum(
jax.vmap(lambda x, x_ljp_grad: self._apply_kernel(kernel, x, y, x_ljp_grad))(tstein_particles,
particle_ljp_grads), axis=0))(
tstein_particles)
repulsive_force = jax.vmap(lambda y: jnp.sum(
jax.vmap(lambda x: self.repulsion_temperature * self._kernel_grad(kernel, x, y))(tstein_particles),
axis=0))(
tstein_particles)
def single_particle_grad(particle, att_force, rep_force):
reparam_jac = jax.jacfwd(particle_transform_fn)(particle)
return (att_force + rep_force) @ reparam_jac
particle_grads = jax.vmap(single_particle_grad)(stein_particles, attractive_force,
repulsive_force) / self.num_particles
# 5. Decompose the monolithic particle forces back to concrete parameter values
stein_param_grads = unravel_pytree_batched(particle_grads)
# 6. Return loss and gradients (based on parameter forces)
res_grads = tree_map(lambda x: -x, {**classic_param_grads, **stein_param_grads})
return -jnp.mean(loss), res_grads
def _score_sp_mcmc(self, rng_key, subset_idxs, stein_uparams, sp_mcmc_subset_uparams, classic_uparams,
*args, **kwargs):
if self.sp_mode == 'local':
_, ksd = self._svgd_loss_and_grads(rng_key, {**sp_mcmc_subset_uparams, **classic_uparams}, *args, **kwargs)
else:
stein_uparams = {p: ops.index_update(v, subset_idxs, sp_mcmc_subset_uparams[p]) for p, v in
stein_uparams.items()}
_, ksd = self._svgd_loss_and_grads(rng_key, {**stein_uparams, **classic_uparams}, *args, **kwargs)
ksd_res = jnp.sum(jnp.concatenate([jnp.ravel(v) for v in ksd.values()]))
return ksd_res
def _sp_mcmc(self, rng_key, unconstr_params, *args, **kwargs):
# 0. Separate classical and stein parameters
classic_uparams = {p: v for p, v in unconstr_params.items() if
p not in self.guide_param_names or self.classic_guide_params_fn(p)}
stein_uparams = {p: v for p, v in unconstr_params.items() if p not in classic_uparams}
# 1. Run warmup on a subset of particles to tune the MCMC state
warmup_key, mcmc_key = jax.random.split(rng_key)
sampler = self.sampler_fn(
potential_fn=lambda params: self.loss.loss(warmup_key, {**params, **self.constrain_fn(classic_uparams)},
self._inference_model, self.guide, *args, **kwargs))
mcmc = MCMC(sampler, self.num_mcmc_warmup, self.num_mcmc_updates, num_chains=self.num_mcmc_particles,
progress_bar=False, chain_method='vectorized',
**self.mcmc_kwargs)
stein_params = self.constrain_fn(stein_uparams)
stein_subset_params = {p: v[0:self.num_mcmc_particles] for p, v in stein_params.items()}
mcmc.warmup(warmup_key, *args, init_params=stein_subset_params, **kwargs)
# 2. Choose MCMC particles
mcmc_key, choice_key = jax.random.split(mcmc_key)
if self.num_mcmc_particles == self.num_particles:
idxs = jnp.arange(self.num_particles)
else:
if self.sp_mcmc_crit == 'rand':
idxs = jax.random.shuffle(choice_key, jnp.arange(self.num_particles))[:self.num_mcmc_particles]
elif self.sp_mcmc_crit == 'infl':
_, grads = self._svgd_loss_and_grads(choice_key, unconstr_params, *args, **kwargs)
ksd = jnp.linalg.norm(
jnp.concatenate([jnp.reshape(grads[p], (self.num_particles, -1)) for p in stein_uparams.keys()],
axis=-1),
ord=2, axis=-1)
idxs = jnp.argsort(ksd)[:self.num_mcmc_particles]
else:
assert False, "Unsupported SP MCMC criterion: {}".format(self.sp_mcmc_crit)
# 3. Run MCMC on chosen particles
stein_params = self.constrain_fn(stein_uparams)
stein_subset_params = {p: v[idxs] for p, v in stein_params.items()}
mcmc.run(mcmc_key, *args, init_params=stein_subset_params, **kwargs)
samples_subset_stein_params = mcmc.get_samples(group_by_chain=True)
sss_uparams = self.uconstrain_fn(samples_subset_stein_params)
# 4. Select best MCMC iteration to update particles
scores = jax.vmap(
lambda i: self._score_sp_mcmc(mcmc_key, idxs, stein_uparams, {p: v[:, i] for p, v in sss_uparams.items()},
classic_uparams, *args, **kwargs))(jnp.arange(self.num_mcmc_particles))
mcmc_idx = jnp.argmax(scores)
stein_uparams = {p: ops.index_update(v, idxs, sss_uparams[p][:, mcmc_idx]) for p, v in stein_uparams.items()}
return {**stein_uparams, **classic_uparams}
def init(self, rng_key, *args, **kwargs):
"""
:param jax.random.PRNGKey rng_key: random number generator seed.
:param args: arguments to the model / guide (these can possibly vary during
the course of fitting).
:param kwargs: keyword arguments to the model / guide (these can possibly vary
during the course of fitting).
:return: initial :data:`CurrentState`
"""
rng_key, model_seed, guide_seed = jax.random.split(rng_key, 3)
model_init = handlers.seed(self.model, model_seed)
guide_init = handlers.seed(self.guide, guide_seed)
guide_trace = handlers.trace(guide_init).get_trace(*args, **kwargs, **self.static_kwargs)
model_trace = handlers.trace(model_init).get_trace(*args, **kwargs, **self.static_kwargs)
rng_key, particle_seed = jax.random.split(rng_key)
particle_seeds = jax.random.split(particle_seed, num=self.num_particles)
self.guide.find_params(particle_seeds, *args, **kwargs,
**self.static_kwargs) # Get parameter values for each particle
guide_init_params = self.guide.init_params()
params = {}
transforms = {}
inv_transforms = {}
particle_transforms = {}
guide_param_names = set()
should_enum = False
for site in model_trace.values():
if isinstance(site['fn'], Distribution) and site['fn'].is_discrete:
if site['fn'].has_enumerate_support and self.enum:
should_enum = True
else:
raise Exception("Cannot enumerate model with discrete variables without enumerate support")
# NB: params in model_trace will be overwritten by params in guide_trace
for site in list(model_trace.values()) + list(guide_trace.values()):
if site['type'] == 'param':
transform = get_parameter_transform(site)
inv_transforms[site['name']] = transform
transforms[site['name']] = transform.inv
particle_transforms[site['name']] = site.get('particle_transform', IdentityTransform())
if site['name'] in guide_init_params:
pval, _ = guide_init_params[site['name']]
if self.classic_guide_params_fn(site['name']):
pval = tree_map(lambda x: x[0], pval)
else:
pval = site['value']
params[site['name']] = transform.inv(pval)
if site['name'] in guide_trace:
guide_param_names.add(site['name'])
if should_enum:
mpn = _guess_max_plate_nesting(model_trace)
self._inference_model = enum(config_enumerate(self.model), - mpn - 1)
self.guide_param_names = guide_param_names
self.constrain_fn = partial(transform_fn, inv_transforms)
self.uconstrain_fn = partial(transform_fn, transforms)
self.particle_transform_fn = partial(transform_fn, particle_transforms)
return VI.CurrentState(self.optim.init(params), rng_key)
def get_params(self, state):
"""
Gets values at `param` sites of the `model` and `guide`.
        :param state: current state of the optimizer.
"""
params = self.constrain_fn(self.optim.get_params(state.optim_state))
return params
def update(self, state, *args, **kwargs):
"""
Take a single step of Stein (possibly on a batch / minibatch of data),
using the optimizer.
:param state: current state of Stein.
:param args: arguments to the model / guide (these can possibly vary during
the course of fitting).
:param kwargs: keyword arguments to the model / guide (these can possibly vary
during the course of fitting).
:return: tuple of `(state, loss)`.
"""
rng_key, rng_key_mcmc, rng_key_step = jax.random.split(state.rng_key, num=3)
params = self.optim.get_params(state.optim_state)
# Run Stein Point MCMC
if self.num_mcmc_particles > 0:
new_params = self._sp_mcmc(rng_key_mcmc, params, *args, **kwargs, **self.static_kwargs)
grads = {p: new_params[p] - params[p] for p in params}
optim_state = self.optim.update(grads, state.optim_state)
            params = self.optim.get_params(optim_state)  # read params from the post-MCMC optimizer state
else:
optim_state = state.optim_state
loss_val, grads = self._svgd_loss_and_grads(rng_key_step, params,
*args, **kwargs, **self.static_kwargs)
optim_state = self.optim.update(grads, optim_state)
return VI.CurrentState(optim_state, rng_key), loss_val
def evaluate(self, state, *args, **kwargs):
"""
        Evaluate the loss (possibly on a batch / minibatch of data) without updating the parameters.
:param state: current state of Stein.
:param args: arguments to the model / guide (these can possibly vary during
the course of fitting).
:param kwargs: keyword arguments to the model / guide.
:return: evaluate loss given the current parameter values (held within `state.optim_state`).
"""
# we split to have the same seed as `update_fn` given a state
_, rng_key_eval = jax.random.split(state.rng_key)
params = self.optim.get_params(state.optim_state)
loss_val, _ = self._svgd_loss_and_grads(rng_key_eval, params,
*args, **kwargs, **self.static_kwargs)
return loss_val
def predict(self, state, *args, num_samples=1, **kwargs):
_, rng_key_predict = jax.random.split(state.rng_key)
params = self.get_params(state)
classic_params = {p: v for p, v in params.items() if
p not in self.guide_param_names or self.classic_guide_params_fn(p)}
stein_params = {p: v for p, v in params.items() if p not in classic_params}
if num_samples == 1:
return jax.vmap(lambda sp: self._predict_model(rng_key_predict, {**sp, **classic_params}, *args, **kwargs)
)(stein_params)
else:
return jax.vmap(lambda rk: jax.vmap(lambda sp: self._predict_model(rk, {**sp, **classic_params},
*args, **kwargs)
)(stein_params))(jax.random.split(rng_key_predict, num_samples))
|
the-stack_0_21687 | from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# ---------------- added code here -------------------------#
import os, sys
from dotenv import load_dotenv
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
load_dotenv(os.path.join(BASE_DIR, ".env"))
sys.path.append(BASE_DIR)
# ------------------------------------------------------------#
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# ---------------- added code here -------------------------#
# this will overwrite the ini-file sqlalchemy.url path
# with the path given in the config of the main code
config.set_main_option("sqlalchemy.url", os.environ["DATABASE_URL"])
# ------------------------------------------------------------#
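# For reference, a hypothetical .env entry that this expects (the exact URL depends
# on your database and driver):
# DATABASE_URL=postgresql://user:password@localhost:5432/mydb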
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
# target_metadata = None
# ---------------- added code here -------------------------#
from api import models
from api.db import metadata
# ------------------------------------------------------------#
# ---------------- changed code here -------------------------#
# here target_metadata was equal to None
target_metadata = metadata
# ------------------------------------------------------------#
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
the-stack_0_21688 | """
A doodle based on a question in Freenode #python on 12 Feb 2021 about
creating a data structure representing a grid of network-addressable
video display targets
"""
import attr
import csv
from dataclasses import dataclass
from ipaddress import IPv4Address
# first way, using attrs (see attrs.org)
@attr.s
class NetworkVideoTarget:
"""
An object representing a network destination for a video we want to play
"""
address = attr.ib(type=IPv4Address)
video = attr.ib(type=str, default="placeholder.mp4")
# second way using stdlib dataclass, which is a tiny subset of attrs
# note: attrs can do this kind of annotation-driven thing, too, just replace @attr.s
# with @attr.s(auto_attribs=True)
@dataclass
class BoringNetworkVideoTarget:
"""
*Also* an object representing a network destination for a video we want to play
"""
address: IPv4Address
video: str = "placeholder.mp4"
def make_grid(videos, cls=NetworkVideoTarget):
# note: it could be ragged, this might be wrong! but I'm being lazy
width = len(videos[0])
base_address = int(IPv4Address("192.168.0.0")) # arbitrary, we'll offset from here
grid = dict()
for nrow, row in enumerate(videos):
for ncol, vid in enumerate(row):
offset = width*nrow + ncol
addr = IPv4Address(base_address + offset) # note: built from an int!
grid[nrow, ncol] = cls(address=addr, video=vid)
return grid
# demo time!
from io import StringIO # a buffer like what an open() would give us
from pprint import pprint # pretty printer
import random
sample_csv = StringIO("""
doge.mp4, cade.webm
frogge.avi, capybara.oggv
""".strip())
if __name__ == "__main__":
# in the real world, you'd do `with open(some_file, "r") as f: videos = list(csv.reader(f))`
videos = [[fn.strip() for fn in row] for row in csv.reader(sample_csv)]
print("videos:")
pprint(videos)
print("---")
attrs_grid = make_grid(videos)
print("attrs grid:")
pprint(attrs_grid)
print("---")
x, y = (random.randint(0, 1) for _ in range(2))
print(f"Indexing the point {x,y}:")
instance = attrs_grid[x, y]
print(instance.address)
print(instance.video)
print("---")
dataclass_grid = make_grid(videos, cls=BoringNetworkVideoTarget)
print("dataclass grid:")
pprint(dataclass_grid)
print("---")
x, y = (random.randint(0, 1) for _ in range(2))
print(f"Indexing the point {x,y}:")
instance = dataclass_grid[x, y]
print(instance.address)
print(instance.video)
print("---")
for cls in (NetworkVideoTarget, BoringNetworkVideoTarget):
instance = cls(IPv4Address("127.0.0.1"))
print(f"{cls.__name__} created without specifying a video:\n\t{instance}")
|
the-stack_0_21689 | from collections import OrderedDict
import numpy as np
import experiment
class GenericSentenceClassificationData(experiment.Data):
def __init__(self, config, config_global, logger):
super(GenericSentenceClassificationData, self).__init__(config, config_global, logger)
self.classes = None
def setup(self):
# structure of train data: list[(embedding, label)]
self.train = self.load_split(self.config['train_embeddings'], self.config['train_labels'])
# np.random.shuffle(self.train)
assert 'valid_embeddings' not in self.config or 'valid_split' not in self.config
if 'valid_embeddings' in self.config:
self.valid = self.load_split(self.config['valid_embeddings'], self.config['valid_labels'])
else:
n_valid = int(len(self.train) * self.config['valid_split'])
self.valid = self.train[:n_valid]
self.train = self.train[n_valid:]
# structure of test data: dict[language, list[(embedding, label)]]
self.test = OrderedDict()
for embeddings_config in self.config['test_embeddings']:
embeddings_path = embeddings_config['embeddings']
language = embeddings_path.split('/')[-1].split('.')[-2]
self.test[language] = self.load_split(embeddings_path, embeddings_config['labels'])
self.logger.debug('Train examples: {}'.format(len(self.train)))
self.logger.debug('Valid examples: {}'.format(len(self.valid)))
self.logger.debug('Test examples: {}'.format(len(list(self.test.values())[0])))
self.embedding_size = len(self.train[0][0])
def load_split(self, embeddings_path, labels_path):
embeddings = []
labels = []
with open(embeddings_path, 'r') as f_embeddings, open(labels_path, 'r') as f_labels:
for line in f_embeddings:
embedding = [float(s) for s in line.strip().split(' ')]
embeddings.append(embedding)
label = next(f_labels).strip()
labels.append(label)
classes = sorted(list(set(labels)))
if self.classes is None:
self.classes = classes
assert classes == self.classes
self.logger.debug('Class distribution {}'.format(
[len([l for l in labels if l == c]) / float(len(labels)) for c in self.classes]))
labels = binarize(labels, self.classes)
return list(zip(embeddings, labels))
def binarize(labels, classes):
results = []
for label in labels:
val = np.zeros(len(classes), dtype=np.float32)
val[classes.index(label)] = 1.0
results.append(val)
return results
component = GenericSentenceClassificationData
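# A minimal sketch of what binarize produces (values are illustrative):
#   binarize(["pos", "neg", "pos"], classes=["neg", "pos"])
#   -> [array([0., 1.], dtype=float32), array([1., 0.], dtype=float32), array([0., 1.], dtype=float32)]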
|
the-stack_0_21690 | import nonebot
from nonebot.log import logger
from nonebot.plugin import export
from .browser import get_browser, shutdown_browser, get_new_page
from .date_source import (
text_to_pic,
md_to_pic,
template_to_html,
html_to_pic,
template_to_pic,
capture_element,
)
driver = nonebot.get_driver()
config = driver.config
export = export()
@driver.on_startup
async def init(**kwargs):
"""Start Browser
Returns:
Browser: Browser
"""
browser = await get_browser(**kwargs)
logger.info("Browser Started.")
return browser
@driver.on_shutdown
async def shutdown():
await shutdown_browser()
logger.info("Browser Stoped.")
browser_init = init
export.browser = init
export.text_to_pic = text_to_pic
export.get_new_page = get_new_page
export.md_to_pic = md_to_pic
export.template_to_html = template_to_html
export.template_to_pic = template_to_pic
export.html_to_pic = html_to_pic
export.capture_element = capture_element
all = [
"browser_init",
"text_to_pic",
"get_new_page",
"md_to_pic",
"template_to_html",
"template_to_pic",
"html_to_pic",
"capture_element",
]
|
the-stack_0_21691 | # Name: Nayalash Mohammad
# Date: January 20 2020
# File Name: scoreSaver.py
# Description: File containing all the methods to save the score in a txt file
# Module to Read File
import os
# Method to Check to Overwrite File
def isHighScore(score):
highscore = getScore()
if(score > highscore):
return True
return False
# Method To Read Score From .txt File
def getScore():
file = open("score.txt", "r")
if(os.path.getsize("score.txt") < 1):
return 0
highscore = int(file.read())
return highscore
# Method to Set High Score
def setHighScore(highscore):
file = open("score.txt","w")
file.write(str(highscore))
file.close()
|
the-stack_0_21692 | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkbranchpatching.py,v $
## Language: Python
## Date: $Date: 2006/07/07 10:46:17 $
## Version: $Revision: 1.9 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
from vmtk import vtkvmtk
import sys
from vmtk import pypes
class vmtkBranchPatching(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.PatchedData = None
self.CircularPatching = 1
self.UseConnectivity = 1
self.LongitudinalPatchSize = 1.0
self.CircularNumberOfPatches = 1
self.PatchSize = [0.0, 0.0]
self.GroupIdsArrayName = 'GroupIds'
self.LongitudinalMappingArrayName = 'AbscissaMetric'
self.CircularMappingArrayName = 'AngularMetric'
self.LongitudinalPatchNumberArrayName = 'Slab'
self.CircularPatchNumberArrayName = 'Sector'
self.PatchAreaArrayName = 'PatchArea'
self.SetScriptName('vmtkbranchpatching')
self.SetScriptDoc('cut a set of contiguous rectangular regions on a surface that follow iso-contours in the StretchedMapping and AngularMetric arrays')
self.SetInputMembers([
['Surface','i','vtkPolyData',1,'','','vmtksurfacereader'],
['PatchSize','patchsize','float',2,'(0.0,)'],
['LongitudinalPatchSize','longitudinalpatchsize','float',1,'(0.0,)'],
['CircularNumberOfPatches','circularpatches','int',1,'(0,)'],
['CircularPatching','circularpatching','bool',1],
['UseConnectivity','connectivity','bool',1],
['GroupIdsArrayName','groupidsarray','str',1],
['LongitudinalMappingArrayName','longitudinalmappingarray','str',1],
['CircularMappingArrayName','circularmappingarray','str',1],
['LongitudinalPatchNumberArrayName','longitudinalpatchnumberarray','str',1],
['CircularPatchNumberArrayName','circularpatchnumberarray','str',1],
['PatchAreaArrayName','patchareaarray','str',1]
])
self.SetOutputMembers([
['Surface','o','vtkPolyData',1,'','','vmtksurfacewriter'],
['PatchedData','patcheddata','vtkImageData',1,'','','vmtkimagewriter'],
['PatchSize','patchsize','float',2],
['LongitudinalPatchNumberArrayName','longitudinalpatchnumberarray','str',1],
['CircularPatchNumberArrayName','circularpatchnumberarray','str',1],
['PatchAreaArrayName','patchareaarray','str',1]
])
def Execute(self):
if self.Surface == None:
self.PrintError('Error: No input surface.')
self.PatchSize = [self.LongitudinalPatchSize, 1.0/float(self.CircularNumberOfPatches)]
patchingFilter = vtkvmtk.vtkvmtkPolyDataPatchingFilter()
patchingFilter.SetInputData(self.Surface)
patchingFilter.SetCircularPatching(self.CircularPatching)
patchingFilter.SetUseConnectivity(self.UseConnectivity)
patchingFilter.SetLongitudinalMappingArrayName(self.LongitudinalMappingArrayName)
patchingFilter.SetCircularMappingArrayName(self.CircularMappingArrayName)
patchingFilter.SetLongitudinalPatchNumberArrayName(self.LongitudinalPatchNumberArrayName)
patchingFilter.SetCircularPatchNumberArrayName(self.CircularPatchNumberArrayName)
patchingFilter.SetPatchAreaArrayName(self.PatchAreaArrayName)
patchingFilter.SetGroupIdsArrayName(self.GroupIdsArrayName)
patchingFilter.SetPatchSize(self.PatchSize)
patchingFilter.Update()
self.Surface = patchingFilter.GetOutput()
self.PatchedData = patchingFilter.GetPatchedData()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
|
the-stack_0_21693 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mongoengine as me
from st2common import log as logging
from st2common.models.db import stormbase
from st2common.fields import ComplexDateTimeField
from st2common.util import date as date_utils
from st2common.util.secrets import get_secret_parameters
from st2common.util.secrets import mask_secret_parameters
from st2common.constants.types import ResourceType
__all__ = [
'ActionExecutionDB'
]
LOG = logging.getLogger(__name__)
class ActionExecutionDB(stormbase.StormFoundationDB):
RESOURCE_TYPE = ResourceType.EXECUTION
UID_FIELDS = ['id']
trigger = stormbase.EscapedDictField()
trigger_type = stormbase.EscapedDictField()
trigger_instance = stormbase.EscapedDictField()
rule = stormbase.EscapedDictField()
action = stormbase.EscapedDictField(required=True)
runner = stormbase.EscapedDictField(required=True)
# Only the diff between the liveaction type and what is replicated
# in the ActionExecutionDB object.
liveaction = stormbase.EscapedDictField(required=True)
status = me.StringField(
required=True,
help_text='The current status of the liveaction.')
start_timestamp = ComplexDateTimeField(
default=date_utils.get_datetime_utc_now,
help_text='The timestamp when the liveaction was created.')
end_timestamp = ComplexDateTimeField(
help_text='The timestamp when the liveaction has finished.')
parameters = stormbase.EscapedDynamicField(
default={},
help_text='The key-value pairs passed as to the action runner & action.')
result = stormbase.EscapedDynamicField(
default={},
help_text='Action defined result.')
context = me.DictField(
default={},
help_text='Contextual information on the action execution.')
parent = me.StringField()
children = me.ListField(field=me.StringField())
meta = {
'indexes': [
{'fields': ['rule.ref']},
{'fields': ['action.ref']},
{'fields': ['liveaction.id']},
{'fields': ['start_timestamp']},
{'fields': ['end_timestamp']},
{'fields': ['status']},
{'fields': ['parent']},
{'fields': ['-start_timestamp', 'action.ref', 'status']}
]
}
def get_uid(self):
        # TODO: construct uid from a non-id field
uid = [self.RESOURCE_TYPE, str(self.id)]
return ':'.join(uid)
def mask_secrets(self, value):
result = copy.deepcopy(value)
execution_parameters = value['parameters']
parameters = {}
# pylint: disable=no-member
parameters.update(value.get('action', {}).get('parameters', {}))
parameters.update(value.get('runner', {}).get('runner_parameters', {}))
secret_parameters = get_secret_parameters(parameters=parameters)
result['parameters'] = mask_secret_parameters(parameters=execution_parameters,
secret_parameters=secret_parameters)
return result
def get_masked_parameters(self):
"""
Retrieve parameters with the secrets masked.
:rtype: ``dict``
"""
serializable_dict = self.to_serializable_dict(mask_secrets=True)
return serializable_dict['parameters']
MODELS = [ActionExecutionDB]
|
the-stack_0_21694 | import imageio
import numba as nb
import numpy as np
from numba import njit
from scipy.signal import convolve
from skimage.transform import rescale
def deconvlucy_iter(im, im_deconv, psf, psf_mirror):
relative_blur = im / convolve(im_deconv, psf, mode="same")
im_deconv *= convolve(relative_blur, psf_mirror, mode="same")
return im_deconv
def deconvlucy(image, psf, iterations, clip=False):
im_deconv = np.full(image.shape, 0.5)
    psf_mirror = psf[::-1, ::-1, ::-1]  # mirror the PSF along all three axes for the RL update
for i in range(iterations):
print(f"iter {i+1}")
im_deconv = deconvlucy_iter(image, im_deconv, psf, psf_mirror)
if clip:
im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1
return im_deconv.astype(np.float32)
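# A minimal usage sketch with synthetic arrays (shapes are illustrative; real data
# is loaded from disk in main() below):
#   blurred = np.random.rand(16, 64, 64).astype(np.float32)
#   psf = np.random.rand(5, 9, 9).astype(np.float32)
#   psf /= psf.sum()
#   restored = deconvlucy(blurred, psf, iterations=10)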
def main(image_path, psf_path):
image = imageio.volread(image_path) # z 0.6u
psf = imageio.volread(psf_path) # z 0.2u
image, psf = image.astype(np.float32), psf.astype(np.float32)
# crop image (too large)
image = image[:, 768 : 768 + 512, 768 : 768 + 512]
imageio.volwrite("input.tif", image)
raise RuntimeError("DEBUG")
# rescale psf
psf = rescale(psf, (1 / 3, 1, 1), anti_aliasing=False)
    # min-max normalize to [0, 1], then scale so the PSF sums to 1
    psf = (psf - psf.min()) / (psf.max() - psf.min())
    psf /= psf.sum()
    print(f"psf range [{psf.min()}, {psf.max()}]")
print(f"image shape {image.shape}, psf shape {psf.shape}")
try:
deconv = deconvlucy(image, psf, 10)
except Exception:
raise
else:
print("saving...")
imageio.volwrite("result.tif", deconv)
if __name__ == "__main__":
main(
"C:/Users/Andy/Desktop/background_removal/flybrain_Iter_ch0_stack0000_640nm_0000000msec_0015869850msecAbs.tif",
"C:/Users/Andy/Desktop/background_removal/psf/NA1p05_zp2um_cropped.tif",
)
|
the-stack_0_21696 | from __future__ import print_function
import json
import requests
def foo():
"""
A low level example of how JenkinsAPI runs a parameterized build
"""
toJson = {'parameter': [{'name': 'B', 'value': 'xyz'}]}
url = 'http://localhost:8080/job/ddd/build'
# url = 'http://localhost:8000'
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
form = {'json': json.dumps(toJson)}
response = requests.post(url, data=form, headers=headers)
print(response.text.encode('UTF-8'))
if __name__ == '__main__':
foo()
|
the-stack_0_21698 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
~~~~~
:copyright: (c) 2015 by Halfmoon Labs, Inc.
:copyright: (c) 2017 by Stanislav Pankratov
:license: MIT, see LICENSE for more details.
"""
import json
import requests_mock
import unittest
from test import test_support
from pybitcoin import BitcoinPrivateKey, BitcoinPublicKey
from cryptography.hazmat.backends.openssl import backend as openssl_backend
from blockchainauth import AuthRequest, AuthResponse
from blockchainauth.dids import get_address_from_did
from tests.test_data import PRIVATE_KEY, PUBLIC_KEY, \
REQUEST_SAMPLE_ENCODED_TOKEN, REQUEST_SAMPLE_DECODED_TOKEN,\
RESPONSE_SAMPLE_ENCODED_TOKEN, RESPONSE_SAMPLE_DECODED_TOKEN, RYAN_PROFILE
from blockchainauth.tokenizer import Tokenizer
from blockchainauth.verification import do_public_keys_match_username, NAME_LOOKUP_URL
class AuthRequestTest(unittest.TestCase):
def setUp(self):
self.private_key_hex = str(PRIVATE_KEY)
self.public_key_hex = str(PUBLIC_KEY)
self.domain_name = 'localhost:3000'
self.private_key = BitcoinPrivateKey(self.private_key_hex)
self.public_key = BitcoinPublicKey(self.public_key_hex)
self.sample_encoded_token = REQUEST_SAMPLE_ENCODED_TOKEN
self.sample_decoded_token = REQUEST_SAMPLE_DECODED_TOKEN
self.maxDiff = None
def tearDown(self):
pass
def test_auth_request_token_encoding(self):
# valid AuthRequest
auth_request = AuthRequest(self.private_key_hex, self.domain_name)
auth_request_token = auth_request.token()
decoded_token = AuthRequest.decode(auth_request_token)
payload = decoded_token['payload']
self.assertEqual(payload['public_keys'][0], self.public_key_hex)
self.assertEqual(get_address_from_did(payload['iss']), self.public_key.address())
self.assertEqual(payload['scopes'], [])
self.assertEqual(payload['manifest_uri'], self.domain_name + '/manifest.json')
self.assertTrue(AuthRequest.verify(auth_request_token))
# invalid AuthRequest
auth_request = AuthRequest(self.private_key_hex, self.domain_name)
auth_request_token = auth_request.token()[:-1]
self.assertFalse(AuthRequest.verify(auth_request_token))
def test_auth_request_token_decoding(self):
decoded_token = AuthRequest.decode(self.sample_encoded_token)
self.assertEqual(decoded_token, self.sample_decoded_token)
def test_custom_openssl_backend(self):
auth_request = AuthRequest(self.private_key_hex, self.domain_name, crypto_backend=openssl_backend)
auth_request_token = auth_request.token()
self.assertTrue(AuthRequest.verify(auth_request_token))
class AuthResponseTest(unittest.TestCase):
def setUp(self):
self.private_key_hex = str(PRIVATE_KEY)
self.public_key_hex = str(PUBLIC_KEY)
self.private_key = BitcoinPrivateKey(self.private_key_hex)
self.public_key = BitcoinPublicKey(self.public_key_hex)
self.profile = RYAN_PROFILE
self.username = 'ryan.id'
self.sample_encoded_token = RESPONSE_SAMPLE_ENCODED_TOKEN
self.sample_decoded_token = RESPONSE_SAMPLE_DECODED_TOKEN
def tearDown(self):
pass
def test_auth_response_token_encoding(self):
# without username, testing basics
auth_response = AuthResponse(self.private_key_hex, RYAN_PROFILE)
auth_response_token = auth_response.token()
decoded_token = AuthResponse.decode(auth_response_token)
payload = decoded_token['payload']
self.assertEqual(payload['public_keys'][0], self.public_key_hex)
self.assertEqual(get_address_from_did(payload['iss']), self.public_key.address())
self.assertEqual(payload['profile'], self.profile)
self.assertEqual(payload['username'], None)
self.assertTrue(AuthResponse.verify(auth_response_token))
# with username
with requests_mock.mock() as m:
m.get(NAME_LOOKUP_URL.rstrip('/') + '/' + self.username,
text=json.dumps({'address': self.public_key.address()}))
auth_response = AuthResponse(self.private_key_hex, RYAN_PROFILE, self.username)
auth_response_token = auth_response.token()
self.assertTrue(do_public_keys_match_username(auth_response_token, Tokenizer(),
AuthResponse.decode(auth_response_token)))
self.assertTrue(AuthResponse.verify(auth_response_token))
def test_auth_response_token_decoding(self):
decoded_token = AuthResponse.decode(self.sample_encoded_token)
self.assertEqual(decoded_token, self.sample_decoded_token)
def test_main():
test_support.run_unittest(
AuthRequestTest,
AuthResponseTest
)
if __name__ == '__main__':
test_main()
|
the-stack_0_21701 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############################################################
# replace_module_test.py
############################################################
# Author: Umut Boz
# Copyright (c) 2020, OneframeMobile, KoçSistem
# Email: [email protected]
############################################################
# Version: 0.1.0
############################################################
# Built-in/Generic Imports
import os
import sys
from os.path import dirname,join, abspath,sys
current_file = os.path.dirname(__file__)
lib_dir_path = os.path.join(current_file, 'src')
lib_dir_module = os.path.abspath(lib_dir_path)
sys.path.insert(0, lib_dir_module)
# Own modules
#from codenums import MUSTACHE
from src.codegenlib.enums import MUSTACHE
from src.codegenlib.templateStreaming import TemplateStreaming
from src.codegenlib.templateFile import TemplateFile
from src.codegenlib.templateModule import TemplateModule
fileName = "test.swift"
testManagerClassTF = TemplateFile(
name="manager_class_mustache",
dict={"service_name": "OneframeMobile", "request_func": MUSTACHE.PARENT},
output_file="Manager.swift"
)
testGetRequestFuncTF = TemplateFile(
name="request_get_func_mustache",
dict={"result_model_name": "String","function_name": "getTest", "query_path" : '"api/getTest?name=query"', "func_param" : "query:String, "},
output_file=None,
is_child_template=True,
parent_mustache="request_func"
)
testPostRequestFuncTF = TemplateFile(
name="request_post_func_mustache",
dict={"result_model_name": "UserModel", "function_name": "login", "query_path" : '"api/login"', "func_param" : ""},
output_file=None,
is_child_template=True,
parent_mustache="request_func"
)
testManagerClassTF.childTemplateFiles.append(testGetRequestFuncTF)
testManagerClassTF.childTemplateFiles.append(testPostRequestFuncTF)
testModule = TemplateModule(
name="networking-swagger-swift",
templates_files=[testManagerClassTF]
)
tStreaming = TemplateStreaming(
template_module = testModule
)
tStreaming.execute()
'''
findParentFilter = lambda x: x[1] == MUSTACHE.PARENT
output = filter(findParentFilter,testManagerClassTF.dict.items())
print(len(output) > 0 if True else False)
'''
# print(testManagerClassTF.dict["request_func"] == MUSTACHE.PARENT)
# print(testManagerClassTF.dict.items()[0][0])
# print(testManagerClassTF.dict.get("request_func")=="PARENT")
|
the-stack_0_21702 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : GUC
Case Name   : Use gs_guc set to set the parameter hashagg_table_size to 50 and check the expected result
Description :
    1. Query the default value of hashagg_table_size
    2. Set the parameter value to 50 and restart the database
    3. Query the value of the parameter after the change
    4. Restore the parameter to its default value
Expect      :
    1. The default value 0 is displayed
    2. The change succeeds
    3. 50 is displayed
    4. The default value is restored successfully
History     :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
LOG = Logger()
commonsh = CommonSH('dbuser')
class QueryPlan(unittest.TestCase):
def setUp(self):
self.constant = Constant()
LOG.info(
'------Opengauss_Function_Guc_Queryplan_Case0087start------')
def test_hashagg_table_size(self):
        LOG.info('--Step 1: check the default value--')
sql_cmd = commonsh.execut_db_sql('show hashagg_table_size;')
LOG.info(sql_cmd)
self.res = sql_cmd.splitlines()[-2].strip()
        LOG.info('--Step 2: use gs_guc set to set hashagg_table_size to 50 and restart the database--')
msg = commonsh.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
'hashagg_table_size =50')
LOG.info(msg)
self.assertTrue(msg)
msg = commonsh.restart_db_cluster()
LOG.info(msg)
status = commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
        LOG.info('--Step 3: query the value of the parameter after the change--')
sql_cmd = commonsh.execut_db_sql('show hashagg_table_size;')
LOG.info(sql_cmd)
self.assertIn('50', sql_cmd)
def tearDown(self):
        LOG.info('--Step 4: restore the default value--')
sql_cmd = commonsh.execut_db_sql('show hashagg_table_size;')
LOG.info(sql_cmd)
if self.res != sql_cmd.split('\n')[-2].strip():
msg = commonsh.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
f"hashagg_table_size={self.res}")
LOG.info(msg)
msg = commonsh.restart_db_cluster()
LOG.info(msg)
status = commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
sql_cmd = commonsh.execut_db_sql('show hashagg_table_size;')
LOG.info(sql_cmd)
LOG.info(
            '-----Opengauss_Function_Guc_Queryplan_Case0087 finished------')
|
the-stack_0_21706 | import codecs
import io
import os
import sys
from setuptools import setup
VERSION = '0.4.3'
tests_require = ['pytest']
requires = [
"requests",
"babel",
"pycoingecko",
"blockchain",
"Pillow",
"matplotlib",
"numpy",
"pydantic",
"mplfinance",
"pandas"
]
if __name__ == '__main__':
setup(
name='btc-ticker',
version=VERSION,
description='BTC ticker',
url='http://www.github.com/btc-ticker/btc-ticker',
keywords=['btc', 'ticker'],
packages=[
"btcticker",
"btcticker.fonts",
"btcticker.fonts.googlefonts"
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
'Topic :: Office/Business :: Financial',
],
install_requires=requires,
package_data={'btcticker.fonts': ['*.ttf'], 'btcticker.fonts.googlefonts': ['*.ttf', '*.txt'], },
setup_requires=['pytest-runner'],
tests_require=tests_require,
include_package_data=True,
) |
the-stack_0_21707 | import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true",
default=False, help="run slow tests")
def pytest_collection_modifyitems(config, items):
if config.getoption("--runslow"):
# --runslow given in cli: do not skip slow tests
return
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
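# A minimal sketch of a test module that relies on this hook (test names are hypothetical):
#
#   import time
#   import pytest
#
#   def test_fast():
#       assert 1 + 1 == 2
#
#   @pytest.mark.slow
#   def test_slow():
#       time.sleep(5)
#
# Running `pytest` skips test_slow; `pytest --runslow` runs it as well.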
|
the-stack_0_21708 | #!/usr/bin/env python
"""
This script is for generating a 2-party end-to-end run of the PySyft REST-RPC
service. Detailed here are all the necessary payload submissions that are
required to be submitted to `http://<ttp_host>:<ttp_port>/ttp/connect/...` in
order to initialise and register for a PySyft REST-RPC project.
Note: Ensure that a TTP container is already up before running this script
"""
####################
# Required Modules #
####################
# Generic/Built-in
import logging
# Libs
import requests
##################
# Configurations #
##################
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.DEBUG)
# Relevant IDs
project_id = "fedlearn_project"
expt_id = "fedlearn_experiment_1"
run_id = "fedlearn_run_1_1"
participant_id_1 = "fedlearn_worker_1"
participant_id_2 = "fedlearn_worker_2"
# Relevant Connection Endpoints
# the ttp_host and ttp_port are set for a distributed environment via ssh from admin laptop
# running ttp and workers on laptop required Docker network evaluation
ttp_host = "localhost"
ttp_port = 5000
base_ttp_connect_url = f"http://{ttp_host}:{ttp_port}/ttp/connect"
project_upload_url = f"{base_ttp_connect_url}/projects"
project_retrieval_url = f"{base_ttp_connect_url}/projects/{project_id}"
expt_upload_url = f"{project_retrieval_url}/experiments"
expt_retrieval_url = f"{project_retrieval_url}/experiments/{expt_id}"
run_upload_url = f"{expt_retrieval_url}/runs"
run_retrieval_url = f"{expt_retrieval_url}/runs/{run_id}"
participant_upload_url = f"{base_ttp_connect_url}/participants"
participant_1_retrieval_url = f"{base_ttp_connect_url}/participants/{participant_id_1}"
participant_2_retrieval_url = f"{base_ttp_connect_url}/participants/{participant_id_2}"
registration_1_url = f"{participant_1_retrieval_url}/projects/{project_id}/registration"
registration_2_url = f"{participant_2_retrieval_url}/projects/{project_id}/registration"
tags_1_url = f"{registration_1_url}/tags"
tags_2_url = f"{registration_2_url}/tags"
# Project Simulation
fedlearn_project = {
"project_id": project_id,
"action": "classify",
"incentives": {
"tier_1": ["test_worker_1"],
"tier_2": ["test_worker_2"]
}
}
# Experiment Simulation
fedlearn_experiment = {
"expt_id": expt_id,
"model": [
{
"activation": "sigmoid",
"is_input": True,
"l_type": "Linear",
"structure": {
"bias": True,
"in_features": 28,
"out_features": 1
}
}
]
}
# Run Simulations
fedlearn_run = {
"run_id": run_id,
"input_size": 28,
"output_size": 1,
"batch_size": 32,
"rounds": 2,
"epochs": 1,
"lr": 0.15,
"weight_decay": 0.01,
"mu": 0.1,
"l1_lambda": 0.2,
"l2_lambda": 0.3,
"base_lr": 0.3,
"max_lr": 0.5,
"criterion": "NLLLoss"
}
# Participant Simulation
fedlearn_participant_1 = {
# "participant_id": participant_id_1,
"id": "fedlearn_worker_1",
"host": "172.17.0.2", # 0.0.0.0 only for local simulation!
"port": 8020,
"log_msgs": False,
"verbose": False,
"f_port": 5000, # Only required if custom port is required (i.e. local)
}
fedlearn_participant_2 = {
# "participant_id": participant_id_2,
"id": "fedlearn_worker_2",
"host": "172.17.0.3", # 0.0.0.0 only for local simulation!
"port": 8020,
"log_msgs": False,
"verbose": False,
"f_port": 5000, # Only required if custom port is required (i.e. local)
}
# Registration Simulation
# Host: contribute data
# Guest: has validation set, may also contribute data.
fedlearn_registration_p1 = {"role": "host"} # For fedlearn_participant_1
fedlearn_registration_p2 = {"role": "host"} # For fedlearn_participant_2
# Tag Simulation
# Tags define which datasets to use. Must be available via Docker volume mount definitions
fedlearn_tags_p1 = { # For fedlearn_participant_1
"train": [ ["train"]],
"evaluate": [["evaluate"]]
}
fedlearn_tags_p2 = { # For fedlearn_participant_2
"train": [ ["train"]]
}
###################
# Helper Function #
###################
def execute_post(url, payload):
status = requests.post(url=url, json=payload)
assert status.status_code in [200, 201]
return status.json()
##########
# Script #
##########
if __name__ == "__main__":
# Step 1: TTP registers a new project
project_resp = execute_post(url=project_upload_url, payload=fedlearn_project)
logging.debug(f"New project: {project_resp}")
# Step 2: TTP registers an experiment
expt_resp = execute_post(url=expt_upload_url, payload=fedlearn_experiment)
logging.debug(f"New experiment: {expt_resp}")
# Step 3: TTP registers a run
run_resp = execute_post(url=run_upload_url, payload=fedlearn_run)
logging.debug(f"New run: {run_resp}")
# Step 4: Participants register server connection information on TTP node
participant_1_resp = execute_post(
url=participant_upload_url,
payload=fedlearn_participant_1
)
logging.debug(f"New participant 1: {participant_1_resp}")
participant_2_resp = execute_post(
url=participant_upload_url,
payload=fedlearn_participant_2
)
logging.debug(f"New participant 2: {participant_2_resp}")
# Step 5: Participants register to partake in aforementioned project
registration_1_resp = execute_post(
url=registration_1_url,
payload=fedlearn_registration_p1
)
logging.debug(f"New registration for participant 1: {registration_1_resp}")
registration_2_resp = execute_post(
url=registration_2_url,
payload=fedlearn_registration_p2
)
logging.debug(f"New registration for participant 2: {registration_2_resp}")
# Step 6: Participants register data tags to be used in project
tags_1_resp = execute_post(url=tags_1_url, payload=fedlearn_tags_p1)
logging.debug(f"New tags registered for participant 1: {tags_1_resp}")
tags_2_resp = execute_post(url=tags_2_url, payload=fedlearn_tags_p2)
logging.debug(f"New tags registered for participant 2: {tags_2_resp}")
|
the-stack_0_21709 | import os
import typing
import artm
def get_num_entries(d: artm.Dictionary) -> int:
"""
    Return the size of the dictionary.
    Parameters
    ----------
    d : the dictionary
    Returns
    -------
    the number of tokens in the dictionary
"""
return next(
x for x in d._master.get_info().dictionary if x.name == d.name
).num_entries
def limit_classwise(
dictionary: artm.Dictionary,
cls_ids: typing.Iterable[str],
max_dictionary_size: int,
tmp_dir: str,
out_file: str,
):
"""
    Limit the dictionary so that each class id keeps at most max_dictionary_size tokens.
    The resulting dictionary is saved in text format to out_file.
    Parameters
    ----------
    dictionary : the source dictionary
    cls_ids : the modalities (class ids)
    max_dictionary_size : the maximum dictionary size per modality
    tmp_dir : the directory for storing intermediate results
    out_file : the file where the result is saved
"""
for cls_id in cls_ids:
filtered = dictionary
inplace = False
for other_id in cls_ids:
if other_id != cls_id:
filtered = filtered.filter(
class_id=other_id, max_df_rate=0.4, min_df_rate=0.5, inplace=inplace
)
inplace = True
filtered.filter(max_dictionary_size=max_dictionary_size)
filtered.save_text(os.path.join(tmp_dir, f"{cls_id[1:]}.txt"))
res = []
for cls_id in cls_ids:
with open(os.path.join(tmp_dir, f"{cls_id[1:]}.txt")) as f:
res.extend(f.readlines()[2:] if len(res) > 0 else f.readlines())
with open(out_file, "w") as f:
f.write("".join(res))
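# A minimal usage sketch (paths, class ids, and sizes are hypothetical; assumes the
# dictionary was gathered from batches elsewhere):
#
#   d = artm.Dictionary()
#   d.gather(data_path="batches/")
#   limit_classwise(d, cls_ids=["@word", "@ngramm"], max_dictionary_size=50000,
#                   tmp_dir="tmp/", out_file="dictionary_limited.txt")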
|
the-stack_0_21710 | """Define data maps."""
import json
import os
from typing import Dict
from ..errors import SeventeenTrackError
def _load_json_list(filename: str) -> list:
"""Load json data into list."""
result: list = []
with open(os.path.join(os.path.dirname(__file__), filename), "r") as f:
for row in json.load(f):
result.append(row)
return result
CARRIER_MAP: Dict[int, str] = {
row.get("key"): row.get("_name") for row in _load_json_list("carrier.all.json")
}
COUNTRY_MAP: Dict[int, str] = {
row.get("key"): row.get("_name") for row in _load_json_list("country.all.json")
}
PACKAGE_STATUS_MAP: Dict[int, str] = {
0: "Not Found",
10: "In Transit",
20: "Expired",
30: "Ready to be Picked Up",
35: "Undelivered",
40: "Delivered",
50: "Returned",
}
PACKAGE_TYPE_MAP: Dict[int, str] = {
0: "Unknown",
1: "Small Registered Package",
2: "Registered Parcel",
3: "EMS Package",
}
def get_carrier_key(name: str) -> int:
"""Get carrier key from name."""
for key, carrier in CARRIER_MAP.items():
if carrier.lower() == name.lower():
return key
raise SeventeenTrackError(f"Could not map carrier {name} to id")
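# A minimal usage sketch (the carrier name is illustrative and must match an entry
# in carrier.all.json):
#
#   key = get_carrier_key("USPS")
#   print(key, CARRIER_MAP[key])
#   print(PACKAGE_STATUS_MAP[40])  # "Delivered"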
|
the-stack_0_21712 | import sys, logging, random, open_color, arcade
#check to make sure we are running the right version of Python
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
#turn on logging, in case we have to leave ourselves debugging messages
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
MARGIN = 10
NUM_BALLS = 20
SCREEN_TITLE = "Gravity Exercise"
GRAVITY = -9.8 #update this value
class Ball():
def __init__(self, x, y, color):
self.x = x
self.y = y
self.radius = 10
self.color = color
self.dx = 0
self.dy = 0
def draw(self):
arcade.draw_circle_filled(self.x, self.y, self.radius, self.color)
def update(self):
self.x += self.dx
self.y += self.dy
if self.x <= MARGIN:
self.x = MARGIN
if self.x >= SCREEN_WIDTH - MARGIN:
self.x = SCREEN_WIDTH - MARGIN
if self.y <= MARGIN:
self.y = MARGIN
if self.y >= SCREEN_HEIGHT - MARGIN:
self.y = SCREEN_HEIGHT - MARGIN
def accelerate(self,dx,dy):
self.dx += dx
self.dy += dy
class Window(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
self.set_mouse_visible(True)
arcade.set_background_color(open_color.black)
self.ball_list = []
def setup(self):
for i in range(NUM_BALLS):
x = random.randint(MARGIN,SCREEN_WIDTH-MARGIN)
y = random.randint(MARGIN,SCREEN_HEIGHT-MARGIN)
color = random.choice(open_color.yellows)
self.ball = Ball(x,y,color)
self.ball_list.append(self.ball)
def update(self, delta_time):
for b in self.ball_list:
# apply gravity here
b.accelerate(0,-9.8)
b.update()
def on_draw(self):
arcade.start_render()
for b in self.ball_list:
b.draw()
def main():
window = Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main() |
the-stack_0_21715 | import os
import glob
from setuptools import setup, find_packages
install_requires = [line.rstrip() for line in open(os.path.join(os.path.dirname(__file__), "requirements.txt"))]
with open("README.md") as fh:
long_description = fh.read()
setup(name='gs-chunked-io',
version='0.2.8',
description='Streaming read/writes to Google Storage blobs with ascynchronous buffering.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/xbrianh/gs-chunked-io.git',
author='Brian Hannafious',
author_email='[email protected]',
license='MIT',
packages=find_packages(exclude=['tests']),
scripts=glob.glob('scripts/*'),
zip_safe=False,
install_requires=install_requires,
platforms=['MacOS X', 'Posix'],
test_suite='test',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7'
]
)
|
the-stack_0_21717 | from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.generic import View
from .models import Topic, ChatMessage, Subscription, Post, VotePost, Comment, VoteComment, Room
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, JsonResponse, HttpResponseBadRequest
import datetime
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from mainapp.forms import CreateRoomForm, PostModelForm, CommentForm, CommentEditForm
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
from django.db.models import Count, Q
import markdown
import bleach
from bs4 import BeautifulSoup
from itertools import chain
from django.contrib.auth.models import User
from operator import attrgetter
class IndexView(View):
template_name = 'mainapp/home_page.html'
def get(self, request):
"""
Chat room
"""
# hardcoded topic name initially
name = "general"
topic = Topic.objects.get(name=name)
# We want to show the last 10 messages, ordered most-recent-last
chat_queryset = ChatMessage.objects.filter(topic=topic).order_by("-created")[:30]
chat_message_count = len(chat_queryset)
if chat_message_count > 0:
first_message_id = chat_queryset[len(chat_queryset)-1].id
else:
first_message_id = -1
previous_id = -1
if first_message_id != -1:
try:
previous_id = ChatMessage.objects.filter(topic=topic).filter(pk__lt=first_message_id).order_by("-pk")[:1][0].id
except IndexError:
previous_id = -1
chat_messages = reversed(chat_queryset)
#subscribed rooms
if request.user.is_authenticated:
subscribed_rooms = Subscription.objects.filter(user=request.user).filter(deleted=False).order_by('topic__name')
else:
subscribed_rooms = None
#sub rooms
sub_rooms = Room.objects.filter(topic=topic).order_by('position')
return render(request, self.template_name, {
'topic': topic,
'chat_messages': chat_messages,
'first_message_id' : previous_id,
'subscribed_rooms' : subscribed_rooms,
'default_rooms' : settings.DEFAULT_TOPICS,
'sub_rooms' : sub_rooms,
})
class AboutView(View):
template_name = 'mainapp/about.html'
def get(self, request):
return render(request, self.template_name)
class TopicForum(View):
paginate_by = 10
template_name = 'mainapp/topic_forum.html'
def get(self, request, topic_name):
try:
topic = Topic.objects.get(name=topic_name)
except ObjectDoesNotExist:
raise Http404("Topic does not exist")
comments_count = Count('comment', filter=Q(comment__deleted=False))
if request.GET.get('sort_by') == "new":
all_results = Post.objects.filter(topic=topic).filter(deleted=False).order_by('-created').annotate(comments_count=comments_count)
sort_by = "New"
else:
sort_by = "Popular"
all_results = Post.objects.filter(topic=topic).filter(deleted=False).order_by('-rank').annotate(comments_count=comments_count)
paginator = Paginator(all_results, self.paginate_by)
page = request.GET.get('page')
try:
post_list = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
post_list = paginator.page(1)
page = 1
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
post_list = paginator.page(paginator.num_pages)
page = paginator.num_pages
if request.user.is_authenticated:
user_votes = VotePost.objects.filter(user=request.user)
else:
user_votes = list()
#subscribed rooms
if request.user.is_authenticated:
subscribed_rooms = Subscription.objects.filter(user=request.user).filter(deleted=False).order_by('topic__name')
else:
subscribed_rooms = None
return render(request, self.template_name, {'post_list' : post_list, 'sort_by' : sort_by, 'user_votes' : user_votes, 'topic' : topic, 'page' : page, 'subscribed_rooms' : subscribed_rooms, 'default_rooms' : settings.DEFAULT_TOPICS })
class CreateTopicPost(LoginRequiredMixin, View):
form_class = PostModelForm
template_name = 'mainapp/create_post.html'
def get(self, request, topic_name):
try:
topic = Topic.objects.get(name=topic_name)
except ObjectDoesNotExist:
raise Http404("Topic does not exist")
form = self.form_class()
#subscribed rooms
subscribed_rooms = Subscription.objects.filter(user=request.user).filter(deleted=False).order_by('topic__name')
return render(request, self.template_name, {'form' : form, 'topic' : topic, 'subscribed_rooms' : subscribed_rooms})
def post(self, request, topic_name):
try:
topic = Topic.objects.get(name=topic_name)
except ObjectDoesNotExist:
raise Http404("Topic does not exist")
form = self.form_class(request.POST)
if form.is_valid():
title = form.cleaned_data['title']
body = form.cleaned_data['body']
url = form.cleaned_data['url']
subscribed_rooms = Subscription.objects.filter(user=request.user).filter(deleted=False).order_by('topic__name')
if body and url:
messages.error(request, 'Submit either a url or the body, but not both.')
return render(request, self.template_name, {'form' : form, 'topic' : topic, 'subscribed_rooms' : subscribed_rooms})
if not body and not url:
messages.error(request, 'Submit either a url or the body. Atleast one of the fields has to entered.')
return render(request, self.template_name, {'form' : form, 'topic' : topic, 'subscribed_rooms' : subscribed_rooms})
if body:
body_html = markdown.markdown(body)
body_html = bleach.clean(body_html, tags=settings.POST_TAGS, strip=True)
else:
body = None
body_html = None
article = Post(topic=topic, title=title, url=url, body=body, user=request.user, body_html=body_html)
article.save()
vote_obj = VotePost(user=request.user,
post=article,
value=1)
vote_obj.save()
article.upvotes += 1
article.save()
messages.success(request, 'Post has been submitted.')
return redirect(reverse('mainapp:topic_forum', args=[topic]) + '?sort_by=new')
else:
return render(request, self.template_name, {'form' : form})
class ViewPost(View):
template_name = 'mainapp/view_post.html'
form_class = CommentForm
def get(self, request, topic_name, pk, slug):
try:
topic = Topic.objects.get(name=topic_name)
except ObjectDoesNotExist:
raise Http404("Topic does not exist")
try:
post = Post.objects.get(topic=topic, pk=pk)
except Post.DoesNotExist:
return
if post.deleted:
return
nodes = Comment.objects.filter(post=post)
comments_count = len(Comment.objects.filter(post=post).filter(deleted=False))
if request.user.is_authenticated:
user_votes = VoteComment.objects.filter(user=request.user).filter(comment__post=post)
else:
user_votes = list()
form = self.form_class(initial={'parent_id' : 'None'})
#subscribed rooms
if request.user.is_authenticated:
subscribed_rooms = Subscription.objects.filter(user=request.user).filter(deleted=False).order_by('topic__name')
else:
subscribed_rooms = None
vote_value = 0
if request.user.is_authenticated:
try:
vote_obj = VotePost.objects.filter(user=request.user).get(post=post)
vote_value = vote_obj.value
except ObjectDoesNotExist:
pass
return render(request, self.template_name, {'post' : post, 'nodes' : nodes, 'form' : form, 'user_votes' : user_votes, 'comments_count' : comments_count, 'topic' : topic, 'subscribed_rooms' : subscribed_rooms, 'vote_value' : vote_value, 'default_rooms' : settings.DEFAULT_TOPICS })
class ForumAddComment(View):
form_class = CommentForm
def post(self, request, pk):
if not request.user.is_authenticated:
return JsonResponse({'error' : 'Please login to comment on this post.' }, status=400)
try:
post = Post.objects.filter(deleted=False).get(pk=pk)
except Post.DoesNotExist:
return JsonResponse({'error' : 'Invalid post id.' }, status=400)
form = self.form_class(request.POST)
if post.deleted:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if form.is_valid():
if Comment.objects.filter(user=request.user).filter(created__gte=timezone.now() - datetime.timedelta(minutes=60)).count() >= 5:
return JsonResponse({'error' : 'Rate limit reached. You\'re posting too fast!' }, status=403)
comment_parent_id = form.cleaned_data['parent_id']
if comment_parent_id == "None":
comment_parent_object = None
else:
try:
comment_parent_id = int(comment_parent_id)
comment_parent_object = Comment.objects.get(pk=comment_parent_id)
if comment_parent_object.deleted:
return JsonResponse({'error' : 'Invalid request. Cannot reply to deleted comment' }, status=400)
except (ValueError, Comment.DoesNotExist) :
return JsonResponse({'error' : 'Invalid request.' }, status=400)
comment_text = form.cleaned_data['comment']
comment_text_html = markdown.markdown(comment_text)
comment_text_html = bleach.clean(comment_text_html, tags=settings.COMMENT_TAGS, strip=True)
soup = BeautifulSoup(comment_text_html, "html.parser")
for i in soup.find_all('a'):
i['target'] = '_blank'
i['rel'] = 'noopener noreferrer nofollow'
for i in soup.find_all('blockquote'):
i['class'] = 'blockquote'
comment_text_html = soup.prettify()
comment = Comment(comment_text=comment_text, comment_text_html=comment_text_html, user=request.user, post=post, parent=comment_parent_object)
comment.save()
vote_obj = VoteComment(user=request.user,
comment=comment,
value=1)
vote_obj.save()
comment.upvotes += 1
comment.net_votes += 1
comment.save()
#todo : notification
return JsonResponse({'success' : 'Comment has been saved.', 'comment_id' : comment.id, 'comment_html' : comment.comment_text_html, 'username' : comment.user.username, 'comment_raw' : comment.comment_text })
else:
return JsonResponse({'error' : 'Invalid form submission.' }, status=400)
class VoteCommentView(View):
def post(self, request, pk):
if not request.user.is_authenticated:
return JsonResponse({'error' : 'Please login to comment on this post.' }, status=400)
if request.POST.get('vote_value'):
try:
comment = Comment.objects.filter(deleted=False).get(pk=pk)
except Comment.DoesNotExist:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if comment.post.deleted:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
vote_value = request.POST.get('vote_value', None)
try:
vote_value = int(vote_value)
if vote_value not in [-1, 1]:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
except (ValueError, TypeError):
return JsonResponse({'error' : 'Invalid request.' }, status=400)
try:
vote_obj = VoteComment.objects.get(comment=comment,user=request.user)
except ObjectDoesNotExist:
vote_obj = VoteComment(user=request.user,
comment=comment,
value=vote_value)
vote_obj.save()
if vote_value == 1:
vote_diff = 1
comment.upvotes += 1
comment.net_votes += 1
elif vote_value == -1:
vote_diff = -1
comment.downvotes += 1
comment.net_votes -= 1
comment.save()
if comment.user != request.user:
comment.user.userprofile.comment_karma += vote_diff
comment.user.userprofile.save()
return JsonResponse({'vote_diff': vote_diff})
if vote_obj.value == vote_value:
# cancel vote
vote_diff = vote_obj.unvote(request.user)
if not vote_diff:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
else:
# change vote
vote_diff = vote_obj.change_vote(vote_value, request.user)
if not vote_diff:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
return JsonResponse({'vote_diff': vote_diff})
else:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
class DeleteCommentView(View):
def post(self, request, pk):
if not request.user.is_authenticated:
            return JsonResponse({'error' : 'Please login to delete this comment.' }, status=400)
try:
post = Post.objects.filter(deleted=False).get(pk=pk)
except Post.DoesNotExist:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if post.deleted:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
try:
comment_id = int(request.POST.get('comment_id', None))
except (ValueError, TypeError):
return JsonResponse({'error' : 'Invalid request.' }, status=400)
try:
comment = Comment.objects.get(pk=comment_id)
except Comment.DoesNotExist:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if not comment.can_delete():
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if comment.user == request.user:
comment.deleted = True
comment.save()
return JsonResponse({'success' : 'Comment has been deleted.' })
else:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
class EditCommentView(View):
form_class = CommentEditForm
def post(self, request, pk):
if not request.user.is_authenticated:
            return JsonResponse({'error' : 'Please login to edit this comment.' }, status=400)
form = self.form_class(request.POST)
try:
post = Post.objects.filter(deleted=False).get(pk=pk)
except Post.DoesNotExist:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if post.deleted:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if form.is_valid():
comment_id = form.cleaned_data['comment_id']
try:
comment_id = int(comment_id)
comment_object = Comment.objects.get(pk=comment_id)
except (ValueError, Comment.DoesNotExist) :
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if not comment_object.can_edit():
return JsonResponse({'error' : 'Invalid request.' }, status=400)
if comment_object.user != request.user:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
comment_text = form.cleaned_data['comment']
comment_text_html = markdown.markdown(comment_text)
comment_text_html = bleach.clean(comment_text_html, tags=settings.COMMENT_TAGS, strip=True)
soup = BeautifulSoup(comment_text_html, "html.parser")
for i in soup.find_all('a'):
i['target'] = '_blank'
i['rel'] = 'noopener noreferrer nofollow'
for i in soup.find_all('blockquote'):
i['class'] = 'blockquote'
comment_text_html = soup.prettify()
comment_object.comment_text = comment_text
comment_object.comment_text_html = comment_text_html
comment_object.save()
return JsonResponse({'success' : 'Comment has been updated.', 'comment_id' : comment_object.id, 'comment_html' : comment_object.comment_text_html, 'comment_raw' : comment_object.comment_text })
else:
return JsonResponse({'error' : 'Invalid form submission.' }, status=400)
class VotePostView(LoginRequiredMixin, View):
def post(self, request, pk):
if request.POST.get('vote_value'):
try:
post = Post.objects.filter(deleted=False).get(pk=pk)
except Post.DoesNotExist:
return HttpResponseBadRequest()
vote_value = request.POST.get('vote_value', None)
try:
vote_value = int(vote_value)
if vote_value != 1:
raise ValueError("Invalid request")
except (ValueError, TypeError):
return HttpResponseBadRequest()
try:
vote_obj = VotePost.objects.get(post=post,user=request.user)
except ObjectDoesNotExist:
vote_obj = VotePost(user=request.user,
post=post,
value=vote_value)
vote_obj.save()
if vote_value == 1:
vote_diff = 1
post.upvotes += 1
elif vote_value == -1:
vote_diff = -1
post.upvotes -= 1
post.save()
if post.user != request.user:
post.user.userprofile.submission_karma += vote_diff
post.user.userprofile.save()
return JsonResponse({'error' : None,
'vote_diff': vote_diff})
if vote_obj.value == vote_value:
# cancel vote
vote_diff = vote_obj.unvote(request.user)
if not vote_diff:
return HttpResponseBadRequest(
'Something went wrong while canceling the vote')
else:
# change vote
vote_diff = vote_obj.vote(vote_value, request.user)
if not vote_diff:
return HttpResponseBadRequest(
'Wrong values for old/new vote combination')
return JsonResponse({'error' : None,
'vote_diff': vote_diff})
else:
return HttpResponseBadRequest()
class ChatArchive(View):
template_name = 'mainapp/chat_archive.html'
def get(self, request, topic_name):
try:
topic = Topic.objects.get(name=topic_name)
except ObjectDoesNotExist:
raise Http404("Topic does not exist")
if ChatMessage.objects.filter(topic=topic).count() == 0:
return render(request, self.template_name, {'topic' : topic, 'error_message' : "No messages have been sent in this chat room", 'message' : "No valid dates can be selected"})
first_message = ChatMessage.objects.filter(topic=topic).earliest('created')
now = timezone.now()
min_date = datetime.datetime(first_message.created.year, first_message.created.month, first_message.created.day, tzinfo=now.tzinfo)
given_date = request.GET.get('date', None)
error_message = None
        if given_date is None:
given_date = datetime.datetime(now.year, now.month, now.day, tzinfo=now.tzinfo)
else:
try:
#hacky way to set timezone to utc
given_date = datetime.datetime.strptime(given_date, "%Y-%m-%d")
given_date = datetime.datetime(given_date.year, given_date.month, given_date.day, tzinfo=now.tzinfo)
if given_date < min_date or now < given_date:
error_message = "Invalid date selected."
except ValueError:
error_message = "Invalid date selected."
message = "Choose a date between {} and {} to view the chat archive:".format(min_date.strftime('%b-%d-%Y'), now.strftime('%b-%d-%Y'))
        if error_message is not None:
return render(request, self.template_name, {'topic' : topic, 'error_message' : error_message, 'message' : message})
chat_messages = ChatMessage.objects.filter(topic=topic).filter(created__gte=given_date).filter(created__lte=given_date + datetime.timedelta(days=1)).order_by('created')
# next/prev links
if given_date - datetime.timedelta(days=1) < min_date:
prev_page = None
else:
prev_page = "{}?date={}".format(reverse('mainapp:chat_archive', args=[topic_name,]), (given_date - datetime.timedelta(days=1)).strftime('%Y-%m-%d'))
if now < given_date + datetime.timedelta(days=1):
next_page = None
else:
next_page = "{}?date={}".format(reverse('mainapp:chat_archive', args=[topic_name,]), (given_date + datetime.timedelta(days=1)).strftime('%Y-%m-%d'))
#format date
given_date = given_date.strftime('%b-%d-%Y')
return render(request, self.template_name, {'topic' : topic, 'chat_messages' : chat_messages, 'date' : given_date, 'error_message' : error_message, 'message' : message, 'prev_page' : prev_page, 'next_page' : next_page})
class ChatView(View):
template_name = 'mainapp/chat_room.html'
def get(self, request, topic_name):
"""
Chat room
"""
try:
topic = Topic.objects.get(name=topic_name)
except ObjectDoesNotExist:
raise Http404("Topic does not exist")
        # We want to show the last 30 messages, ordered most-recent-last
chat_queryset = ChatMessage.objects.filter(topic=topic).order_by("-created")[:30]
chat_message_count = len(chat_queryset)
if chat_message_count > 0:
first_message_id = chat_queryset[len(chat_queryset)-1].id
else:
first_message_id = -1
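        # previous_id is the id of the newest message older than the ones displayed
        # (used to page back through history); -1 means there is nothing older.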
previous_id = -1
if first_message_id != -1:
try:
previous_id = ChatMessage.objects.filter(topic=topic).filter(pk__lt=first_message_id).order_by("-pk")[:1][0].id
except IndexError:
previous_id = -1
chat_messages = reversed(chat_queryset)
#subscribed rooms
if request.user.is_authenticated:
subscribed_rooms = Subscription.objects.filter(user=request.user).filter(deleted=False).order_by('topic__name')
else:
subscribed_rooms = None
#sub rooms
sub_rooms = Room.objects.filter(topic=topic).order_by('position')
return render(request, self.template_name, {
'topic': topic,
'chat_messages': chat_messages,
'first_message_id' : previous_id,
'subscribed_rooms' : subscribed_rooms,
'default_rooms' : settings.DEFAULT_TOPICS,
'sub_rooms' : sub_rooms,
})
class CreateTopic(LoginRequiredMixin, View):
form_class = CreateRoomForm
template_name = "mainapp/create_topic.html"
def get(self, request):
form = self.form_class()
return render(request, self.template_name, {'form' : form})
def post(self, request):
form = self.form_class(request.POST)
if form.is_valid():
topic_name = form.cleaned_data['name']
topic_count = Topic.objects.filter(name=topic_name).count()
if topic_count == 0:
topic = Topic(name=topic_name, title=topic_name)
topic.save()
subscription = Subscription(user=request.user, topic=topic)
subscription.save()
                messages.success(request, "Topic has been created successfully")
return redirect(reverse("mainapp:chat_room", args=[topic_name]))
else:
                messages.error(request, "Topic already exists")
return redirect(reverse("mainapp:chat_room", args=[topic_name]))
else:
messages.error(request, "Invalid form data. Only lower case letters are allowed.")
return render(request, self.template_name, {'form' : self.form_class()})
class TopicsList(View):
template_name = "mainapp/topics_list.html"
paginate_by = 10
def get(self, request):
rooms = Topic.objects.all().order_by('-created')
paginator = Paginator(rooms, self.paginate_by)
page = request.GET.get('page', 1)
try:
current_page_rooms = paginator.page(page)
except PageNotAnInteger:
messages.error(request, "Invalid page number, showing the first page instead.")
current_page_rooms = paginator.page(1)
except EmptyPage:
messages.error(request, "Invalid page number, showing the last page instead.")
current_page_rooms = paginator.page(paginator.num_pages)
return render(request, self.template_name, {'current_page_rooms': current_page_rooms})
class SearchView(View):
template_name = "mainapp/search.html"
paginate_by = 10
def get(self, request):
search_query = request.GET.get('query', None)
        if search_query is None or len(search_query) > 20:
messages.error(request, "Invalid search query")
            return render(request, self.template_name, {'current_page_rooms': None, 'search_query' : None})
rooms = Topic.objects.filter(name__trigram_similar=search_query.lower()) | Topic.objects.filter(name__icontains=search_query.lower())
paginator = Paginator(rooms, self.paginate_by)
page = request.GET.get('page', 1)
try:
current_page_rooms = paginator.page(page)
except PageNotAnInteger:
messages.error(request, "Invalid page number, showing the first page instead.")
current_page_rooms = paginator.page(1)
except EmptyPage:
messages.error(request, "Invalid page number, showing the last page instead.")
current_page_rooms = paginator.page(paginator.num_pages)
return render(request, self.template_name, {'current_page_rooms': current_page_rooms, 'search_query' : search_query })
class ChatRoomSubscription(View):
def post(self, request):
if not request.user.is_authenticated:
return JsonResponse({'error' : 'Please login to subscribe.' }, status=400)
topic_name = request.POST.get('topic_name', None)
        if topic_name is None:
return JsonResponse({'error' : 'Invalid request.' }, status=400)
try:
topic = Topic.objects.get(name=topic_name)
except ObjectDoesNotExist:
return JsonResponse({'error' : 'Invalid request.' }, status=404)
user = request.user
try:
subscription = Subscription.objects.get(topic=topic, user=user)
if subscription.deleted:
subscription.deleted = False
message = "You have subscribed to {}".format(topic.name)
button_text = "Unsubscribe"
else:
subscription.deleted = True
message = "You have unsubscribed from {}".format(topic.name)
button_text = "Subscribe"
subscription.save()
except ObjectDoesNotExist:
subscription = Subscription(topic=topic, user=user)
subscription.save()
message = "You have subscribed to {}".format(topic.name)
button_text = "Unsubscribe"
return JsonResponse({'message' : message, 'button_text' : button_text })
class MyPosts(View):
template_name = 'mainapp/myposts.html'
paginate_by = 10
def get(self, request, username):
try:
user = User.objects.get(username=username)
except ObjectDoesNotExist:
messages.error(request, "User does not exist")
return render(request, self.template_name, {'error' : True})
comments = Comment.objects.all().filter(deleted=False).filter(user=user).order_by('-created')
posts = Post.objects.all().filter(deleted=False).filter(user=user).order_by('-created')
total_count = Comment.objects.all().filter(deleted=False).filter(user=user).count() + Post.objects.all().filter(deleted=False).filter(user=user).count()
all_items = chain(comments, posts)
all_items = sorted(all_items, key=attrgetter('created'), reverse=True)
paginator = Paginator(all_items, self.paginate_by)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
posts = paginator.page(1)
page = 1
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
posts = paginator.page(paginator.num_pages)
page = paginator.num_pages
return render(request, self.template_name, {'posts': posts, 'page' : page, 'user' : user })
class DeletePost(LoginRequiredMixin, View):
template_name = 'mainapp/post_delete.html'
def get(self, request, topic_name, pk):
post = get_object_or_404(Post, pk=pk)
go_back_url = post.get_post_url()
if (not post.can_delete()) or (post.user != request.user):
messages.error(request, 'Invalid request, please try again.')
return redirect(go_back_url)
subscribed_rooms = Subscription.objects.filter(user=request.user).filter(deleted=False).order_by('topic__name')
return render(request, self.template_name, {'post' : post, 'go_back_url' : go_back_url, 'subscribed_rooms' : subscribed_rooms, 'topic' : post.topic})
def post(self, request, topic_name, pk):
post = get_object_or_404(Post, pk=pk)
if request.POST.get('delete_post'):
if post.can_delete() and post.user == request.user:
post.deleted = True
post.save()
messages.success(request, 'Post has been deleted.')
else:
messages.error(request, 'Post could not be deleted.')
else:
messages.error(request, 'Invalid request')
return redirect(post.topic.get_topic_forum_url()) |
the-stack_0_21719 | class Node:
def __init__(self, data=None, nex=None):
self.data = data
self.nex = nex
class Queue:
def __init__(self, data=None):
if data is None:
# inserting from tail and removing from head
self.head = None
            self.tail = None
else:
new_node = Node(data)
self.head = new_node
self.tail = new_node
def travel(self):
temp = self.head
while temp is not None:
print(temp.data)
temp = temp.nex
def enqueue(self, data):
new_node = Node(data)
if (self.tail is None) and (self.head is None):
# when queue/linked-list is empty
self.head = new_node
self.tail = new_node
else:
self.tail.nex = new_node
self.tail = new_node
def dequeue(self):
if (self.tail is None) and (self.head is None):
# when queue/linked-list is empty
return
ret = self.head.data
if self.head == self.tail:
self.tail = None
self.head = self.head.nex
return ret
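# Hypothetical usage sketch (not part of the original module), illustrating FIFO order:
if __name__ == "__main__":
    q = Queue()
    for item in (1, 2, 3):
        q.enqueue(item)
    print(q.dequeue())  # 1 -- removed from the head
    print(q.dequeue())  # 2
    q.travel()          # prints the remaining element, 3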
|
the-stack_0_21720 | """
Title: Variational AutoEncoder
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2020/05/03
Last modified: 2020/05/03
Description: Convolutional Variational AutoEncoder (VAE) trained on MNIST digits.
"""
"""
## Setup
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## Create a sampling layer
"""
class Sampling(layers.Layer):
"""Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
def call(self, inputs):
z_mean, z_log_var = inputs
batch = tf.shape(z_mean)[0]
dim = tf.shape(z_mean)[1]
epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
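        # Reparameterization trick: z = mean + std * eps with std = exp(0.5 * log_var),
        # so z stays differentiable w.r.t. z_mean and z_log_var while eps carries the randomness.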
return z_mean + tf.exp(0.5 * z_log_var) * epsilon
"""
## Build the encoder
"""
latent_dim = 2
encoder_inputs = keras.Input(shape=(28, 28, 1))
x = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(encoder_inputs)
x = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(16, activation="relu")(x)
z_mean = layers.Dense(latent_dim, name="z_mean")(x)
z_log_var = layers.Dense(latent_dim, name="z_log_var")(x)
z = Sampling()([z_mean, z_log_var])
encoder = keras.Model(encoder_inputs, [z_mean, z_log_var, z], name="encoder")
encoder.summary()
"""
## Build the decoder
"""
latent_inputs = keras.Input(shape=(latent_dim,))
x = layers.Dense(7 * 7 * 64, activation="relu")(latent_inputs)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(x)
x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
decoder_outputs = layers.Conv2DTranspose(1, 3, activation="sigmoid", padding="same")(x)
decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
decoder.summary()
"""
## Define the VAE as a `Model` with a custom `train_step`
"""
class VAE(keras.Model):
def __init__(self, encoder, decoder, **kwargs):
super(VAE, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
def train_step(self, data):
data = data[0]
with tf.GradientTape() as tape:
            z_mean, z_log_var, z = self.encoder(data)
            reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean(
keras.losses.binary_crossentropy(data, reconstruction)
)
reconstruction_loss *= 28 * 28
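            # Closed-form KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * (1 + log(sigma^2) - mu^2 - sigma^2)
            # per latent dimension; here it is averaged over batch and latent dims rather than summed.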
kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
kl_loss = tf.reduce_mean(kl_loss)
kl_loss *= -0.5
total_loss = reconstruction_loss + kl_loss
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return {
"loss": total_loss,
"reconstruction_loss": reconstruction_loss,
"kl_loss": kl_loss,
}
"""
## Train the VAE
"""
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
mnist_digits = np.concatenate([x_train, x_test], axis=0)
mnist_digits = np.expand_dims(mnist_digits, -1).astype("float32") / 255
vae = VAE(encoder, decoder)
vae.compile(optimizer=keras.optimizers.Adam())
vae.fit(mnist_digits, epochs=30, batch_size=128)
"""
## Display a grid of sampled digits
"""
import matplotlib.pyplot as plt
def plot_latent(encoder, decoder):
    # display an n*n 2D manifold of digits
n = 30
digit_size = 28
scale = 2.0
figsize = 15
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates corresponding to the 2D plot
# of digit classes in the latent space
grid_x = np.linspace(-scale, scale, n)
grid_y = np.linspace(-scale, scale, n)[::-1]
for i, yi in enumerate(grid_y):
for j, xi in enumerate(grid_x):
z_sample = np.array([[xi, yi]])
x_decoded = decoder.predict(z_sample)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[
i * digit_size : (i + 1) * digit_size,
j * digit_size : (j + 1) * digit_size,
] = digit
plt.figure(figsize=(figsize, figsize))
start_range = digit_size // 2
end_range = n * digit_size + start_range + 1
pixel_range = np.arange(start_range, end_range, digit_size)
sample_range_x = np.round(grid_x, 1)
sample_range_y = np.round(grid_y, 1)
plt.xticks(pixel_range, sample_range_x)
plt.yticks(pixel_range, sample_range_y)
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.imshow(figure, cmap="Greys_r")
plt.show()
plot_latent(encoder, decoder)
"""
## Display how the latent space clusters different digit classes
"""
def plot_label_clusters(encoder, decoder, data, labels):
# display a 2D plot of the digit classes in the latent space
z_mean, _, _ = encoder.predict(data)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.show()
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1).astype("float32") / 255
plot_label_clusters(encoder, decoder, x_train, y_train)
|
the-stack_0_21721 | """
Exercise: build a "Guess The Number" game
PART 1: Ask the user to enter a number between 0 and 99
PART 2: Let the user guess the number
Use a function to factor out the common code
"""
MIN = 0
MAX = 99
def solicitar_introducir_numero(invite, minimum=MIN, maximum=MAX):
    # Complete the prompt:
    invite += " between " + str(minimum) + " and " + str(maximum) + " inclusive: "
    while True:
        # Infinite loop:
        # ask the user to enter a number
        datoIntroducido = input(invite)
        try:
            datoIntroducido = int(datoIntroducido)
        except ValueError:
            pass
        else:
            # Check the bounds
            if minimum <= datoIntroducido <= maximum:
                # We got what we wanted, exit the loop
                break
    return datoIntroducido
# PART 1
numero = solicitar_introducir_numero("Enter the number to guess")
minimum = MIN
maximum = MAX
# PART 2
while True:
    # Infinite loop that allows
    # playing several rounds
    intento = solicitar_introducir_numero("Guess the number", minimum, maximum)
    # Check whether the guess is correct
    if intento < numero:
        print("Too small")
        minimum = intento + 1
    elif intento > numero:
        print("Too big")
        maximum = intento - 1
    else:
        print("You win!")
        break
|
the-stack_0_21722 | import sys, os, datetime
from asyncio import get_event_loop, TimeoutError, ensure_future, new_event_loop, set_event_loop
from . import datelock, feed, get, output, verbose, storage
from .token import TokenExpiryException
from . import token
from .storage import db
from .feed import NoMoreTweetsException
import logging as logme
import time
bearer = 'Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejRCOuH5E6I8xnZz4puTs' \
'%3D1Zv7ttfk8LF81IUq16cHjhLTvJu4FA33AGWWjCpTnA'
class Twint:
def __init__(self, config):
logme.debug(__name__ + ':Twint:__init__')
if config.Resume is not None and (config.TwitterSearch or config.Followers or config.Following):
logme.debug(__name__ + ':Twint:__init__:Resume')
self.init = self.get_resume(config.Resume)
else:
self.init = '-1'
self.feed = [-1]
self.count = 0
self.user_agent = ""
self.config = config
self.config.Bearer_token = bearer
# TODO might have to make some adjustments for it to work with multi-treading
# USAGE : to get a new guest token simply do `self.token.refresh()`
self.token = token.Token(config)
self.token.refresh()
self.conn = db.Conn(config.Database)
self.d = datelock.Set(self.config.Until, self.config.Since)
verbose.Elastic(config.Elasticsearch)
if self.config.Store_object:
logme.debug(__name__ + ':Twint:__init__:clean_follow_list')
output._clean_follow_list()
if self.config.Pandas_clean:
logme.debug(__name__ + ':Twint:__init__:pandas_clean')
storage.panda.clean()
def get_resume(self, resumeFile):
if not os.path.exists(resumeFile):
return '-1'
with open(resumeFile, 'r') as rFile:
_init = rFile.readlines()[-1].strip('\n')
return _init
async def Feed(self):
logme.debug(__name__ + ':Twint:Feed')
consecutive_errors_count = 0
while True:
# this will receive a JSON string, parse it into a `dict` and do the required stuff
try:
response = await get.RequestUrl(self.config, self.init, headers=[("User-Agent", self.user_agent)])
except TokenExpiryException as e:
logme.debug(__name__ + 'Twint:Feed:' + str(e))
self.token.refresh()
response = await get.RequestUrl(self.config, self.init, headers=[("User-Agent", self.user_agent)])
if self.config.Debug:
print(response, file=open("twint-last-request.log", "w", encoding="utf-8"))
self.feed = []
try:
if self.config.Favorites:
self.feed, self.init = feed.MobileFav(response)
favorite_err_cnt = 0
if len(self.feed) == 0 and len(self.init) == 0:
while ((len(self.feed) == 0 or len(self.init) == 0) and favorite_err_cnt < 5):
self.user_agent = await get.RandomUserAgent(wa=False)
response = await get.RequestUrl(self.config, self.init,
headers=[("User-Agent", self.user_agent)])
self.feed, self.init = feed.MobileFav(response)
favorite_err_cnt += 1
time.sleep(1)
if favorite_err_cnt == 5:
print("Favorite page could not be fetched")
if not self.count % 40:
time.sleep(5)
elif self.config.Followers or self.config.Following:
self.feed, self.init = feed.Follow(response)
if not self.count % 40:
time.sleep(5)
elif self.config.Profile:
if self.config.Profile_full:
self.feed, self.init = feed.Mobile(response)
else:
self.feed, self.init = feed.profile(response)
elif self.config.TwitterSearch:
try:
self.feed, self.init = feed.search_v2(response)
except NoMoreTweetsException as e:
logme.debug(__name__ + ':Twint:Feed:' + str(e))
                        print(e, 'is it though? Twitter sometimes lies.')
break
except TimeoutError as e:
if self.config.Proxy_host.lower() == "tor":
print("[?] Timed out, changing Tor identity...")
if self.config.Tor_control_password is None:
logme.critical(__name__ + ':Twint:Feed:tor-password')
sys.stderr.write("Error: config.Tor_control_password must be set for proxy autorotation!\r\n")
sys.stderr.write(
"Info: What is it? See https://stem.torproject.org/faq.html#can-i-interact-with-tors-controller-interface-directly\r\n")
break
else:
get.ForceNewTorIdentity(self.config)
continue
else:
logme.critical(__name__ + ':Twint:Feed:' + str(e))
print(str(e))
break
except Exception as e:
if self.config.Profile or self.config.Favorites:
print("[!] Twitter does not return more data, scrape stops here.")
break
logme.critical(__name__ + ':Twint:Feed:noData' + str(e))
# Sometimes Twitter says there is no data. But it's a lie.
# raise
consecutive_errors_count += 1
if consecutive_errors_count < self.config.Retries_count:
# skip to the next iteration if wait time does not satisfy limit constraints
delay = round(consecutive_errors_count ** self.config.Backoff_exponent, 1)
# if the delay is less than users set min wait time then replace delay
if self.config.Min_wait_time > delay:
delay = self.config.Min_wait_time
sys.stderr.write('sleeping for {} secs\n'.format(delay))
time.sleep(delay)
self.user_agent = await get.RandomUserAgent(wa=True)
continue
logme.critical(__name__ + ':Twint:Feed:Tweets_known_error:' + str(e))
sys.stderr.write(str(e) + " [x] run.Feed")
sys.stderr.write(
"[!] if get this error but you know for sure that more tweets exist, please open an issue and we will investigate it!")
break
if self.config.Resume:
print(self.init, file=open(self.config.Resume, "a", encoding="utf-8"))
async def follow(self):
await self.Feed()
if self.config.User_full:
logme.debug(__name__ + ':Twint:follow:userFull')
self.count += await get.Multi(self.feed, self.config, self.conn)
else:
logme.debug(__name__ + ':Twint:follow:notUserFull')
for user in self.feed:
self.count += 1
username = user.find("a")["name"]
await output.Username(username, self.config, self.conn)
async def favorite(self):
logme.debug(__name__ + ':Twint:favorite')
await self.Feed()
favorited_tweets_list = []
for tweet in self.feed:
tweet_dict = {}
self.count += 1
try:
tweet_dict['data-item-id'] = tweet.find("div", {"class": "tweet-text"})['data-id']
t_url = tweet.find("span", {"class": "metadata"}).find("a")["href"]
tweet_dict['data-conversation-id'] = t_url.split('?')[0].split('/')[-1]
tweet_dict['username'] = tweet.find("div", {"class": "username"}).text.replace('\n', '').replace(' ',
'')
tweet_dict['tweet'] = tweet.find("div", {"class": "tweet-text"}).find("div", {"class": "dir-ltr"}).text
date_str = tweet.find("td", {"class": "timestamp"}).find("a").text
# test_dates = ["1m", "2h", "Jun 21, 2019", "Mar 12", "28 Jun 19"]
# date_str = test_dates[3]
if len(date_str) <= 3 and (date_str[-1] == "m" or date_str[-1] == "h"): # 25m 1h
dateu = str(datetime.date.today())
tweet_dict['date'] = dateu
elif ',' in date_str: # Aug 21, 2019
sp = date_str.replace(',', '').split(' ')
date_str_formatted = sp[1] + ' ' + sp[0] + ' ' + sp[2]
dateu = datetime.datetime.strptime(date_str_formatted, "%d %b %Y").strftime("%Y-%m-%d")
tweet_dict['date'] = dateu
elif len(date_str.split(' ')) == 3: # 28 Jun 19
sp = date_str.split(' ')
if len(sp[2]) == 2:
sp[2] = '20' + sp[2]
date_str_formatted = sp[0] + ' ' + sp[1] + ' ' + sp[2]
dateu = datetime.datetime.strptime(date_str_formatted, "%d %b %Y").strftime("%Y-%m-%d")
tweet_dict['date'] = dateu
else: # Aug 21
sp = date_str.split(' ')
date_str_formatted = sp[1] + ' ' + sp[0] + ' ' + str(datetime.date.today().year)
dateu = datetime.datetime.strptime(date_str_formatted, "%d %b %Y").strftime("%Y-%m-%d")
tweet_dict['date'] = dateu
favorited_tweets_list.append(tweet_dict)
except Exception as e:
logme.critical(__name__ + ':Twint:favorite:favorite_field_lack')
print("shit: ", date_str, " ", str(e))
try:
self.config.favorited_tweets_list += favorited_tweets_list
except AttributeError:
self.config.favorited_tweets_list = favorited_tweets_list
async def profile(self):
await self.Feed()
if self.config.Profile_full:
logme.debug(__name__ + ':Twint:profileFull')
self.count += await get.Multi(self.feed, self.config, self.conn)
else:
logme.debug(__name__ + ':Twint:notProfileFull')
for tweet in self.feed:
self.count += 1
await output.Tweets(tweet, self.config, self.conn)
async def tweets(self):
await self.Feed()
# TODO : need to take care of this later
if self.config.Location:
logme.debug(__name__ + ':Twint:tweets:location')
self.count += await get.Multi(self.feed, self.config, self.conn)
else:
logme.debug(__name__ + ':Twint:tweets:notLocation')
for tweet in self.feed:
self.count += 1
await output.Tweets(tweet, self.config, self.conn)
async def main(self, callback=None):
task = ensure_future(self.run()) # Might be changed to create_task in 3.7+.
if callback:
task.add_done_callback(callback)
await task
async def run(self):
if self.config.TwitterSearch:
self.user_agent = await get.RandomUserAgent(wa=True)
else:
self.user_agent = await get.RandomUserAgent()
if self.config.User_id is not None and self.config.Username is None:
logme.debug(__name__ + ':Twint:main:user_id')
self.config.Username = await get.Username(self.config.User_id, self.config.Bearer_token,
self.config.Guest_token)
if self.config.Username is not None and self.config.User_id is None:
logme.debug(__name__ + ':Twint:main:username')
self.config.User_id = await get.User(self.config.Username, self.config, self.conn,
self.config.Bearer_token,
self.config.Guest_token, True)
if self.config.User_id is None:
raise ValueError("Cannot find twitter account with name = " + self.config.Username)
# TODO : will need to modify it to work with the new endpoints
if self.config.TwitterSearch and self.config.Since and self.config.Until:
logme.debug(__name__ + ':Twint:main:search+since+until')
while self.d._since < self.d._until:
self.config.Since = str(self.d._since)
self.config.Until = str(self.d._until)
if len(self.feed) > 0:
await self.tweets()
else:
logme.debug(__name__ + ':Twint:main:gettingNewTweets')
break
if get.Limit(self.config.Limit, self.count):
break
else:
logme.debug(__name__ + ':Twint:main:not-search+since+until')
while True:
if len(self.feed) > 0:
if self.config.Followers or self.config.Following:
logme.debug(__name__ + ':Twint:main:follow')
await self.follow()
elif self.config.Favorites:
logme.debug(__name__ + ':Twint:main:favorites')
await self.favorite()
elif self.config.Profile:
logme.debug(__name__ + ':Twint:main:profile')
await self.profile()
elif self.config.TwitterSearch:
logme.debug(__name__ + ':Twint:main:twitter-search')
await self.tweets()
else:
logme.debug(__name__ + ':Twint:main:no-more-tweets')
break
# logging.info("[<] " + str(datetime.now()) + ':: run+Twint+main+CallingGetLimit2')
if get.Limit(self.config.Limit, self.count):
logme.debug(__name__ + ':Twint:main:reachedLimit')
break
if self.config.Count:
verbose.Count(self.count, self.config)
def run(config, callback=None):
logme.debug(__name__ + ':run')
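    # get_event_loop() raises RuntimeError ("no current event loop") when called from a
    # thread that has no loop yet, so a fresh loop is created and installed in that case.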
try:
get_event_loop()
except RuntimeError as e:
if "no current event loop" in str(e):
set_event_loop(new_event_loop())
else:
logme.exception(__name__ + ':run:Unexpected exception while handling an expected RuntimeError.')
raise
except Exception as e:
logme.exception(
__name__ + ':run:Unexpected exception occurred while attempting to get or create a new event loop.')
raise
get_event_loop().run_until_complete(Twint(config).main(callback))
def Favorites(config):
logme.debug(__name__ + ':Favorites')
config.Favorites = True
config.Following = False
config.Followers = False
config.Profile = False
config.Profile_full = False
config.TwitterSearch = False
run(config)
if config.Pandas_au:
storage.panda._autoget("tweet")
def Followers(config):
logme.debug(__name__ + ':Followers')
config.Followers = True
config.Following = False
config.Profile = False
config.Profile_full = False
config.Favorites = False
config.TwitterSearch = False
run(config)
if config.Pandas_au:
storage.panda._autoget("followers")
if config.User_full:
storage.panda._autoget("user")
if config.Pandas_clean and not config.Store_object:
# storage.panda.clean()
output._clean_follow_list()
def Following(config):
logme.debug(__name__ + ':Following')
config.Following = True
config.Followers = False
config.Profile = False
config.Profile_full = False
config.Favorites = False
config.TwitterSearch = False
run(config)
if config.Pandas_au:
storage.panda._autoget("following")
if config.User_full:
storage.panda._autoget("user")
if config.Pandas_clean and not config.Store_object:
# storage.panda.clean()
output._clean_follow_list()
def Lookup(config):
logme.debug(__name__ + ':Lookup')
try:
get_event_loop()
except RuntimeError as e:
if "no current event loop" in str(e):
set_event_loop(new_event_loop())
else:
logme.exception(__name__ + ':Lookup:Unexpected exception while handling an expected RuntimeError.')
raise
except Exception as e:
logme.exception(
            __name__ + ':Lookup:Unexpected exception occurred while attempting to get or create a new event loop.')
raise
try:
if config.User_id is not None:
logme.debug(__name__ + ':Twint:Lookup:user_id')
config.Username = get_event_loop().run_until_complete(get.Username(config.User_id))
url = f"https://mobile.twitter.com/{config.Username}?prefetchTimestamp=" + str(int(time.time() * 1000))
get_event_loop().run_until_complete(get.User(url, config, db.Conn(config.Database)))
if config.Pandas_au:
storage.panda._autoget("user")
except RuntimeError as e:
if "no current event loop" in str(e):
logme.exception(__name__ + ':Lookup:Previous attempt to to create an event loop failed.')
raise
except Exception as e:
        logme.exception(__name__ + ':Lookup:Unexpected exception occurred.')
raise
def Profile(config):
logme.debug(__name__ + ':Profile')
config.Profile = True
config.Favorites = False
config.Following = False
config.Followers = False
config.TwitterSearch = False
run(config)
if config.Pandas_au:
storage.panda._autoget("tweet")
def Search(config, callback=None):
logme.debug(__name__ + ':Search')
config.TwitterSearch = True
config.Favorites = False
config.Following = False
config.Followers = False
config.Profile = False
config.Profile_full = False
run(config, callback)
if config.Pandas_au:
storage.panda._autoget("tweet")
|
the-stack_0_21724 | #! /usr/bin/env python
# -*- coding: utf-8
'''
Python implementation of Krippendorff's alpha -- inter-rater reliability
(c)2011-17 Thomas Grill (http://grrrr.org)
Python version >= 2.4 required
'''
from __future__ import print_function
try:
import numpy as np
except ImportError:
np = None
def nominal_metric(a, b):
return a != b
def interval_metric(a, b):
return (a-b)**2
def ratio_metric(a, b):
return ((a-b)/(a+b))**2
def krippendorff_alpha(data, metric=interval_metric, force_vecmath=False, convert_items=float, missing_items=None):
'''
Calculate Krippendorff's alpha (inter-rater reliability):
data is in the format
[
{unit1:value, unit2:value, ...}, # coder 1
{unit1:value, unit3:value, ...}, # coder 2
... # more coders
]
or
it is a sequence of (masked) sequences (list, numpy.array, numpy.ma.array, e.g.) with rows corresponding to coders and columns to items
metric: function calculating the pairwise distance
force_vecmath: force vector math for custom metrics (numpy required)
convert_items: function for the type conversion of items (default: float)
missing_items: indicator for missing items (default: None)
'''
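    # alpha = 1 - Do/De, where Do is the observed disagreement over pairable values
    # and De is the disagreement expected by chance.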
# number of coders
m = len(data)
# set of constants identifying missing values
if missing_items is None:
maskitems = []
else:
maskitems = list(missing_items)
if np is not None:
maskitems.append(np.ma.masked_singleton)
# convert input data to a dict of items
units = {}
for d in data:
try:
# try if d behaves as a dict
diter = d.items()
except AttributeError:
# sequence assumed for d
diter = enumerate(d)
for it, g in diter:
if g not in maskitems:
try:
its = units[it]
except KeyError:
its = []
units[it] = its
its.append(convert_items(g))
units = dict((it, d) for it, d in units.items() if len(d) > 1) # units with pairable values
n = sum(len(pv) for pv in units.values()) # number of pairable values
if n == 0:
raise ValueError("No items to compare.")
np_metric = (np is not None) and ((metric in (interval_metric, nominal_metric, ratio_metric)) or force_vecmath)
Do = 0.
for grades in units.values():
if np_metric:
gr = np.asarray(grades)
Du = sum(np.sum(metric(gr, gri)) for gri in gr)
else:
Du = sum(metric(gi, gj) for gi in grades for gj in grades)
Do += Du/float(len(grades)-1)
Do /= float(n)
if Do == 0:
return 1.
De = 0.
for g1 in units.values():
if np_metric:
d1 = np.asarray(g1)
for g2 in units.values():
De += sum(np.sum(metric(d1, gj)) for gj in g2)
else:
for g2 in units.values():
De += sum(metric(gi, gj) for gi in g1 for gj in g2)
De /= float(n*(n-1))
return 1.-Do/De if (Do and De) else 1.
if __name__ == '__main__':
print("Example from http://en.wikipedia.org/wiki/Krippendorff's_Alpha")
data = (
"* * * * * 3 4 1 2 1 1 3 3 * 3", # coder A
"1 * 2 1 3 3 4 3 * * * * * * *", # coder B
"* * 2 1 3 4 4 * 2 1 1 3 3 * 4", # coder C
)
missing = '*' # indicator for missing values
array = [d.split() for d in data] # convert to 2D list of string items
print("nominal metric: %.3f" % krippendorff_alpha(array, nominal_metric, missing_items=missing))
print("interval metric: %.3f" % krippendorff_alpha(array, interval_metric, missing_items=missing)) |
the-stack_0_21727 | import glob
import os
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.multiprocessing as mp
from transformers import BertTokenizer
from config import load_config
from dataset import get_loader
from model.net import Transformer
from trainer import Trainer
from utils import ResultWriter, fix_seed
def main(rank, hparams, ngpus_per_node: int):
fix_seed(hparams.seed)
resultwriter = ResultWriter(hparams.result_path)
if hparams.distributed:
hparams.rank = hparams.rank * ngpus_per_node + rank
print(f"Use GPU {hparams.rank} for training")
dist.init_process_group(
backend=hparams.dist_backend,
init_method=hparams.dist_url,
world_size=hparams.world_size,
rank=hparams.rank,
)
# get shared tokenizer and vocab
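    # Only rank 0 downloads the pretrained tokenizer; the other ranks wait at the first
    # barrier and then load it from the local cache, avoiding concurrent downloads.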
if hparams.distributed:
if rank != 0:
dist.barrier()
tok = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
if rank == 0:
dist.barrier()
else:
tok = BertTokenizer.from_pretrained("bert-base-multilingual-cased")
# get dataloaders
loaders = [
get_loader(
tok=tok,
batch_size=hparams.batch_size,
root_path=hparams.root_path,
workers=hparams.workers,
max_len=hparams.max_len,
mode=mode,
distributed=hparams.distributed,
)
for mode in ["train", "valid"]
]
# get model and initialize
model = Transformer(
vocab_size=len(tok.vocab),
num_enc_block=hparams.n_enc_block,
num_dec_block=hparams.n_dec_block,
num_head=hparams.num_head,
hidden=hparams.hidden,
fc_hidden=hparams.fc_hidden,
dropout=hparams.dropout,
)
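    # Xavier-uniform init for parameters with more than one dimension (weight matrices);
    # 1-D parameters such as biases keep their default initialization.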
for param in model.parameters():
if param.dim() > 1:
nn.init.xavier_uniform_(param)
# training phase
trainer = Trainer(hparams, loaders, model, resultwriter, pad_idx=tok.pad_token_id)
best_result = trainer.fit()
# testing phase
if rank in [-1, 0]:
version = best_result["version"]
state_dict = torch.load(
glob.glob(
os.path.join(hparams.ckpt_path, f"version-{version}/best_model_*.pt")
)[0],
)
test_loader = get_loader(
tok=tok,
batch_size=hparams.batch_size,
root_path=hparams.root_path,
workers=hparams.workers,
max_len=hparams.max_len,
mode="test",
)
test_result = trainer.test(test_loader, state_dict)
# save result
best_result.update(test_result)
resultwriter.update(hparams, **best_result)
if __name__ == "__main__":
hparams = load_config()
ngpus_per_node = torch.cuda.device_count()
if hparams.distributed:
hparams.rank = 0
hparams.world_size = ngpus_per_node * hparams.world_size
mp.spawn(main, nprocs=ngpus_per_node, args=(hparams, ngpus_per_node))
else:
main(hparams.rank, hparams, ngpus_per_node)
|
the-stack_0_21728 | import os
def disk_usage(path):
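    """Recursively compute and print the cumulative size in bytes of path and everything below it."""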
total = os.path.getsize(path)
if os.path.isdir(path):
for filename in os.listdir(path):
childpath = os.path.join(path, filename)
total += disk_usage(childpath)
print('{0:<7}'.format(total), path)
print('{0}'.format(total))
if __name__ == '__main__':
disk_usage(path="YOUR PATH") |
the-stack_0_21730 | import math
import numpy as np
import subprocess
import numbers
import importlib
import sys
import re
import traceback
import multiprocessing as mp
from itertools import chain, combinations
import numba
from numba.core import config, cpu
from numba import prange, njit
from numba.core.compiler import compile_isolated, Flags
from numba.tests.support import TestCase, tag, override_env_config
import unittest
needs_svml = unittest.skipUnless(config.USING_SVML,
"SVML tests need SVML to be present")
# a map of float64 vector lengths with corresponding CPU architecture
vlen2cpu = {2: 'nehalem', 4: 'haswell', 8: 'skylake-avx512'}
# force LLVM to use AVX512 registers for vectorization
# https://reviews.llvm.org/D67259
vlen2cpu_features = {2: '', 4: '', 8: '-prefer-256-bit'}
# K: SVML functions, V: Python functions which are expected to be SIMD-vectorized
# using SVML; the explicit references to Python functions here are mostly for the
# sake of instant import checks.
# TODO: [] and the comments below mark unused/untested SVML functions; each should
#       either be enabled or replaced with an explanation of why the function
#       cannot be used in Numba
# TODO: this test does not support functions with more than 1 argument yet
# The test logic should be modified if an SVML function is used under a different
# name or module than in Python
svml_funcs = {
"sin": [np.sin, math.sin],
"cos": [np.cos, math.cos],
"pow": [], # pow, math.pow],
"exp": [np.exp, math.exp],
"log": [np.log, math.log],
"acos": [math.acos],
"acosh": [math.acosh],
"asin": [math.asin],
"asinh": [math.asinh],
"atan2": [], # math.atan2],
"atan": [math.atan],
"atanh": [math.atanh],
"cbrt": [], # np.cbrt],
"cdfnorm": [],
"cdfnorminv": [],
"ceil": [], # np.ceil, math.ceil],
"cosd": [],
"cosh": [np.cosh, math.cosh],
"erf": [math.erf], # np.erf is available in Intel Distribution
"erfc": [math.erfc],
"erfcinv": [],
"erfinv": [],
"exp10": [],
"exp2": [], # np.exp2],
"expm1": [np.expm1, math.expm1],
"floor": [], # np.floor, math.floor],
"fmod": [], # np.fmod, math.fmod],
"hypot": [], # np.hypot, math.hypot],
"invsqrt": [], # available in Intel Distribution
"log10": [np.log10, math.log10],
"log1p": [np.log1p, math.log1p],
"log2": [], # np.log2],
"logb": [],
"nearbyint": [],
"rint": [], # np.rint],
"round": [], # round],
"sind": [],
"sinh": [np.sinh, math.sinh],
"sqrt": [np.sqrt, math.sqrt],
"tan": [np.tan, math.tan],
"tanh": [np.tanh, math.tanh],
"trunc": [], # np.trunc, math.trunc],
}
# TODO: these functions are not vectorizable with complex types
complex_funcs_exclude = ["sqrt", "tan", "log10", "expm1", "log1p", "tanh", "log"]
# remove untested entries
svml_funcs = {k: v for k, v in svml_funcs.items() if len(v) > 0}
# lists for functions which belong to numpy and math modules correpondently
numpy_funcs = [f for f, v in svml_funcs.items() if "<ufunc" in \
[str(p).split(' ')[0] for p in v]]
other_funcs = [f for f, v in svml_funcs.items() if "<built-in" in \
[str(p).split(' ')[0] for p in v]]
def func_patterns(func, args, res, dtype, mode, vlen, fastmath, pad=' '*8):
"""
For a given function and its usage modes,
returns python code and assembly patterns it should and should not generate
"""
# generate a function call according to the usecase
if mode == "scalar":
arg_list = ','.join([a+'[0]' for a in args])
body = '%s%s[0] += math.%s(%s)\n' % (pad, res, func, arg_list)
elif mode == "numpy":
body = '%s%s += np.%s(%s)' % (pad, res, func, ','.join(args))
body += '.astype(np.%s)\n' % dtype if dtype.startswith('int') else '\n'
else:
assert mode == "range" or mode == "prange"
arg_list = ','.join([a+'[i]' for a in args])
body = '{pad}for i in {mode}({res}.size):\n' \
'{pad}{pad}{res}[i] += math.{func}({arg_list})\n'. \
format(**locals())
# TODO: refactor so this for-loop goes into umbrella function,
# 'mode' can be 'numpy', '0', 'i' instead
# TODO: it will enable mixed usecases like prange + numpy
# type specialization
is_f32 = dtype == 'float32' or dtype == 'complex64'
f = func+'f' if is_f32 else func
v = vlen*2 if is_f32 else vlen
# general expectations
prec_suff = '' if fastmath else '_ha'
scalar_func = '$_'+f if config.IS_OSX else '$'+f
svml_func = '__svml_%s%d%s,' % (f, v, prec_suff)
if mode == "scalar":
contains = [scalar_func]
avoids = ['__svml_', svml_func]
else: # will vectorize
contains = [svml_func]
avoids = [] # [scalar_func] - TODO: if possible, force LLVM to prevent
# generating the failsafe scalar paths
if vlen != 8 and (is_f32 or dtype == 'int32'): # Issue #3016
avoids += ['%zmm', '__svml_%s%d%s,' % (f, v*2, prec_suff)]
# special handling
if func == 'sqrt':
if mode == "scalar":
contains = ['sqrts']
avoids = [scalar_func, svml_func] # LLVM uses CPU instruction
elif vlen == 8:
contains = ['vsqrtp']
avoids = [scalar_func, svml_func] # LLVM uses CPU instruction
# else expect use of SVML for older architectures
return body, contains, avoids
def usecase_name(dtype, mode, vlen, name):
""" Returns pretty name for given set of modes """
return f"{dtype}_{mode}{vlen}_{name}"
def combo_svml_usecase(dtype, mode, vlen, fastmath, name):
""" Combine multiple function calls under single umbrella usecase """
name = usecase_name(dtype, mode, vlen, name)
body = """def {name}(n):
x = np.empty(n*8, dtype=np.{dtype})
ret = np.empty_like(x)\n""".format(**locals())
funcs = set(numpy_funcs if mode == "numpy" else other_funcs)
if dtype.startswith('complex'):
funcs = funcs.difference(complex_funcs_exclude)
contains = set()
avoids = set()
# fill body and expectation patterns
for f in funcs:
b, c, a = func_patterns(f, ['x'], 'ret', dtype, mode, vlen, fastmath)
avoids.update(a)
body += b
contains.update(c)
body += " "*8 + "return ret"
# now compile and return it along with its body in __doc__ and patterns
ldict = {}
exec(body, globals(), ldict)
ldict[name].__doc__ = body
return ldict[name], contains, avoids
@needs_svml
class TestSVMLGeneration(TestCase):
""" Tests all SVML-generating functions produce desired calls """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
# RE for a generic symbol reference and for each particular SVML function
    asm_filter = re.compile('|'.join([r'\$[a-z_]\w+,'] + list(svml_funcs)))
@classmethod
def mp_runner(cls, testname, outqueue):
method = getattr(cls, testname)
try:
ok, msg = method()
except Exception:
msg = traceback.format_exc()
ok = False
outqueue.put({'status': ok, 'msg': msg})
@classmethod
def _inject_test(cls, dtype, mode, vlen, flags):
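        """Dynamically build a run_* check (compiles the usecase and inspects the generated
        asm for SVML calls) and a test_* wrapper that runs it in a spawned subprocess,
        then attach both to the class."""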
# unsupported combinations
if dtype.startswith('complex') and mode != 'numpy':
return
# TODO: address skipped tests below
skipped = dtype.startswith('int') and vlen == 2
sig = (numba.int64,)
# unit test body template
@staticmethod
def run_template():
fn, contains, avoids = combo_svml_usecase(dtype, mode, vlen,
flags['fastmath'],
flags['name'])
# look for specific patters in the asm for a given target
with override_env_config('NUMBA_CPU_NAME', vlen2cpu[vlen]), \
override_env_config('NUMBA_CPU_FEATURES', vlen2cpu_features[vlen]):
# recompile for overridden CPU
try:
jitted_fn = njit(sig, fastmath=flags['fastmath'],
error_model=flags['error_model'],)(fn)
except:
raise Exception("raised while compiling "+fn.__doc__)
asm = jitted_fn.inspect_asm(sig)
missed = [pattern for pattern in contains if not pattern in asm]
found = [pattern for pattern in avoids if pattern in asm]
ok = not missed and not found
detail = '\n'.join(
[line for line in asm.split('\n')
if cls.asm_filter.search(line) and not '"' in line])
msg = (
f"While expecting {missed} and not {found},\n"
f"it contains:\n{detail}\n"
f"when compiling {fn.__doc__}"
)
return ok, msg
# inject it into the class
postfix = usecase_name(dtype, mode, vlen, flags['name'])
testname = f"run_{postfix}"
setattr(cls, testname, run_template)
@unittest.skipUnless(not skipped, "Not implemented")
def test_runner(self):
ctx = mp.get_context("spawn")
q = ctx.Queue()
p = ctx.Process(target=type(self).mp_runner, args=[testname, q])
p.start()
# timeout to avoid hanging and long enough to avoid bailing too early
term_or_timeout = p.join(timeout=10)
exitcode = p.exitcode
if term_or_timeout is None:
if exitcode is None:
self.fail("Process timed out.")
elif exitcode < 0:
self.fail(f"Process terminated with signal {-exitcode}.")
self.assertEqual(exitcode, 0, msg="process ended unexpectedly")
out = q.get()
status = out['status']
msg = out['msg']
self.assertTrue(status, msg=msg)
setattr(cls, f"test_{postfix}", test_runner)
@classmethod
def autogenerate(cls):
flag_list = [{'fastmath':False, 'error_model':'numpy',
'name':'usecase'},
{'fastmath':True, 'error_model':'numpy',
'name':'fastmath_usecase'},]
# main loop covering all the modes and use-cases
for dtype in ('complex64', 'float64', 'float32', 'int32', ):
for vlen in vlen2cpu:
for flags in flag_list:
for mode in "scalar", "range", "prange", "numpy":
cls._inject_test(dtype, mode, vlen, dict(flags))
# mark important
for n in ( "test_int32_range4_usecase", # issue #3016
):
setattr(cls, n, tag("important")(getattr(cls, n)))
TestSVMLGeneration.autogenerate()
def math_sin_scalar(x):
return math.sin(x)
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
@needs_svml
class TestSVML(TestCase):
""" Tests SVML behaves as expected """
# env mutating, must not run in parallel
_numba_parallel_test_ = False
def __init__(self, *args):
self.flags = Flags()
self.flags.nrt = True
# flags for njit(fastmath=True)
self.fastflags = Flags()
self.fastflags.nrt = True
self.fastflags.fastmath = cpu.FastMathOptions(True)
super(TestSVML, self).__init__(*args)
def compile(self, func, *args, **kwargs):
assert not kwargs
sig = tuple([numba.typeof(x) for x in args])
std = compile_isolated(func, sig, flags=self.flags)
fast = compile_isolated(func, sig, flags=self.fastflags)
return std, fast
def copy_args(self, *args):
if not args:
return tuple()
new_args = []
for x in args:
if isinstance(x, np.ndarray):
new_args.append(x.copy('k'))
elif isinstance(x, np.number):
new_args.append(x.copy())
elif isinstance(x, numbers.Number):
new_args.append(x)
else:
raise ValueError('Unsupported argument type encountered')
return tuple(new_args)
def check(self, pyfunc, *args, **kwargs):
jitstd, jitfast = self.compile(pyfunc, *args)
std_pattern = kwargs.pop('std_pattern', None)
fast_pattern = kwargs.pop('fast_pattern', None)
cpu_name = kwargs.pop('cpu_name', 'skylake-avx512')
# force LLVM to use AVX512 registers for vectorization
# https://reviews.llvm.org/D67259
cpu_features = kwargs.pop('cpu_features', '-prefer-256-bit')
# python result
py_expected = pyfunc(*self.copy_args(*args))
# jit result
jitstd_result = jitstd.entry_point(*self.copy_args(*args))
# fastmath result
jitfast_result = jitfast.entry_point(*self.copy_args(*args))
# assert numerical equality
np.testing.assert_almost_equal(jitstd_result, py_expected, **kwargs)
np.testing.assert_almost_equal(jitfast_result, py_expected, **kwargs)
# look for specific patters in the asm for a given target
with override_env_config('NUMBA_CPU_NAME', cpu_name), \
override_env_config('NUMBA_CPU_FEATURES', cpu_features):
# recompile for overridden CPU
jitstd, jitfast = self.compile(pyfunc, *args)
if std_pattern:
self.check_svml_presence(jitstd, std_pattern)
if fast_pattern:
self.check_svml_presence(jitfast, fast_pattern)
def check_svml_presence(self, func, pattern):
asm = func.library.get_asm_str()
self.assertIn(pattern, asm)
def test_scalar_context(self):
# SVML will not be used.
pat = '$_sin' if config.IS_OSX else '$sin'
self.check(math_sin_scalar, 7., std_pattern=pat)
self.check(math_sin_scalar, 7., fast_pattern=pat)
def test_svml(self):
# loops both with and without fastmath should use SVML.
# The high accuracy routines are dropped if `fastmath` is set
std = "__svml_sin8_ha,"
fast = "__svml_sin8," # No `_ha`!
self.check(math_sin_loop, 10, std_pattern=std, fast_pattern=fast)
def test_svml_disabled(self):
code = """if 1:
import os
import numpy as np
import math
def math_sin_loop(n):
ret = np.empty(n, dtype=np.float64)
for x in range(n):
ret[x] = math.sin(np.float64(x))
return ret
def check_no_svml():
try:
# ban the use of SVML
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '1'
# delay numba imports to account for env change as
# numba.__init__ picks up SVML and it is too late by
# then to override using `numba.config`
import numba
from numba import config
from numba.core import cpu
from numba.tests.support import override_env_config
from numba.core.compiler import compile_isolated, Flags
# compile for overridden CPU, with and without fastmath
with override_env_config('NUMBA_CPU_NAME', 'skylake-avx512'), \
override_env_config('NUMBA_CPU_FEATURES', ''):
sig = (numba.int32,)
f = Flags()
f.nrt = True
std = compile_isolated(math_sin_loop, sig, flags=f)
f.fastmath = cpu.FastMathOptions(True)
fast = compile_isolated(math_sin_loop, sig, flags=f)
fns = std, fast
# assert no SVML call is present in the asm
for fn in fns:
asm = fn.library.get_asm_str()
assert '__svml_sin' not in asm
finally:
# not really needed as process is separate
os.environ['NUMBA_DISABLE_INTEL_SVML'] = '0'
config.reload_config()
check_no_svml()
"""
popen = subprocess.Popen(
[sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError(
"process failed with code %s: stderr follows\n%s\n" %
(popen.returncode, err.decode()))
def test_svml_working_in_non_isolated_context(self):
@njit(fastmath={'fast'}, error_model="numpy")
def impl(n):
x = np.empty(n * 8, dtype=np.float64)
ret = np.empty_like(x)
for i in range(ret.size):
ret[i] += math.cosh(x[i])
return ret
impl(1)
self.assertTrue('intel_svmlcc' in impl.inspect_llvm(impl.signatures[0]))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_21731 | #!/usr/bin/python3
# ----------------------
# Automated MISP lookup
# from Honas results.
# ----------------------
import requests
import argparse
import json
# Create file 'keys.py' according to PyMISP structure. We can leave out the verify statement.
from keys import misp_key,misp_url
# Parse input arguments.
parser = argparse.ArgumentParser(description='Automated MISP IoC lookup tool for Honas')
parser.add_argument('-s', action='store', dest='search_value', help='The value to search the MISP for', required=True)
parser.add_argument('-v', action='store_true', dest='verbose', help='Verbose output')
results = parser.parse_args()
searchurl = misp_url + 'attributes/restSearch'
if results.verbose:
print("Performing search job in MISP at " + searchurl + " for " + results.search_value)
# Create MISP query for this hit.
searchheaders = { 'accept' : 'application/json', 'content-type' : 'application/json', 'Authorization' : misp_key }
payload = '{ "value" : "' + results.search_value + '" }'
r = requests.post(searchurl, data=payload, headers=searchheaders)
# Load the JSON result from the search job.
jsonresult = json.loads(r.text)
attributes = jsonresult["response"]["Attribute"]
# Threat level dictionary.
threatlevels = { 1 : "High", 2 : "Medium", 3 : "Low", 4 : "Undefined" }
# Loop through attributes.
jsonoutput = {}
for att in attributes:
# Only use 'domain' attributes.
if att["type"] == "domain":
eventid = att["event_id"]
# We have the event ID, now lets look up the event and take some information about it.
event = requests.get(misp_url + "/events/" + str(eventid), headers=searchheaders)
jsonevent = json.loads(event.text)
eventresp = jsonevent["Event"]
# Print some information about the event.
jsonoutput['search_value'] = results.search_value
jsonoutput['event_id'] = eventid
jsonoutput['info'] = eventresp["info"]
jsonoutput['threat_level'] = threatlevels[int(eventresp["threat_level_id"])]
# Get tags to find TLP level.
tags = eventresp["Tag"]
for tag in tags:
if tag["name"].lower().find("tlp") != -1:
# Print tag, because it is the TLP level of this IoC.
tlplevel = tag["name"].split(':')[1]
jsonoutput['tlp'] = tlplevel
# Print out JSON output.
print(json.dumps(jsonoutput, indent=4, ensure_ascii=False))
|
the-stack_0_21733 | import subprocess
import shlex
import os
import shutil
import tarfile
import argparse
parser = argparse.ArgumentParser(description="Troubleshooting EFS CSI Driver")
parser.add_argument("--driver-pod-name", required=True, help="The EFS CSI driver pod name")
args = parser.parse_args()
driver_pod_name = args.driver_pod_name
results_dir_path = 'results'
# Clean up existing results folder
shutil.rmtree(results_dir_path, ignore_errors=True)
os.makedirs(results_dir_path)
def execute(command, file, shell=False):
print(command + "\n", file=file, flush=True)
if shell:
        subprocess.run(command, shell=True, text=True, stderr=subprocess.STDOUT, stdout=file)
else:
        subprocess.run(shlex.split(command), text=True, stderr=subprocess.STDOUT, stdout=file)
print("\n", file=file, flush=True)
with open(results_dir_path + '/driver_info', 'w') as f:
describe_driver_pod = f'kubectl describe po {driver_pod_name} -n kube-system'
execute(command=describe_driver_pod, file=f)
get_driver_pod = f'kubectl get po {driver_pod_name} -n kube-system -o yaml'
execute(command=get_driver_pod, file=f)
with open(results_dir_path + '/driver_logs', 'w') as f:
    logs_command = f'kubectl logs {driver_pod_name} -n kube-system efs-plugin'
    execute(command=logs_command, file=f)
def collect_driver_files_under_dir(dir_name, file):
    find_command = f'kubectl exec {driver_pod_name} -n kube-system -c efs-plugin -- find {dir_name} ' + \
                   r'-type f -exec echo {} \; -exec cat {} \; -exec echo \;'
    execute(command=find_command, file=file)
with open(results_dir_path + '/efs_utils_logs', 'w') as f:
collect_driver_files_under_dir(dir_name='/var/log/amazon/efs', file=f)
with open(results_dir_path + '/efs_utils_state_dir', 'w') as f:
collect_driver_files_under_dir(dir_name='/var/run/efs', file=f)
with open(results_dir_path + '/mounts', 'w') as f:
mounts = f'kubectl exec {driver_pod_name} -n kube-system -c efs-plugin -- mount |grep nfs '
execute(command=mounts, file=f, shell=True)
with tarfile.open("results.tgz", "w:gz") as tar:
tar.add(results_dir_path, arcname=os.path.basename(results_dir_path))
|
the-stack_0_21736 | """Request user or proid keytabs, directly contacting krb5keytab server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import logging
import json
import errno
import io
import fcntl
import os
import pwd
import time
import click
import dns.resolver
import treadmill
from treadmill import sysinfo
from treadmill import fs
from treadmill import gssapiprotocol
from treadmill_aws import awscontext
_LOGGER = logging.getLogger(__name__)
_DEFAULT_KEYTAB_DIR = '/var/spool/keytabs'
def _cache_kt(cachedir, principal):
"""Replace / with # in principal name."""
return os.path.join(cachedir, principal.replace('/', '#')) + '.keytab'
def _lock(lockdir, principal):
"""Create a file lock while processing keytab request."""
lockfile = _cache_kt(lockdir, principal) + '.lock'
lock = io.open(lockfile, 'w+')
_LOGGER.debug('Locking: %s', lockfile)
while True:
try:
fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
_LOGGER.debug('Locked.')
return lock
except IOError as err:
if err.errno != errno.EAGAIN:
raise
time.sleep(0.1)
def _request_keytab(server, port, principal):
"""Request keytab from keytab server."""
client = gssapiprotocol.GSSAPILineClient(
server, port, "host@%s" % server
)
if not client.connect():
_LOGGER.warning(
'Failed to connect/authenticate to %s:%s',
server, port
)
return None
client.write(principal.encode("utf-8"))
line = client.read().decode("utf-8")
client.disconnect()
response = json.loads(line)
if response['status'] == 'failure':
# TODO: need better output for error messages.
_LOGGER.error(
'Error requesting keytab: %s',
json.dumps(response, indent=4, sort_keys=True)
)
return None
if response['status'] == 'success':
keytab_entries = base64.standard_b64decode(
response['result']['keytab_entries']
)
return keytab_entries
raise Exception(
'Unexpected error: %s' %
json.dumps(response, indent=4, sort_keys=True)
)
def _write_keytab(keytab_entries, keytab, owner):
"""Write keytab file."""
try:
pwnam = pwd.getpwnam(owner)
except KeyError:
_LOGGER.error('Invalid user: %s', owner)
return
fs.write_safe(
keytab,
lambda f: f.write(keytab_entries),
owner=(pwnam.pw_uid, pwnam.pw_gid)
)
def _copy_keytab(kt_file, keytab, owner):
"""Copy keytab from cache."""
try:
pwnam = pwd.getpwnam(owner)
except KeyError:
_LOGGER.error('Invalid user: %s', owner)
return
with io.open(kt_file, 'rb') as kt:
fs.write_safe(
keytab,
lambda f: f.write(kt.read()),
owner=(pwnam.pw_uid, pwnam.pw_gid)
)
def init():
"""Admin Cell CLI module"""
@click.command()
@click.option('--krb5keytab-server',
required=False,
metavar='HOST:PORT',
multiple=True,
help='Address of ipakeytab server.')
@click.option('--principal',
required=False,
                  help='Requested principal ($user or $user/$hostname).')
@click.option('--keytab',
required=False,
help='Destination keytab file.')
@click.option('--owner',
required=False,
                  help='chown to specified Unix ID.')
@click.option('--cachedir',
required=False,
metavar='DIRECTORY',
help='Use local cache for keytabs.')
@click.option('--lockdir',
required=False,
metavar='DIRECTORY',
default='/tmp',
help='Lock directory.')
def krb5keytab(krb5keytab_server, principal, keytab, owner, cachedir,
lockdir):
"""krb5keytab client"""
# pylint: disable=too-many-branches
username = pwd.getpwuid(os.getuid())[0]
hostname = sysinfo.hostname()
treadmill.logging.set_log_level(logging.INFO)
if not principal:
principal = '{}/{}'.format(username, hostname)
if not owner:
owner = username
if not keytab:
keytab = os.path.join(_DEFAULT_KEYTAB_DIR, owner)
if not krb5keytab_server:
krb5keytab_server = []
domain = awscontext.GLOBAL.ipa_domain
try:
srvrecs = dns.resolver.query(
'_ipakeytab._tcp.{}'.format(domain), 'SRV'
)
except dns.resolver.NXDOMAIN:
srvrecs = []
for result in srvrecs:
_, _, port, server = result.to_text().split()
krb5keytab_server.append('{}:{}'.format(server, port))
if not krb5keytab_server:
treadmill.cli.bad_exit(
'Configuration/usage error: '
'--krb5keytab-server not specified/DNS not configured'
' - exiting.'
)
_LOGGER.info('Principal: %s', principal)
_LOGGER.info('Keytab: %s', keytab)
_LOGGER.info('Owner: %s', owner)
kt_entries = None
lock = None
if lockdir != '-':
# Obtain the lock and keep it open until app exits.
lock = _lock(lockdir, principal)
if cachedir:
cache_kt = _cache_kt(cachedir, principal)
if os.path.exists(cache_kt):
_LOGGER.info('Copy cached keytab: %s', cache_kt)
_copy_keytab(cache_kt, keytab, owner)
return
for endpoint in krb5keytab_server:
_LOGGER.info('Connecting to %s', endpoint)
server, port = endpoint.split(':')
kt_entries = _request_keytab(server, int(port), principal)
if kt_entries:
if cachedir:
_write_keytab(kt_entries, cache_kt, 'root')
_write_keytab(kt_entries, keytab, owner)
return
return krb5keytab
|
the-stack_0_21737 | """
scaffoldgraph setup.py
"""
from setuptools import setup, find_packages
from pathlib import Path
__version__ = '1.0.3'
url = 'https://github.com/UCLCheminformatics/scaffoldgraph'
description = 'ScaffoldGraph is an open-source cheminformatics library, built using RDKit and \
NetworkX for generating scaffold networks and scaffold trees.'
root = Path(__file__).parent.resolve()
requires_path = root / 'requirements.txt'
with requires_path.open('r', encoding='utf8') as f:
install_requires = [line.strip() for line in f]
install_requires.remove('rdkit')
readme_path = root / 'README.md'
with readme_path.open('r', encoding='utf-8') as f:
long_description = f.read()
setup_requires = ['pytest-runner']
tests_require = ['pytest', 'pytest-cov']
entry_points = {
'console_scripts': [
'scaffoldgraph = scaffoldgraph.scripts.run:scaffoldgraph_main',
]
}
setup(
name='ScaffoldGraph',
version=__version__,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
author='Oliver Scott',
author_email='[email protected]',
url=url,
download_url='{}/archive/{}.tar.gz'.format(url, __version__),
license='MIT',
keywords=[
'rdkit',
'networkx',
'cheminformatics',
'scaffolds',
],
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
entry_points=entry_points,
packages=find_packages(),
)
|
the-stack_0_21738 | import random
import pytest
import snowflake
from mock import Mock, call, patch
from snowflake.connector.cursor import SnowflakeCursor
from dbnd.testing.helpers_mocks import set_airflow_context
from dbnd_snowflake.snowflake_controller import SNOWFLAKE_METRIC_TO_UI_NAME
from dbnd_snowflake.snowflake_tracker import snowflake_query_tracker
from dbnd_snowflake.sql_utils import try_extract_tables
TEST_SNOWFLAKE_CONN_STRING = (
"snowflake://test_account:test_password@test_account/test_database"
)
TEST_TABLE_NAME = "some_table"
def _authenticate(self_auth, *args, **kwargs):
self_auth._rest._connection._session_id = random.randint(0, 2000000000)
return {}
@pytest.fixture
def mock_snowflake():
execute_mock = Mock()
def _execute(self_cursor, command, *args, **kwargs):
# type: (snowflake.connector.cursor.SnowflakeCursor, str, ..., ...) -> ...
execute_mock(command, *args, **kwargs)
self_cursor._sfqid = random.randint(0, 2000000000)
some_key = list(SNOWFLAKE_METRIC_TO_UI_NAME)[0]
if some_key in command:
# this probably Resource Usage query, generate dummy metric
result = [{k: 0 for k in SNOWFLAKE_METRIC_TO_UI_NAME}]
elif "information_schema.columns" in command:
# probably, SnowflakeController.get_column_types()
result = [
{"COLUMN_NAME": "column_a", "DATA_TYPE": "int"},
{"COLUMN_NAME": "column_b", "DATA_TYPE": "int"},
]
elif "SHOW TABLES" in command:
# probably, SnowflakeController.get_dimensions()
result = [{"rows": 0, "bytes": 0}]
else:
result = []
self_cursor._result = (x for x in result) # this should be generator
return self_cursor
with patch.object(
snowflake.connector.auth.Auth, "authenticate", new=_authenticate
), patch.object(
snowflake.connector.cursor.SnowflakeCursor, "execute", new=_execute,
), patch.object(
snowflake.connector.network.SnowflakeRestful, "delete_session",
):
yield execute_mock
def _snowflake_connect():
return snowflake.connector.connect(
user="test_user",
password="test_password",
account="test_account",
warehouse="test_warehouse",
database="test_database",
region="test_region",
role="test_role",
schema="test_schema",
)
def _run_simple_query_with_close_conn(mock_snowflake, log_tables, log_resource_usage):
with snowflake_query_tracker(
log_tables=log_tables,
log_resource_usage=log_resource_usage,
log_tables_with_preview=True,
) as st:
query = "select * from " + TEST_TABLE_NAME
with _snowflake_connect() as conn:
with conn.cursor() as cursor:
cursor.execute(query)
cursor.fetchall()
assert sorted(st.get_all_tables()) == [TEST_TABLE_NAME]
session_id, query_id = st.get_last_session_with_query_id(many=False)
session_id, query_ids = st.get_last_session_with_query_id(many=True)
session_queries = st.get_all_session_queries().copy()
# 1 session
assert len(session_queries) == 1
for st_session_id, st_query_ids in session_queries.items():
assert len(st_query_ids) == 1
assert (st_session_id, st_query_ids[0]) == (session_id, query_id)
assert (st_session_id, st_query_ids) == (session_id, query_ids)
# query
assert len(mock_snowflake.mock_calls) == 1
assert mock_snowflake.mock_calls[0] == call(query)
if log_resource_usage:
# should be cleaned
assert len(st.get_all_session_queries()) == 0
else:
# on exit from context manager, st.get_all_sessions() shouldn't be affected
# - resources/tables queries shouldn't be tracked anyway
assert len(st.get_all_session_queries()) == len(session_queries)
assert all(
[a == b for a, b in zip(st.get_all_session_queries(), session_queries)]
)
return session_id, query_ids
def _run_simple_query_no_close_conn(mock_snowflake, log_tables, log_resource_usage):
with snowflake_query_tracker(
log_tables=log_tables,
log_resource_usage=log_resource_usage,
log_tables_with_preview=True,
) as st:
query = "select * from " + TEST_TABLE_NAME
# with self._snowflake_connect() as conn:
conn = _snowflake_connect()
with conn.cursor() as cursor:
cursor.execute(query)
cursor.fetchall()
# we want COMMIT here to have same behavior/amount of queries
# with case when connection is auto-closed (with context manager)
cursor.execute("COMMIT")
assert sorted(st.get_all_tables()) == [TEST_TABLE_NAME]
session_id, query_id = st.get_last_session_with_query_id(many=False)
session_id, query_ids = st.get_last_session_with_query_id(many=True)
session_queries = st.get_all_session_queries().copy()
# 1 session
assert len(session_queries) == 1
for st_session_id, st_query_ids in session_queries.items():
assert len(st_query_ids) == 1
assert (st_session_id, st_query_ids[0]) == (session_id, query_id)
assert (st_session_id, st_query_ids) == (session_id, query_ids)
# query + COMMIT
assert len(mock_snowflake.mock_calls) == 2
assert mock_snowflake.mock_calls[0] == call(query)
if log_resource_usage:
# should be cleaned
assert len(st.get_all_session_queries()) == 0
else:
# on exit from context manager, st.get_all_sessions() shouldn't be affected
# - resources/tables queries shouldn't be tracked anyway
assert len(st.get_all_session_queries()) == len(session_queries)
assert all(
[a == b for a, b in zip(st.get_all_session_queries(), session_queries)]
)
return session_id, query_ids
QUERY_RUNNERS = [_run_simple_query_no_close_conn, _run_simple_query_with_close_conn]
@pytest.mark.usefixtures(set_airflow_context.__name__)
class TestSnowflakeQueryTracker:
@pytest.mark.parametrize("run_query", QUERY_RUNNERS)
def test_no_auto_log(self, mock_snowflake, run_query):
run_query(mock_snowflake, log_tables=False, log_resource_usage=False)
# query + COMMIT
assert len(mock_snowflake.mock_calls) == 2
@pytest.mark.parametrize("run_query", QUERY_RUNNERS)
def test_just_resource(self, mock_snowflake, run_query):
session_id, (query_id,) = run_query(
mock_snowflake, log_tables=False, log_resource_usage=True
)
# 1 for actual query + COMMIT
# + 1 for resource usage
assert len(mock_snowflake.mock_calls) == 3
resource_query = mock_snowflake.mock_calls[-1][1][0]
assert str(session_id) in resource_query
assert str(query_id) in resource_query
@pytest.mark.parametrize("run_query", QUERY_RUNNERS)
def test_resource_and_table(self, mock_snowflake, run_query):
session_id, (query_id,) = run_query(
mock_snowflake, log_tables=True, log_resource_usage=True
)
# 1 for actual query + COMMIT
# + 1 for resource usage
# + 3 for tables
assert len(mock_snowflake.mock_calls) == 6
for pattern in (
"information_schema.columns",
"TRY_HEX_DECODE_STRING",
"SHOW TABLES",
):
assert any(
[
pattern in mock_call[1][0] or TEST_TABLE_NAME in mock_call[1][0]
for mock_call in mock_snowflake.mock_calls
]
), mock_snowflake.mock_calls
resource_query = mock_snowflake.mock_calls[2][1][0]
assert str(session_id) in resource_query
assert str(query_id) in resource_query
@pytest.mark.parametrize("run_query", QUERY_RUNNERS)
def test_explicit_tables(self, mock_snowflake, run_query):
track_tables = ["tableX", "tableY"]
run_query(mock_snowflake, log_tables=track_tables, log_resource_usage=False)
# 1 for actual query + COMMIT
# + 3 for tables * 2 (tableX, tableY)
assert len(mock_snowflake.mock_calls) == 8
for pattern in (
"information_schema.columns",
"TRY_HEX_DECODE_STRING",
"SHOW TABLES",
):
for table_name in track_tables:
assert any(
[
pattern in call[1][0] and table_name in call[1][0]
for call in mock_snowflake.mock_calls
]
), mock_snowflake.mock_calls
def test_not_tracked_queries(self, mock_snowflake):
with snowflake_query_tracker() as st:
with _snowflake_connect() as conn, conn.cursor() as cursor:
for sql in [
"create table something ()",
"alter table alter column ()",
"alter session set ...",
]:
cursor.execute(sql)
assert len(st.get_all_tables()) == 0
assert len(st.get_all_session_queries()) == 0
assert st.get_last_session_with_query_id(many=True) == (None, [])
assert st.get_last_session_with_query_id(many=False) == (None, None)
# 1 for automatic "alert session autocommit=false" + 3 queries above
assert len(mock_snowflake.mock_calls) == 4
# should stay the same - no extra queries
assert len(mock_snowflake.mock_calls) == 4
def test_extract_tables():
queries = (
("select * from table0", ["table0"]),
("update table1 set a=0", ["table1"]),
("delete from table2", ["table2"]),
("insert into table3", ["table3"]),
(
"""sEleCT * fRom table1
inner jOin schema1.table2 on a=b
inner joIn (select * from (table3) where a=b) x on
""",
["table1", "schema1.table2", "table3"],
),
(
"""
WITH A AS (
select * from table3
),
B as (
select * from table4
)
select * from A
left join B on A.a = B.b
inner join (C) on A.c = C.c
""",
["table3", "table4", "C"],
),
)
for query, expected_tables in queries:
tables = try_extract_tables(query)
assert sorted(tables) == sorted(expected_tables), query
|
the-stack_0_21739 | import torch
import torchvision.transforms as transforms
from dataset.ListDatasetFolder import ListDatasetFolder
def get_loader(files, labels, batch_size):
transforms_list = [
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
]
transform = transforms.Compose(transforms_list)
folder = ListDatasetFolder(files, labels, transform)
loader = torch.utils.data.DataLoader(
folder, batch_size=batch_size, shuffle=True, num_workers=6, drop_last=True)
return loader, folder
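# A minimal usage sketch (hypothetical paths and labels; assumes the images are at least
# 224x224 and that ListDatasetFolder yields (image, label) pairs):
#
#   loader, folder = get_loader(["img_0.jpg", "img_1.jpg"], [0, 1], batch_size=2)
#   for images, targets in loader:
#       pass  # images: float tensor of shape [2, 3, 224, 224] after RandomCrop/ToTensor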
|
the-stack_0_21741 | import engineio
class WSGIApp(engineio.WSGIApp):
"""WSGI middleware for Socket.IO.
This middleware dispatches traffic to a Socket.IO application. It can also
serve a list of static files to the client, or forward unrelated HTTP
traffic to another WSGI application.
:param socketio_app: The Socket.IO server. Must be an instance of the
``socketio.Server`` class.
:param wsgi_app: The WSGI app that receives all other traffic.
:param static_files: A dictionary with static file mapping rules. See the
documentation for details on this argument.
:param socketio_path: The endpoint where the Socket.IO application should
be installed. The default value is appropriate for
most cases.
Example usage::
import socketio
import eventlet
from . import wsgi_app
sio = socketio.Server()
app = socketio.WSGIApp(sio, wsgi_app)
eventlet.wsgi.server(eventlet.listen(('', 8000)), app)
"""
def __init__(self, socketio_app, wsgi_app=None, static_files=None,
socketio_path='socket.io'):
super(WSGIApp, self).__init__(socketio_app, wsgi_app,
static_files=static_files,
engineio_path=socketio_path)
class Middleware(WSGIApp):
"""This class has been renamed to WSGIApp and is now deprecated."""
def __init__(self, socketio_app, wsgi_app=None,
socketio_path='socket.io'):
super(Middleware, self).__init__(socketio_app, wsgi_app,
socketio_path=socketio_path)
|
the-stack_0_21742 | from .evaluation import EvalSectionSerializer
from .. import models
from rest_framework import serializers
class SocioeconomicSerializer(EvalSectionSerializer):
digi_entry = serializers.ChoiceField(
choices=models.Socioeconomic.DIGI_ENTRY_CHOICES,
required=False,
allow_blank=True
)
digi_date = serializers.DateField(
required=False,
allow_null=True
)
digi_id_type = serializers.ChoiceField(
required=False,
choices=models.Socioeconomic.ID_TYPE_CHOICES,
allow_blank=True
)
digi_id_number = serializers.CharField(
required=False,
allow_blank=True
)
digi_image = serializers.PrimaryKeyRelatedField(
queryset=models.Image.objects.all(),
required=False,
allow_null=True
)
class Meta:
model = models.Socioeconomic
fields = EvalSectionSerializer.Meta.fields + (
'digi_entry',
'digi_date',
'digi_id_type',
'digi_id_number',
'digi_image'
)
|
the-stack_0_21744 | # coding: utf-8
import pprint
import re
import six
class UpdateScalingPolicyRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'scaling_policy_id': 'str',
'body': 'UpdateScalingPolicyOption'
}
attribute_map = {
'scaling_policy_id': 'scaling_policy_id',
'body': 'body'
}
def __init__(self, scaling_policy_id=None, body=None):
"""UpdateScalingPolicyRequest - a model defined in huaweicloud sdk"""
self._scaling_policy_id = None
self._body = None
self.discriminator = None
self.scaling_policy_id = scaling_policy_id
if body is not None:
self.body = body
@property
def scaling_policy_id(self):
"""Gets the scaling_policy_id of this UpdateScalingPolicyRequest.
        Scaling policy ID.
:return: The scaling_policy_id of this UpdateScalingPolicyRequest.
:rtype: str
"""
return self._scaling_policy_id
@scaling_policy_id.setter
def scaling_policy_id(self, scaling_policy_id):
"""Sets the scaling_policy_id of this UpdateScalingPolicyRequest.
        Scaling policy ID.
:param scaling_policy_id: The scaling_policy_id of this UpdateScalingPolicyRequest.
:type: str
"""
self._scaling_policy_id = scaling_policy_id
@property
def body(self):
"""Gets the body of this UpdateScalingPolicyRequest.
:return: The body of this UpdateScalingPolicyRequest.
:rtype: UpdateScalingPolicyOption
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdateScalingPolicyRequest.
:param body: The body of this UpdateScalingPolicyRequest.
:type: UpdateScalingPolicyOption
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateScalingPolicyRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
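# A small usage sketch (hypothetical policy id; the nested UpdateScalingPolicyOption body
# is omitted here):
#
#   request = UpdateScalingPolicyRequest(scaling_policy_id="policy-123")
#   print(request.to_str())  # pretty-printed dict produced by to_dict()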
|
the-stack_0_21746 | from queue import Queue
from threading import Thread
import time
class Student(Thread):
def __init__(self, name, queue):
super().__init__()
self.name = name
self.queue = queue
def run(self):
while True:
            # Block here, constantly listening to the teacher for a message
msg = self.queue.get()
            # As soon as the student hears their own name, answer right away
if msg == self.name:
print("{}:到!".format(self.name))
class Teacher:
def __init__(self, queue):
self.queue = queue
def call(self, student_name):
print("老师:{}来了没?".format(student_name))
# 发送消息,要点谁的名
self.queue.put(student_name)
queue = Queue()
teacher = Teacher(queue=queue)
s1 = Student(name="小明", queue=queue)
s2 = Student(name="小亮", queue=queue)
s1.start()
s2.start()
print('Starting roll call~')
teacher.call('小明')
time.sleep(1)
teacher.call('小亮')
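# Expected behaviour: each teacher.call() prints the question and puts the name on the
# shared queue; the student thread that happens to receive it replies only if the name
# matches (a non-matching student silently drops the message). The student threads loop
# forever and are not daemonized, so stop the script manually once the roll call is done.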
|
the-stack_0_21748 | """
Script for evaluating trained model on PyTorch / ImageNet-1K (demo mode).
"""
import math
import argparse
import numpy as np
import cv2
import torch
from gluoncv.data import ImageNet1kAttr
from pytorchcv.model_provider import get_model as ptcv_get_model
def parse_args():
"""
Create python script parameters.
Returns
-------
ArgumentParser
        Resulting args.
"""
parser = argparse.ArgumentParser(
description="Evaluate an ImageNet-1K model on PyTorch (demo mode)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--model",
type=str,
required=True,
help="type of model to use. see model_provider for options")
parser.add_argument(
"--image",
type=str,
required=True,
help="path to testing image")
parser.add_argument(
"--num-gpus",
type=int,
default=0,
help="number of gpus to use")
parser.add_argument(
"--input-size",
type=int,
default=224,
help="size of the input for model")
parser.add_argument(
"--resize-inv-factor",
type=float,
default=0.875,
help="inverted ratio for input image crop")
parser.add_argument(
"--mean-rgb",
nargs=3,
type=float,
default=(0.485, 0.456, 0.406),
help="Mean of RGB channels in the dataset")
parser.add_argument(
"--std-rgb",
nargs=3,
type=float,
default=(0.229, 0.224, 0.225),
help="STD of RGB channels in the dataset")
args = parser.parse_args()
return args
def main():
"""
Main body of script.
"""
args = parse_args()
# Load a testing image:
image = cv2.imread(args.image, flags=cv2.IMREAD_COLOR)
# cv2.imshow("image", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
image = cv2.cvtColor(image, code=cv2.COLOR_BGR2RGB)
# Resize image with keeping aspect ratio:
resize_value = int(math.ceil(float(args.input_size) / args.resize_inv_factor))
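    # With the defaults (input_size=224, resize_inv_factor=0.875) this gives
    # resize_value = ceil(224 / 0.875) = 256, i.e. the shorter side is scaled to 256 px.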
h, w = image.shape[:2]
if not ((w == resize_value and w <= h) or (h == resize_value and h <= w)):
resize_size = (resize_value, int(resize_value * h / w)) if w < h else (int(resize_value * w / h), resize_value)
image = cv2.resize(image, dsize=resize_size, interpolation=cv2.INTER_LINEAR)
# Center crop of the image:
h, w = image.shape[:2]
th, tw = args.input_size, args.input_size
ih = int(round(0.5 * (h - th)))
jw = int(round(0.5 * (w - tw)))
image = image[ih:(ih + th), jw:(jw + tw), :]
# cv2.imshow("image2", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# Convert image to a float tensor and normalize it:
x = image.astype(np.float32)
x = x / 255.0
x = (x - np.array(args.mean_rgb)) / np.array(args.std_rgb)
# Create `use_cuda` flag:
use_cuda = (args.num_gpus > 0)
# Convert the tensor to a Pytorch tensor:
x = x.transpose(2, 0, 1)
x = np.expand_dims(x, axis=0)
x = torch.FloatTensor(x)
if use_cuda:
x = x.cuda()
# Create model with loading pretrained weights:
net = ptcv_get_model(args.model, pretrained=True)
net.eval()
if use_cuda:
net = net.cuda()
# Evaluate the network:
y = net(x)
probs = torch.nn.Softmax(dim=-1)(y)
# Show results:
top_k = 5
probs_np = probs.cpu().detach().numpy().squeeze(axis=0)
top_k_inds = probs_np.argsort()[::-1][:top_k]
classes = ImageNet1kAttr().classes
print("The input picture is classified to be:")
for k in range(top_k):
print("{idx}: [{class_name}], with probability {prob:.3f}.".format(
idx=(k + 1),
class_name=classes[top_k_inds[k]],
prob=probs_np[top_k_inds[k]]))
if __name__ == "__main__":
main()
|
the-stack_0_21749 | from ikologikapi.domain.AbstractIkologikInstallationsObject import AbstractIkologikInstallationsObject
class AlertType(AbstractIkologikInstallationsObject):
def __init__(self, customer: str, installation: str):
super().__init__(customer, installation)
self.severity = None
self.message = None
self.autoAchnowledge = None
self.active = None
self.timeoutActivation = None
self.activationMessageEnabled = None
self.timeoutDeactivation = None
self.deactivationMessageEnabled = None
self.deactivationMessage = None
self.availabilityRelated = None
self.operationRelated = None
self.connectivitiyRelated = None
self.criteria = None
self.notificationReceivers = None
self.notificationMessageLanguage = None
self.notificationMessageRepeat = None
|
the-stack_0_21753 | #!/usr/bin/env python
"""
Python implementation of the hash used for "hashed sequence index" files.
The "hassock" hash is a variant of Austin Appleby's MurmurHash2. The
latter is described (as of Apr/2009) at
murmurhash.googlepages.com
This variant is based on the endian-neutral version found at
murmurhash.googlepages.com/MurmurHashNeutral2.cpp
and differs in the following ways:
(a) The "seed" is hardwired.
(b) We parse the data block in reverse; this allows the caller to
prepend an additional seed pattern to his buffer, potentially
getting better mixing for the bits in the final incorporated
bytes.
(c) The last three bytes are incorporated in a different order than
they were in MurmurHash2, because the code just works out better
this way.
:Author: Bob Harris ([email protected])
"""
import sys
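# Hard-wired hash constants (cf. difference (a) in the docstring above: the seed is not
# caller-supplied in this variant).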
seed = 0x5C3FC4D3
mult = 0x87C10417
def hassock_hash(s):
ix = len(s)
h = seed ^ ix # h = seed ^ len;
while (ix >= 4):
k = ord(s[ix-1]) # k = *(--data);
k |= ord(s[ix-2]) << 8 # k |= *(--data) << 8;
k |= ord(s[ix-3]) << 16 # k |= *(--data) << 16;
k |= ord(s[ix-4]) << 24 # k |= *(--data) << 24;
k = (k * mult) & 0xFFFFFFFF # k *= m;
k ^= k >> 24 # k ^= k >> r;
k = (k * mult) & 0xFFFFFFFF # k *= m;
h = (h * mult) & 0xFFFFFFFF # h *= m;
h ^= k # h ^= k;
ix -= 4
if (ix >= 3):
h ^= ord(s[2]) << 16 # h ^= *(--data) << 16;
if (ix >= 2):
h ^= ord(s[1]) << 8 # h ^= *(--data) << 8;
if (ix >= 1):
h ^= ord(s[0]) # h ^= *(--data);
h = (h * mult) & 0xFFFFFFFF # h *= m;
h ^= h >> 13 # h ^= h >> 13;
h = (h * mult) & 0xFFFFFFFF # h *= m;
h ^= h >> 15 # h ^= h >> 15;
return h
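# A typical use (mirroring demonstrate_hash() below) is to reduce the 32-bit value to a
# bucket index, e.g. hassock_hash(name) % num_buckets.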
# main program to test
def main():
m = None
strings = []
for s in sys.argv[1:]:
if (s.startswith("--mod=")): m = int(s.split("=",1)[1])
else: strings += [s]
if (strings != []):
for s in strings:
demonstrate_hash(s,m)
else:
for line in sys.stdin:
line = line.rstrip()
demonstrate_hash(line,m)
def demonstrate_hash(s,m):
if (m == None): print("%08X: %s" % (hassock_hash(s),s))
else: print("%d: %s" % (hassock_hash(s)%m,s))
if __name__ == "__main__": main()
|
the-stack_0_21754 | # Copyright (c) 2015-2017 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import copy
from nose_parameterized import parameterized
from tests.st.test_base import TestBase
from tests.st.utils.utils import log_and_run, calicoctl, \
API_VERSION, name, ERROR_CONFLICT, NOT_FOUND, NOT_NAMESPACED, \
SET_DEFAULT, NOT_SUPPORTED, KUBERNETES_NP, writeyaml
from tests.st.utils.data import *
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logger = logging.getLogger(__name__)
class TestCalicoctlCommands(TestBase):
"""
Test calicoctl pool
1) Test the CRUD aspects of the pool commands.
2) Test IP assignment from pool.
    BGP exported routes are hard to test and aren't expected to change much, so
    we don't write tests for them (yet).
"""
def test_get(self):
"""
Test that a basic CRUD flow for pool commands works.
"""
# Create the ipv6 pool using calicoctl, and read it out using an
# exact get and a list query.
rc = calicoctl("create", data=ippool_name2_rev1_v6)
rc.assert_no_error()
rc = calicoctl("get ippool %s -o yaml" % name(ippool_name2_rev1_v6))
rc.assert_data(ippool_name2_rev1_v6)
rc = calicoctl("get ippool -o yaml")
rc.assert_list("IPPool", [ippool_name2_rev1_v6])
# Add in the ipv4 network with calicoctl, and read out using an exact
# get, and a list query.
rc = calicoctl("create", data=ippool_name1_rev1_v4)
rc.assert_no_error()
rc = calicoctl("get ippool %s -o yaml" % name(ippool_name1_rev1_v4))
rc.assert_data(ippool_name1_rev1_v4)
rc = calicoctl("get ippool -o yaml")
rc.assert_list("IPPool", [ippool_name1_rev1_v4, ippool_name2_rev1_v6])
# Remove both the ipv4 pool and ipv6 pool by CLI options and by file.
rc = calicoctl("delete ippool %s" % name(ippool_name1_rev1_v4))
rc.assert_no_error()
rc = calicoctl("delete", ippool_name2_rev1_v6)
rc.assert_no_error()
# Assert pools are now deleted
rc = calicoctl("get ippool -o yaml")
rc.assert_empty_list("IPPool")
# Assert that deleting the pool again fails.
rc = calicoctl("delete ippool %s" % name(ippool_name2_rev1_v6))
rc.assert_error(text=NOT_FOUND)
def test_delete_with_resource_version(self):
"""
Test that resource version operates correctly with delete, i.e.
calicoctl honors the resource version when it's specified.
"""
# Create a new BGP Peer and get it to determine the current resource
# version.
rc = calicoctl("create", data=bgppeer_name1_rev1_v4)
rc.assert_no_error()
rc = calicoctl("get bgppeer %s -o yaml" % name(bgppeer_name1_rev1_v4))
rc.assert_no_error()
rev0 = rc.decoded
# Update the BGP Peer and get it to assert the resource version is not
# the same.
rc = calicoctl("apply", data=bgppeer_name1_rev2_v4)
rc.assert_no_error()
rc = calicoctl("get bgppeer %s -o yaml" % name(bgppeer_name1_rev2_v4))
rc.assert_no_error()
rev1 = rc.decoded
self.assertNotEqual(rev0['metadata']['resourceVersion'], rev1['metadata']['resourceVersion'])
# Attempt to delete using the old revision (rev0). This will fail.
rc = calicoctl("delete", data=rev0)
rc.assert_error(text=ERROR_CONFLICT)
# Delete using the new revision (rev1).
rc = calicoctl("delete", data=rev1)
rc.assert_no_error()
def test_replace_with_resource_version(self):
"""
Test that resource version operates correctly with update, i.e.
calicoctl honors the resource version when it's specified.
"""
# Create a new Network Policy and get it to determine the current
# resource version.
rc = calicoctl("create", data=networkpolicy_name1_rev1)
rc.assert_no_error()
rc = calicoctl(
"get networkpolicy %s -o yaml" % name(networkpolicy_name1_rev1))
rc.assert_no_error()
rev0 = rc.decoded
# Replace the Network Policy (with no resource version) and get it to
# assert the resource version is not the same.
rc = calicoctl("replace", data=networkpolicy_name1_rev2)
rc.assert_no_error()
rc = calicoctl(
"get networkpolicy %s -o yaml" % name(networkpolicy_name1_rev2))
rc.assert_no_error()
rev1 = rc.decoded
self.assertNotEqual(rev0['metadata']['resourceVersion'], rev1['metadata']['resourceVersion'])
# Attempt to replace using the old revision (rev0). This will fail.
rc = calicoctl("replace", data=rev0)
rc.assert_error(text=ERROR_CONFLICT)
# Replace using the original data, but with the new resource version.
rev0['metadata']['resourceVersion'] = rev1['metadata']['resourceVersion']
rc = calicoctl("replace", data=rev0)
rc.assert_no_error()
# Delete the resource by name (i.e. without using a resource version).
rc = calicoctl("delete networkpolicy %s" % name(rev0))
rc.assert_no_error()
# Attempt to replace the (now deleted) resource.
rc = calicoctl("replace", data=networkpolicy_name1_rev2)
rc.assert_error(text=NOT_FOUND)
def test_apply_with_resource_version(self):
"""
Test that resource version operates correctly with apply, i.e.
calicoctl honors the resource version when it's specified.
"""
# Use apply to create a new Host Endpoint and get it to determine the
# current resource version (first checking that it doesn't exist).
rc = calicoctl(
"get hostendpoint %s -o yaml" % name(hostendpoint_name1_rev1))
rc.assert_error(text=NOT_FOUND)
rc = calicoctl("apply", data=hostendpoint_name1_rev1)
rc.assert_no_error()
rc = calicoctl(
"get hostendpoint %s -o yaml" % name(hostendpoint_name1_rev1))
rc.assert_no_error()
rev0 = rc.decoded
# Apply the Host Endpoint (with no resource version) and get it to
# assert the resource version is not the same.
rc = calicoctl("apply", data=hostendpoint_name1_rev2)
rc.assert_no_error()
rc = calicoctl(
"get hostendpoint %s -o yaml" % name(hostendpoint_name1_rev2))
rc.assert_no_error()
rev1 = rc.decoded
self.assertNotEqual(rev0['metadata']['resourceVersion'], rev1['metadata']['resourceVersion'])
# Attempt to apply using the old revision (rev0). This will fail.
rc = calicoctl("apply", data=rev0)
rc.assert_error(text=ERROR_CONFLICT)
# Apply using the original data, but with the new resource version.
rev0['metadata']['resourceVersion'] = rev1['metadata']['resourceVersion']
rc = calicoctl("apply", data=rev0)
rc.assert_no_error()
# Delete the resource without using a resource version.
rc = calicoctl("delete hostendpoint %s" % name(rev0))
rc.assert_no_error()
def test_json(self):
"""
Test mainline CRUD operations using JSON input and output.
"""
# Use create to create a new profile and get the profile to check the
# data was stored (using JSON input/output).
rc = calicoctl("create", data=profile_name1_rev1, format="json")
rc.assert_no_error()
rc = calicoctl("get profile %s -o json" % name(profile_name1_rev1))
rc.assert_data(profile_name1_rev1, format="json")
# Use apply to update the profile and get the profile to check the
# data was stored (using JSON input/output).
rc = calicoctl("apply", data=profile_name1_rev2, format="json")
rc.assert_no_error()
rc = calicoctl("get profile %s -o json" % name(profile_name1_rev1))
rc.assert_data(profile_name1_rev2, format="json")
# Use replace to update the profile and get the profile to check the
# data was stored (using JSON input/output).
rc = calicoctl("replace", data=profile_name1_rev1, format="json")
rc.assert_no_error()
rc = calicoctl("get profile %s -o json" % name(profile_name1_rev1))
rc.assert_data(profile_name1_rev1, format="json")
# Use delete to delete the profile (using JSON input).
rc = calicoctl("delete", data=profile_name1_rev1, format="json")
rc.assert_no_error()
def test_stdin(self):
"""
Test mainline CRUD operations using stdin input and output (mixing
JSON and YAML types).
"""
# Use create to create a new GlobalNetworkPolicy and get the resource to check the
# data was stored (using JSON input/output).
rc = calicoctl("create", data=globalnetworkpolicy_name1_rev1, format="json", load_as_stdin=True)
rc.assert_no_error()
rc = calicoctl("get globalnetworkpolicy %s -o json" % name(globalnetworkpolicy_name1_rev1))
rc.assert_data(globalnetworkpolicy_name1_rev1, format="json")
# Use apply to update the GlobalNetworkPolicy and get the resource to check the
# data was stored (using YAML input/output).
rc = calicoctl("apply", data=globalnetworkpolicy_name1_rev2, format="yaml", load_as_stdin=True)
rc.assert_no_error()
rc = calicoctl("get globalnetworkpolicy %s -o yaml" % name(globalnetworkpolicy_name1_rev1))
rc.assert_data(globalnetworkpolicy_name1_rev2, format="yaml")
# Use replace to update the GlobalNetworkPolicy and get the resource to check the
# data was stored (using JSON input/output).
rc = calicoctl("replace", data=globalnetworkpolicy_name1_rev1, format="json", load_as_stdin=True)
rc.assert_no_error()
rc = calicoctl("get globalnetworkpolicy %s -o json" % name(globalnetworkpolicy_name1_rev1))
rc.assert_data(globalnetworkpolicy_name1_rev1, format="json")
# Use delete to delete the GlobalNetworkPolicy (using YAML input).
rc = calicoctl("delete", data=globalnetworkpolicy_name1_rev1, format="yaml", load_as_stdin=True)
rc.assert_no_error()
def test_file_multi(self):
"""
Test CRUD operations using a file containing multiple entries (a mix
of non-List and List types).
"""
# Since the file processing is the same for all commands, we only
# need to test multi entries per file on a single command (in this case
# we use delete).
# Combine three different resources and create those in a single file-based command.
resources = [globalnetworkpolicy_name1_rev1, workloadendpoint_name1_rev1, workloadendpoint_name2_rev1]
rc = calicoctl("create", data=resources)
rc.assert_no_error()
# Get the resources using file based input. It should return the
# same results.
rc = calicoctl("get -o yaml", data=resources)
rc.assert_data(resources)
# Use a get/list to get one of the resource types and an exact get to
# get the other. Join them together and use it to delete the resource.
# This tests a mix of List and non-list types in the same file.
# We use the data returned from the get since this should be able to
# be used directly as input into the next command.
rc = calicoctl("get globalnetworkpolicy %s -o yaml" % name(globalnetworkpolicy_name1_rev1))
rc.assert_data(globalnetworkpolicy_name1_rev1)
gnp = rc.decoded
rc = calicoctl("get workloadendpoints -o yaml --all-namespaces")
rc.assert_list("WorkloadEndpoint", [workloadendpoint_name1_rev1, workloadendpoint_name2_rev1])
wepList = rc.decoded
rc = calicoctl("delete", data=[gnp, wepList])
rc.assert_no_error()
# Finally do a get to make sure nothing is returned.
rc = calicoctl("get workloadendpoints -o yaml")
rc.assert_empty_list("WorkloadEndpoint")
def test_file_single_list(self):
"""
Test CRUD operations using a file containing a single List.
"""
# Create a couple of resources.
resources = [workloadendpoint_name1_rev1, workloadendpoint_name2_rev1]
rc = calicoctl("create", data=resources)
rc.assert_no_error()
# Get the resources using file based input. It should return the
# same results.
rc = calicoctl("get workloadendpoints -o yaml --all-namespaces")
rc.assert_list("WorkloadEndpoint", resources)
wepList = rc.decoded
# Use the returned list to perform a get. Since the list is expanded
# this query results in two exact gets - so we'll end up with a []
# of resources rather than a resource List.
rc = calicoctl("get -o yaml", wepList)
rc.assert_data(resources)
# Use the returned list to perform a delete.
rc = calicoctl("delete", wepList)
rc.assert_no_error()
# Use the returned list to perform a delete.
rc = calicoctl("get workloadendpoints -o yaml")
rc.assert_empty_list("WorkloadEndpoint")
@parameterized.expand([
(ippool_name1_rev1_v4,),
(profile_name1_rev1,),
(globalnetworkpolicy_name1_rev1,),
(globalnetworkset_name1_rev1,),
(globalnetworkset_name1_rev1_large,),
(hostendpoint_name1_rev1,),
(bgppeer_name1_rev1_v4,),
(node_name1_rev1,),
])
def test_non_namespaced(self, data):
"""
Test namespace is handled as expected for each non-namespaced resource type.
"""
# Clone the data so that we can modify the metadata parms.
data1 = copy.deepcopy(data)
kind = data['kind']
rc = calicoctl("create", data=data1)
rc.assert_no_error()
# Get the resource type as normal.
rc = calicoctl("get %s" % kind)
rc.assert_no_error()
rc = calicoctl("get %s -o wide" % kind)
rc.assert_no_error()
# Get the resource with name1 and namespace2.
# For non-namespaced resources this will error.
rc = calicoctl("get %s %s --namespace default -o yaml" % (kind, data1['metadata']['name']))
rc.assert_error(NOT_NAMESPACED)
# Get the resource type for all namespaces.
# For non-namespaced resources this will error.
rc = calicoctl("get %s --all-namespaces -o yaml" % kind)
rc.assert_error(NOT_NAMESPACED)
# Delete the resource
rc = calicoctl("delete", data=data1)
rc.assert_no_error()
def test_nets_truncation(self):
"""
Test that the list of nets is truncated if it's too long.
"""
rc = calicoctl("create", data=globalnetworkset_name1_rev1_large)
rc.assert_no_error()
rc = calicoctl("get globalnetworkset -o wide")
rc.assert_no_error()
rc.assert_output_contains("10.0.0.0/28,10.0.1.0/28,10.0.2.0/28,10.0.3.0/28,10.0.4.0/28,10.0.5.0/28,10.0....")
def test_nets_no_truncation(self):
"""
Test that the list of nets is shown in full if not too long.
"""
rc = calicoctl("create", data=globalnetworkset_name1_rev1)
rc.assert_no_error()
rc = calicoctl("get globalnetworkset -o wide")
rc.assert_no_error()
rc.assert_output_contains("10.0.0.1,11.0.0.0/16,feed:beef::1,dead:beef::96")
@parameterized.expand([
(networkpolicy_name1_rev1,),
(workloadendpoint_name1_rev1,),
])
def test_namespaced(self, data):
"""
Tests namespace is handled as expected for each namespaced resource type.
"""
# Clone the data so that we can modify the metadata parms.
data1 = copy.deepcopy(data)
data2 = copy.deepcopy(data)
kind = data['kind']
# Create resource with name1 and with name2.
# Leave the first namespace blank and the second set to
# namespace2 for the actual create request.
if kind == "WorkloadEndpoint":
# The validation in libcalico-go WorkloadEndpoint checks the
# construction of the name so keep the name on the workloadendpoint.
# Below namespace2 is searched for the WorkloadEndpoint data1
# name so we need data2 to have a different name than data1 so we
# change it to have eth1 instead of eth0
# Strip off the last character (the zero in eth0) and replace it
# with a 1
data2['metadata']['name'] = data1['metadata']['name'][:len(data1['metadata']['name'])-1] + "1"
# Change endpoint to eth1 so the validation works on the WEP
data2['spec']['endpoint'] = "eth1"
elif kind == "IPPool":
data1['metadata']['name'] = "name1"
data2['metadata']['name'] = "name2"
data2['spec']['cidr'] = "10.10.1.0/24"
else:
data1['metadata']['name'] = "name1"
data2['metadata']['name'] = "name2"
data1['metadata']['namespace'] = ""
data2['metadata']['namespace'] = "namespace2"
rc = calicoctl("create", data=data1)
rc.assert_no_error()
rc = calicoctl("create", data=data2)
rc.assert_no_error()
# We expect the namespace to be defaulted to "default"
# if not specified. Tweak the namespace in data1 to be default so that
# we can use it to compare against the calicoctl get output.
data1['metadata']['namespace'] = "default"
if kind == "WorkloadEndpoint":
data1['metadata']['labels']['projectcalico.org/namespace'] = 'default'
# Get the resource with name1 and namespace2. For a namespaced
# resource this should match the modified data to default the
# namespace.
rc = calicoctl("get %s %s --namespace default -o yaml" % (kind, data1['metadata']['name']))
rc.assert_data(data1)
if kind == "WorkloadEndpoint":
data2['metadata']['labels']['projectcalico.org/namespace'] = 'namespace2'
# Get the resource type for all namespaces. For a namespaced resource
# this will return everything.
rc = calicoctl("get %s --all-namespaces -o yaml" % kind)
rc.assert_list(kind, [data1, data2])
# For namespaced resources, if you do a list without specifying the
# namespace we'll just get the default namespace.
rc = calicoctl("get %s -o yaml" % kind)
rc.assert_list(kind, [data1])
# For namespaced resources, if you do a list specifying a namespace
# we'll get results for that namespace.
rc = calicoctl("get %s -o yaml -n namespace2" % kind)
rc.assert_list(kind, [data2])
# Doing a get by file will use the namespace in the file.
rc = calicoctl("get -o yaml", data1)
rc.assert_data(data1)
rc = calicoctl("get -o yaml", data2)
rc.assert_data(data2)
# Doing a get by file will use the default namespace if not specified
# in the file or through the CLI args.
data1_no_ns = copy.deepcopy(data1)
del (data1_no_ns['metadata']['namespace'])
rc = calicoctl("get -o yaml", data1_no_ns)
rc.assert_data(data1)
rc = calicoctl("get -o yaml -n namespace2", data1_no_ns)
rc.assert_error(NOT_FOUND)
data2_no_ns = copy.deepcopy(data2)
del(data2_no_ns['metadata']['namespace'])
rc = calicoctl("get -o yaml -n namespace2", data2_no_ns)
rc.assert_data(data2)
rc = calicoctl("get -o yaml", data2_no_ns)
rc.assert_error(NOT_FOUND)
# Deleting without a namespace will delete the default.
rc = calicoctl("delete %s %s" % (kind, data1['metadata']['name']))
rc.assert_no_error()
rc = calicoctl("delete %s %s" % (kind, data2['metadata']['name']))
rc.assert_error(NOT_FOUND)
rc = calicoctl("delete", data2)
rc.assert_no_error()
def test_bgpconfig(self):
"""
Test CRUD commands behave as expected on the BGP configuration resource:
"""
# Create a new default BGPConfiguration and get it to determine the current
# resource version.
rc = calicoctl("create", data=bgpconfig_name1_rev1)
rc.assert_no_error()
rc = calicoctl(
"get bgpconfig %s -o yaml" % name(bgpconfig_name1_rev1))
rc.assert_no_error()
rev0 = rc.decoded
# Replace the BGP Configuration (with no resource version) and get it to
# assert the resource version is not the same.
rc = calicoctl("replace", data=bgpconfig_name1_rev2)
rc.assert_no_error()
rc = calicoctl(
"get bgpconfig %s -o yaml" % name(bgpconfig_name1_rev2))
rc.assert_no_error()
rev1 = rc.decoded
self.assertNotEqual(rev0['metadata']['resourceVersion'], rev1['metadata']['resourceVersion'])
# Attempt to delete the default resource by name (i.e. without using a resource version).
# rc = calicoctl("delete bgpconfig %s" % name(rev0))
# rc.assert_error(DELETE_DEFAULT)
rc = calicoctl("create", data=bgpconfig_name2_rev1)
rc.assert_no_error()
rc = calicoctl(
"get bgpconfig %s -o yaml" % name(bgpconfig_name2_rev1))
rc.assert_no_error()
rev2 = rc.decoded
# Apply an update to the BGP Configuration and assert the resource version is not the same.
rc = calicoctl("apply", data=bgpconfig_name2_rev2)
rc.assert_no_error()
rc = calicoctl(
"get bgpconfig %s -o yaml" % name(bgpconfig_name2_rev2))
rc.assert_no_error()
rev3 = rc.decoded
self.assertNotEqual(rev2['metadata']['resourceVersion'], rev3['metadata']['resourceVersion'])
# Attempt to apply an update to change fields that are for default configs ONLY
rc = calicoctl("apply", data=bgpconfig_name2_rev3)
rc.assert_error(SET_DEFAULT)
# Delete the resource by name (i.e. without using a resource version).
rc = calicoctl("delete bgpconfig %s" % name(rev3))
rc.assert_no_error()
def test_felixconfig(self):
"""
Test CRUD commands behave as expected on the felix configuration resource:
"""
# Create a new default BGPConfiguration and get it to determine the current
# resource version.
rc = calicoctl("create", data=felixconfig_name1_rev1)
rc.assert_no_error()
rc = calicoctl(
"get felixconfig %s -o yaml" % name(felixconfig_name1_rev1))
rc.assert_no_error()
rev0 = rc.decoded
# Replace the BGP Configuration (with no resource version) and get it to
# assert the resource version is not the same.
rc = calicoctl("replace", data=felixconfig_name1_rev2)
rc.assert_no_error()
rc = calicoctl(
"get felixconfig %s -o yaml" % name(felixconfig_name1_rev2))
rc.assert_no_error()
rev1 = rc.decoded
self.assertNotEqual(rev0['metadata']['resourceVersion'], rev1['metadata']['resourceVersion'])
# Apply an update to the BGP Configuration and assert the resource version is not the same.
rc = calicoctl("apply", data=felixconfig_name1_rev1)
rc.assert_no_error()
rc = calicoctl(
"get felixconfig %s -o yaml" % name(felixconfig_name1_rev1))
rc.assert_no_error()
rev2 = rc.decoded
self.assertNotEqual(rev1['metadata']['resourceVersion'], rev2['metadata']['resourceVersion'])
# Apply an update to the felix configuration with a large duration.
rc = calicoctl("apply", data=felixconfig_name1_rev3)
rc.assert_no_error()
rc = calicoctl(
"get felixconfig %s -o yaml" % name(felixconfig_name1_rev3))
rc.assert_no_error()
rev3 = rc.decoded
self.assertEqual(rev3['spec']['netlinkTimeout'], '2m5s')
self.assertEqual(rev3['spec']['reportingTTL'], '2h45m10s')
# Delete the resource by name (i.e. without using a resource version).
rc = calicoctl("delete felixconfig %s" % name(rev2))
rc.assert_no_error()
def test_clusterinfo(self):
"""
Test CRUD commands behave as expected on the cluster information resource:
"""
# Try to create a cluster info, should be rejected.
rc = calicoctl("create", data=clusterinfo_name1_rev1)
rc.assert_error(NOT_SUPPORTED)
rc = calicoctl("get clusterinfo %s -o yaml" % name(clusterinfo_name1_rev1))
rc.assert_error(NOT_FOUND)
# Replace the cluster information (with no resource version) - assert not supported.
rc = calicoctl("replace", data=clusterinfo_name1_rev2)
rc.assert_error(NOT_FOUND)
# Apply an update to the cluster information and assert not found (we need the node to
# create it).
rc = calicoctl("apply", data=clusterinfo_name1_rev2)
rc.assert_error(NOT_SUPPORTED)
# Delete the resource by name (i.e. without using a resource version) - assert not
# supported.
rc = calicoctl("delete clusterinfo %s" % name(clusterinfo_name1_rev1))
rc.assert_error(NOT_SUPPORTED)
# Create a node, this should trigger auto-creation of a cluster info.
rc = calicoctl("create", data=node_name2_rev1)
rc.assert_no_error()
rc = calicoctl("get clusterinfo %s -o yaml" % name(clusterinfo_name1_rev1))
rc.assert_no_error()
# Check the GUID is populated.
self.assertRegexpMatches(rc.decoded["spec"]["clusterGUID"], "^[a-f0-9]{32}$")
# The GUID is unpredictable so tweak our test data to match it.
ci = copy.deepcopy(clusterinfo_name1_rev1)
ci["spec"]["clusterGUID"] = rc.decoded["spec"]["clusterGUID"]
rc.assert_data(ci)
# Create a second node, this should keep the existing cluster info.
rc = calicoctl("create", data=node_name3_rev1)
rc.assert_no_error()
rc = calicoctl("get clusterinfo %s -o yaml" % name(clusterinfo_name1_rev1))
rc.assert_no_error()
rc.assert_data(ci) # Implicitly checks the GUID is still the same.
@parameterized.expand([
('create', 'replace'),
('apply', 'apply'),
])
def test_metadata_unchanged(self, create_cmd, update_cmd):
"""
Test that the metadata fields other than labels and annotations cannot be changed
in create and update operations by applying a resource twice.
"""
# Create a new Host Endpoint and get it to determine the
# current resource version (first checking that it doesn't exist).
rc = calicoctl(
"get hostendpoint %s -o yaml" % name(hostendpoint_name1_rev2))
rc.assert_error(text=NOT_FOUND)
rc = calicoctl(create_cmd, data=hostendpoint_name1_rev2)
rc.assert_no_error()
rc = calicoctl(
"get hostendpoint %s -o yaml" % name(hostendpoint_name1_rev2))
rc.assert_no_error()
rev0 = rc.decoded
self.assertIn('uid', rev0['metadata'])
self.assertIn('creationTimestamp', rev0['metadata'])
# Update the Host Endpoint (with no resource version) and get it to
# assert the resource version is not the same.
rc = calicoctl(update_cmd, data=hostendpoint_name1_rev3)
rc.assert_no_error()
rc = calicoctl(
"get hostendpoint %s -o yaml" % name(hostendpoint_name1_rev3))
rc.assert_no_error()
rev1 = rc.decoded
self.assertNotEqual(rev0['metadata']['resourceVersion'], rev1['metadata']['resourceVersion'])
# Validate that only annotations and labels were changed in the metadata
self.assertNotIn('selfLink', rev1['metadata'])
self.assertNotIn('generation', rev1['metadata'])
self.assertNotIn('finalizers', rev1['metadata'])
self.assertIn('uid', rev1['metadata'])
self.assertIn('creationTimestamp', rev1['metadata'])
self.assertNotEqual(rev1['metadata']['uid'], hostendpoint_name1_rev3['metadata']['uid'])
self.assertNotEqual(rev1['metadata']['creationTimestamp'], hostendpoint_name1_rev3['metadata']['creationTimestamp'])
self.assertEqual(rev1['metadata'].get('name', ''), hostendpoint_name1_rev2['metadata'].get('name', ''))
self.assertEqual(rev1['metadata']['labels'], hostendpoint_name1_rev3['metadata']['labels'])
self.assertNotEqual(rev1['metadata']['labels'], rev0['metadata']['labels'])
self.assertEqual(rev1['metadata']['annotations'], hostendpoint_name1_rev3['metadata']['annotations'])
self.assertNotEqual(rev1['metadata']['annotations'], rev0['metadata']['labels'])
# Validate that creationTimestamp and UID are unchanged from when they were created
self.assertEqual(rev1['metadata']['creationTimestamp'], rev0['metadata']['creationTimestamp'])
self.assertEqual(rev1['metadata']['uid'], rev0['metadata']['uid'])
# Delete the resource without using a resource version.
rc = calicoctl("delete hostendpoint %s" % name(rev1))
rc.assert_no_error()
def test_export_flag(self):
"""
Test that the cluster-specific information gets stripped
out of a "get" request.
"""
# Create a new Network Policy with all metadata specified
rc = calicoctl('create', data=networkpolicy_name2_rev1)
rc.assert_no_error()
rc = calicoctl(
"get networkpolicy %s -o yaml" % name(networkpolicy_name2_rev1))
rc.assert_no_error()
rev0 = rc.decoded
self.assertIn('uid', rev0['metadata'])
self.assertIn('creationTimestamp', rev0['metadata'])
self.assertEqual(rev0['metadata']['name'], networkpolicy_name2_rev1['metadata']['name'])
self.assertEqual(rev0['metadata']['namespace'], networkpolicy_name2_rev1['metadata']['namespace'])
self.assertIn('resourceVersion', rev0['metadata'])
# Retrieve the Network Policy with the export flag and
# Verify that cluster-specific information is not present
rc = calicoctl(
"get networkpolicy %s -o yaml --export" % name(networkpolicy_name2_rev1))
rc.assert_no_error()
rev1 = rc.decoded
self.assertNotIn('uid', rev1['metadata'])
self.assertIsNone(rev1['metadata']['creationTimestamp'])
self.assertNotIn('namespace', rev1['metadata'])
self.assertNotIn('resourceVersion', rev1['metadata'])
self.assertEqual(rev1['metadata']['name'], rev0['metadata']['name'])
# Write the output to yaml so that it can be applied later
rev1['spec']['order'] = 100
writeyaml('/tmp/export_data.yaml', rev1)
# Verify that the cluster-specific information IS present if
# the export flag is used without specifying a specific resource.
rc = calicoctl(
"get networkpolicy -o yaml --export")
rc.assert_no_error()
rev2 = rc.decoded
self.assertEqual(len(rev2['items']), 1)
self.assertIn('uid', rev2['items'][0]['metadata'])
self.assertIsNotNone(rev2['items'][0]['metadata']['creationTimestamp'])
self.assertIn('namespace', rev2['items'][0]['metadata'])
self.assertIn('resourceVersion', rev2['items'][0]['metadata'])
self.assertEqual(rev2['items'][0]['metadata']['name'], rev0['metadata']['name'])
# Apply the output and verify that it did not error out
rc = calicoctl(
"apply -f %s" % '/tmp/export_data.yaml')
rc.assert_no_error()
rc = calicoctl(
"get networkpolicy %s -o yaml" % name(networkpolicy_name2_rev1))
rc.assert_no_error()
rev3 = rc.decoded
self.assertEqual(rev3['metadata']['name'], rev1['metadata']['name'])
self.assertEqual(rev3['spec']['order'], 100)
# Delete the resource without using a resource version.
rc = calicoctl("delete networkpolicy %s" % name(rev3))
rc.assert_no_error()
def test_disallow_crud_on_knp_defaults(self):
"""
Test that we disallow CRUD on a knp.default prefixed NetworkPolicy.
"""
k8s_np = copy.deepcopy(networkpolicy_name1_rev1)
k8s_np['metadata']['name'] = 'knp.default.foobarfizz'
rc = calicoctl("create", data=k8s_np)
rc.assert_error(text=NOT_SUPPORTED)
rc.assert_error(text=KUBERNETES_NP)
rc = calicoctl("apply", data=k8s_np)
rc.assert_error(text=NOT_FOUND)
rc = calicoctl("replace", data=k8s_np)
rc.assert_error(text=NOT_FOUND)
rc = calicoctl("delete", data=k8s_np)
rc.assert_error(text=NOT_SUPPORTED)
rc.assert_error(text=KUBERNETES_NP)
@parameterized.expand([
('replace'),
('apply'),
])
def test_disallow_update_old_resource_version(self, update_cmd):
"""
Test that we disallow updates on resources with old resource versions.
"""
rc = calicoctl("create", data=ippool_name1_rev1_v4)
rc.assert_no_error()
rc = calicoctl(
"get ippool %s -o yaml" % name(ippool_name1_rev1_v4))
rc.assert_no_error()
rev1 = rc.decoded
rc = calicoctl(update_cmd, data=rev1)
rc.assert_no_error()
rc = calicoctl(
"get ippool %s -o yaml" % name(ippool_name1_rev1_v4))
rc.assert_no_error()
rev2 = rc.decoded
self.assertNotEqual(rev1['metadata']['resourceVersion'], rev2['metadata']['resourceVersion'])
rc = calicoctl(update_cmd, data=rev1)
rc.assert_error(text=ERROR_CONFLICT)
# Delete the resource
rc = calicoctl("delete ippool %s" % name(ippool_name1_rev1_v4))
rc.assert_no_error()
#
#
# class TestCreateFromFile(TestBase):
# """
# Test calicoctl create command
# Test data is a pair of different resource objects of each type.
# Test creates one using json and the other using yaml, then we retrieve
# them and check the output objects are the same as we input when retrieved
# in both yaml and json formats.
# """
#
# testdata = [
# ("bgpPeer1", {
# 'apiVersion': API_VERSION,
# 'kind': 'BGPPeer',
# 'metadata': { 'name': 'bgppeer-123'},
# 'spec': {'node': 'node1',
# 'peerIP': '192.168.0.250',
# 'asNumber': 64514},
# }),
# ("bgpPeer2", {
# 'apiVersion': API_VERSION,
# 'kind': 'BGPPeer',
# 'metadata': { 'name': 'bgppeer-456'},
# 'spec': {'node': 'node2',
# 'peerIP': 'fd5f::6:ee',
# 'asNumber': 64590},
# }),
# ("hostEndpoint1", {
# 'apiVersion': API_VERSION,
# 'kind': 'HostEndpoint',
# 'metadata': { 'name': 'endpoint1', 'labels': {'type': 'database'}},
# 'spec': {'interfaceName': 'eth0',
# 'profiles': ['prof1',
# 'prof2'],
# 'node': 'host1'}
# }),
# ("hostEndpoint2", {
# 'apiVersion': API_VERSION,
# 'kind': 'HostEndpoint',
# 'metadata': { 'name': 'endpoint2', 'labels': {'type': 'frontend'}},
# 'spec': {'interfaceName': 'cali7',
# 'profiles': ['prof1',
# 'prof2'],
# 'node': 'host2',
# 'ports': [{"name": "tcp-port",
# "port": 1234,
# "protocol": "tcp"},
# {"name": "udp-port",
# "port": 5000,
# "protocol": "udp"}]}}
# }),
# ("workloadEndpoint1", {
# 'apiVersion': API_VERSION,
# 'kind': 'WorkloadEndpoint',
# 'metadata': {'name': 'endpoint2',
# 'labels': {'type': 'frontend'}},
# 'spec': {'interfaceName': 'cali7',
# 'profiles': ['prof1',
# 'prof2'],
# 'node': 'host2',
# 'orchestrator': 'orch',
# 'workload': 'workl',
# 'ipNetworks': ['10.0.0.1/32'],
# 'ports': [{"name": "tcp-port",
# "port": 1234,
# "protocol": "tcp"},
# {"name": "udp-port",
# "port": 5000,
# "protocol": "udp"}]}
# }),
# ("networkPolicy1", {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy1',
# 'namespace': 'default'},
# 'spec': {'egress': [{'action': 'allow',
# 'source': {
# 'selector':
# "type=='application'"},
# 'destination': {},
# }],
# 'ingress': [{'notICMP': {'type': 19, 'code': 255},
# 'ipVersion': 4,
# 'action': 'deny',
# 'destination': {
# 'notNets': ['10.3.0.0/16'],
# 'notPorts': ['110:1050'],
# 'notSelector': "type=='apples'",
# 'notTag': "bananas",
# 'nets': ['10.2.0.0/16'],
# 'ports': ['100:200'],
# 'selector':
# "type=='application'",
# 'tag': 'alphatag'},
# 'icmp': {'type': 10, 'code': 6},
# 'protocol': 'tcp',
# 'source': {
# 'notNets': ['10.1.0.0/16'],
# 'notPorts': [1050],
# 'notSelector': "type=='database'",
# 'notTag': 'bartag',
# 'nets': ['10.0.0.0/16'],
# 'ports': [1234,
# '10:1024',
# 'named-port'],
# 'selector':
# "type=='application'",
# 'tag': 'footag'}}],
# 'order': 100,
# 'selector': "type=='database'",
# 'types': ['ingress', 'egress']}
# }),
# ("networkPolicy2", {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy2',
# 'namespace': 'default'},
# 'spec': {'egress': [{'action': 'deny',
# 'destination': {},
# 'protocol': 'tcp',
# 'source': {}}],
# 'ingress': [{'action': 'allow',
# 'destination': {},
# 'protocol': 'udp',
# 'source': {}}],
# 'order': 100000,
# 'applyOnForward': True,
# 'doNotTrack': True,
# 'types': ['ingress', 'egress']}
# }),
# ("networkPolicy3", {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy2',
# 'namespace': 'default'},
# 'spec': {'egress': [{'action': 'allow',
# 'destination': {
# 'ports': ['http-port']},
# 'protocol': 'tcp',
# 'source': {}}],
# 'selector': "type=='application'",
# 'types': ['egress']}
# }),
# ("networkPolicy4", {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy2',
# 'namespace': 'default'},
# 'spec': {
# 'egress': [{
# 'action': 'allow',
# 'destination': {'ports': ['Telnet']},
# 'protocol': 'udp',
# 'source': {},
# }],
# 'ingress': [{
# 'action': 'allow',
# 'destination': {
# 'ports': ['echo', 53, 17, 'Quote']
# },
# 'protocol': 'udp',
# 'source': {},
# }],
# 'selector': "type=='application'",
# 'types': ['egress', 'ingress']
# }}),
# ("pool1", {'apiVersion': API_VERSION,
# 'kind': 'IPPool',
# 'metadata': {'name': 'ippool1'},
# 'spec': {'ipip': {'mode': "Always"},
# 'cidr': "10.0.1.0/24"}
# }),
# ("pool2", {'apiVersion': API_VERSION,
# 'kind': 'IPPool',
# 'metadata': {'name': 'ippool2'},
# 'spec': {'ipip': {'mode': 'CrossSubnet'},
# 'cidr': "10.0.2.0/24"}
# }),
# ("profile1", {'apiVersion': API_VERSION,
# 'kind': 'Profile',
# 'metadata': {
# 'labels': {'foo': 'bar'},
# 'name': 'profile1'
# },
# 'spec': {
# 'egress': [{'action': 'allow',
# 'destination': {},
# 'source': {
# 'selector': "type=='application'"}}],
# 'ingress': [{'notICMP': {'type': 19, 'code': 255},
# 'ipVersion': 4,
# 'action': 'deny',
# 'destination': {
# 'notNets': ['10.3.0.0/16'],
# 'notPorts': ['110:1050'],
# 'notSelector': "type=='apples'",
# 'notTag': "bananas",
# 'nets': ['10.2.0.0/16'],
# 'ports': ['100:200'],
# 'selector': "type=='application'",
# 'tag': 'alphatag'},
# 'icmp': {'type': 10, 'code': 6},
# 'protocol': 'tcp',
# 'source': {
# 'notNets': ['10.1.0.0/16'],
# 'notPorts': [1050],
# 'notSelector': "type=='database'",
# 'notTag': 'bartag',
# 'nets': ['10.0.0.0/16'],
# 'ports': [1234, '10:20'],
# 'selector': "type=='application'",
# 'tag': "production"}}],
# }}),
# ("profile2", {'apiVersion': API_VERSION,
# 'kind': 'Profile',
# 'metadata': {
# 'name': 'profile2',
# },
# 'spec': {
# 'egress': [{'action': 'allow',
# 'destination': {},
# 'source': {}}],
# 'ingress': [{'ipVersion': 6,
# 'action': 'deny',
# 'destination': {},
# 'source': {}}],
# }}),
# ]
#
# @parameterized.expand(testdata)
# def test_create_from_file_yaml(self, name, data):
# self._check_data_save_load(data)
# res_type = data['kind']
# logger.debug("Testing %s" % res_type)
# # Write out the files to load later
# self.writeyaml('/tmp/%s-1.yaml' % res_type, data)
#
# calicoctl("create", "/tmp/%s-1.yaml" % res_type)
#
# # Check both come out OK in yaml:
# self.check_data_in_datastore([data], res_type)
#
# # Check both come out OK in json:
# self.check_data_in_datastore([data], res_type, yaml_format=False)
#
# # Tidy up
# calicoctl("delete", "/tmp/%s-1.yaml" % res_type)
#
# # Check it deleted
# self.check_data_in_datastore([], res_type)
#
# @parameterized.expand(testdata)
# def test_create_from_file_json(self, name, data):
# self._check_data_save_load(data)
# res_type = data['kind']
# logger.debug("Testing %s" % res_type)
# # Write out the files to load later
# self.writejson('/tmp/%s-1.json' % res_type, data)
#
# calicoctl("create", "/tmp/%s-1.json" % res_type)
#
# # Check both come out OK in yaml:
# self.check_data_in_datastore([data], res_type)
#
# # Check both come out OK in json:
# self.check_data_in_datastore([data], res_type, yaml_format=False)
#
# # Tidy up
# calicoctl("delete", "/tmp/%s-1.json" % res_type)
#
# # Check it deleted
# self.check_data_in_datastore([], res_type)
#
# @parameterized.expand(testdata)
# def test_create_from_stdin_json(self, name, data):
# self._check_data_save_load(data)
# res_type = data['kind']
# logger.debug("Testing %s" % res_type)
# # Write out the files to load later
# self.writejson('/tmp/%s-1.json' % res_type, data)
#
# # Test use of create with stdin
# calicoctl("create", "/tmp/%s-1.json" % res_type, True)
#
# # Check both come out OK in yaml:
# self.check_data_in_datastore([data], res_type)
#
# # Check both come out OK in json:
# self.check_data_in_datastore([data], res_type, yaml_format=False)
#
# # Tidy up
# calicoctl("delete", "/tmp/%s-1.json" % res_type)
#
# # Check it deleted
# self.check_data_in_datastore([], res_type)
#
# @parameterized.expand(testdata)
# def test_create_from_stdin_yaml(self, name, data):
# self._check_data_save_load(data)
# res_type = data['kind']
# logger.debug("Testing %s" % res_type)
# # Write out the files to load later
# self.writeyaml('/tmp/%s-1.yaml' % res_type, data)
#
# # Test use of create with stdin
# calicoctl("create", "/tmp/%s-1.yaml" % res_type, True)
#
# # Check both come out OK in yaml:
# self.check_data_in_datastore([data], res_type)
#
# # Check both come out OK in yaml:
# self.check_data_in_datastore([data], res_type, yaml_format=False)
#
# # Tidy up
# calicoctl("delete", "/tmp/%s-1.yaml" % res_type)
#
# # Check it deleted
# self.check_data_in_datastore([], res_type)
#
# @parameterized.expand([
# ("bgpPeer",
# {
# 'apiVersion': API_VERSION,
# 'kind': 'BGPPeer',
# 'metadata': {'name': 'bgppeer-abc'},
# 'spec': {'asNumber': 64514,
# 'peerIP': '192.168.0.250',
# 'node': 'Node1'}
# },
# {
# 'apiVersion': API_VERSION,
# 'kind': 'BGPPeer',
# 'metadata': {'name': 'bgppeer-def'},
# 'spec': {'asNumber': 64590,
# 'peerIP': 'fd5f::6:ee',
# 'node': 'node2'}
# }
# ),
# ("hostEndpoint",
# {
# 'apiVersion': API_VERSION,
# 'kind': 'HostEndpoint',
# 'metadata': {'labels': {'type': 'database'},
# 'name': 'endpoint3'},
# 'spec': {'interfaceName': 'eth0',
# 'profiles': ['prof1',
# 'prof2'],
# 'node': 'host1',}
# },
# {
# 'apiVersion': API_VERSION,
# 'kind': 'HostEndpoint',
# 'metadata': {'labels': {'type': 'frontend'},
# 'name': 'endpoint4'},
# 'spec': {'interfaceName': 'cali7',
# 'profiles': ['prof1',
# 'prof2'],
# 'node': 'host2',}
# },
# ),
# ("policy",
# {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy-123',
# 'namespace': 'default' },
# 'spec': {'egress': [{'action': 'allow',
# 'source': {
# 'selector': "type=='application'"},
# 'destination': {},
# }],
# 'ingress': [{'notICMP': {'type': 19, 'code': 255},
# 'ipVersion': 4,
# 'action': 'deny',
# 'destination': {
# 'notNets': ['10.3.0.0/16'],
# 'notPorts': ['110:1050'],
# 'notSelector': "type=='apples'",
# 'notTag': "bananas",
# 'nets': ['10.2.0.0/16'],
# 'ports': ['100:200'],
# 'selector': "type=='application'",
# 'tag': 'alphatag'},
# 'icmp': {'type': 10, 'code': 6},
# 'protocol': 'tcp',
# 'source': {'notNets': ['10.1.0.0/16'],
# 'notPorts': [1050],
# 'notSelector': "type=='database'",
# 'notTag': 'bartag',
# 'nets': ['10.0.0.0/16'],
# 'ports': [1234, '10:1024'],
# 'selector': "type=='application'",
# 'tag': 'footag'}}],
# 'order': 100,
# 'selector': "type=='database'",
# 'types': ['ingress', 'egress']}},
# {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy-456',
# 'namespace': 'default' },
# 'spec': {'egress': [{'action': 'deny',
# 'destination': {},
# 'protocol': 'tcp',
# 'source': {}}],
# 'ingress': [{'action': 'allow',
# 'destination': {},
# 'protocol': 'udp',
# 'source': {}}],
# 'order': 100000,
# 'types': ['ingress', 'egress']}},
# ),
# ("ipPool",
# {'apiVersion': API_VERSION,
# 'kind': 'IPPool',
# 'metadata': {'name': 'ippool-3'},
# 'spec': {'ipip': {'mode': 'Always'},
# 'cidr': "10.0.1.0/24"}
# },
# {'apiVersion': API_VERSION,
# 'kind': 'IPPool',
# 'metadata': {'name': 'ippool-4'},
# 'spec': {'ipip': {'mode': 'Always'},
# 'cidr': "10.0.2.0/24"}
# },
# ),
# ("profile",
# {'apiVersion': API_VERSION,
# 'kind': 'Profile',
# 'metadata': {
# 'labels': {'foo': 'bar'},
# 'name': 'profile-2',
# },
# 'spec': {
# 'egress': [{'action': 'allow',
# 'destination': {},
# 'source': {
# 'selector': "type=='application'"}}],
# 'ingress': [{'notICMP': {'type': 19, 'code': 255},
# 'ipVersion': 4,
# 'action': 'deny',
# 'destination': {
# 'notNets': ['10.3.0.0/16'],
# 'notPorts': ['110:1050'],
# 'notSelector': "type=='apples'",
# 'notTag': "bananas",
# 'nets': ['10.2.0.0/16'],
# 'ports': ['100:200'],
# 'selector': "type=='application'",
# 'tag': 'alphatag'},
# 'icmp': {'type': 10, 'code': 6},
# 'protocol': 'tcp',
# 'source': {'notNets': ['10.1.0.0/16'],
# 'notPorts': [1050],
# 'notSelector': "type=='database'",
# 'notTag': 'bartag',
# 'nets': ['10.0.0.0/16'],
# 'ports': [1234, '10:20'],
# 'selector': "type=='application'",
# 'tag': "production"}}],
# }},
# {'apiVersion': API_VERSION,
# 'kind': 'Profile',
# 'metadata': {
# 'name': 'profile-3',
# },
# 'spec': {
# 'egress': [{'action': 'allow',
# 'destination': {},
# 'source': {}}],
# 'ingress': [{'ipVersion': 6,
# 'action': 'deny',
# 'destination': {},
# 'source': {}}],
# }},
# )
# ])
# def test_create_from_file(self, res, data1, data2):
# self._check_data_save_load(data1)
# self._check_data_save_load(data2)
# logger.debug("Testing %s" % res)
# # Write out the files to load later
# self.writeyaml('/tmp/%s-1.yaml' % res, data1)
# self.writejson('/tmp/%s-2.json' % res, data2)
#
# calicoctl("create", "/tmp/%s-1.yaml" % res)
# # Test use of create with stdin
# #TODO - There shouldn't be a hardcoded path here
# calicoctl("create", "/tmp/%s-2.json" % res, True)
#
# # Check both come out OK in yaml:
# self.check_data_in_datastore([data1, data2], res)
#
# # Check both come out OK in json:
# self.check_data_in_datastore([data1, data2], res, yaml_format=False)
#
# # Tidy up
# calicoctl("delete", "/tmp/%s-1.yaml" % res)
# calicoctl("delete", "/tmp/%s-2.json" % res)
#
# # Check it deleted
# self.check_data_in_datastore([], res)
#
# @parameterized.expand([
# ("bgpPeer",
# {
# 'apiVersion': API_VERSION,
# 'kind': 'BGPPeer',
# 'metadata': {'name': 'bgppeer-5'},
# 'spec': {'asNumber': 64514,
# 'node': 'Node1',
# 'peerIP': '192.168.0.250'}
# },
# {
# 'apiVersion': API_VERSION,
# 'kind': 'BGPPeer',
# 'metadata': {'name': 'bgppeer-6'},
# 'spec': {'asNumber': 64590,
# 'node': 'Node1',
# 'peerIP': '192.168.0.250'}
# }
# ),
# ("hostEndpoint",
# {
# 'apiVersion': API_VERSION,
# 'kind': 'HostEndpoint',
# 'metadata': {'labels': {'type': 'database'},
# 'name': 'endpoint-7'},
# 'spec': {'interfaceName': 'eth0',
# 'profiles': ['prof1',
# 'prof2'],
# 'node': 'host1'}
# },
# {
# 'apiVersion': API_VERSION,
# 'kind': 'HostEndpoint',
# 'metadata': {'labels': {'type': 'frontend'},
# 'name': 'endpoint-8'},
# 'spec': {'node': 'host1',
# 'interfaceName': 'cali7',
# 'profiles': ['prof1',
# 'prof2']}
# },
# ),
# ("policy",
# {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy3',
# 'namespace': 'default' },
# 'spec': {'egress': [{'action': 'deny',
# 'protocol': 'tcp',
# 'destination': {},
# 'source': {
# 'notNets': ['aa:bb:cc:ff::/100', 'aa:bb:cc:fe::/100'],
# 'notPorts': [100],
# 'notTag': 'abcd'}}],
# 'ingress': [{'action': 'allow',
# 'destination': {
# 'nets': ['10.20.30.40/32'],
# 'tag': 'database'},
# 'icmp': {'code': 100,
# 'type': 10},
# 'protocol': 'udp',
# 'source': {
# 'nets': ['1.2.0.0/16'],
# 'ports': [1, 2, 3, 4],
# 'tag': 'web'}}],
# 'order': 6543215.5,
# 'types': ['ingress', 'egress']}},
# {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy4',
# 'namespace': 'default'},
# 'spec': {'egress': [{'action': 'deny',
# 'protocol': 'tcp',
# 'destination': {},
# 'source': {
# 'notNets': ['aa:bb:cc::/100'],
# 'notPorts': [100],
# 'notTag': 'abcd'}}],
# 'ingress': [{'action': 'allow',
# 'destination': {
# 'nets': ['10.20.30.40/32'],
# 'tag': 'database'},
# 'icmp': {'code': 100,
# 'type': 10},
# 'protocol': 'udp',
# 'source': {
# 'nets': ['1.2.3.0/24'],
# 'ports': [1, 2, 3, 4],
# 'tag': 'web'}}],
# 'order': 100000,
# 'types': ['ingress', 'egress']}},
# ),
# # https://github.com/projectcalico/libcalico-go/issues/230
# ("policy",
# {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy5',
# 'namespace': 'default' },
# 'spec': {'egress': [{'action': 'deny',
# 'protocol': 'tcp',
# 'destination': {},
# 'source': {
# 'notNets': ['aa:bb:cc:ff::/100'],
# 'notPorts': [100],
# 'notTag': 'abcd'}}],
# 'ingress': [{'action': 'allow',
# 'destination': {
# 'nets': ['10.20.30.40/32'],
# 'tag': 'database'},
# 'icmp': {'code': 100,
# 'type': 10},
# 'protocol': 'udp',
# 'source': {
# 'nets': ['1.2.0.0/16'],
# 'ports': [1, 2, 3, 4],
# 'tag': 'web'}}],
# 'order': 6543215.321,
# 'types': ['ingress', 'egress']}},
# {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy6',
# 'namespace': 'default'},
# 'spec': {'egress': [{'action': 'deny',
# 'protocol': 'tcp',
# 'destination': {},
# 'source': {
# 'notNets': ['aa:bb:cc::/100'],
# 'notPorts': [100],
# 'notTag': 'abcd'}}],
# 'ingress': [{'action': 'allow',
# 'destination': {
# 'nets': ['10.20.30.40/32'],
# 'tag': 'database'},
# 'icmp': {'code': 100,
# 'type': 10},
# 'protocol': 'udp',
# 'source': {
# 'nets': ['1.2.3.0/24'],
# 'ports': [1, 2, 3, 4],
# 'tag': 'web'}}],
# 'order': 100000,
# 'types': ['ingress', 'egress']}},
# ),
# ("ipPool",
# {'apiVersion': API_VERSION,
# 'kind': 'IPPool',
# 'metadata': {'name': 'ippool-5'},
# 'spec': {'cidr': "10.0.1.0/24"}
# },
# {'apiVersion': API_VERSION,
# 'kind': 'IPPool',
# 'metadata': {'name': 'ippool-6'},
# 'spec': {'ipip': {'mode': 'Always'},
# 'cidr': "10.0.1.0/24"}
# },
# ),
# ("profile",
# {'apiVersion': API_VERSION,
# 'kind': 'Profile',
# 'metadata': {
# 'name': 'profile-9',
# 'labels': {'type': 'database'},
# },
# 'spec': {
# 'egress': [{
# 'source': {},
# 'destination': {},
# 'action': 'deny'}],
# 'ingress': [{
# 'source': {},
# 'destination': {},
# 'action': 'deny'}],
# }, },
# {'apiVersion': API_VERSION,
# 'kind': 'Profile',
# 'metadata': {
# 'labels': {'type': 'frontend'},
# 'name': 'profile-10',
# },
# 'spec': {
# 'egress': [{
# 'source': {},
# 'destination': {},
# 'action': 'deny'}],
# 'ingress': [{
# 'source': {},
# 'destination': {},
# 'action': 'deny'}],
# }},
# )
# ])
# def test_apply_create_replace(self, res, data1, data2):
# """
# Test calicoctl create/apply/replace/delete commands.
# Test data is a pair of resource objects - both are the same object,
# but the details differ in some way to simulate a user updating the
# object.
# """
# self._check_data_save_load(data1)
# self._check_data_save_load(data2)
# logger.debug("Testing %s" % res)
#
# # Write test data files for loading later
# self.writeyaml('/tmp/data1.yaml', data1)
# self.writejson('/tmp/data2.json', data2)
#
# # apply - create when not present
# calicoctl("apply", "/tmp/data1.yaml")
# # Check it went in OK
# self.check_data_in_datastore([data1], res)
#
# # create - skip overwrite with data2
# calicoctl("create", "/tmp/data2.json --skip-exists")
# # Check that nothing's changed
# self.check_data_in_datastore([data1], res)
#
# # replace - overwrite with data2
# calicoctl("replace", "/tmp/data2.json")
# # Check that we now have data2 in the datastore
# self.check_data_in_datastore([data2], res)
#
# # apply - overwrite with data1
# calicoctl("apply", "/tmp/data1.yaml")
# # Check that we now have data1 in the datastore
# self.check_data_in_datastore([data1], res)
#
# # delete
# calicoctl("delete --filename=/tmp/data1.yaml")
# # Check it deleted
# self.check_data_in_datastore([], res)
#
# def _check_data_save_load(self, data):
# """
# Confirms that round tripping the data via json and yaml format works
# OK so that we can be sure any errors the tests find are due to the
# calicoctl code under test
# :param data: The dictionary of test data to check
# :return: None.
# """
# exp_data=data #['kind']+"List"
#
# # Do yaml first
# self.writeyaml('/tmp/test', data)
# with open('/tmp/test', 'r') as f:
# output = yaml.safe_load(f.read())
# self.assert_same(exp_data, output)
# # Now check json
# self.writejson('/tmp/test', data)
# with open('/tmp/test', 'r') as f:
# output = json.loads(f.read())
# self.assert_same(exp_data, output)
#
class InvalidData(TestBase):
testdata = [
("bgpPeer-invalidASnum", {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {'name': 'bgppeer2'},
'spec': {'asNumber': 4294967296, # Valid numbers are <=4294967295
'node': 'node1',
'peerIP': '192.168.0.250',
'scope': 'node'}
}, 'cannot unmarshal number into Go value of type string'),
("bgpPeer-invalidIP", {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {'name': 'bgppeer3'},
'spec': {'asNumber': 64513,
'node': 'node1',
'peerIP': '192.168.0.256',
}
}, "error with field peerIP = '192.168.0.256'"),
("bgpPeer-apiversion", {
'apiVersion': 'v7',
'kind': 'BGPPeer',
'metadata': {'name': 'bgppeer4'},
'spec': {'asNumber': 64513,
'node': 'node1',
'peerIP': '192.168.0.250',
}
}, 'Unknown resource type (BGPPeer) and/or version (v7)'),
("bgpPeer-invalidIpv6", {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {'name': 'bgppeer5'},
'spec': {'asNumber': 64590,
'node': 'node2',
'peerIP': 'fd5f::6::ee',
}
}, "error with field peerIP = 'fd5f::6::ee'"),
("bgpPeer-invalidnodename", {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {'name': 'bgppeer6'},
'spec': {'asNumber': 64590,
'node': 'node 2',
'peerIP': 'fd5f::6:ee',
}
}, "error with field node = 'node 2'"),
# See issue https://github.com/projectcalico/libcalico-go/issues/248
("bgpPeer-unrecognisedfield", {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {'name': 'bgppeer7'},
'spec': {'asNumber': 64590,
'unknown': 'thing',
'node': 'node2',
'peerIP': 'fd5f::6:ee',
}
}, 'field in document is not recognized or is in the wrong location: unknown'),
# See issue https://github.com/projectcalico/libcalico-go/issues/222
# ("bgpPeer-longname", {
# 'apiVersion': API_VERSION,
# 'kind': 'BGPPeer',
# 'metadata': {'name': 'bgppeer8'},
# 'spec': {'asNumber': 64590,
# 'node':
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest'
# 'testtesttesttesttesttesttesttesttesttesttest',
# 'peerIP': 'fd5f::6:ee',
# }
# }),
("hostEndpoint-invalidInterface", {
'apiVersion': API_VERSION,
'kind': 'HostEndpoint',
'metadata': {'labels': {'type': 'database'},
'name': 'endpoint1'},
'spec': {'interfaceName': 'wibblywobblyeth0', # overlength interface name
'profiles': ['prof1',
'prof2'],
'node': 'host1',
}
}, "error with field interfaceName = 'wibblywobblyeth0'"),
# https://github.com/projectcalico/libcalico-go/pull/236/files
("policy-invalidHighPortinList", {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {'name': 'policy2'},
'spec': {'Egress': [{'action': 'Deny',
'destination': {},
'source': {
'protocol': 'TCP',
'ports': [10, 90, 65536] # Max port is 65535
},
}],
'Ingress': [{'action': 'Allow',
'destination': {},
'protocol': 'UDP',
'source': {}}],
'order': 100000,
'selector': ""}
}, 'cannot unmarshal number 65536 into Go value of type uint16'),
# https://github.com/projectcalico/libcalico-go/issues/248
("policy-invalidHighPortinRange", {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {'name': 'policy2'},
'spec': {'Egress': [{'action': 'Deny',
'destination': {},
'source': {
'protocol': 'TCP',
'ports': ['1:65536'] # Max port is 65535
},
}],
'Ingress': [{'action': 'Allow',
'destination': {},
'protocol': 'UDP',
'source': {}}],
'order': 100000,
'selector': ""}
}, 'invalid maximum port number in range (1:65536)'),
("policy-invalidLowPortinRange", {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {'name': 'policy2'},
'spec': {'Egress': [{'action': 'Deny',
'destination': {},
'protocol': 'TCP',
'source': {
'ports': ['0:65535'], # Min port is 1
},
}],
'Ingress': [{'action': 'Allow',
'destination': {},
'protocol': 'UDP',
'source': {}}],
'order': 100000,
'selector': ""}
}, "error with field Port = '0' (port range invalid, port number must be between 1 and 65535)"),
("policy-invalidLowPortinList", {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {'name': 'policy2'},
'spec': {'Egress': [{'action': 'Deny',
'destination': {},
'protocol': 'TCP',
'source': {
'ports': [0, 10, 80] # Min port is 1
},
}],
'Ingress': [{'action': 'Allow',
'destination': {},
'protocol': 'UDP',
'source': {}}],
'order': 100000,
'selector': ""}
}, "error with field Port = '0' (port range invalid, port number must be between 1 and 65535)"),
("policy-invalidReversedRange", {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {'name': 'policy2'},
'spec': {'Egress': [{'action': 'Deny',
'destination': {},
'protocol': 'TCP',
'source': {
'ports': ['65535:1'] # range should be low-high
},
}],
'Ingress': [{'action': 'Allow',
'destination': {},
'protocol': 'UDP',
'source': {}}],
'order': 100000,
'selector': ""}
}, 'minimum port number (65535) is greater than maximum port number (1) in port range'),
("policy-invalidAction", {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {'name': 'policy2'},
'spec': {'Egress': [{'action': 'jumpupanddown', # invalid action
'destination': {},
'protocol': 'TCP',
'source': {},
}],
'Ingress': [{'action': 'Allow',
'destination': {},
'protocol': 'UDP',
'source': {}}],
'order': 100000,
'selector': ""}
}, "error with field action = 'jumpupanddown'"),
("policy-NetworkPolicyNameRejected", {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {'name': 'knp.default.rejectmeplease',
'namespace': 'default'},
'spec': {'Egress': [{'action': 'Allow',
'destination': {},
'protocol': 'TCP',
'source': {},
}],
'Ingress': [{'action': 'Allow',
'destination': {},
'protocol': 'UDP',
'source': {}}],
'order': 100000,
'selector': ""}
}, 'kubernetes network policies must be managed through the kubernetes API'),
("pool-invalidNet1", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'pool-invalid-net-1'},
'spec': {
'ipipMode': 'Always',
'cidr': "10.0.1.0/33"} # impossible mask
}, "error with field cidr = '10.0.1.0/33'"),
("pool-invalidNet2", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'pool-invalid-net-1'},
'spec': {
'ipipMode': 'Always',
'cidr': "10.0.256.0/24"} # invalid octet
}, "error with field cidr = '10.0.256.0/24'"),
("pool-invalidNet3", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'pool-invalid-net-1'},
'spec': {
'ipipMode': 'Always',
'cidr': "10.0.250.0"} # no mask
}, "error with field IPpool.CIDR = '10.0.250.0/32' "
"(IP pool size is too small (min /26) for use with Calico IPAM)"),
("pool-invalidNet4", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'pool-invalid-net-1'},
'spec': {
'ipipMode': 'Never',
'cidr': "fd5f::2::1/32"} # too many ::
}, "error with field cidr = 'fd5f::2::1/32'"),
# https://github.com/projectcalico/libcalico-go/issues/224
# ("pool-invalidNet5a", {'apiVersion': API_VERSION,
# 'kind': 'IPPool',
# 'metadata': {'cidr': "::/0"}, # HUGE pool
# }),
# ("pool-invalidNet5b", {'apiVersion': API_VERSION,
# 'kind': 'IPPool',
# 'metadata': {'cidr': "1.1.1.1/0"}, # BIG pool
# }),
("pool-invalidNet6", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'invalid-net-6'},
'spec': {
'ipipMode': 'Never',
'cidr': "::/128",
}
            # a /128 pool holds only a single address - too small for Calico IPAM (min /122)
}, "error with field IPpool.CIDR = '::/128' "
"(IP pool size is too small (min /122) for use with Calico IPAM)"),
("pool-invalidNet7", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'invalid-net-7'},
'spec': {
'cidr': "192.168.0.0/27"} # invalid mask
}, "error with field IPpool.CIDR = '192.168.0.0/27' "
"(IP pool size is too small (min /26) for use with Calico IPAM)"),
("pool-invalidNet8", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'invalid-net-8'},
'spec': {
'ipipMode': 'Never',
'cidr': "fd5f::1/123",
} # invalid mask
}, "error with field cidr = 'fd5f::1/123'"),
("pool-invalidIpIp1", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'invalid-ipip-1'},
'spec': {'disabled': 'True', # disabled value must be a bool
'cidr': "10.0.1.0/24"}
}, "cannot parse string 'True' into field IPPoolSpec.disabled of type bool"),
("pool-invalidIpIp2", {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {'name': 'invalid-ipip-2'},
'spec': {
'disabled': 'Maybe',
'cidr': "10.0.1.0/24"}
}, "cannot parse string 'Maybe' into field IPPoolSpec.disabled of type bool"),
("profile-ICMPtype", {
'apiVersion': API_VERSION,
'kind': 'Profile',
'metadata': {
'name': 'profile2',
},
'spec': {
'Egress': [{'action': 'Allow',
'destination': {},
'source': {}}],
'Ingress': [{'ipVersion': 6,
'ICMP': {'type': 256, # max value 255
'code': 255},
'action': 'Deny',
'protocol': 'ICMP',
'destination': {},
'source': {}}],
}
}, "error with field type = '256'"),
("profile-ICMPcode", {
'apiVersion': API_VERSION,
'kind': 'Profile',
'metadata': {
'name': 'profile2',
},
'spec': {
'Egress': [{'action': 'Allow',
'destination': {},
'source': {}}],
'Ingress': [{'ipVersion': 6,
'ICMP': {'type': 19,
'code': 256}, # max value 255
'action': 'Deny',
'protocol': 'ICMP',
'destination': {},
'source': {}}],
}
}, "error with field code = '256'"),
("compound-config", [{
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': "compound-config",
},
'spec': {
'node': 'node1',
'peerIP': '192.168.0.250',
'asNumber': 64513
}
},
{
'apiVersion': API_VERSION,
'kind': 'Profile',
'metadata': {
'name': 'profile2',
},
'spec': {
'Egress': [{'action': 'Allow',
'destination': {},
'source': {}}],
'Ingress': [{'ipVersion': 6,
'ICMP': {'type': 256, # 1-byte field
'code': 255},
'action': 'Deny',
'protocol': 'ICMP',
'destination': {},
'source': {}}],
},
}], "error with field type = '256'"),
]
@parameterized.expand(testdata)
def test_invalid_profiles_rejected(self, name, testdata, error):
def check_no_data_in_store(testdata):
out = calicoctl("get %s --output=yaml" % testdata['kind'])
out.assert_output_contains(
'apiVersion: %s\n'
'items: []\n'
'kind: %sList\n'
'metadata:\n'
' resourceVersion: ' % (API_VERSION, testdata['kind'])
)
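        # Write the invalid data out for reference, attempt to create it, then confirm
        # nothing was stored before asserting on the reported error.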
log_and_run("cat << EOF > %s\n%s" % ("/tmp/testfile.yaml", testdata))
ctl = calicoctl("create", testdata)
if name.startswith('compound'):
for data in testdata:
check_no_data_in_store(data)
else:
check_no_data_in_store(testdata)
# Assert that we saw the correct error being reported
ctl.assert_error(error)
# TODO: uncomment this once we have default field handling in libcalico
# class TestTypes(TestBase):
# """
# Test calicoctl types field. Confirm that for a policy with:
# 1) both ingress and egress rules, the types:ingress,egress
# field is appended.
# 2) neither an ingress rule nor an egress rule, the
# types:ingress field is appended.
# 3) only an ingress rule, the types:ingress field is appended.
# 4) only an egress rule, the types:egress field is appended.
# """
# def test_types_both_egress_and_ingress(self):
# """
# Test that a simple policy with both ingress and egress
# rules will have the types:ingress,egress field appended.
# """
# # Set up simple ingress/egress policy
# policy1_dict = {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy-9',
# 'namespace': 'default'},
# 'spec': {
# 'egress': [{
# 'action': 'deny',
# 'destination': {},
# 'source': {},
# }],
# 'ingress': [{
# 'action': 'allow',
# 'destination': {},
# 'source': {},
# }],
# 'selector': "type=='application'"
# }
# }
# self.writeyaml('/tmp/policy1.yaml', policy1_dict)
#
# # append types: 'ingress', 'egress'
# policy1_types_dict = policy1_dict
# policy1_types_dict['spec'].update({'types': ['ingress', 'egress']})
#
# # Create the policy using calicoctl
# calicoctl("create", "/tmp/policy1.yaml")
#
# # Now read it out (yaml format) with calicoctl and verify it matches:
# self.check_data_in_datastore([policy1_types_dict], "policy")
#
# # Remove policy1
# calicoctl("delete", "/tmp/policy1.yaml")
#
# def test_types_no_ingress_or_egress(self):
# """
# Test that a simple policy with neither an ingress nor an
# egress rule will have the types:ingress field appended.
# """
# # Set up simple policy without ingress or egress rules
# policy2_dict = {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy-10',
# 'namespace': 'default'},
# 'spec': {
# 'selector': "type=='application'"
# }
# }
#
# self.writeyaml('/tmp/policy2.yaml', policy2_dict)
#
# # Create the policy using calicoctl
# calicoctl("create", "/tmp/policy2.yaml")
#
# # append types: 'ingress'
# policy2_types_dict = policy2_dict
# policy2_types_dict['spec'].update({'types': ['ingress']})
#
# # Now read it out (yaml format) with calicoctl and verify it matches:
# self.check_data_in_datastore([policy2_types_dict], "policy")
#
# # Remove policy2
# calicoctl("delete", "/tmp/policy2.yaml")
#
# def test_types_ingress_only(self):
# """
# Test that a simple policy with only an ingress
# rule will have the types:ingress field appended.
# """
# # Set up simple ingress-only policy
# policy2_dict = {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy-11',
# 'namespace': 'default'},
# 'spec': {
# 'ingress': [{
# 'action': 'allow',
# 'destination': {},
# 'source': {},
# }],
# 'selector': "type=='application'"
# }
# }
#
# self.writeyaml('/tmp/policy2.yaml', policy2_dict)
#
# # Create the policy using calicoctl
# calicoctl("create", "/tmp/policy2.yaml")
#
# # append types: 'ingress'
# policy2_types_dict = policy2_dict
# policy2_types_dict['spec'].update({'types': ['ingress']})
#
# # Now read it out (yaml format) with calicoctl and verify it matches:
# self.check_data_in_datastore([policy2_types_dict], "policy")
#
# # Remove policy2
# calicoctl("delete", "/tmp/policy2.yaml")
#
# def test_types_egress_only(self):
# """
# Test that a simple policy with only an egress
# rule will have the types:egress field appended.
# """
# # Set up simple egress-only policy
# policy2_dict = {'apiVersion': API_VERSION,
# 'kind': 'NetworkPolicy',
# 'metadata': {'name': 'policy-12',
# 'namespace': 'default'},
# 'spec': {
# 'egress': [{
# 'action': 'allow',
# 'destination': {},
# 'source': {},
# }],
# 'selector': "type=='application'"
# }
# }
#
# self.writeyaml('/tmp/policy2.yaml', policy2_dict)
#
# # Create the policy using calicoctl
# calicoctl("create", "/tmp/policy2.yaml")
#
# # append types: 'egress'
# policy2_types_dict = policy2_dict
# policy2_types_dict['spec'].update({'types': ['egress']})
#
# # Now read it out (yaml format) with calicoctl and verify it matches:
# self.check_data_in_datastore([policy2_types_dict], "policy")
#
# # Remove policy2
# calicoctl("delete", "/tmp/policy2.yaml")
#
the-stack_0_21755 | # - import modules - #
import os, sys
import matplotlib.pyplot as plt
import pandas as pd
import math
# hack to allow scripts to be placed in subdirectories next to pyAp:
if not os.path.exists('pyAp') and os.path.exists('../pyAp'):
sys.path.insert(1, os.path.abspath('..'))
from pyAp import ApTernary
# - import module finish - #
############################################################################
# set up a figure for ternary plot
fig = plt.figure()
fig.set_size_inches(10, 8)
# plot ternary (w/o data)
ApTernary.ternary(fig)
## plot data in ternary diagram
# load data from csv/xlsx file
df = pd.read_csv('outputs_apfu_26o.csv') # df = pd.read_excel('data_calc_water.xlsx')
# calculate the x,y coordinates on ternary diagram according to input XF and XCL values
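# (Mapping note, inferred from the formulas below: XF is plotted toward the right vertex and
# XCL toward the top vertex of an equilateral triangle of side 100, giving
# x = (XF + XCL/2) * 100 and y = XCL * sqrt(3)/2 * 100, clamped to the triangle's bounds.)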
for idx, value in df.iterrows():
x_f = value['XF']
x_cl = value['XCL']
x = (x_f + x_cl/2) * 100
y = x_cl*math.sqrt(3)*50
if x > 100:
x = 100
if y > math.sqrt(3)*50:
y = math.sqrt(3)*50
plt.plot(x,y,'o',label=value['sample'])
plt.legend(loc='best')
plt.show()
|
the-stack_0_21756 | import re
"""
email_reply_parser is a python library port of GitHub's Email Reply Parser.
For more information, visit https://github.com/zapier/email-reply-parser
"""
class EmailReplyParser(object):
""" Represents a email message that is parsed.
"""
@staticmethod
def read(text):
""" Factory method that splits email into list of fragments
text - A string email body
Returns an EmailMessage instance
"""
return EmailMessage(text).read()
@staticmethod
def parse_reply(text):
""" Provides the reply portion of email.
text - A string email body
Returns reply body message
"""
return EmailReplyParser.read(text).reply
class EmailMessage(object):
""" An email message represents a parsed email body.
"""
SIG_REGEX = r'(--|__|-\w)|(^Sent from my (\w+\s*){1,3})'
QUOTE_HDR_REGEX = r'^:etorw.*nO'
MULTI_QUOTE_HDR_REGEX = r'(?!On.*On\s.+?wrote:)(On\s(.+?)wrote:)'
QUOTED_REGEX = r'(>+)'
HEADER_REGEX = r'^(From|Sent|To|Subject): .+'
    # The original repo tries to allow for inline responses (tests email_1_2.txt); we carve that
    # out as unsupported so we can get a cleaner parsed reply.
    # Not sure why this is called multi_quote_header.
MULTI_QUOTE_HEADER_REGEX = r'(?!On.*On\s.+?wrote:)(On\s(.+?)wrote:)'
ORIGINAL_MESSAGE_HEADER_REGEX = r'-+ (Original message) -+'
def __init__(self, text):
self.fragments = []
self.fragment = None
self.text = text.replace('\r\n', '\n')
self.found_visible = False
def read(self):
""" Creates new fragment for each line
and labels as a signature, quote, or hidden.
Returns EmailMessage instance
"""
self.found_visible = False
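        # If the "On ... wrote:" quote header spans multiple lines, collapse it onto a single
        # line so the line-by-line scan below can recognise it as a quote header.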
is_multi_quote_header = re.search(self.MULTI_QUOTE_HDR_REGEX, self.text, re.MULTILINE | re.DOTALL)
if is_multi_quote_header:
expr = re.compile(self.MULTI_QUOTE_HDR_REGEX, flags=re.DOTALL)
self.text = expr.sub(
is_multi_quote_header.groups()[0].replace('\n', ''),
self.text)
self.lines = self.text.split('\n')
self.lines.reverse()
for line in self.lines:
self._scan_line(line)
self._finish_fragment()
self.fragments.reverse()
return self
@property
def reply(self):
""" Captures reply message within email
"""
reply = []
for f in self.fragments:
if not (f.hidden or f.quoted):
reply.append(f.content)
return '\n'.join(reply)
def _scan_line(self, line):
""" Reviews each line in email message and determines fragment type
line - a row of text from an email message
"""
is_quoted = re.match(self.QUOTED_REGEX, line) is not None
is_header = (re.match(self.HEADER_REGEX, line) is not None or
re.match(self.MULTI_QUOTE_HEADER_REGEX, line) is not None or
re.match(self.ORIGINAL_MESSAGE_HEADER_REGEX, line, flags=re.IGNORECASE | re.DOTALL) is not None
)
if self.fragment and len(line.strip()) == 0:
if re.match(self.SIG_REGEX, self.fragment.lines[-1]):
self.fragment.signature = True
self._finish_fragment()
if self.fragment and (((self.fragment.headers == is_header) and (self.fragment.quoted == is_quoted))
or (self.fragment.quoted and (self.quote_header(line) or len(line.strip()) == 0))):
self.fragment.lines.append(line)
else:
self._finish_fragment()
self.fragment = Fragment(is_quoted, line, headers=is_header)
def quote_header(self, line):
""" Determines whether line is part of a quoted area
line - a row of the email message
Returns True or False
"""
        return re.match(self.QUOTE_HDR_REGEX, line[::-1]) is not None
def _finish_fragment(self):
""" Creates fragment
"""
if self.fragment:
self.fragment.finish()
if self.fragment.headers:
# Regardless of what's been seen to this point, if we encounter a headers fragment,
# all the previous fragments should be marked hidden and found_visible set to False.
self.found_visible = False
for f in self.fragments:
f.hidden = True
if not self.found_visible:
if self.fragment.quoted \
or self.fragment.headers \
or self.fragment.signature \
or (len(self.fragment.content.strip()) == 0):
self.fragment.hidden = True
else:
self.found_visible = True
self.fragments.append(self.fragment)
self.fragment = None
class Fragment(object):
""" A Fragment is a part of
an Email Message, labeling each part.
"""
def __init__(self, quoted, first_line, headers=False):
self.signature = False
self.headers = headers
self.hidden = False
self.quoted = quoted
self._content = None
self.lines = [first_line]
def finish(self):
""" Creates block of content with lines
belonging to fragment.
"""
self.lines.reverse()
self._content = '\n'.join(self.lines)
self.lines = None
@property
def content(self):
return self._content
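# --- Illustrative usage: a minimal sketch, not part of the library; the sample text is made up ---
if __name__ == '__main__':
    sample = (
        "Thanks, that fix works for me.\n"
        "\n"
        "On Mon, Jan 1, 2024 at 9:00 AM Someone <[email protected]> wrote:\n"
        "> Could you try the patch attached to this thread?\n"
    )
    # Only the visible reply fragment is returned; the quote header and quoted lines
    # are marked hidden/quoted and dropped.
    print(EmailReplyParser.parse_reply(sample))  # -> Thanks, that fix works for me.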
|
the-stack_0_21757 | '''palette.py
This represents the twelve hues (RYGCBM, OLTSPV) across the two tones (saturated
and dark). The two other tones (light, unsaturated) are 'reserved' by convention
but not represented here.
We use capital letters and lowercase letters to refer to saturated and dark,
respectively. (e.g. pR = ord('R'), corresponding to '#ff0000', and
pr = ord('r'), corresponding to '#800000'.
This code provides useful enums, etc. With no imports, an
'import palette from *' should be fine. (Note that 'from package import *' is
usually a bad idea!)
If you want to import all the enums from this namespace and want to avoid *,
but you *also* don't want to write 'palette.pR' (because it's so long!)
then here are some easy copy-pastes for you:
from palette import pR, pY, pG, pC, pB, pM
from palette import pO, pL, pT, pS, pP, pV
from palette import pr, py, pg, pc, pb, pm
from palette import po, pl, pt, ps, pp, pv
from palette import get, resel_to_rgb, rgb_to_resel
'''
# palette.py
# These represent the twelve hues, across two tones (saturated, dark) from the palette in README.md
# By convention, capital letters are the saturated tone and lowercase letters are the dark tone.
# Palette stuff below!
# These are useful enums for colors in our palette.
# Here, we assign enums to match meaningful characters rather than arbitrary ints.
# (where 'meaningful character' is taken from enums.)
# These letters correspond to RYGCBM OLTSPV
# (Red, Yellow, Green, Cyan, Blue, Magenta; Orange, Lime, Teal, Sapphire, Purple, Violet)
pO = ord('O') # 79 # On orange wire
po = ord('o') # 111 # Off orange wire
pL = ord('L') # 76 # On lime wire
pl = ord('l') # 108 # Off lime wire
pT = ord('T') # 84 # Xor node
pt = ord('t') # 116 # And node
pS = ord('S') # 83 # On sapphire wire
ps = ord('s') # 115 # Off sapphire wire
pP = ord('P') # 80 # Output node (from input/logic node to wire)
pp = ord('p') # 112 # Input node (from wire to input/logic node)
pV = ord('V') # 86
pv = ord('v') # 118
pR = ord('R') # 82
pr = ord('r') # 114
pG = ord('G') # 71
pg = ord('g') # 103
pB = ord('B') # 66
pb = ord('b') # 98
pY = ord('Y') # 89
py = ord('y') # 121
pC = ord('C') # 67
pc = ord('c') # 99
pM = ord('M') # 77
pm = ord('m') # 109
#_pal is just a helper; we loop over it to create our resel-rgb dicts.
# This represents the 12 hues, 'Saturated' and 'Dark'
_pal = [
(pR, (255,0,0)),
(pr, (128,0,0)),
(pG, (0,255,0)),
(pg, (0,128,0)),
(pB, (0,0,255)),
(pb, (0,0,128)),
(pY, (255,255,0)),
(py, (128,128,0)),
(pC, (0,255,255)),
(pc, (0,128,128)),
(pM, (255,0,255)),
(pm, (128,0,128)),
(pO, (255,128,0)),
(po, (128,64,0)),
(pL, (128,255,0)),
(pl, (64,128,0)),
(pT, (0,255,128)),
(pt, (0,128,64)),
(pS, (0,128,255)),
(ps, (0,64,128)),
(pP, (128,0,255)),
(pp, (64,0,128)),
(pV, (255,0,128)),
(pv, (128,0,64)),
]
# We implement a cheap bidict here
resel_to_rgb = dict()
rgb_to_resel = dict()
for resel, rgb in _pal:
rgb_to_resel[rgb] = resel
resel_to_rgb[resel] = rgb
# And we implement a cheap defaultdict with this function
def get(dd, kk, default=0):
"""Get dd[kk], or return default if kk is not in dd.keys()
:param dd: Dictionary
:type dd: dict
:param kk: Any hashable key
:type kk: object
:param default: The default value to return if kk is not a valid key
:type default: object (anything!)
:return: dd[kk], or 'default' if kk is not in dd.keys
:rtype: object (anything!)
"""
if kk in dd.keys():
return dd[kk]
else:
return default
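# --- Illustrative usage: a minimal sketch, not part of the original module ---
if __name__ == '__main__':
    print(resel_to_rgb[pR])                 # (255, 0, 0)
    print(rgb_to_resel[(128, 0, 0)] == pr)  # True
    print(get(resel_to_rgb, ord(' ')))      # 0, since the space character is not in the palette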
|
the-stack_0_21759 | #####################################################################################
# MIT License #
# #
# Copyright (C) 2019 Charly Lamothe #
# Copyright (C) 2018 Zalando Research #
# #
# This file is part of VQ-VAE-images. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#####################################################################################
from residual_stack import ResidualStack
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
def __init__(self, in_channels, num_hiddens, num_residual_layers, num_residual_hiddens, use_kaiming_normal=False):
super(Decoder, self).__init__()
self._conv_1 = nn.Conv2d(
in_channels=in_channels,
out_channels=num_hiddens,
kernel_size=3,
stride=1,
padding=1
)
if use_kaiming_normal:
self._conv_1 = nn.utils.weight_norm(self._conv_1)
nn.init.kaiming_normal_(self._conv_1.weight)
# Same number of residual layers as specified in the paper
self._residual_stack = ResidualStack(
in_channels=num_hiddens,
num_hiddens=num_hiddens,
num_residual_layers=num_residual_layers,
num_residual_hiddens=num_residual_hiddens,
use_kaiming_normal=use_kaiming_normal
)
# Same parameters as specified in the paper
self._conv_trans_1 = nn.ConvTranspose2d(
in_channels=num_hiddens,
out_channels=num_hiddens//2,
kernel_size=4,
stride=2,
padding=1
)
if use_kaiming_normal:
self._conv_trans_1 = nn.utils.weight_norm(self._conv_trans_1)
nn.init.kaiming_normal_(self._conv_trans_1.weight)
# Same parameters as specified in the paper
self._conv_trans_2 = nn.ConvTranspose2d(
in_channels=num_hiddens//2,
out_channels=3,
kernel_size=4,
stride=2,
padding=1
)
if use_kaiming_normal:
self._conv_trans_2 = nn.utils.weight_norm(self._conv_trans_2)
nn.init.kaiming_normal_(self._conv_trans_2.weight)
def forward(self, inputs):
x = self._conv_1(inputs)
x = self._residual_stack(x)
x = self._conv_trans_1(x)
x = F.relu(x)
x = self._conv_trans_2(x)
return x
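# --- Illustrative shape check: a sketch, not part of the original project; it assumes the
# --- sibling residual_stack module is importable (it is already imported above).
if __name__ == '__main__':
    import torch
    decoder = Decoder(in_channels=64, num_hiddens=128,
                      num_residual_layers=2, num_residual_hiddens=32)
    latents = torch.randn(1, 64, 8, 8)  # (batch, channels, height, width)
    # Two stride-2 transposed convolutions upsample 8x8 -> 32x32 with 3 output channels.
    print(decoder(latents).shape)       # torch.Size([1, 3, 32, 32])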
|
the-stack_0_21763 | import datetime
from huey.tests.base import b
from huey.tests.base import HueyTestCase
from huey.utils import EmptyData
class TestRedisStorage(HueyTestCase):
def test_queues(self):
storage = self.huey.storage
storage.flush_queue()
@self.huey.task()
def test_queues_add(k, v):
return k + v
res = test_queues_add('k', 'v')
self.assertEqual(storage.queue_size(), 1)
task = self.huey.dequeue()
self.huey.execute(task)
self.assertEqual(res.get(), 'kv')
res = test_queues_add('\xce', '\xcf')
task = self.huey.dequeue()
self.huey.execute(task)
self.assertEqual(res.get(), '\xce\xcf')
def test_data_stores(self):
storage = self.huey.storage
storage.put_data('k1', 'v1')
storage.put_data('k2', 'v2')
storage.put_data('k3', 'v3')
self.assertEqual(storage.peek_data('k2'), b('v2'))
self.assertEqual(storage.pop_data('k2'), b('v2'))
self.assertEqual(storage.peek_data('k2'), EmptyData)
self.assertEqual(storage.pop_data('k2'), EmptyData)
self.assertEqual(storage.peek_data('k3'), b('v3'))
storage.put_data('k3', 'v3-2')
self.assertEqual(storage.peek_data('k3'), b('v3-2'))
def test_schedules(self):
storage = self.huey.storage
dt1 = datetime.datetime(2013, 1, 1, 0, 0)
dt2 = datetime.datetime(2013, 1, 2, 0, 0)
dt3 = datetime.datetime(2013, 1, 3, 0, 0)
dt4 = datetime.datetime(2013, 1, 4, 0, 0)
# Add to schedule out-of-order to ensure sorting is performed by
# the schedule.
storage.add_to_schedule('s2', dt2)
storage.add_to_schedule('s1', dt1)
storage.add_to_schedule('s4', dt4)
storage.add_to_schedule('s3', dt3)
# Ensure that asking for a timestamp previous to any item in the
# schedule returns empty list.
self.assertEqual(
storage.read_schedule(dt1 - datetime.timedelta(days=1)),
[])
# Ensure the upper boundary is inclusive of whatever timestamp
# is passed in.
self.assertEqual(
storage.read_schedule(dt3),
[b('s1'), b('s2'), b('s3')])
self.assertEqual(storage.read_schedule(dt3), [])
# Ensure the schedule is flushed and an empty schedule returns an
# empty list.
self.assertEqual(storage.read_schedule(dt4), [b('s4')])
self.assertEqual(storage.read_schedule(dt4), [])
def test_events(self):
storage = self.huey.storage
ps = storage.listener()
messages = ['a', 'b', 'c']
for message in messages:
storage.emit(message)
g = ps.listen()
next(g)
self.assertEqual(next(g)['data'], b('a'))
self.assertEqual(next(g)['data'], b('b'))
self.assertEqual(next(g)['data'], b('c'))
def test_event_iterator(self):
i = iter(self.huey.storage)
self.huey.storage.emit('"a"')
self.huey.storage.emit('"b"')
res = next(i)
self.assertEqual(res, 'a')
res = next(i)
self.assertEqual(res, 'b')
def test_metadata(self):
s = self.huey.storage
s.write_metadata('k1', 'v1')
self.assertEqual(s.read_metadata('k1'), b('v1'))
self.assertEqual(s.read_metadata('kx'), None)
s.write_metadata('k2', 'v2')
vals = s.metadata_values()
self.assertEqual(vals, {b('k1'): b('v1'), b('k2'): b('v2')})
|
the-stack_0_21765 | # Author: Michael Lissner
# History:
# - 2013-06-03, mlr: Created
# - 2014-08-06, mlr: Updated for new website
# - 2015-07-30, mlr: Updated for changed website (failing xpaths)
from datetime import datetime
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.string_utils import clean_if_py3
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.url = 'http://nvcourts.gov/Supreme/Decisions/Advance_Opinions/'
self.xpath_adjustment = 0
self.table_number = 2
self.base_path = '(//table)[{table_number}]//td[{i}]'
self.date_path = self._make_date_path()
# Site has bad certificate, need to ignore
# verification for now for scraper to work
self.request['verify'] = False
def _make_date_path(self):
"""Needed so that subclasses can make a date path as part of their
init process
"""
return '{base}//text()[normalize-space(.)]'.format(
base=self.base_path.format(
table_number=self.table_number,
i=4 + self.xpath_adjustment,
),
)
def _get_download_urls(self):
path = '{base}//@href'.format(
base=self.base_path.format(
table_number=self.table_number,
i=4 + self.xpath_adjustment,
),
)
return list(self.html.xpath(path))
def _get_case_names(self):
path = '{base}//text()'.format(
base=self.base_path.format(
table_number=self.table_number,
i=3 + self.xpath_adjustment,
),
)
return list(self.html.xpath(path))
def _get_case_dates(self):
case_dates = []
for el in self.html.xpath(self.date_path):
date_string = clean_if_py3(str(el)).strip()
if date_string:
case_dates.append(datetime.strptime(date_string, '%b %d, %Y').date())
return case_dates
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
def _get_docket_numbers(self):
path = '{base}//text()[normalize-space(.)]'.format(
base=self.base_path.format(
table_number=self.table_number,
i=2 + self.xpath_adjustment,
),
)
docket_numbers = []
for el in self.html.xpath(path):
text = clean_if_py3(str(el)).strip()
if text:
docket_numbers.append(text)
return docket_numbers
def _get_neutral_citations(self):
neutral_path = '{base}//text()'.format(
base=self.base_path.format(
table_number=self.table_number,
i=1 + self.xpath_adjustment,
),
)
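        # Re-extract the non-empty date strings (as in _get_case_dates) so they can be
        # zipped positionally with the neutral citation numbers below.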
date_strings = []
for el in self.html.xpath(self.date_path):
date_string = clean_if_py3(str(el)).strip()
if date_string:
date_strings.append(date_string)
neutral_citations = []
for neutral_number, \
date_string in zip(
self.html.xpath(neutral_path),
date_strings):
year = datetime.strptime(date_string.strip(), '%b %d, %Y').year
neutral_citations.append('{year} NV {num}'.format(year=year, num=neutral_number))
return neutral_citations
|
the-stack_0_21767 | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,[email protected]
Reference:
[1] Cheng H T, Koc L, Harmsen J, et al. Wide & deep learning for recommender systems[C]//Proceedings of the 1st Workshop on Deep Learning for Recommender Systems. ACM, 2016: 7-10.(https://arxiv.org/pdf/1606.07792.pdf)
"""
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import Dense,add
from ..inputs import build_input_features, get_linear_logit,input_from_feature_columns,combined_dnn_input
from ..layers.core import PredictionLayer, DNN
def WDL(linear_feature_columns, dnn_feature_columns, embedding_size=8, dnn_hidden_units=(128, 128), l2_reg_linear=1e-5,
l2_reg_embedding=1e-5, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu',
task='binary'):
"""Instantiates the Wide&Deep Learning architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param embedding_size: positive integer, sparse feature embedding_size
:param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of DNN
:param l2_reg_linear: float. L2 regularizer strength applied to wide part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param init_std: float, to use as the initialization std of embedding vector
    :param seed: integer, to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_activation: Activation function to use in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
features = build_input_features(linear_feature_columns + dnn_feature_columns)
inputs_list = list(features.values())
sparse_embedding_list, dense_value_list = input_from_feature_columns(features, dnn_feature_columns,
embedding_size,
l2_reg_embedding, init_std,
seed)
linear_logit = get_linear_logit(features, linear_feature_columns, init_std=init_std, seed=seed, prefix='linear',
l2_reg=l2_reg_linear)
dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
dnn_out = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout,
False, seed)(dnn_input)
dnn_logit = Dense(
1, use_bias=False, activation=None)(dnn_out)
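    # Combine the wide (linear) logit and the deep (DNN) logit by simple addition; if only
    # one kind of feature column was supplied, fall back to that single branch.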
if len(linear_feature_columns) > 0 and len(dnn_feature_columns) > 0: # linear + dnn
final_logit = add([linear_logit,dnn_logit])
elif len(linear_feature_columns) == 0:
final_logit = dnn_logit
elif len(dnn_feature_columns) == 0:
final_logit = linear_logit
else:
raise NotImplementedError
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
|
the-stack_0_21768 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
options:
comment:
type: str
description:
- Brief description of the domain. Maximum length of 160 characters
data:
type: str
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
type: str
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
type: str
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
name:
type: str
description:
- FQDN record name to create
required: True
overwrite:
description:
- Add new records if data doesn't match, instead of updating existing
record with matching name. If there are already multiple records with
matching name and overwrite=true, this module will fail.
default: true
type: bool
priority:
type: int
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
type: str
description:
- Server ID to create a PTR record for. Only used with type=PTR
state:
type: str
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
type: int
description:
- Time to live of record in seconds
default: 3600
type:
type: str
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: "Matt Martz (@sivel)"
extends_documentation_fragment:
- community.general.rackspace
- community.general.rackspace.openstack
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.rax import (rax_argument_spec,
rax_find_loadbalancer,
rax_find_server,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
name=None, server=None, state='present', ttl=7200):
changed = False
results = []
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if loadbalancer:
item = rax_find_loadbalancer(module, pyrax, loadbalancer)
elif server:
item = rax_find_server(module, pyrax, server)
if state == 'present':
current = dns.list_ptr_records(item)
for record in current:
if record.data == data:
if record.ttl != ttl or record.name != name:
try:
dns.update_ptr_record(item, record, name, data, ttl)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
record.ttl = ttl
record.name = name
results.append(rax_to_dict(record))
break
else:
results.append(rax_to_dict(record))
break
if not results:
record = dict(name=name, type='PTR', data=data, ttl=ttl,
comment=comment)
try:
results = dns.add_ptr_records(item, [record])
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, records=results)
elif state == 'absent':
current = dns.list_ptr_records(item)
for record in current:
if record.data == data:
results.append(rax_to_dict(record))
break
if results:
try:
dns.delete_ptr_records(item, data)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
overwrite=True, priority=None, record_type='A',
state='present', ttl=7200):
"""Function for manipulating record types other than PTR"""
changed = False
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
if not priority and record_type in ['MX', 'SRV']:
module.fail_json(msg='A "priority" attribute is required for '
'creating a MX or SRV record')
try:
domain = dns.find(name=domain)
except Exception as e:
module.fail_json(msg='%s' % e.message)
try:
if overwrite:
record = domain.find_record(record_type, name=name)
else:
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotUnique as e:
module.fail_json(msg='overwrite=true and there are multiple matching records')
except pyrax.exceptions.DomainRecordNotFound as e:
try:
record_data = {
'type': record_type,
'name': name,
'data': data,
'ttl': ttl
}
if comment:
record_data.update(dict(comment=comment))
if priority and record_type.upper() in ['MX', 'SRV']:
record_data.update(dict(priority=priority))
record = domain.add_records([record_data])[0]
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
update = {}
if comment != getattr(record, 'comment', None):
update['comment'] = comment
if ttl != getattr(record, 'ttl', None):
update['ttl'] = ttl
if priority != getattr(record, 'priority', None):
update['priority'] = priority
if data != getattr(record, 'data', None):
update['data'] = data
if update:
try:
record.update(**update)
changed = True
record.get()
except Exception as e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
domain = dns.find(name=domain)
except Exception as e:
module.fail_json(msg='%s' % e.message)
try:
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotFound as e:
record = {}
except pyrax.exceptions.DomainRecordNotUnique as e:
module.fail_json(msg='%s' % e.message)
if record:
try:
record.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
comment=dict(),
data=dict(required=True),
domain=dict(),
loadbalancer=dict(),
name=dict(required=True),
overwrite=dict(type='bool', default=True),
priority=dict(type='int'),
server=dict(),
state=dict(default='present', choices=['present', 'absent']),
ttl=dict(type='int', default=3600),
type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
'SRV', 'TXT', 'PTR'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[
['server', 'loadbalancer', 'domain'],
],
required_one_of=[
['server', 'loadbalancer', 'domain'],
],
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
comment = module.params.get('comment')
data = module.params.get('data')
domain = module.params.get('domain')
loadbalancer = module.params.get('loadbalancer')
name = module.params.get('name')
overwrite = module.params.get('overwrite')
priority = module.params.get('priority')
server = module.params.get('server')
state = module.params.get('state')
ttl = module.params.get('ttl')
record_type = module.params.get('type')
setup_rax_module(module, pyrax, False)
if record_type.upper() == 'PTR':
if not server and not loadbalancer:
module.fail_json(msg='one of the following is required: '
'server,loadbalancer')
rax_dns_record_ptr(module, data=data, comment=comment,
loadbalancer=loadbalancer, name=name, server=server,
state=state, ttl=ttl)
else:
rax_dns_record(module, comment=comment, data=data, domain=domain,
name=name, overwrite=overwrite, priority=priority,
record_type=record_type, state=state, ttl=ttl)
if __name__ == '__main__':
main()
|
the-stack_0_21771 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Import/Export Contacts API
"""
import csv
import StringIO
from treeio.identities.models import Contact, ContactType, ContactValue
import re
import urlparse
class ProcessContacts():
"Import/Export Contacts"
"""
def export_contacts(self, contacts):
"Export contacts into CSV file"
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=Contacts.csv'
writer = csv.writer(response)
headers = ['name', 'type']
fields = ContactField.objects.filter(trash=False)
for field in fields:
headers.append(field.name)
writer.writerow(headers)
for contact in contacts:
row = []
row.append(contact)
row.append(contact.contact_type)
vals = contact.contactvalue_set.all()
for field in fields:
inserted = False
for val in vals:
if val.field == field:
row.append(val.value)
inserted = True
if not inserted:
row.append('')
writer.writerow(row)
return response
"""
def import_contacts(self, content):
"Import contacts from CSV file"
f = StringIO.StringIO(content)
contacts = csv.DictReader(f, delimiter=',')
self.parse_contacts(contacts)
def verify_email(self, email):
"Verify email format"
try:
            email_matched = re.findall(
                r'[a-zA-Z0-9+_.-]+@[0-9a-zA-Z][-.0-9a-zA-Z]*\.[a-zA-Z]+', email)
            if email_matched:
                return email  # Contact Email Address
            return None
except Exception:
return None
def verify_url(self, url):
"Verify url"
if url:
if '://' not in url:
# If no URL scheme given, assume http://
url = u'http://%s' % url
url_fields = list(urlparse.urlsplit(url))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
url = urlparse.urlunsplit(url_fields)
return url
def parse_contacts(self, contacts):
"Break down CSV file into fields"
for row in contacts:
# Tidy up keys (iterkeys strip())
            try:
                type = row['type']
            except KeyError:
                type = None  # No type column given; there is no default type defined
try:
name = row['name']
except Exception:
try:
firstname = row['firstname']
surname = row['surname']
name = firstname + " " + surname
except Exception:
continue
            contact_type = ContactType.objects.filter(name=type)
            if not contact_type:
                # Unknown contact type: skip the row rather than save an invalid contact
                continue
            contact_type = contact_type[0]
# Create a new contact if it doesn't exist
contact_exists = Contact.objects.filter(
name=name, contact_type__name=type, trash=False)
# TODO: If one does exist then append the data on that contact
if not contact_exists:
contact = Contact()
contact.name = name
contact.contact_type = contact_type
contact.auto_notify = False
contact.save()
fields = contact_type.fields.filter(trash=False)
for field in fields:
if field.name in row:
x = row[field.name]
if field.field_type == 'email':
x = self.verify_email(x)
if field.field_type == 'url':
x = self.verify_url(x)
if x:
contact_value = ContactValue()
contact_value.field = field
contact_value.contact = contact
contact_value.value = x
contact_value.save()
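# Illustrative usage sketch (assumed CSV layout; not part of the original module). The
# importer expects a header row with "type" and either "name" or "firstname"/"surname",
# plus columns matching the ContactType's field names:
#
#     csv_content = "name,type,Email\nJane Doe,person,[email protected]\n"
#     ProcessContacts().import_contacts(csv_content)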
|
the-stack_0_21772 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules and macros for collecting LicenseInfo providers."""
load(
"@rules_license//rules:providers.bzl",
"LicenseInfo",
"LicensesInfo",
)
# Debugging verbosity
_VERBOSITY = 0
def _debug(loglevel, msg):
if _VERBOSITY > loglevel:
print(msg) # buildifier: disable=print
def _get_transitive_licenses(deps, licenses, trans):
for dep in deps:
if LicenseInfo in dep:
license = dep[LicenseInfo]
_debug(1, " depends on license: %s" % license.rule)
licenses.append(license)
if LicensesInfo in dep:
license_list = dep[LicensesInfo].licenses
if license_list:
_debug(1, " transitively depends on: %s" % licenses)
trans.append(license_list)
def _gather_licenses_info_impl(target, ctx):
licenses = []
trans = []
if hasattr(ctx.rule.attr, "applicable_licenses"):
_get_transitive_licenses(ctx.rule.attr.applicable_licenses, licenses, trans)
if hasattr(ctx.rule.attr, "data"):
_get_transitive_licenses(ctx.rule.attr.data, licenses, trans)
if hasattr(ctx.rule.attr, "deps"):
_get_transitive_licenses(ctx.rule.attr.deps, licenses, trans)
if hasattr(ctx.rule.attr, "srcs"):
_get_transitive_licenses(ctx.rule.attr.srcs, licenses, trans)
return [LicensesInfo(licenses = depset(tuple(licenses), transitive = trans))]
gather_licenses_info = aspect(
doc = """Collects LicenseInfo providers into a single LicensesInfo provider.""",
implementation = _gather_licenses_info_impl,
attr_aspects = ["applicable_licenses", "data", "deps", "srcs"],
)
def write_licenses_info(ctx, deps, json_out):
"""Writes LicensesInfo providers for a set of targets as JSON.
TODO(aiuto): Document JSON schema.
Usage:
write_licenses_info must be called from a rule implementation, where the
rule has run the gather_licenses_info aspect on its deps to collect the
transitive closure of LicenseInfo providers into a LicenseInfo provider.
foo = rule(
implementation = _foo_impl,
attrs = {
"deps": attr.label_list(aspects = [gather_licenses_info])
}
)
def _foo_impl(ctx):
...
out = ctx.actions.declare_file("%s_licenses.json" % ctx.label.name)
write_licenses_info(ctx, ctx.attr.deps, licenses_file)
Args:
ctx: context of the caller
deps: a list of deps which should have LicensesInfo providers.
This requires that you have run the gather_licenses_info
aspect over them
json_out: output handle to write the JSON info
"""
rule_template = """ {{
"rule": "{rule}",
"license_kinds": [{kinds}
],
"copyright_notice": "{copyright_notice}",
"package_name": "{package_name}",
"license_text": "{license_text}"\n }}"""
kind_template = """
{{
"target": "{kind_path}",
"name": "{kind_name}",
"conditions": {kind_conditions}
}}"""
licenses = []
for dep in deps:
if LicensesInfo in dep:
for license in dep[LicensesInfo].licenses.to_list():
_debug(0, " Requires license: %s" % license)
kinds = []
for kind in license.license_kinds:
kinds.append(kind_template.format(
kind_name = kind.name,
kind_path = kind.label,
kind_conditions = kind.conditions,
))
licenses.append(rule_template.format(
rule = license.rule,
copyright_notice = license.copyright_notice,
package_name = license.package_name,
license_text = license.license_text.path,
kinds = ",\n".join(kinds),
))
ctx.actions.write(
output = json_out,
content = "[\n%s\n]\n" % ",\n".join(licenses),
)
|
the-stack_0_21773 | """
1-bit Full Adder
Jimmy Tran
"""
from abc import ABC, abstractmethod
class CostMixin:
# Constants
COST_MULTIPLIER = 10
def __init__(self, number_of_components):
self._number_of_components = number_of_components
@property
def number_of_components(self):
return self._number_of_components
@property
def cost(self):
self._cost = self.COST_MULTIPLIER * (self._number_of_components ** 2)
return self._cost
class NodeMixin:
def __init__(self):
self._next = None
@property
def next(self):
return self._next
@next.setter
def next(self, data):
if not isinstance(data, NodeMixin):
raise TypeError("This should be the next NodeMixin, dude")
self._next = data
class Circuit:
def __init__(self):
self._circuit = None
self._cost = 0
def add(self, gate):
if not isinstance(gate, LogicGate):
raise TypeError("This should be a type of LogicGate, dude")
if self._circuit is not None:
gate.next = self._circuit
self._circuit = gate
@property
def cost(self):
traverse = self._circuit
while traverse is not None:
# print(traverse.cost)
self._cost += traverse.cost
# print(traverse)
traverse = traverse.next
# print(traverse)
return self._cost
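    # Worked example of the cost model above: CostMixin prices a gate at
    # COST_MULTIPLIER * components**2, so a 3-component AND gate costs 10 * 3**2 == 90,
    # a 2-component NOT gate costs 10 * 2**2 == 40, and the AND-NOT circuit built in
    # test_and_not_circuit() below reports a total cost of 130.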
class Input:
def __init__(self, owner):
if not isinstance(owner, LogicGate):
raise TypeError("Own should be a type of LogicGate")
self._owner = owner
def __str__(self):
try:
return str(self.value)
except AttributeError:
# It's possible to not have a value at the beginning
return "(no value)"
@property
def owner(self):
return self._owner
@property
def value(self):
return self._value
@value.setter
def value(self, value):
# Normalize the value to bool
self._value = bool(value)
# Now that the input value has changed, tell to owner logic gate to re-evaluate
self._owner.evaluate()
class Output:
def __init__(self):
self._connections = []
def __str__(self):
try:
return str(self.value)
except AttributeError:
# It's possible not to have a value at the beginning
return "(no value)"
def connect(self, input):
if not isinstance(input, Input):
raise TypeError("Output must be connected to an input")
# If the input is not already in the list, add it; alternative is to use a set
if input not in self._connections:
self._connections.append(input)
try:
# Set the input's value to this output's value upon connection
input.value = self._value
except AttributeError:
# If self.value is not there, skip it
pass
@property
def value(self):
return self._value
@value.setter
def value(self, value):
# Normalize the value to bool
self._value = bool(value)
# After the output value changes, remember to send it to all the connected inputs
for connection in self._connections:
connection.value = self.value
@property
def connections(self):
return self._connections
class LogicGate(ABC, NodeMixin):
def __init__(self, name):
self._name = name
NodeMixin.__init__(self)
@property
def name(self):
return self._name
@abstractmethod
def evaluate(self):
pass
@abstractmethod
def __str__(self):
pass
class UnaryGate(LogicGate, CostMixin):
def __init__(self, name, number_of_components):
LogicGate.__init__(self, name)
CostMixin.__init__(self, number_of_components)
self._input = Input(self)
self._output = Output()
def __str__(self):
return f"LogicGate {self.name}: input={self.input}, output={self.output}"
@property
def input(self):
return self._input
@property
def output(self):
return self._output
class BinaryGate(LogicGate, CostMixin):
def __init__(self, name, number_of_components):
LogicGate.__init__(self, name)
CostMixin.__init__(self, number_of_components)
self._input0 = Input(self)
self._input1 = Input(self)
self._output = Output()
def __str__(self):
return f"LogicGate {self.name}: input0={self.input0}, input1={self.input1}, output={self.output}"
@property
def input0(self):
return self._input0
@property
def input1(self):
return self._input1
@property
def output(self):
return self._output
class NotGate(UnaryGate):
def __init__(self, name, circuit=None, number_of_components=2):
        UnaryGate.__init__(self, name, number_of_components)
if circuit is not None:
if not isinstance(circuit, Circuit):
raise TypeError("The circuit parameter should be of class Circuit, dude")
circuit.add(self)
def evaluate(self):
self.output.value = not self.input.value
class AndGate(BinaryGate):
def __init__(self, name, circuit=None, number_of_components=3):
        BinaryGate.__init__(self, name, number_of_components)
if circuit is not None:
if not isinstance(circuit, Circuit):
raise TypeError("The circuit parameter should be of class Circuit, dude")
circuit.add(self)
def evaluate(self):
try:
# This may throw an exception, if one of the input is not yet set, which is possible
# in the normal course of evaluation, because setting the first input will kick
# off the evaluation. So just don't set the output.
self.output.value = self.input0.value and self.input1.value
except AttributeError:
pass
class OrGate(BinaryGate):
def __init__(self, name, circuit=None, number_of_components=3):
        BinaryGate.__init__(self, name, number_of_components)
if circuit is not None:
if not isinstance(circuit, Circuit):
raise TypeError("The circuit parameter should be of class Circuit, dude")
circuit.add(self)
def evaluate(self):
try:
self.output.value = self.input0.value or self.input1.value
except AttributeError:
pass
class XorGate(BinaryGate):
def __init__(self, name, circuit=None, number_of_components=3):
        BinaryGate.__init__(self, name, number_of_components)
if circuit is not None:
if not isinstance(circuit, Circuit):
raise TypeError("The circuit parameter should be of class Circuit, dude")
circuit.add(self)
def evaluate(self):
try:
# Assume the value is bool, != is same as xor
self.output.value = (self.input0.value != self.input1.value)
except AttributeError:
pass
# This makes sure that the old classes are still functional
def test():
tests = [test_not, test_and, test_or, test_xor, test_not_not, test_and_not]
for t in tests:
print("Running " + t.__name__ + " " + "-" * 20)
t()
def test_not():
not_gate = NotGate("not")
not_gate.input.value = True
print(not_gate)
not_gate.input.value = False
print(not_gate)
def test_and():
and_gate = AndGate("and")
print("AND gate initial state:", and_gate)
and_gate.input0.value = True
print("AND gate with 1 input set", and_gate)
and_gate.input1.value = False
print("AND gate with 2 inputs set:", and_gate)
and_gate.input1.value = True
print("AND gate with 2 inputs set:", and_gate)
def test_or():
or_gate = OrGate("or")
or_gate.input0.value = False
or_gate.input1.value = False
print(or_gate)
or_gate.input1.value = True
print(or_gate)
def test_xor():
# Testing xor
xor_gate = XorGate("xor")
xor_gate.input0.value = False
xor_gate.input1.value = False
print(xor_gate)
xor_gate.input1.value = True
print(xor_gate)
def test_not_not():
not_gate1 = NotGate("not1")
not_gate2 = NotGate("not2")
not_gate1.output.connect(not_gate2.input)
print(not_gate1)
print(not_gate2)
print("Setting not-gate input to False...")
not_gate1.input.value = False
print(not_gate1)
print(not_gate2)
def test_and_not():
and_gate = AndGate("and")
not_gate = NotGate("not")
and_gate.output.connect(not_gate.input)
and_gate.input0.value = True
and_gate.input1.value = False
print(and_gate)
print(not_gate)
and_gate.input1.value = True
print(and_gate)
print(not_gate)
def abstract_class_test():
try:
logic_gate = LogicGate("logic")
print(logic_gate)
except TypeError:
print("Can't instantiate logic gate")
try:
unary_gate = UnaryGate("unary")
print(unary_gate)
except TypeError:
print("Can't instantiate unary gate")
try:
binary_gate = BinaryGate("binary")
print(binary_gate)
except TypeError:
print("Can't instantiate binary gate")
# Part of the new set of tests for this program
def test2():
tests2 = [test_not_not_circuit, test_and_not_circuit]
for s in tests2:
print("Running " + s.__name__ + " " + "-" * 20)
s()
def test_not_not_circuit():
circuit = Circuit()
not_gate1 = NotGate("not1", circuit)
not_gate2 = NotGate("not2", circuit)
not_gate1.output.connect(not_gate2.input)
print("Cost of NOT-NOT circuit is " + str(circuit.cost))
def test_and_not_circuit():
circuit2 = Circuit()
and_gate1 = AndGate("and1", circuit2)
not_gate3 = NotGate("not3", circuit2)
and_gate1.output.connect(not_gate3.input)
print("Cost of AND-NOT circuit is " + str(circuit2.cost))
# 1-bit full adder
def full_adder(a, b, ci):
# Instantiate the circuit
circuit_adder = Circuit()
# sum
xor_adder_gate1 = XorGate("xor_adder_1", circuit_adder)
xor_adder_gate1.input0.value = a
xor_adder_gate1.input1.value = b
xor_adder_gate2 = XorGate("xor_adder_2", circuit_adder)
xor_adder_gate1.output.connect(xor_adder_gate2.input0)
xor_adder_gate2.input1.value = ci
summation = xor_adder_gate2.output.value
# co
and_adder_gate1 = AndGate("and_adder_1", circuit_adder)
xor_adder_gate1.output.connect(and_adder_gate1.input0)
and_adder_gate1.input1.value = ci
and_adder_gate2 = AndGate("and_adder_2", circuit_adder)
and_adder_gate2.input0.value = a
and_adder_gate2.input1.value = b
or_adder_gate1 = OrGate("or_adder_1", circuit_adder)
and_adder_gate1.output.connect(or_adder_gate1.input0)
and_adder_gate2.output.connect(or_adder_gate1.input1)
co = or_adder_gate1.output.value
# cost
cost = circuit_adder.cost
tup = (summation, co, cost)
return tup
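# Truth-table sanity check for full_adder (illustrative; the third element of the
# returned tuple is the circuit cost, which depends only on the gates built above):
#
#     assert full_adder(False, False, False)[:2] == (False, False)
#     assert full_adder(True, True, False)[:2] == (False, True)
#     assert full_adder(True, True, True)[:2] == (True, True)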
if __name__ == '__main__':
# Tests from old assignment
test()
abstract_class_test()
try:
wow = AndGate("try", circuit="potato")
except TypeError as e:
print("The TypeError for circuit works: {}".format(e))
try:
wow2 = AndGate("try")
wow2.next = "potato"
except TypeError as e:
print("The TypeError for circuit works: {}".format(e))
try:
circuit_wow = Circuit()
circuit_wow.add("potato")
except TypeError as e:
print("The TypeError for circuit works: {}".format(e))
test2()
print(full_adder(a=True, b=False, ci=True))
|
the-stack_0_21777 | # Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to initialize Node using a NumPy-like syntax."""
import warnings
from typing import Optional, Sequence, Tuple, Any, Union, Type, Callable, List
from typing import Text
import numpy as np
from tensornetwork.backends import abstract_backend
#pylint: disable=line-too-long
from tensornetwork.network_components import AbstractNode, Node, outer_product_final_nodes
from tensornetwork import backend_contextmanager
from tensornetwork import backends
from tensornetwork import network_components
Tensor = Any
BaseBackend = abstract_backend.AbstractBackend
# INITIALIZATION
def initialize_node(fname: Text,
*fargs: Any,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None,
**fkwargs: Any) -> Tensor:
"""Return a Node wrapping data obtained by an initialization function
implemented in a backend. The Node will have the same shape as the
underlying array that function generates, with all Edges dangling.
This function is not intended to be called directly, but doing so should
be safe enough.
Args:
fname: Name of the method of backend to call (a string).
*fargs: Positional arguments to the initialization method.
name: Optional name of the Node.
axis_names: Optional names of the Node's dangling edges.
backend: The backend or its name.
**fkwargs: Keyword arguments to the initialization method.
Returns:
node: A Node wrapping data generated by
(the_backend).fname(*fargs, **fkwargs), with one dangling edge per
axis of data.
"""
if backend is None:
backend_obj = backend_contextmanager.get_default_backend()
else:
backend_obj = backends.backend_factory.get_backend(backend)
func = getattr(backend_obj, fname)
data = func(*fargs, **fkwargs)
node = Node(data, name=name, axis_names=axis_names, backend=backend)
return node
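# For example (illustrative), initialize_node("ones", (2, 3), backend="numpy") wraps a
# 2x3 array of ones in a Node with two dangling edges; the convenience wrappers below
# (eye, zeros, ones, randn, random_uniform) all route through this helper.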
def eye(N: int,
dtype: Optional[Type[np.number]] = None,
M: Optional[int] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node representing a 2D array with ones on the diagonal and
zeros elsewhere. The Node has two dangling Edges.
Args:
N (int): The first dimension of the returned matrix.
dtype, optional: dtype of array (default np.float64).
M (int, optional): The second dimension of the returned matrix.
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
I : Node of shape (N, M)
      Represents an array of all zeros except for ones on the main diagonal.
"""
the_node = initialize_node(
"eye",
N,
name=name,
axis_names=axis_names,
backend=backend,
dtype=dtype,
M=M)
return the_node
def zeros(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of shape `shape` of all zeros.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape`. Represents an array of all zeros.
"""
the_node = initialize_node(
"zeros",
shape,
name=name,
axis_names=axis_names,
backend=backend,
dtype=dtype)
return the_node
def zeros_like(a: Any,
dtype: Optional[Type[np.number]] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of all ones, of same shape as `a`.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape`
Represents an array of all ones.
"""
the_node = initialize_node(
"zeros",
a.shape,
name=name,
axis_names=axis_names,
backend=backend,
dtype=dtype)
return the_node
def ones(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of shape `shape` of all ones.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape`
Represents an array of all ones.
"""
the_node = initialize_node(
"ones",
shape,
name=name,
axis_names=axis_names,
backend=backend,
dtype=dtype)
return the_node
def ones_like(a: Any,
dtype: Optional[Type[np.number]] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of all ones, of same shape as `a`.
The Node has one dangling Edge per dimension.
Args:
    a : Node or array whose shape is used.
dtype, optional: dtype of array (default np.float64).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
    the_node : Node of the same shape as `a`.
Represents an array of all ones.
"""
the_node = initialize_node(
"ones",
a.shape,
name=name,
axis_names=axis_names,
backend=backend,
dtype=dtype)
return the_node
def randn(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of shape `shape` of Gaussian random floats.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape` filled with Gaussian random data.
"""
the_node = initialize_node(
"randn",
shape,
name=name,
axis_names=axis_names,
backend=backend,
seed=seed,
dtype=dtype)
return the_node
def random_uniform(
shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None,
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None,
backend: Optional[Union[Text, BaseBackend]] = None) -> Tensor:
"""Return a Node of shape `shape` of uniform random floats.
The Node has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
boundaries : Values lie in [boundaries[0], boundaries[1]).
name (text, optional): Name of the Node.
axis_names (optional): List of names of the edges.
backend (optional): The backend or its name.
Returns:
the_node : Node of shape `shape` filled with uniform random data.
"""
the_node = initialize_node(
"random_uniform",
shape,
name=name,
axis_names=axis_names,
backend=backend,
seed=seed,
boundaries=boundaries,
dtype=dtype)
return the_node
def norm(node: AbstractNode) -> Tensor:
"""The L2 norm of `node`
Args:
node: A `AbstractNode`.
Returns:
The L2 norm.
Raises:
AttributeError: If `node` has no `backend` attribute.
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
return node.backend.norm(node.tensor)
def conj(node: AbstractNode,
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None) -> AbstractNode:
"""Conjugate a `node`.
Args:
node: A `AbstractNode`.
name: Optional name to give the new node.
axis_names: Optional list of names for the axis.
Returns:
A new node. The complex conjugate of `node`.
Raises:
AttributeError: If `node` has no `backend` attribute.
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
backend = node.backend
if not axis_names:
axis_names = node.axis_names
return Node(
backend.conj(node.tensor),
name=name,
axis_names=axis_names,
backend=backend)
def transpose(node: AbstractNode,
permutation: Sequence[Union[Text, int]],
name: Optional[Text] = None,
axis_names: Optional[List[Text]] = None) -> AbstractNode:
"""Transpose `node`
Args:
node: A `AbstractNode`.
permutation: A list of int or str. The permutation of the axis.
name: Optional name to give the new node.
axis_names: Optional list of names for the axis.
Returns:
A new node. The transpose of `node`.
Raises:
AttributeError: If `node` has no `backend` attribute, or if
`node` has no tensor.
ValueError: If either `permutation` is not the same as expected or
if you try to permute with a trace edge.
"""
if not hasattr(node, 'backend'):
raise AttributeError('Node {} of type {} has no `backend`'.format(
node, type(node)))
perm = [node.get_axis_number(p) for p in permutation]
if not axis_names:
axis_names = node.axis_names
new_node = Node(
node.tensor, name=name, axis_names=node.axis_names, backend=node.backend)
return new_node.reorder_axes(perm)
def kron(nodes: Sequence[AbstractNode]) -> AbstractNode:
"""Kronecker product of the given nodes.
Kronecker products of nodes is the same as the outer product, but the order
of the axes is different. The first half of edges of all of the nodes will
appear first half of edges in the resulting node, and the second half ot the
edges in each node will be in the second half of the resulting node.
For example, if I had two nodes :math:`X_{ab}`, :math:`Y_{cdef}`, and
:math:`Z_{gh}`, then the resulting node would have the edges ordered
:math:`R_{acdgbefh}`.
The kronecker product is designed such that the kron of many operators is
itself an operator.
Args:
nodes: A sequence of `AbstractNode` objects.
Returns:
A `Node` that is the kronecker product of the given inputs. The first
half of the edges of this node would represent the "input" edges of the
operator and the last half of edges are the "output" edges of the
operator.
"""
input_edges = []
output_edges = []
for node in nodes:
order = len(node.shape)
if order % 2 != 0:
raise ValueError(f"All operator tensors must have an even order. "
f"Found tensor with order {order}")
input_edges += node.edges[:order // 2]
output_edges += node.edges[order // 2:]
result = outer_product_final_nodes(nodes, input_edges + output_edges)
return result
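# Illustrative sketch (assumes the default numpy backend): the kron of two single-site
# identity operators is a two-site operator whose first two edges are the inputs and the
# last two the outputs.
#
#     x = Node(np.eye(2))
#     y = Node(np.eye(2))
#     xy = kron([x, y])
#     assert xy.shape == (2, 2, 2, 2)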
|
the-stack_0_21778 | # realsense-simple.py
#
# Class to interact with the RealSense camera using mostly simple defaults and no image post processing
# Can capture a frame and return as a Realsense frame object and provides distance to a
# given X,Y pixel value in the frame.
#
# Author: Dean Colcott - https://www.linkedin.com/in/deancolcott/
#
#
import pyrealsense2 as rs
import numpy as np
import logging
import sys
import cv2
# Config the logger.
log = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(name)s - [%(levelname)s] - %(message)s", stream=sys.stdout, level=logging.INFO)
class RealsenseDevice():
"""
Interact with the RealSense d4xx camera initialising with mostly simple defaults and no image post processing
"""
def __init__(self):
log.info('Initialising RealSense Camera')
# Configure RealSense device,
self.pipeline = rs.pipeline()
config = rs.config()
profile = config.resolve(self.pipeline)
# Configure depth and color streams (can also add IR stream if desired)
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 15)
config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 15)
# Start RealSense camera streaming
self.pipeline.start(config)
self.rs_name = profile.get_device().get_info(rs.camera_info.name)
self.rs_serial = profile.get_device().get_info(rs.camera_info.serial_number)
log.info(
'Successfully Initialised {} - Serial:{}'.format(self.rs_name, self.rs_serial))
def get_device_name(self):
return self.rs_name
def get_device_serial(self):
return self.rs_serial
def get_rbg_depth_frames(self):
"""
Returns a dict of the latest available color and depth frames from the RealSense Device
labelled as color_frame and depth_frame respectively.
"""
# Wait for a coherent pair of frames: depth and color
frames = self.pipeline.wait_for_frames()
# Create alignment primitive with color as its target stream:
align = rs.align(rs.stream.color)
frames = align.process(frames)
# Capture the depth frame
depth_frame = frames.get_depth_frame()
if not depth_frame:
raise Exception('Depth Frame requested but not available')
# Capture the Color frame
color_frame = frames.get_color_frame()
if not color_frame:
raise Exception('Color Frame requested but not available')
return {'color_frame': color_frame, 'depth_frame': depth_frame}
def get_frames_as_np_array(self, frames=None):
"""
Takes frames as a dict and converts each to a NP array and returns in a dict with same key values
        appended with '_np'. If a frame is a Realsense Depth frame, a colorised depth map is created and returned for it.
frames: (Optional) A dict of named Realsense device frames. If not provided, will take the
next available color and depth frame from the RealSense Device and return these
labelled as color_frame_np and depth_frame_np respectively.
"""
# If frames not provided, get next available depth and color frames.
if not frames:
frames = self.get_rbg_depth_frames()
# Convert frames to numpy arrays
np_arrays = {}
for key in frames:
frame = frames[key]
if frame.is_depth_frame():
np_arrays[key + "_np"] = np.asanyarray(
rs.colorizer().colorize(frame).get_data())
else:
np_arrays[key + "_np"] = np.asanyarray(frame.get_data())
return np_arrays
def get_distance_to_frame_pixel(self, depth_frame=None, x=None, y=None):
"""
Returns the distance measured (in Meters) to the x, y pixel of the depth_frame
depth_frame: (Optional) The depth_frame to perform the depth measurement on,
if not provided will take the next available depth frame from the RealSense Device.
X / Y: (Optional) Will default to center of the given depth_frame plane if not provided.
"""
# If previous depth frames not provided, get next available frame from RealSense Device.
if not depth_frame:
            depth_frame = self.get_rbg_depth_frames()['depth_frame']
# If not x or y set then set to center of given image plane
if not x:
x = int(depth_frame.get_width() / 2)
if not y:
y = int(depth_frame.get_height() / 2)
zDepth = depth_frame.get_distance(x, y)
zDepth = '{:.3f}'.format(zDepth)
return float(zDepth)
def get_resize_np_array(self, width, height, frames=None):
"""
        Computer Vision ML models need to perform inference on images of the same size
        that they were trained on. This is a simple convenience function to resize a dict of frames
        in NP array format to the given width and height.
        Note: This is a basic example that will introduce distortion to the image if it has a different aspect
        ratio than the one it is being resized to. For best results consider more advanced solutions.
        frames: (Optional) A dict of named RealSense device frames in NP array format. If not provided, will take the
        next available color and depth frame from the RealSense Device, resize them to the given dimensions
        and return these labelled as color_frame_np_resized and depth_frame_np_resized respectively.
        Else will return dict with same keys appended with '_resized'
        """
        # If frames not provided, get the next available depth and color frames as NP arrays.
        if not frames:
            frames = self.get_frames_as_np_array()
        # Resize each NP array frame to the requested width and height
        np_arrays = {}
        for key in frames:
            np_arrays[key + "_resized"] = cv2.resize(frames[key], (width, height))
        return np_arrays
def close_realsense_connection(self):
self.pipeline.stop()
log.info('RealSense pipeline successfully closed for {} - Serial {}'.format(
self.rs_name, self.rs_serial))
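# Illustrative usage sketch (not part of the original module):
#
#     rs_device = RealsenseDevice()
#     try:
#         frames = rs_device.get_rbg_depth_frames()
#         centre_depth = rs_device.get_distance_to_frame_pixel(frames['depth_frame'])
#         log.info('Distance to centre pixel: %s m', centre_depth)
#     finally:
#         rs_device.close_realsense_connection()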
|
the-stack_0_21779 | import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
import torch.nn as nn
import torch.nn.functional as func
import torch.optim as optim
import os
import pandas as pd
import io
import cv2
class AnimalDataset(Dataset):
"""
Animal dataset.
References
----------
https://www.youtube.com/watch?v=ZoZHd0Zm3RY
"""
def __init__(self, csv_file, root_dir, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.annotations = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.annotations)
def __getitem__(self, idx):
# if torch.is_tensor(idx):
# idx = idx.tolist()
img_name = self.annotations.iloc[idx, 0]
image = cv2.imread(img_name)
animalType = torch.tensor(int(self.annotations.iloc[idx, 1]))
# animalType = animalType.reshape(-1, 2)
if self.transform:
image = self.transform(image)
return image, animalType
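# Illustrative usage sketch (assumes a CSV in the same "path,label" format written by
# main() below):
#
#     dataset = AnimalDataset('categories.csv', 'training', transform=transforms.ToTensor())
#     image, label = dataset[0]
#     loader = DataLoader(dataset, batch_size=4, shuffle=True)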
class ConvNet(nn.Module):
def __init__(self, num_classes):
super(ConvNet, self).__init__()
# calculate same padding:
# (w - k + 2*p)/s + 1 = o
# => p = (s(o-1) - w + k)/2
# 28x28x1 => 28x28x8
self.conv_1 = torch.nn.Conv2d(in_channels=1,
out_channels=8,
kernel_size=(3, 3),
stride=(1, 1),
padding=1) # (1(28-1) - 28 + 3) / 2 = 1
# 28x28x8 => 14x14x8
self.pool_1 = torch.nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2),
padding=0) # (2(14-1) - 28 + 2) = 0
# 14x14x8 => 14x14x16
self.conv_2 = torch.nn.Conv2d(in_channels=8,
out_channels=16,
kernel_size=(3, 3),
stride=(1, 1),
padding=1) # (1(14-1) - 14 + 3) / 2 = 1
# 14x14x16 => 7x7x16
self.pool_2 = torch.nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2),
padding=0) # (2(7-1) - 14 + 2) = 0
self.linear_1 = torch.nn.Linear(7 * 7 * 16, num_classes)
def forward(self, x):
out = self.conv_1(x)
out = func.relu(out)
out = self.pool_1(out)
out = self.conv_2(out)
out = func.relu(out)
out = self.pool_2(out)
logits = self.linear_1(out.view(-1, 7 * 7 * 16))
probas = func.softmax(logits, dim=1)
return logits, probas
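# Shape sanity check for ConvNet (illustrative): a batch of four 28x28 single-channel
# images yields logits and class probabilities of shape (batch, num_classes).
#
#     model = ConvNet(num_classes=2)
#     logits, probas = model(torch.randn(4, 1, 28, 28))
#     assert logits.shape == (4, 2) and probas.shape == (4, 2)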
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(func.relu(self.conv1(x)))
x = self.pool(func.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = func.relu(self.fc1(x))
x = func.relu(self.fc2(x))
x = self.fc3(x)
return x
def imShow(img):
img = img / 2 + 0.5 # unnormalize
np_img = img.numpy()
plt.imshow(np.transpose(np_img, (1, 2, 0)))
plt.show()
def findImages(directory):
source_tree = list(os.walk(directory))
root, subdir, filenames = source_tree[0]
return [("%s/%s" % (root, filename)) for filename in filenames]
def main():
"""
Classification implementation using PyTorch.
References
----------
Loading data: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
"""
num_epoch = 10
# Read training data and categories. Also read test data.
cwd = os.getcwd().replace("\\", "/")
trainDir = "%s/training" % cwd
testDir = "%s/testing" % cwd
csvFileName_train = "%s/categories.csv" % cwd
csvFileName_test = "%s/test_images.csv" % cwd
trainFileNames = findImages(trainDir)
testFileNames = findImages(testDir)
categoryCSV = io.open(csvFileName_train, mode="wt")
for name in trainFileNames:
category = name.split("/")[-1].split(".")[0]
categoryCSV.write("%s,%d\n" % (name, category == "dog"))
testCSV = io.open(csvFileName_test, mode="wt")
for name in testFileNames:
testCSV.write("%s\n" % name)
# Create training and testing sets of images
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainingSet = AnimalDataset(csvFileName_train, trainDir, transform=transform)
trainingLoader = DataLoader(trainingSet, batch_size=4, shuffle=True, num_workers=2)
testSet = AnimalDataset(csvFileName_test, testDir, transform=transform)
testLoader = DataLoader(testSet, batch_size=4, shuffle=False, num_workers=2)
exit(0)
classes = ("dog", "cat")
# Get some random training images
dataIterator = iter(trainingLoader)
images, labels = next(dataIterator)
print(classes, len(classes))
# show images
imShow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(len(labels))))
# Create neural net
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = ConvNet(2)
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
optimizer.zero_grad()
# Save the net (just to show how it's done)
# PATH = './cifar_net.pth'
# torch.save(net.state_dict(), PATH)
# Training
for epoch in range(num_epoch):
losses = []
for index, (data, targets) in enumerate(trainingLoader):
            inputs, labels = data.to(device), targets.to(device)
print(inputs, labels)
exit(0)
# Testing
dataIterator = iter(testLoader)
images, labels = next(dataIterator)
# print images
imShow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(len(labels))))
# Check output of testing for a few images
outputs = net(images)
predicted = torch.max(outputs, 1)[1]
print('Predicted: ', ' '.join('%5s' % classes[int(predicted[j])]
for j in range(4)))
# See results for entire data set
correct = 0
total = 0
with torch.no_grad():
for data in testLoader:
inputs, labels = data[0].to(device), data[1].to(device)
            outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the test images: %d %%' % (
100 * correct / total))
# See sub-par results
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testLoader:
inputs, labels = data[0].to(device), data[1].to(device)
            outputs = net(inputs)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
if __name__ == "__main__":
main()
|
the-stack_0_21781 | import pathlib
from typing import Dict
from greendoge.consensus.constants import ConsensusConstants
from greendoge.consensus.default_constants import DEFAULT_CONSTANTS
from greendoge.harvester.harvester import Harvester
from greendoge.harvester.harvester_api import HarvesterAPI
from greendoge.rpc.harvester_rpc_api import HarvesterRpcApi
from greendoge.server.outbound_message import NodeType
from greendoge.server.start_service import run_service
from greendoge.types.peer_info import PeerInfo
from greendoge.util.config import load_config_cli
from greendoge.util.default_root import DEFAULT_ROOT_PATH
# See: https://bugs.python.org/issue29288
"".encode("idna")
SERVICE_NAME = "harvester"
def service_kwargs_for_harvester(
root_path: pathlib.Path,
config: Dict,
consensus_constants: ConsensusConstants,
) -> Dict:
connect_peers = [PeerInfo(config["farmer_peer"]["host"], config["farmer_peer"]["port"])]
overrides = config["network_overrides"]["constants"][config["selected_network"]]
updated_constants = consensus_constants.replace_str_to_bytes(**overrides)
harvester = Harvester(root_path, config, updated_constants)
peer_api = HarvesterAPI(harvester)
network_id = config["selected_network"]
kwargs = dict(
root_path=root_path,
node=harvester,
peer_api=peer_api,
node_type=NodeType.HARVESTER,
advertised_port=config["port"],
service_name=SERVICE_NAME,
server_listen_ports=[config["port"]],
connect_peers=connect_peers,
auth_connect_peers=True,
network_id=network_id,
)
if config["start_rpc_server"]:
kwargs["rpc_info"] = (HarvesterRpcApi, config["rpc_port"])
return kwargs
def main() -> None:
config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
kwargs = service_kwargs_for_harvester(DEFAULT_ROOT_PATH, config, DEFAULT_CONSTANTS)
return run_service(**kwargs)
if __name__ == "__main__":
main()
|
the-stack_0_21782 | from apartments.models import Apartment
from .forms import ApartmentCreationForm
from django.urls import reverse
import pytest
@pytest.mark.django_db
class TestModels:
def test_city_creation(self, city_model):
assert city_model.cityName == 'nice_city'
def test_apartment_creation(self, apartment_model):
assert apartment_model.address == 'street'
assert apartment_model.rent == 4500
assert apartment_model.is_relevant
def test_get_apartment_by_id(self, apartment_model):
check_apartment_success = Apartment.get_apartment_by_id(apartment_model.owner.id)
check_apartment_fail = Apartment.get_apartment_by_id(-1)
assert check_apartment_success == apartment_model
assert check_apartment_fail is None
def test_get_all_relevant_apartments(self):
        apartments_query = Apartment.get_all_relevant_apartments()
        assert all(isinstance(current_apartment, Apartment) for current_apartment in apartments_query)
        assert all(current_apartment.is_relevant for current_apartment in apartments_query)
@pytest.mark.django_db
class TestViews:
def test_update_apartment_view_to_owner(self, client, apartment_model):
client.login(email='[email protected]', password='password')
response = client.get('/apartments/update')
assert response.status_code == 200
def test_update_apartment_view_to_not_owner_user(self, client, user_model):
client.login(email='[email protected]', password='password')
response = client.get('/apartments/update')
assert response.status_code == 302
response = client.get(response.url)
assert response.status_code == 200
def test_apartment_details_view_to_valid_apartment_id(self, client, apartment_model):
client.login(email='[email protected]', password='password')
path = '/apartments/' + str(apartment_model.owner.id) + '/details'
response = client.get(path)
assert response.status_code == 200
def test_apartment_details_view_to_invalid_apartment_id(self, client, apartment_model):
client.login(email='[email protected]', password='password')
path = '/apartments/0/details'
response = client.get(path)
assert response.status_code == 302
response = client.get(response.url)
assert response.status_code == 200
@pytest.mark.parametrize(
'city, address, rent, num_of_roomates, num_of_rooms, start_date, about, validity',
[
('city_model', 'address', 1000, 2, 2, '2021-2-2', 'about', True),
('city_model', '', 1000, 2, 2, '2021-2-2', 'about', False),
('city_model', 'address', None, 2, 2, '2021-2-2', 'about', False),
('city_model', 'address', 1000, None, 2, '2021-2-2', 'about', False),
('city_model', 'address', 1000, 2, None, '2021-2-2', 'about', False),
('city_model', 'address', 1000, 2, 2, '', 'about', False),
('city_model', 'address', 1000, 2, 2, '2021-2-2', '', True),
('city_model', 'address', -1, 2, 2, '2021-2-2', 'about', False),
('city_model', 'address', 1000, -1, 2, '2021-2-2', 'about', False),
('city_model', 'address', 1000, 2, -1, '2021-2-2', 'about', False),
])
@pytest.mark.django_db
def test_apartment_form_validity(city, address, rent, num_of_roomates, num_of_rooms, start_date, about,
validity, request):
city = request.getfixturevalue(city)
form = ApartmentCreationForm(data={
'city': city,
'address': address,
'rent': rent,
'num_of_roomates': num_of_roomates,
'num_of_rooms': num_of_rooms,
'start_date': start_date,
'about': about,
})
assert form.is_valid() is validity
@pytest.mark.django_db
def test_valid_form_is_valid(valid_apartment_creation_form):
assert valid_apartment_creation_form.is_valid
@pytest.mark.django_db
def test_fail_to_save_apartment_form_with_commit_true(valid_apartment_creation_form):
with pytest.raises(ValueError):
valid_apartment_creation_form.save(commit=True)
@pytest.mark.django_db
def test_saving_function_of_apartment_creation_form(valid_user_creation_form, valid_apartment_creation_form):
new_user = valid_user_creation_form.save()
new_apart = valid_apartment_creation_form.save()
assert Apartment.objects.filter(owner=new_user).count() == 0
new_apart.owner = new_user
new_apart.save()
assert Apartment.objects.filter(owner=new_user).count() == 1
@pytest.mark.django_db
def test_register_apartment_view(client):
url = reverse('register_apartment')
response = client.get(url)
assert response.status_code == 200
@pytest.mark.django_db
def test_accessing_apartment_register_view_with_logged_user(client, user_model):
client.login(email='[email protected]', password='password')
url = reverse('register_apartment')
response = client.get(url)
assert response.status_code == 302
response = client.get(response.url)
assert response.status_code == 200
|
the-stack_0_21783 | """
Copyright (c) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.ops.simplernms import SimplerNMSOp
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph
nodes_attributes = {'SimplerNMS_1': {'type': 'SimplerNMS', 'kind': 'op'},
'node_1': {'type': 'Identity', 'kind': 'op'},
'op_output': { 'kind': 'op', 'op': 'Result'}
}
class TestSimplerNMSInfer(unittest.TestCase):
def test_simplernms_infer_ideal(self):
graph = build_graph(nodes_attributes,
[('SimplerNMS_1', 'node_1'),
('node_1', 'op_output')
],
{'node_1': {'shape': None},
'SimplerNMS_1': {'feat_stride': 16, 'post_nms_topn': 150, 'scale': [1, 2, 3]}
})
simplernms_node = Node(graph, 'SimplerNMS_1')
SimplerNMSOp.simplernms_infer(simplernms_node)
exp_shape = np.array([150, 5])
res_shape = graph.node['node_1']['shape']
for i in range(0, len(exp_shape)):
self.assertEqual(exp_shape[i], res_shape[i])
self.assertEqual(simplernms_node.scale, ['1', '2', '3'])
def test_simplernms_infer_no_shape(self):
graph = build_graph(nodes_attributes,
[('SimplerNMS_1', 'node_1'),
('node_1', 'op_output')
],
{'node_1': {'shape': None},
'SimplerNMS_1': {'feat_stride': 12, 'post_nms_topn': 150, 'scale': [1, 2, 3]}
})
simplernms_node = Node(graph, 'SimplerNMS_1')
SimplerNMSOp.simplernms_infer(simplernms_node)
self.assertIsNone(graph.node['node_1']['shape'])
|
the-stack_0_21784 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'''
This is a small DSL to describe builds of Facebook's open-source projects
that are published to Github from a single internal repo, including projects
that depend on folly, wangle, proxygen, fbthrift, etc.
This file defines the interface of the DSL, and common utilities, but you
will have to instantiate a specific builder, with specific options, in
order to get work done -- see e.g. make_docker_context.py.
== Design notes ==
Goals:
- A simple declarative language for what needs to be checked out & built,
how, in what order.
- The same specification should work for external continuous integration
builds (e.g. Travis + Docker) and for internal VM-based continuous
integration builds.
- One should be able to build without root, and to install to a prefix.
Non-goals:
- General usefulness. The only point of this is to make it easier to build
and test Facebook's open-source services.
Ideas for the future -- these may not be very good :)
- Especially on Ubuntu 14.04 the current initial setup is inefficient:
we add PPAs after having installed a bunch of packages -- this prompts
reinstalls of large amounts of code. We also `apt-get update` a few
times.
- A "shell script" builder. Like DockerFBCodeBuilder, but outputs a
shell script that runs outside of a container. Or maybe even
synchronously executes the shell commands, `make`-style.
- A "Makefile" generator. That might make iterating on builds even quicker
than what you can currently get with Docker build caching.
- Generate a rebuild script that can be run e.g. inside the built Docker
container by tagging certain steps with list-inheriting Python objects:
* do change directories
* do NOT `git clone` -- if we want to update code this should be a
separate script that e.g. runs rebase on top of specific targets
across all the repos.
* do NOT install software (most / all setup can be skipped)
* do NOT `autoreconf` or `configure`
* do `make` and `cmake`
- If we get non-Debian OSes, part of ccache setup should be factored out.
'''
import os
import re
from shell_quoting import path_join, shell_join, ShellQuoted
def _read_project_github_hashes():
base_dir = 'deps/github_hashes/' # trailing slash used in regex below
for dirname, _, files in os.walk(base_dir):
for filename in files:
path = os.path.join(dirname, filename)
with open(path) as f:
                m_proj = re.match('^' + base_dir + r'(.*)-rev\.txt$', path)
if m_proj is None:
raise RuntimeError('Not a hash file? {0}'.format(path))
m_hash = re.match('^Subproject commit ([0-9a-f]+)\n$', f.read())
if m_hash is None:
raise RuntimeError('No hash in {0}'.format(path))
yield m_proj.group(1), m_hash.group(1)
class FBCodeBuilder(object):
def __init__(self, **kwargs):
self._options_do_not_access = kwargs # Use .option() instead.
# This raises upon detecting options that are specified but unused,
# because otherwise it is very easy to make a typo in option names.
self.options_used = set()
self._github_hashes = dict(_read_project_github_hashes())
def __repr__(self):
return '{0}({1})'.format(
self.__class__.__name__,
', '.join(
'{0}={1}'.format(k, repr(v))
for k, v in self._options_do_not_access.items()
)
)
def option(self, name, default=None):
value = self._options_do_not_access.get(name, default)
if value is None:
raise RuntimeError('Option {0} is required'.format(name))
self.options_used.add(name)
return value
def has_option(self, name):
return name in self._options_do_not_access
def add_option(self, name, value):
if name in self._options_do_not_access:
raise RuntimeError('Option {0} already set'.format(name))
self._options_do_not_access[name] = value
#
# Abstract parts common to every installation flow
#
def render(self, steps):
'''
Converts nested actions to your builder's expected output format.
Typically takes the output of build().
'''
res = self._render_impl(steps) # Implementation-dependent
# Now that the output is rendered, we expect all options to have
# been used.
unused_options = set(self._options_do_not_access)
unused_options -= self.options_used
if unused_options:
raise RuntimeError(
'Unused options: {0} -- please check if you made a typo '
'in any of them. Those that are truly not useful should '
                'not be set so that this typo detection can be useful.'
.format(unused_options)
)
return res
def build(self, steps):
return [self.setup(), self.diagnostics()] + steps
def setup(self):
'Your builder may want to install packages here.'
raise NotImplementedError
def diagnostics(self):
'Log some system diagnostics before/after setup for ease of debugging'
# The builder's repr is not used in a command to avoid pointlessly
# invalidating Docker's build cache.
return self.step('Diagnostics', [
self.comment('Builder {0}'.format(repr(self))),
self.run(ShellQuoted('hostname')),
self.run(ShellQuoted('cat /etc/issue')),
self.run(ShellQuoted('g++ --version || echo g++ not installed')),
])
def step(self, name, actions):
'A labeled collection of actions or other steps'
raise NotImplementedError
def run(self, shell_cmd):
'Run this bash command'
raise NotImplementedError
def workdir(self, dir):
'Create this directory if it does not exist, and change into it'
raise NotImplementedError
def copy_local_repo(self, dir, dest_name):
'''
Copy the local repo at `dir` into this step's `workdir()`, analog of:
cp -r /path/to/folly folly
'''
raise NotImplementedError
#
# Specific build helpers
#
def install_debian_deps(self):
actions = [
self.run(ShellQuoted(
'apt-get update && apt-get install -yq '
'autoconf-archive '
'bison '
'build-essential '
'cmake '
'curl '
'flex '
'git '
'gperf '
'joe '
'libboost-all-dev '
'libcap-dev '
'libdouble-conversion-dev '
'libevent-dev '
'libgflags-dev '
'libgoogle-glog-dev '
'libkrb5-dev '
'libnuma-dev '
'libsasl2-dev '
'libsnappy-dev '
'libsqlite3-dev '
'libssl-dev '
'libtool '
'netcat-openbsd '
'pkg-config '
'sudo '
'unzip '
'wget'
)),
]
gcc_version = self.option('gcc_version')
# We need some extra packages to be able to install GCC 4.9 on 14.04.
if self.option('os_image') == 'ubuntu:14.04' and gcc_version == '4.9':
actions.append(self.run(ShellQuoted(
'apt-get install -yq software-properties-common && '
'add-apt-repository ppa:ubuntu-toolchain-r/test && '
'apt-get update'
)))
# Make the selected GCC the default before building anything
actions.extend([
self.run(ShellQuoted('apt-get install -yq {c} {cpp}').format(
c=ShellQuoted('gcc-{v}').format(v=gcc_version),
cpp=ShellQuoted('g++-{v}').format(v=gcc_version),
)),
self.run(ShellQuoted(
'update-alternatives --install /usr/bin/gcc gcc {c} 40 '
'--slave /usr/bin/g++ g++ {cpp}'
).format(
c=ShellQuoted('/usr/bin/gcc-{v}').format(v=gcc_version),
cpp=ShellQuoted('/usr/bin/g++-{v}').format(v=gcc_version),
)),
self.run(ShellQuoted('update-alternatives --config gcc')),
])
# Ubuntu 14.04 comes with a CMake version that is too old for mstch.
if self.option('os_image') == 'ubuntu:14.04':
actions.append(self.run(ShellQuoted(
'apt-get install -yq software-properties-common && '
'add-apt-repository ppa:george-edison55/cmake-3.x && '
'apt-get update && '
'apt-get upgrade -yq cmake'
)))
# Debian 8.6 comes with a CMake version that is too old for folly.
if self.option('os_image') == 'debian:8.6':
actions.append(self.run(ShellQuoted(
'echo deb http://ftp.debian.org/debian jessie-backports main '
'>> /etc/apt/sources.list.d/jessie-backports.list && '
'apt-get update && '
'apt-get -yq -t jessie-backports install cmake'
)))
actions.extend(self.debian_ccache_setup_steps())
return self.step('Install packages for Debian-based OS', actions)
def debian_ccache_setup_steps(self):
        return []  # It's ok to ship a renderer without ccache support.
def github_project_workdir(self, project, path):
# Only check out a non-default branch if requested. This especially
# makes sense when building from a local repo.
git_hash = self.option(
'{0}:git_hash'.format(project),
# Any repo that has a hash in deps/github_hashes defaults to
# that, with the goal of making builds maximally consistent.
self._github_hashes.get(project, '')
)
maybe_change_branch = [
self.run(ShellQuoted('git checkout {hash}').format(hash=git_hash)),
] if git_hash else []
base_dir = self.option('projects_dir')
local_repo_dir = self.option('{0}:local_repo_dir'.format(project), '')
return self.step('Check out {0}, workdir {1}'.format(project, path), [
self.workdir(base_dir),
self.run(
ShellQuoted('git clone https://github.com/{p}').format(p=project)
) if not local_repo_dir else self.copy_local_repo(
local_repo_dir, os.path.basename(project)
),
self.workdir(path_join(base_dir, os.path.basename(project), path)),
] + maybe_change_branch)
def fb_github_project_workdir(self, project_and_path, github_org='facebook'):
        'This helper lets Facebook-internal CI special-case FB projects'
project, path = project_and_path.split('/', 1)
return self.github_project_workdir(github_org + '/' + project, path)
def _make_vars(self, make_vars):
return shell_join(' ', (
ShellQuoted('{k}={v}').format(k=k, v=v)
for k, v in ({} if make_vars is None else make_vars).items()
))
def parallel_make(self, make_vars=None):
return self.run(ShellQuoted('make -j {n} {vars}').format(
n=self.option('make_parallelism'),
vars=self._make_vars(make_vars),
))
def make_and_install(self, make_vars=None):
return [
self.parallel_make(make_vars),
self.run(ShellQuoted('make install {vars}').format(
vars=self._make_vars(make_vars),
)),
]
def configure(self):
return [
self.run(ShellQuoted(
'LDFLAGS="$LDFLAGS -L"{p}"/lib -Wl,-rpath="{p}"/lib" '
'CFLAGS="$CFLAGS -I"{p}"/include" '
'CPPFLAGS="$CPPFLAGS -I"{p}"/include" '
'PY_PREFIX={p} '
'./configure --prefix={p}'
).format(p=self.option('prefix'))),
]
def autoconf_install(self, name):
return self.step('Build and install {0}'.format(name), [
self.run(ShellQuoted('autoreconf -ivf')),
] + self.configure() + self.make_and_install())
def cmake_configure(self, name):
cmake_defines = {
'BUILD_SHARED_LIBS': 'ON',
'CMAKE_INSTALL_PREFIX': self.option('prefix'),
}
cmake_defines.update(
self.option('{0}:cmake_defines'.format(name), {})
)
return [
self.run(ShellQuoted(
'CXXFLAGS="$CXXFLAGS -fPIC -isystem "{p}"/include" '
'CFLAGS="$CFLAGS -fPIC -isystem "{p}"/include" '
'cmake {args} ..'
).format(
p=self.option('prefix'),
args=shell_join(' ', (
ShellQuoted('-D{k}={v}').format(k=k, v=v)
for k, v in cmake_defines.items()
)),
)),
]
def cmake_install(self, name):
return self.step('Build and install {0}'.format(name),
self.cmake_configure(name) + self.make_and_install())
def fb_github_autoconf_install(self, project_and_path):
return [
self.fb_github_project_workdir(project_and_path),
self.autoconf_install(project_and_path),
]
def fb_github_cmake_install(self, project_and_path):
return [
self.fb_github_project_workdir(project_and_path),
self.cmake_install(project_and_path),
]
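# Hedged usage sketch (not part of the original file): `builder` is assumed to be a
# concrete FBCodeBuilder subclass with its required options already set, e.g. the
# Docker builder produced by make_docker_context.py.
def _example_folly_spec(builder):
    'Illustrative only: render a spec that checks out and autoconf-builds folly.'
    return builder.render(builder.build(
        builder.fb_github_autoconf_install('folly/folly')
    ))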
|
the-stack_0_21789 | """
Copyright 2018 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hnswlib
import importlib
import itertools
import numpy as np
import operator
import os
import sys
import warnings
from contextlib import contextmanager
from scipy.ndimage.interpolation import zoom
from scipy.stats import norm
from sklearn.neighbors import BallTree
from sklearn.preprocessing import MinMaxScaler
from typing import Callable, List
# Stupid Keras things is a smart way to always print. See:
# https://github.com/keras-team/keras/issues/1406
stderr = sys.stderr
sys.stderr = open(os.devnull, "w")
import keras
from keras.layers import Input
from keras.models import Model
sys.stderr = stderr
flatten = itertools.chain.from_iterable
def compare_lists(
a: List, b: List, conditionator: Callable = all, comparator: Callable = operator.eq
):
return conditionator(map(comparator, a, itertools.islice(a, 1, None)))
def unpredictability(p: np.ndarray) -> float:
"""Unpredictability score
    Unpredictability is defined as twice the mean deviation of the prediction
    probability from its nearest class, i.e. ``2 * mean(|p - round(p)|)``. For example,
    a prediction probability of 0.6 deviates by 0.4 from class `1`, giving an
    unpredictability of 0.8. The highest unpredictability is 1 (at 0.5) and the lowest is 0.
"""
return np.mean(np.abs(p - np.round(p))) * 2
def prediction_proba_change(p0: np.ndarray, p1: np.ndarray) -> float:
"""Unpredictability score
Total amount of change in the prediction probability
"""
return np.mean(np.abs(p0 - p1))
def prediction_change(p0: np.ndarray, p1: np.ndarray, border: float = 0.5) -> float:
"""Prediction change score
Prediction change is defined as the number of times the predicted class changes
based on the border probability.
"""
return np.mean(np.sign(p0 - border) != np.sign(p1 - border))
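def _example_scores():
    """Illustrative sketch only (not in the original module): the score functions
    on toy data; the numbers below follow directly from the definitions above."""
    p0 = np.array([0.6, 0.9])
    p1 = np.array([0.4, 0.95])
    # mean(|p - round(p)|) * 2 = mean([0.4, 0.1]) * 2 = 0.5
    assert np.isclose(unpredictability(p0), 0.5)
    # only the first of the two predictions crosses the 0.5 border
    assert np.isclose(prediction_change(p0, p1), 0.5)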
# def uncertainty(model, X_train: np.ndarray, X_test: np.ndarray) -> float:
# """Unpredictability score
#
# Unpredictability is defined as the minimum deviation of the prediction probability
# from `0.5` to `0` or `1`. For example, for a prediction probability of 0.6 the
# unpredictability is 0.4. The highest unpredictability is 1 and the lowest is 0.
# """
# return random_forest_error(model, X_train, X_test).mean()
def convergence(
x0: np.ndarray, x1: np.ndarray, x2: np.ndarray, decimals: int = 2
) -> float:
"""Convergence score
Given three measurements, the convergence score is the percentage of changes that
increase or decrease in both steps. The highest convergence score is 1 and the
lowest is 0.
"""
x0r = np.round(x0, decimals=decimals)
x1r = np.round(x1, decimals=decimals)
x2r = np.round(x2, decimals=decimals)
return np.mean(np.abs(np.sign(x1r - x0r) + np.sign(x2r - x1r)) == 2)
def divergence(
x0: np.ndarray, x1: np.ndarray, x2: np.ndarray, decimals: int = 3
) -> float:
"""Divergence score
Given three measurements, the divergence score is the percentage of changes that
increase in one step and decrease in the other step or vice versa. The highest
convergence score is 1 and the lowest is 0.
"""
x0r = np.round(x0, decimals=decimals)
x1r = np.round(x1, decimals=decimals)
x2r = np.round(x2, decimals=decimals)
d0 = np.sign(x1r - x0r)
d1 = np.sign(x2r - x1r)
return np.mean((d0 + d1 == 0) * (np.abs(d0) > 0))
def normalize(data, percentile: float = 99.9):
cutoff = np.percentile(data, (0, percentile))
data_norm = np.copy(data)
data_norm[np.where(data_norm < cutoff[0])] = cutoff[0]
data_norm[np.where(data_norm > cutoff[1])] = cutoff[1]
return MinMaxScaler().fit_transform(data_norm)
def normalize_simple(data: np.ndarray):
data -= np.min(data)
return data / np.max(data)
def load_model(filepath: str, silent: bool = False, additional_args: list = None):
try:
if silent:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model = keras.models.load_model(filepath)
else:
model = keras.models.load_model(filepath)
except Exception:
# We assume it's a custom model
Model = getattr(
importlib.import_module(os.path.dirname(filepath)),
os.path.basename(filepath)
)
model = Model.load(*additional_args)
return model
def get_encoder(autoencoder):
# Find embedding layer
embedding_layer_idx = None
for i, layer in enumerate(autoencoder.layers):
if layer.name == "embed":
embedding_layer_idx = i
# Create encoder
inputs = autoencoder.input
encoded = inputs
for i in range(1, embedding_layer_idx + 1):
encoded = autoencoder.layers[i](encoded)
return Model(inputs, encoded)
def get_decoder(autoencoder):
# Find embedding layer
embedding_layer = None
embedding_layer_idx = None
for i, layer in enumerate(autoencoder.layers):
if layer.name == "embed":
embedding_layer = layer
embedding_layer_idx = i
embedding = embedding_layer.output_shape[1]
encoded_input = Input(shape=(embedding,), name="input")
decoded_input = encoded_input
for i in range(embedding_layer_idx + 1, len(autoencoder.layers)):
decoded_input = autoencoder.layers[i](decoded_input)
return Model(encoded_input, decoded_input)
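def _example_encoder_decoder():
    """Illustrative sketch only (not in the original module): split a toy
    autoencoder with an "embed" layer into its encoder and decoder halves."""
    inputs = Input(shape=(16,))
    embedded = keras.layers.Dense(4, name="embed")(inputs)
    reconstructed = keras.layers.Dense(16)(embedded)
    autoencoder = Model(inputs, reconstructed)
    return get_encoder(autoencoder), get_decoder(autoencoder)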
def get_search_target_windows(
db, search_id, window_size, abs_offset, no_stack: bool = False
):
# Get search target window
search = db.get_search(search_id)
search_target_windows = get_target_window_idx(
search["target_from"],
search["target_to"],
window_size,
search["config"]["step_freq"],
abs_offset,
)
# stwi == search target window indices
stwi = np.arange(*search_target_windows[1])
if no_stack:
return stwi
return np.hstack(
(
stwi.reshape(stwi.shape[0], 1),
np.ones(stwi.shape[0]).reshape(stwi.shape[0], 1),
)
).astype(int)
def get_search_target_classif(db, search_id, window_size, abs_offset):
# Get search target window
search = db.get_search(search_id)
search_target_windows = get_target_window_idx(
search["target_from"],
search["target_to"],
window_size,
search["config"]["step_freq"],
abs_offset,
)
# stwi == search target window indices
stwi = np.arange(*search_target_windows[1])
return np.hstack(
(
stwi.reshape(stwi.shape[0], 1),
np.ones(stwi.shape[0]).reshape(stwi.shape[0], 1),
)
).astype(int)
def get_num_windows(chrom_size, window_size, step_size):
return np.ceil((chrom_size - window_size) / step_size).astype(int) + 1
def scaleup_vector(v, out_len, aggregator: Callable = np.mean):
in_len = v.shape[0]
lcm = np.lcm(in_len, out_len)
blowup = np.repeat(v, lcm / in_len)
return aggregator(blowup.reshape(-1, (lcm / out_len).astype(int)), axis=1)
def zoom_array(
in_array,
final_shape,
same_sum=False,
aggregator=np.mean,
zoomor=zoom,
**zoomor_kwargs
):
"""Rescale vectors savely.
Normally, one can use scipy.ndimage.zoom to do array/image rescaling.
However, scipy.ndimage.zoom does not coarsegrain images well. It basically
takes nearest neighbor, rather than averaging all the pixels, when
coarsegraining arrays. This increases noise. Photoshop doesn't do that, and
performs some smart interpolation-averaging instead.
If you were to coarsegrain an array by an integer factor, e.g. 100x100 ->
25x25, you just need to do block-averaging, that's easy, and it reduces
noise. But what if you want to coarsegrain 100x100 -> 30x30?
Then my friend you are in trouble. But this function will help you. This
function will blow up your 100x100 array to a 120x120 array using
    scipy.ndimage.zoom. Then it will coarsegrain the 120x120 array by
block-averaging in 4x4 chunks.
It will do it independently for each dimension, so if you want a 100x100
array to become a 60x120 array, it will blow up the first and the second
dimension to 120, and then block-average only the first dimension.
Parameters
----------
in_array: n-dimensional numpy array (1D also works)
final_shape: resulting shape of an array
same_sum: bool, preserve a sum of the array, rather than values.
by default, values are preserved
aggregator: by default, np.mean. You can plug your own.
zoomor: by default, scipy.ndimage.zoom. You can plug your own.
zoomor_kwargs: a dict of options to pass to zoomor.
"""
in_array = np.asarray(in_array, dtype=np.double)
in_shape = in_array.shape
assert len(in_shape) == len(final_shape), "Number of dimensions need to equal"
mults = [] # multipliers for the final coarsegraining
for i in range(len(in_shape)):
if final_shape[i] < in_shape[i]:
mults.append(int(np.ceil(in_shape[i] / final_shape[i])))
else:
mults.append(1)
# shape to which to blow up
temp_shape = tuple([i * j for i, j in zip(final_shape, mults)])
# stupid zoom doesn't accept the final shape. Carefully crafting the
# multipliers to make sure that it will work.
zoom_multipliers = np.array(temp_shape) / np.array(in_shape) + 0.0000001
assert zoom_multipliers.min() >= 1
# applying zoom
rescaled = zoomor(in_array, zoom_multipliers, **zoomor_kwargs)
for ind, mult in enumerate(mults):
if mult != 1:
sh = list(rescaled.shape)
assert sh[ind] % mult == 0
newshape = sh[:ind] + [sh[ind] // mult, mult] + sh[ind + 1 :]
rescaled.shape = newshape
rescaled = aggregator(rescaled, axis=ind + 1)
assert rescaled.shape == final_shape
if same_sum:
extra_size = np.prod(final_shape) / np.prod(in_shape)
rescaled /= extra_size
return rescaled
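def _example_zoom_array():
    """Illustrative sketch only (not in the original module): coarsegrain a
    100x100 array to a non-integer-factor 30x30 shape."""
    data = np.arange(100 * 100, dtype=np.double).reshape(100, 100)
    rescaled = zoom_array(data, (30, 30))
    assert rescaled.shape == (30, 30)
    return rescaled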
def merge_interleaved(v, step_freq, aggregator=np.nanmean):
v_len = v.shape[0]
out_len = v_len + (step_freq - 1)
blowup = np.zeros((out_len, step_freq))
blowup[:] = np.nan
for i in np.arange(step_freq):
blowup[:, i][i : min(i + v_len, out_len)] = v[: min(v_len, out_len - i)]
return aggregator(blowup, axis=1)
def get_norm_sym_norm_kernel(size):
half_a = np.ceil(size / 2).astype(int)
half_b = np.floor(size / 2).astype(int)
# Normal distribution from the 1st to the 99th percentile
k = norm.pdf(np.linspace(norm.ppf(0.01), norm.ppf(0.99), size))
# Normalize to 1
k /= np.max(k)
# Make symmetric to be usable for convex combination (e.g., in weighted
# averaging)
kn = k
kn[:half_a] = k[:half_a] / (k[:half_a] + k[:half_a][::-1])
kn[half_b:] = kn[:half_a][::-1]
return kn
def merge_interleaved_mat(m: np.ndarray, step_freq: int, kernel: np.ndarray = None):
if kernel is None:
# Take the mean of the interleave vectors by default
kernel = np.ones(m.shape[1])
# length of one consecutive encoding
M = np.int(m.shape[0] / step_freq) * m.shape[1]
# Step size of windows
# I.e., including binning, so 12Kb at 100 bins = 120 bin windows
SZ = np.int(m.shape[1] / step_freq)
# Out length
# N = M + ((step_freq - 1) * SZ)
# Out matrix
o = np.zeros((M, step_freq))
o[:] = np.nan
# Kernel matrix
k = np.zeros((M, step_freq))
k[:] = np.nan
long_k = np.tile(kernel, M)
for i in np.arange(step_freq):
# Linear, consecutive encoding
LCE = m[i::step_freq].flatten()
j = i * SZ
o[:, i][j:M] = LCE[: M - j]
k[:, i][j:M] = long_k[: M - j]
# Normalize kernels
k /= np.nansum(k, axis=1).reshape(k.shape[0], -1)
return np.nansum(o * k, axis=1)
def hashify(l: list, key: str) -> dict:
h = {}
for item in l:
key_value = item.get(key, "unknown")
h[key_value] = item
return h
def is_int(s: str, is_pos: bool) -> bool:
if s is None:
return False
try:
i = int(s)
if is_pos:
return i >= 0
return True
except ValueError:
return False
def kNN(data: np.ndarray, id: int, n: int) -> np.ndarray:
dist = np.sqrt(np.sum((data - data[id]) ** 2, axis=1))
return np.argsort(dist)[1 : n + 1]
def enforce_window_size(start, end, window_size):
if end - start == window_size:
return np.array([start, end])
size = end - start
center = start + (size // 2)
return np.array([center - window_size // 2, center + window_size // 2])
def serialize_classif(classif):
sorting = np.argsort(classif[:, 0])
merged = classif[:, 0] * classif[:, 1]
return merged[sorting].tobytes()
def unserialize_classif(serialized_classif):
return np.frombuffer(serialized_classif, dtype=np.int)
def impact(data, impact=1.0):
impact = min(1, max(0, impact))
return impact * data + (1 - impact)
def get_target_window_idx(
target_from: int,
target_to: int,
window_size: int,
step_freq: int,
abs_offset: int,
max_offset: float = 0.66,
) -> list:
step_size = window_size / step_freq
target_locus = enforce_window_size(target_from, target_to, window_size)
target_locus[0] -= abs_offset
target_locus[1] -= abs_offset
window_from_idx = int(target_locus[0] // step_size)
window_from_pos = int(window_from_idx * step_size)
window_to_idx = window_from_idx + step_freq
# Remove windows that overlap too much with the target search
offset = (target_locus[0] - window_from_pos) / window_size
k = step_freq * (offset - max_offset)
m = np.ceil(k).astype(int)
n = step_freq * offset
return (
# Including any kind of overlaping window
(window_from_idx + np.floor(k), window_to_idx + np.ceil(n)),
# Only include windows that overlap at least 33% with the target
(window_from_idx + m, window_to_idx + m),
)
def knn_density(
data: np.ndarray,
k: int = 5,
dist_metric: str = "euclidean",
summary: Callable[[np.ndarray], np.float64] = np.mean,
):
n, dim = data.shape
if (n > 100000):
# Declaring index
p = hnswlib.Index(space='l2', dim=dim)
# Also see https://github.com/nmslib/hnswlib/blob/master/ALGO_PARAMS.md
ef = np.int(np.ceil(20 * np.log2(n)))
# Initing index - the maximum number of elements should be known beforehand
p.init_index(max_elements=n, ef_construction=ef, M=16)
# Element insertion (can be called several times):
p.add_items(data, np.arange(n))
# Controlling the recall by setting ef
p.set_ef(ef)
_, dist = p.knn_query(data, k = k)
# Delete the index
del p
else:
leaf_size = np.int(np.round(10 * np.log(n)))
bt = BallTree(data, leaf_size=leaf_size)
dist, _ = bt.query(data, k, dualtree=True, sort_results=False)
try:
return summary(dist, axis=1)
except Exception:
out = np.zeros(dist.shape[0])
out[:] = np.nan
return out
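def _example_knn_density():
    """Illustrative sketch only (not in the original module): mean distance to the
    5 nearest neighbours; with n <= 100000 this takes the BallTree branch above."""
    points = np.random.rand(1000, 2)
    densities = knn_density(points, k=5)
    assert densities.shape == (1000,)
    return densities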
@contextmanager
def suppress_with_default(*exceptions, **kwargs):
"""Like contextlib.suppress but with a default value on exception
Decorators:
contextmanager
Arguments:
*exceptions {list} -- List of exceptions to suppress. By default all exceptions are suppressed.
**kwargs {dict} -- Dictionary of key word arguments
Yields:
any -- Default value from ``kwargs``
"""
try:
yield kwargs.get("default", None)
except exceptions or Exception:
pass
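def _example_suppress_with_default():
    """Illustrative sketch only (not in the original module): fall back to a
    default value when parsing fails."""
    with suppress_with_default(ValueError, default=-1) as default:
        return int("not-a-number")  # raises ValueError, so this return is skipped
    return default  # the suppressed exception leaves the yielded default, -1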
def get_c(target_c: list, bg_c: list, opacity: float):
target = np.array(target_c) / 255
bg = np.array(bg_c) / 255
return ((target * (1 / opacity) - bg * ((1 - opacity) / opacity)) * 255).astype(int)
|
the-stack_0_21790 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Google Kubernetes Engine Hook.
"""
import time
import warnings
from typing import Dict, Optional, Sequence, Union
from google.api_core.exceptions import AlreadyExists, NotFound
from google.api_core.gapic_v1.method import DEFAULT
from google.api_core.retry import Retry
from google.cloud import container_v1, exceptions
from google.cloud.container_v1.gapic.enums import Operation
from google.cloud.container_v1.types import Cluster
from google.protobuf.json_format import ParseDict
from airflow import version
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
OPERATIONAL_POLL_INTERVAL = 15
class GKEHook(GoogleBaseHook):
"""
Hook for Google Kubernetes Engine APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
location: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id, delegate_to=delegate_to, impersonation_chain=impersonation_chain,
)
self._client = None
self.location = location
def get_conn(self) -> container_v1.ClusterManagerClient:
"""
        Returns a ClusterManagerClient object.
:rtype: google.cloud.container_v1.ClusterManagerClient
"""
if self._client is None:
credentials = self._get_credentials()
self._client = container_v1.ClusterManagerClient(
credentials=credentials, client_info=self.client_info
)
return self._client
# To preserve backward compatibility
# TODO: remove one day
def get_client(self) -> container_v1.ClusterManagerClient: # pylint: disable=missing-docstring
warnings.warn(
"The get_client method has been deprecated. " "You should use the get_conn method.",
DeprecationWarning,
)
return self.get_conn()
def wait_for_operation(self, operation: Operation, project_id: Optional[str] = None) -> Operation:
"""
        Given an operation, continuously fetches the status from Google Cloud until the
        operation either completes or fails with an error
:param operation: The Operation to wait for
:type operation: google.cloud.container_V1.gapic.enums.Operation
:param project_id: Google Cloud project ID
:type project_id: str
:return: A new, updated operation fetched from Google Cloud
"""
self.log.info("Waiting for OPERATION_NAME %s", operation.name)
time.sleep(OPERATIONAL_POLL_INTERVAL)
while operation.status != Operation.Status.DONE:
if operation.status == Operation.Status.RUNNING or operation.status == Operation.Status.PENDING:
time.sleep(OPERATIONAL_POLL_INTERVAL)
else:
raise exceptions.GoogleCloudError("Operation has failed with status: %s" % operation.status)
# To update status of operation
operation = self.get_operation(operation.name, project_id=project_id or self.project_id)
return operation
def get_operation(self, operation_name: str, project_id: Optional[str] = None) -> Operation:
"""
Fetches the operation from Google Cloud
:param operation_name: Name of operation to fetch
:type operation_name: str
:param project_id: Google Cloud project ID
:type project_id: str
:return: The new, updated operation from Google Cloud
"""
return self.get_conn().get_operation(
project_id=project_id or self.project_id, zone=self.location, operation_id=operation_name
)
@staticmethod
def _append_label(cluster_proto: Cluster, key: str, val: str) -> Cluster:
"""
Append labels to provided Cluster Protobuf
Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?`` (current
airflow version string follows semantic versioning spec: x.y.z).
:param cluster_proto: The proto to append resource_label airflow
version to
:type cluster_proto: google.cloud.container_v1.types.Cluster
:param key: The key label
:type key: str
        :param val: The value of the label
:type val: str
:return: The cluster proto updated with new label
"""
val = val.replace('.', '-').replace('+', '-')
cluster_proto.resource_labels.update({key: val})
return cluster_proto
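    # Illustrative note (not part of the original hook): create_cluster below passes
    # val='v' + version.version, so e.g. an assumed version "2.0.0+astro.1" becomes the
    # label value "v2-0-0-astro-1", matching the [a-z]([-a-z0-9]*[a-z0-9])? regex.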
@GoogleBaseHook.fallback_to_default_project_id
def delete_cluster(
self, name: str, project_id: str, retry: Retry = DEFAULT, timeout: float = DEFAULT
) -> Optional[str]:
"""
Deletes the cluster, including the Kubernetes endpoint and all
worker nodes. Firewalls and routes that were configured during
cluster creation are also deleted. Other Google Compute Engine
resources that might be in use by the cluster (e.g. load balancer
resources) will not be deleted if they were not present at the
initial create time.
:param name: The name of the cluster to delete
:type name: str
:param project_id: Google Cloud project ID
:type project_id: str
:param retry: Retry object used to determine when/if to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: The full url to the delete operation if successful, else None
"""
self.log.info("Deleting (project_id=%s, zone=%s, cluster_id=%s)", project_id, self.location, name)
try:
resource = self.get_conn().delete_cluster(
project_id=project_id, zone=self.location, cluster_id=name, retry=retry, timeout=timeout
)
resource = self.wait_for_operation(resource)
# Returns server-defined url for the resource
return resource.self_link
except NotFound as error:
self.log.info('Assuming Success: %s', error.message)
return None
@GoogleBaseHook.fallback_to_default_project_id
def create_cluster(
self, cluster: Union[Dict, Cluster], project_id: str, retry: Retry = DEFAULT, timeout: float = DEFAULT
) -> str:
"""
Creates a cluster, consisting of the specified number and type of Google Compute
Engine instances.
:param cluster: A Cluster protobuf or dict. If dict is provided, it must
be of the same form as the protobuf message
:class:`google.cloud.container_v1.types.Cluster`
:type cluster: dict or google.cloud.container_v1.types.Cluster
:param project_id: Google Cloud project ID
:type project_id: str
:param retry: A retry object (``google.api_core.retry.Retry``) used to
retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: The full url to the new, or existing, cluster
:raises:
ParseError: On JSON parsing problems when trying to convert dict
AirflowException: cluster is not dict type nor Cluster proto type
"""
if isinstance(cluster, dict):
cluster_proto = Cluster()
cluster = ParseDict(cluster, cluster_proto)
elif not isinstance(cluster, Cluster):
raise AirflowException("cluster is not instance of Cluster proto or python dict")
self._append_label(cluster, 'airflow-version', 'v' + version.version)
self.log.info(
"Creating (project_id=%s, zone=%s, cluster_name=%s)", project_id, self.location, cluster.name
)
try:
resource = self.get_conn().create_cluster(
project_id=project_id, zone=self.location, cluster=cluster, retry=retry, timeout=timeout
)
resource = self.wait_for_operation(resource)
return resource.target_link
except AlreadyExists as error:
self.log.info('Assuming Success: %s', error.message)
return self.get_cluster(name=cluster.name, project_id=project_id)
@GoogleBaseHook.fallback_to_default_project_id
def get_cluster(
self, name: str, project_id: str, retry: Retry = DEFAULT, timeout: float = DEFAULT
) -> Cluster:
"""
Gets details of specified cluster
:param name: The name of the cluster to retrieve
:type name: str
:param project_id: Google Cloud project ID
:type project_id: str
:param retry: A retry object used to retry requests. If None is specified,
requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:return: google.cloud.container_v1.types.Cluster
"""
self.log.info(
"Fetching cluster (project_id=%s, zone=%s, cluster_name=%s)",
project_id or self.project_id,
self.location,
name,
)
return (
self.get_conn()
.get_cluster(
project_id=project_id, zone=self.location, cluster_id=name, retry=retry, timeout=timeout
)
.self_link
)
|
the-stack_0_21791 | #!/usr/bin/env python
"""Train a model.
Argv:
output-dir: A folder to store any output to
kernel: Kernel type to be used in the algorithm
penalty: Penalty parameter of the error term
"""
import argparse
import joblib
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
from sklearn import datasets
from sklearn.metrics import classification_report
from sklearn.metrics import plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from amlrun import get_AMLRun
def train(output_dir='outputs', kernel='linear', penalty=1.0):
    # make sure the output directory exists
os.makedirs(output_dir, exist_ok=True)
# Safely get the Azure ML run
run = get_AMLRun()
# loading the iris dataset
iris = datasets.load_iris()
# X -> features, y -> label
X = iris.data
y = iris.target
class_names = iris.target_names
    # dividing X, y into train and test data. Random seed for reproducibility
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.20, random_state=0)
# create our model - a linear SVM classifier
svm_model_linear = SVC(kernel=kernel, C=penalty)
# evaluate each model in turn
kfold = StratifiedKFold(n_splits=10, random_state=1)
cv_results = cross_val_score(svm_model_linear, X_train, y_train,
cv=kfold, scoring='accuracy')
print('Cross Validation Mean: ', cv_results.mean())
print('Cross Validation Std: ', cv_results.std())
if run is not None:
run.log_list('Cross Validation Accuracies', cv_results)
run.log('Cross Validation Mean', cv_results.mean())
run.log('Cross Validation Std', cv_results.std())
# now training on the full dataset
svm_model_linear.fit(X_train, y_train)
y_pred = svm_model_linear.predict(X_test)
# model accuracy for X_test
accuracy = svm_model_linear.score(X_test, y_test)
print('Accuracy of SVM classifier on test set: {:.2f}'.format(accuracy))
if run is not None:
run.log('Accuracy', np.float(accuracy))
# Plot non-normalized confusion matrix
title = 'Test confusion matrix'
disp = plot_confusion_matrix(svm_model_linear, X_test, y_test,
display_labels=class_names,
cmap=plt.cm.Blues)
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
if run is not None:
run.log_image(title, plot=plt)
else:
plt.savefig(os.path.join(output_dir, 'confusion_matrix.png'))
# Plot normalized confusion matrix
title = 'Normalized test confusion matrix'
disp = plot_confusion_matrix(svm_model_linear, X_test, y_test,
display_labels=class_names,
cmap=plt.cm.Blues,
normalize='true')
disp.ax_.set_title(title)
print(title)
print(disp.confusion_matrix)
if run is not None:
run.log_image(title, plot=plt)
else:
plt.savefig(
os.path.join(output_dir, 'confusion_matrix_normalised.png'))
# Print classification report
print(classification_report(y_test, y_pred))
# files saved in the "outputs" folder are automatically uploaded into
# Azure ML Service run history
model_folder = os.path.join(output_dir, 'model')
model_path = os.path.join(model_folder, '{{cookiecutter.mlops_name}}.joblib')
os.makedirs(model_folder, exist_ok=True)
joblib.dump(svm_model_linear, model_path)
print('Output saved to', output_dir)
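# Hedged usage sketch (not part of the original script): the argument values below
# are illustrative only, e.g. run from the command line as
#   python train.py --output-dir outputs --kernel rbf --penalty 10.0
# or call the function directly:
def _example_train():
    """Illustrative only: train an RBF-kernel SVM with a stronger penalty."""
    train(output_dir='outputs', kernel='rbf', penalty=10.0)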
def main(arguments):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# environment parameters
# parser.add_argument(
# '--data-folder',
# help="local path to training data",
# required=True
# )
parser.add_argument(
"--output-dir", type=str,
default=os.path.join('..', '..', 'data', 'training', 'outputs'),
help='location to write output'
)
# training specific parameters
parser.add_argument('--kernel', type=str, default='linear',
help='Kernel type to be used in the algorithm')
parser.add_argument('--penalty', type=float, default=1.0,
help='Penalty parameter of the error term')
# parse the arguments
args = parser.parse_args(arguments)
# setup output directory
# model_output_dir = os.path.join(
# os.path.dirname(os.path.realpath(__file__)),
# args.output_dir)
# os.makedirs(args.output-dir, exist_ok=True)
train(args.output_dir, args.kernel, args.penalty)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
the-stack_0_21792 | # -*- coding: utf-8 -*-
#
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import jsonschema
import six
from gitrepo import error
from gitrepo import utils
def validate_schema(data, schema, file_path, value_path=None):
logging.debug("Start schema validation for {0} file, {1}".format(file_path, schema))
try:
jsonschema.validate(data, schema)
except jsonschema.exceptions.ValidationError as exc:
raise error.ValidationError(_make_error_message(exc, file_path, value_path))
def validate_file_by_schema(schema, file_path):
if not utils.file_exists(file_path):
raise error.FileDoesNotExist(file_path)
data = utils.parse_yaml(file_path)
if data is not None:
validate_schema(data, schema, file_path)
else:
raise error.FileIsEmpty(file_path)
return data
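def _example_validate():
    """Illustrative sketch only (not part of the original module): validate a
    minimal mapping against a trivial schema; the schema and values are assumptions."""
    schema = {
        "type": "object",
        "properties": {"name": {"type": "string"}},
        "required": ["name"],
    }
    validate_schema({"name": "stable/newton"}, schema, "repos.yaml")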
def _make_error_message(exc, file_path, value_path):
if value_path is None:
value_path = []
if exc.absolute_path:
value_path.extend(exc.absolute_path)
if exc.context:
sub_exceptions = sorted(
exc.context, key=lambda e: len(e.schema_path), reverse=True
)
sub_message = sub_exceptions[0]
value_path.extend(list(sub_message.absolute_path)[2:])
message = sub_message.message
else:
message = exc.message
error_msg = "File '{0}', {1}".format(file_path, message)
if value_path:
value_path = " -> ".join(map(six.text_type, value_path))
error_msg = "{0}, {1}".format(error_msg, "value path '{0}'".format(value_path))
return error_msg
|
the-stack_0_21794 | # -*- coding: utf-8 -*-
import logging
import base64
import time
from lxml import etree
from odoo import api, fields, models, _
from lxml_to_dict import lxml_to_dict
from odoo.addons.wecom_api.api.wecom_abstract_api import ApiException
_logger = logging.getLogger(__name__)
WECOM_USER_MAPPING_ODOO_EMPLOYEE_CATEGORY = {
"TagId": "tagid", # 标签Id
"AddUserItems": "add_employee_ids", # 标签中新增的成员userid列表,用逗号分隔
"DelUserItems": "del_employee_ids", # 标签中删除的成员userid列表,用逗号分隔
"AddPartyItems": "add_department_ids", # 标签中新增的部门id列表,用逗号分隔
"DelPartyItems": "del_department_ids", # 标签中删除的部门id列表,用逗号分隔
}
class EmployeeCategory(models.Model):
_inherit = "hr.employee.category"
company_id = fields.Many2one(
"res.company",
string="Company",
index=True,
default=lambda self: self.env.company,
readonly=True,
)
display_name = fields.Char(string="Display Name", compute="_compute_display_name")
employee_ids = fields.Many2many(
"hr.employee",
"employee_category_rel",
"category_id",
"emp_id",
string="Employees",
domain="[('company_id', '=', company_id)]",
)
department_ids = fields.Many2many(
"hr.department",
"department_category_rel",
"category_id",
"dmp_id",
string="Departments",
domain="[('company_id', '=', company_id)]",
)
tagid = fields.Integer(
string="WeCom Tag ID",
readonly=True,
default=0,
help="Tag ID, non negative integer. When this parameter is specified, the new tag will generate the corresponding tag ID. if it is not specified, it will be automatically increased by the current maximum ID.",
)
is_wecom_tag = fields.Boolean(string="WeCom Tag", default=False,)
@api.depends("is_wecom_tag")
def _compute_display_name(self):
tag = _("WeCom Tag")
for rec in self:
if rec.is_wecom_tag:
rec.display_name = "%s:%s" % (tag, rec.name)
else:
rec.display_name = rec.name
@api.onchange("employee_ids")
def _onchange_employee_ids(self):
if self.is_wecom_tag:
self.change = True
@api.onchange("department_ids")
def _onchange_department_ids(self):
if self.is_wecom_tag:
self.change = True
def unlink(self):
del_wecom_tag = (
self.env["ir.config_parameter"].sudo().get_param("wecom.del_wecom_tag")
)
for tag in self:
if tag.is_wecom_tag and del_wecom_tag:
tag.delete_wecom_tag()
return super(EmployeeCategory, self).unlink()
# ------------------------------------------------------------
    # WeCom tags and WeCom tag members
# ------------------------------------------------------------
def create_wecom_tag(self):
"""
        Create a WeCom tag
"""
debug = self.env["ir.config_parameter"].sudo().get_param("wecom.debug_enabled")
company = self.company_id
if not company:
company = self.env.company
params = {}
try:
wxapi = self.env["wecom.service_api"].InitServiceApi(
company.corpid, company.contacts_app_id.secret
)
if self.tagid:
if debug:
_logger.info(_("Update contacts tags: %s to WeCom") % self.name)
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"TAG_CREATE"
),
{"tagid": self.tagid, "tagname": self.name},
)
message = _("Successfully updated tag.")
else:
if debug:
_logger.info(_("Create contacts tags: %s to WeCom") % self.name)
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"TAG_CREATE"
),
{"tagname": self.name},
)
self.write({"tagid": response["tagid"]})
message = _("Tag successfully created.")
if response["errcode"] == 0:
params = {
"title": _("Success"),
"message": message,
"sticky": False, # 延时关闭
"className": "bg-success",
"next": {"type": "ir.actions.client", "tag": "reload",}, # 刷新窗体
}
action = {
"type": "ir.actions.client",
"tag": "display_notification",
"params": {
"title": params["title"],
"type": "success",
"message": params["message"],
"sticky": params["sticky"],
"next": params["next"],
},
}
return action
except ApiException as ex:
return self.env["wecomapi.tools.action"].ApiExceptionDialog(
ex, raise_exception=True
)
def upload_wecom_tag(self):
"""
        Upload a WeCom tag
"""
debug = self.env["ir.config_parameter"].sudo().get_param("wecom.debug_enabled")
params = {}
try:
wxapi = self.env["wecom.service_api"].InitServiceApi(
self.company_id.corpid, self.company_id.contacts_app_id.secret
)
tagid = 0
            # Create or update the tag
if self.tagid:
tagid = self.tagid
                # Update the tag
if debug:
_logger.info(_("Update contacts tags: %s to WeCom") % self.name)
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"TAG_UPDATE"
),
{"tagid": self.tagid, "tagname": self.name},
)
message = _("Successfully updated tag.")
else:
                # Create the tag
                if debug:
                    _logger.info(_("Create contacts tags: %s to WeCom") % self.name)
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"TAG_CREATE"
),
{"tagname": self.name},
)
tagid = response["tagid"]
message = _("Tag successfully created.")
if tagid:
                # Compare local and remote data, then add and delete tag members
add_tag_members, del_tag_members = self.upload_compare_data(
wxapi, tagid
)
add_tag_members.update({"tagid": tagid})
del_tag_members.update({"tagid": tagid})
if (
len(add_tag_members["userlist"]) > 0
or len(add_tag_members["partylist"]) > 0
):
                    # Add members to the remote tag
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"TAG_ADD_MEMBER"
),
add_tag_members,
)
if (
len(del_tag_members["userlist"]) > 0
or len(del_tag_members["partylist"]) > 0
):
                    # Delete members from the remote tag
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"TAG_DELETE_MEMBER"
),
del_tag_members,
)
except ApiException as ex:
error = self.env["wecom.service_api_error"].get_error_by_code(ex.errCode)
params.update(
{
"title": _("Fail"),
"message": _("API error: %s, error name: %s, error message: %s")
% (str(ex.errCode), error["name"], ex.errMsg),
"type": "warning",
"sticky": True,
"className": "bg-warning",
}
)
else:
params.update(
{
"title": _("Success"),
"message": message,
"type": "success",
"sticky": False, # 延时关闭
"className": "bg-success",
}
)
if response["errmsg"] == "created":
self.write({"tagid": tagid})
params.update(
{"next": {"type": "ir.actions.client", "tag": "reload",},} # 刷新窗体
)
finally:
action = {
"type": "ir.actions.client",
"tag": "display_notification",
"params": params,
}
return action
def upload_compare_data(self, wxapi, tagid):
"""
        Upload helper: compare local and remote data
"""
        # Tag member data currently stored on WeCom
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call("TAG_GET_MEMBER"),
{"tagid": str(tagid)},
)
remote_userlist = []
remote_partylist = []
if response["errcode"] == 0:
for user in response["userlist"]:
remote_userlist.append(user["userid"].lower())
for party in response["partylist"]:
remote_partylist.append(party)
        # Local data lists
local_userlist = []
local_partylist = []
if self.employee_ids:
for user in self.employee_ids:
local_userlist.append(user.wecom_userid)
if self.department_ids:
for department in self.department_ids:
local_partylist.append(department.wecom_department_id)
        # Local minus remote, i.e. the data that needs to be added remotely
remote_add_userlist = list(set(local_userlist).difference(set(remote_userlist)))
remote_add_partylist = list(
set(local_partylist).difference(set(remote_partylist))
)
add_tag_members = {
"userlist": [user for user in remote_add_userlist],
"partylist": [party for party in remote_add_partylist],
}
        # Remote minus local, i.e. the data that needs to be deleted remotely
remote_del_userlist = list(set(remote_userlist).difference(set(local_userlist)))
remote_del_partylist = list(
set(remote_partylist).difference(set(local_partylist))
)
del_tag_members = {
"userlist": [user for user in remote_del_userlist],
"partylist": [party for party in remote_del_partylist],
}
return add_tag_members, del_tag_members
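    # Illustrative note (not part of the original module): with assumed local members
    # ["alice", "bob"] and remote members ["bob", "carol"], the set differences in
    # upload_compare_data() yield a userlist ["alice"] to add and ["carol"] to delete.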
def download_wecom_tag(self):
"""
        Download a single WeCom tag
"""
debug = self.env["ir.config_parameter"].sudo().get_param("wecom.debug_enabled")
params = {}
try:
wxapi = self.env["wecom.service_api"].InitServiceApi(
self.company_id.corpid, self.company_id.contacts_app_id.secret
)
tag_response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call("TAG_GET_LIST")
)
except ApiException as ex:
error = self.env["wecom.service_api_error"].get_error_by_code(ex.errCode)
params.update(
{
"title": _("Fail"),
"message": _("API error: %s, error name: %s, error message: %s")
% (str(ex.errCode), error["name"], ex.errMsg),
"type": "warning",
"sticky": True,
"className": "bg-warning",
}
)
else:
tags = tag_response["taglist"]
for tag in tags:
if tag["tagid"] == self.tagid:
self.name = tag["tagname"]
result = self.download_wecom_tag_member(
self, wxapi, tag["tagid"], self.company_id
)
                    if result:  # a non-empty dict signals a failed member download
params.update(
{
"title": _("Fail"),
"message": _("Tag downloaded failed."),
"type": "warning",
"sticky": True,
"className": "bg-warning",
}
)
action = {
"type": "ir.actions.client",
"tag": "display_notification",
"params": params,
}
return action
message = _("Tag downloaded successfully.")
params.update(
{
"title": _("Success"),
"message": message,
"type": "success",
"sticky": True, # 延时关闭
"className": "bg-success",
"next": {"type": "ir.actions.client", "tag": "reload",},
}
)
finally:
action = {
"type": "ir.actions.client",
"tag": "display_notification",
"params": params,
}
return action
@api.model
def download_wecom_tags(self):
"""
        Download the WeCom tag list into hr.employee.category
"""
start_time = time.time()
company = self.env.context.get("company_id")
tasks = []
if not company:
company = self.env.company
if company.is_wecom_organization:
try:
wxapi = self.env["wecom.service_api"].InitServiceApi(
company.corpid, company.contacts_app_id.secret
)
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"TAG_GET_LIST"
)
)
except ApiException as ex:
end_time = time.time()
self.env["wecomapi.tools.action"].ApiExceptionDialog(
ex, raise_exception=False
)
tasks = [
{
"name": "download_tag_data",
"state": False,
"time": end_time - start_time,
"msg": str(ex),
}
]
except Exception as e:
end_time = time.time()
tasks = [
{
"name": "download_tag_data",
"state": False,
"time": end_time - start_time,
"msg": str(e),
}
]
else:
tags = response["taglist"]
for tag in tags:
category = self.search(
[
("tagid", "=", tag["tagid"]),
("company_id", "=", company.id),
],
limit=1,
)
if not category:
category.create(
{
"name": tag["tagname"],
"tagid": tag["tagid"],
"is_wecom_tag": True,
}
)
else:
category.write(
{"name": tag["tagname"], "is_wecom_tag": True,}
)
result = self.download_wecom_tag_member(
category, wxapi, tag["tagid"], company
)
if result:
tasks.append(
{
"name": "download_tag_members",
"state": False,
"time": 0,
"msg": _(
"Failed to download tag [%s] member of company [%s]!"
)
% (tag["tagname"], company.name),
}
)
finally:
end_time = time.time()
task = {
"name": "download_tag_data",
"state": True,
"time": end_time - start_time,
"msg": _("Tag list downloaded successfully."),
}
tasks.append(task)
else:
end_time = time.time()
tasks = [
{
"name": "download_tag_data",
"state": False,
"time": end_time - start_time,
"msg": _(
"The current company does not identify the enterprise wechat organization. Please configure or switch the company."
),
}
]
return tasks
def download_wecom_tag_member(self, category, wxapi, tagid, company):
"""
        Download WeCom tag members
"""
res = {}
try:
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call(
"TAG_GET_MEMBER"
),
{"tagid": str(tagid)},
)
except ApiException as ex:
self.env["wecomapi.tools.action"].ApiExceptionDialog(
ex, raise_exception=False
)
res = {
"name": "download_tag_members",
"state": False,
"time": 0,
"msg": repr(e),
}
except Exception as e:
res = {
"name": "download_tag_members",
"state": False,
"time": 0,
"msg": repr(e),
}
else:
employee_ids = []
for user in response["userlist"]:
employee = (
self.env["hr.employee"]
.sudo()
.search(
[
("wecom_userid", "=", user["userid"].lower()),
("company_id", "=", company.id),
("is_wecom_user", "=", True),
"|",
("active", "=", True),
("active", "=", False),
],
limit=1,
)
)
if employee:
employee_ids.append(employee.id)
if len(employee_ids) > 0:
category.write({"employee_ids": [(6, 0, employee_ids)]})
department_ids = []
for party in response["partylist"]:
department = (
self.env["hr.department"]
.sudo()
.search(
[
("wecom_department_id", "=", party),
("is_wecom_department", "=", True),
("company_id", "=", company.id),
"|",
("active", "=", True),
("active", "=", False),
],
)
)
if department:
department_ids.append(department.id)
if len(department_ids) > 0:
category.write({"department_ids": [(6, 0, department_ids)]})
finally:
            return res  # return the failure result (an empty dict means success)
def delete_wecom_tag(self):
"""
        Delete a WeCom tag
"""
debug = self.env["ir.config_parameter"].sudo().get_param("wecom.debug_enabled")
company = self.company_id
if not company:
company = self.env.company
if debug:
_logger.info(_("Delete contacts tags: %s to WeCom") % self.name)
params = {}
try:
wxapi = self.env["wecom.service_api"].InitServiceApi(
company.corpid, company.contacts_app_id.secret
)
response = wxapi.httpCall(
self.env["wecom.service_api_list"].get_server_api_call("TAG_DELETE"),
{"tagid": str(self.tagid)},
)
if response["errcode"] == 0:
params = {
"title": _("Success"),
"type": "success",
"message": _("Tag: %s deleted successfully.") % self.name,
"sticky": False, # 延时关闭
"className": "bg-success",
"next": {"type": "ir.actions.client", "tag": "reload",}, # 刷新窗体
}
tag = self.search([("tagid", "=", self.tagid)], limit=1,)
tag.write(
{"is_wecom_tag": False, "tagid": 0,}
)
# tag.unlink()
else:
params = {
"title": _("Failed"),
"type": "danger",
"message": _("Tag: %s deletion failed.") % self.name,
"sticky": False, # 延时关闭
"className": "bg-success",
"next": {"type": "ir.actions.client", "tag": "reload",}, # 刷新窗体
}
action = {
"type": "ir.actions.client",
"tag": "display_notification",
"params": {
"title": params["title"],
"type": params["type"],
"message": params["message"],
"sticky": params["sticky"],
"next": params["next"],
},
}
return action
except ApiException as ex:
return self.env["wecomapi.tools.action"].ApiExceptionDialog(
ex, raise_exception=True
)
# ------------------------------------------------------------
    # WeCom contacts callback events
# ------------------------------------------------------------
def wecom_event_change_contact_tag(self, cmd):
"""
        Update a tag from a contacts callback event
"""
xml_tree = self.env.context.get("xml_tree")
company_id = self.env.context.get("company_id")
xml_tree_str = etree.fromstring(bytes.decode(xml_tree))
dic = lxml_to_dict(xml_tree_str)["xml"]
callback_tag = self.sudo().search(
[("company_id", "=", company_id.id), ("tagid", "=", dic["TagId"])], limit=1,
)
domain = [
"|",
("active", "=", True),
("active", "=", False),
]
employee = (
self.env["hr.employee"]
.sudo()
.search([("company_id", "=", company_id.id)] + domain)
)
department = (
self.env["hr.department"]
.sudo()
.search([("company_id", "=", company_id.id)] + domain)
)
update_dict = {}
for key, value in dic.items():
if (
key == "ToUserName"
or key == "FromUserName"
or key == "CreateTime"
or key == "Event"
or key == "MsgType"
or key == "ChangeType"
):
                # Ignore keys that are not needed
pass
else:
if key in WECOM_USER_MAPPING_ODOO_EMPLOYEE_CATEGORY.keys():
if WECOM_USER_MAPPING_ODOO_EMPLOYEE_CATEGORY[key] != "":
update_dict[
WECOM_USER_MAPPING_ODOO_EMPLOYEE_CATEGORY[key]
] = value
else:
_logger.info(
_(
"There is no mapping for field [%s], please contact the developer."
)
% key
)
# print("update_dict", update_dict)
add_employee_list = []
del_employee_list = []
add_department_list = []
del_department_list = []
if "add_employee_ids" in update_dict.keys():
for wecom_userid in update_dict["add_employee_ids"].split(","):
add_employee_list.append(
employee.search(
[("wecom_userid", "=", wecom_userid.lower())], limit=1
).id
)
elif "del_employee_ids" in update_dict.keys():
for wecom_userid in update_dict["del_employee_ids"].split(","):
del_employee_list.append(
employee.search(
[("wecom_userid", "=", wecom_userid.lower())], limit=1
).id
)
elif "add_department_ids" in update_dict.keys():
for wecom_department_id in update_dict["add_department_ids"].split(","):
add_department_list.append(
department.search(
[("wecom_department_id", "=", wecom_department_id)], limit=1,
).id
)
elif "del_department_ids" in update_dict.keys():
for wecom_department_id in update_dict["del_department_ids"].split(","):
del_department_list.append(
department.search(
[("wecom_department_id", "=", wecom_department_id)], limit=1,
).id
)
# print(
# add_employee_list,
# del_employee_list,
# add_department_list,
# del_department_list,
# )
if len(add_employee_list) > 0:
callback_tag.write(
{"employee_ids": [(4, res, False) for res in add_employee_list]}
)
if len(del_employee_list) > 0:
callback_tag.write(
{"employee_ids": [(3, res, False) for res in del_employee_list]}
)
if len(add_department_list) > 0:
callback_tag.write(
{"department_ids": [(4, res, False) for res in add_department_list]}
)
if len(del_department_list) > 0:
callback_tag.write(
{"department_ids": [(3, res, False) for res in del_department_list]}
)
|
the-stack_0_21795 | def register_elasticapm(client, worker):
"""Given an ElasticAPM client and an RQ worker, registers exception handlers
with the worker so exceptions are logged to the apm server.
E.g.:
from elasticapm.contrib.django.models import client
from elasticapm.contrib.rq import register_elasticapm
worker = Worker(map(Queue, listen))
register_elasticapm(client, worker)
worker.work()
"""
def send_to_server(job, *exc_info):
client.capture_exception(
exc_info=exc_info,
extra={
"job_id": job.id,
"func": job.func_name,
"args": job.args,
"kwargs": job.kwargs,
"description": job.description,
},
)
worker.push_exc_handler(send_to_server)
|
the-stack_0_21796 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object-based saving which use tf.train.* optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import six
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tracking.AutoTrackable):
def __init__(self):
super(NonLayerTrackable, self).__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value),
global_step=optimizer_step)
optimizer.minimize(
lambda: other_model(input_value),
global_step=optimizer_step)
else:
train_op = optimizer.minimize(
model(input_value), global_step=optimizer_step)
optimizer.minimize(
other_model(input_value),
global_step=optimizer_step)
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = graph_view.ObjectGraphView(
root_trackable).serialize_object_graph()
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"optimizer_step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
# The optimizer creates two non-slot variables
"optimizer/beta1_power",
"optimizer/beta2_power",
# Slot variables
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["optimizer_step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual(
"beta1_power",
named_variables["optimizer/beta1_power" + suffix].full_name)
self.assertEqual(
"beta2_power",
named_variables["optimizer/beta2_power" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
self.assertEqual("beta1_power",
optimizer_node.children[0].local_name)
self.assertEqual("beta1_power",
serialized_graph.nodes[optimizer_node.children[0].node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.original_variable_node_id]
.attributes[0].full_name)
# We strip off the :0 suffix, as variable.name-based saving does.
self.assertEqual(
"my_model/dense/kernel/Adam",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.slot_variable_node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(
var=model._named_dense.kernel,
name="m").name)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.original_variable_node_id].attributes[0].checkpoint_key)
self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
self.assertEqual(
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value))
else:
train_op = optimizer.minimize(model(input_value))
# TODO(allenl): Make initialization more pleasant when graph building.
root_trackable.save_counter # pylint: disable=pointless-statement
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_trackable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_trackable.save_counter, 3))
optimizer_variables = self.evaluate(optimizer.variables())
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_trackable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_trackable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.AdamOptimizer(
0.001,
# Preserve beta1_power and beta2_power when applying gradients so we can
# test that they've been restored correctly.
beta1=1.0,
beta2=1.0)
on_create_root = trackable_utils.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
status.assert_nontrivial_match()
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
self.assertAllEqual(optimizer_variables[2:],
self.evaluate(on_create_optimizer.variables()))
dummy_var = variables.Variable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value)
status.assert_existing_objects_matched()
status.assert_consumed()
beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
optimizer.minimize(
lambda: model(input_value), # pylint: disable=cell-var-from-loop
global_step=root.optimizer_step)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testEagerDistributionStrategy(self):
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
def _train_fn(optimizer, model):
input_value = constant_op.constant([[3.]])
optimizer.minimize(
functools.partial(model, input_value),
global_step=root.optimizer_step)
strategy = mirrored_strategy.MirroredStrategy()
with strategy.scope():
for training_continuation in range(3):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer,
model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(
checkpoint_management.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
strategy.extended.call_for_each_replica(
functools.partial(_train_fn, optimizer, model))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testGraphDistributionStrategy(self):
self.skipTest("b/121381184")
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
def _train_fn(optimizer, model):
input_value = constant_op.constant([[3.]])
return optimizer.minimize(
functools.partial(model, input_value),
global_step=root.optimizer_step)
for training_continuation in range(3):
with ops.Graph().as_default():
strategy = mirrored_strategy.MirroredStrategy()
with strategy.scope():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
status = root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
train_op = strategy.extended.call_for_each_replica(
functools.partial(_train_fn, optimizer, model))
with self.session() as session:
if training_continuation > 0:
status.assert_consumed()
status.initialize_or_restore()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.CheckpointV1(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
input_value = constant_op.constant([[3.]])
train_op = optimizer.minimize(
model(input_value),
global_step=root.global_step)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
else:
status.assert_consumed()
status.assert_existing_objects_matched()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.global_step))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
manager = checkpoint_management.CheckpointManager(
root, checkpoint_directory, max_to_keep=1)
status = root.restore(save_path=manager.latest_checkpoint)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
manager.save()
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.AdamOptimizer(0.)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@def_function.function
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def _get_checkpoint_name(self, name):
root = tracking.AutoTrackable()
trackable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
(named_variable,), _, _ = trackable_utils._serialize_object_graph(
root, saveables_cache=None)
with ops.name_scope("root/" + named_variable.name):
pass # Make sure we can use this as an op name if we prefix it.
return named_variable.name
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = variables.Variable(0.0)
self.b = variables.Variable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.AdamOptimizer(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = trackable_utils.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def test_initialize_if_not_restoring(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
model=model, # Do not save the optimizer with the checkpoint.
global_step=training_util.get_or_create_global_step())
optimizer_checkpoint = trackable_utils.Checkpoint(
optimizer=optimizer)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
self.evaluate([v.initializer for v in optimizer.variables()])
train_fn()
model_save_path = root.save(file_prefix=checkpoint_prefix)
self.evaluate(optimizer.variables()[0].assign(42.))
optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)
# Restore into a graph with the optimizer
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
status = root.restore(save_path=model_save_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
train_fn()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
# Make sure initialization doesn't clobber later restores
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001, beta1=1.0)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
opt_root = trackable_utils.Checkpoint(
optimizer=optimizer)
status = root.restore(save_path=model_save_path)
init_only_optimizer_status = opt_root.restore(save_path=None)
optimizer_status = opt_root.restore(save_path=optimizer_save_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
optimizer_status.run_restore_ops()
status.initialize_or_restore()
init_only_optimizer_status.initialize_or_restore()
train_fn()
self.assertEqual(42., self.evaluate(optimizer.variables()[0]))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.], self.evaluate(root_trackable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m")))
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = saver_lib.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = trackable_utils.TrackableSaver(
graph_view.ObjectGraphView(root))
self._set_sentinels(root)
status = object_saver.restore(save_path)
if context.executing_eagerly():
self._check_sentinels(root)
if context.executing_eagerly():
status.assert_consumed()
status.assert_existing_objects_matched()
status.assert_nontrivial_match()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_consumed()
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_existing_objects_matched()
with self.assertRaisesRegex(AssertionError, "not restored"):
status.assert_nontrivial_match()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
self._check_sentinels(root)
# Check that there is no error when keys are missing from the name-based
# checkpoint.
root.not_in_name_checkpoint = variables.Variable([1.])
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
|
the-stack_0_21797 | import time
import os
import re
import requests
import platform
import yaml
import subprocess
from pathlib import Path
from utils import (
get_arch,
kubectl,
wait_for_pod_state,
kubectl_get,
wait_for_installation,
docker,
update_yaml_with_arch,
run_until_success,
)
TEMPLATES = Path(__file__).absolute().parent / "templates"
def validate_dns_dashboard():
"""
Validate the dashboard addon by trying to access the kubernetes dashboard.
The dashboard will return an HTML indicating that it is up and running.
"""
wait_for_pod_state(
"", "kube-system", "running", label="k8s-app=kubernetes-dashboard"
)
wait_for_pod_state(
"", "kube-system", "running", label="k8s-app=dashboard-metrics-scraper"
)
attempt = 30
while attempt > 0:
try:
output = kubectl(
"get "
"--raw "
"/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/"
)
if "Kubernetes Dashboard" in output:
break
except subprocess.CalledProcessError:
pass
time.sleep(10)
attempt -= 1
assert attempt > 0
def validate_storage():
"""
Validate storage by creating a PVC.
"""
output = kubectl("describe deployment hostpath-provisioner -n kube-system")
if "hostpath-provisioner-{}:1.0.0".format(get_arch()) in output:
# we are running with a hostpath-provisioner that is old and we need to patch it
kubectl(
"set image deployment hostpath-provisioner -n kube-system hostpath-provisioner=cdkbot/hostpath-provisioner:1.1.0"
)
wait_for_pod_state(
"", "kube-system", "running", label="k8s-app=hostpath-provisioner"
)
manifest = TEMPLATES / "pvc.yaml"
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("hostpath-test-pod", "default", "running")
attempt = 50
while attempt >= 0:
output = kubectl("get pvc")
if "Bound" in output:
break
time.sleep(2)
attempt -= 1
    # Make sure the test pod writes data to the storage
found = False
for root, dirs, files in os.walk("/var/snap/microk8s/common/default-storage"):
for file in files:
if file == "dates":
found = True
assert found
assert "myclaim" in output
assert "Bound" in output
kubectl("delete -f {}".format(manifest))
def common_ingress():
"""
Perform the Ingress validations that are common for all
the Ingress controllers.
"""
attempt = 50
while attempt >= 0:
output = kubectl("get ing")
if "microbot.127.0.0.1.nip.io" in output:
break
time.sleep(5)
attempt -= 1
assert "microbot.127.0.0.1.nip.io" in output
service_ok = False
attempt = 50
while attempt >= 0:
try:
resp = requests.get("http://microbot.127.0.0.1.nip.io/")
if resp.status_code == 200 and "microbot.png" in resp.content.decode(
"utf-8"
):
service_ok = True
break
except requests.RequestException:
time.sleep(5)
attempt -= 1
assert service_ok
def validate_ingress():
"""
Validate ingress by creating a ingress rule.
"""
daemonset = kubectl("get ds")
if "nginx-ingress-microk8s-controller" in daemonset:
wait_for_pod_state("", "default", "running", label="app=default-http-backend")
wait_for_pod_state(
"", "default", "running", label="name=nginx-ingress-microk8s"
)
else:
wait_for_pod_state(
"", "ingress", "running", label="name=nginx-ingress-microk8s"
)
manifest = TEMPLATES / "ingress.yaml"
update_yaml_with_arch(manifest)
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("", "default", "running", label="app=microbot")
common_ingress()
kubectl("delete -f {}".format(manifest))
def validate_gpu():
"""
Validate gpu by trying a cuda-add.
"""
if platform.machine() != "x86_64":
print("GPU tests are only relevant in x86 architectures")
return
wait_for_pod_state(
"",
"gpu-operator-resources",
"running",
label="app=nvidia-device-plugin-daemonset",
)
manifest = TEMPLATES / "cuda-add.yaml"
get_pod = kubectl_get("po")
if "cuda-vector-add" in str(get_pod):
# Cleanup
kubectl("delete -f {}".format(manifest))
time.sleep(10)
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("cuda-vector-add", "default", "terminated")
result = kubectl("logs pod/cuda-vector-add")
assert "PASSED" in result
def validate_registry():
"""
Validate the private registry.
"""
wait_for_pod_state("", "container-registry", "running", label="app=registry")
pvc_stdout = kubectl("get pvc registry-claim -n container-registry -o yaml")
pvc_yaml = yaml.safe_load(pvc_stdout)
storage = pvc_yaml["spec"]["resources"]["requests"]["storage"]
assert re.match("(^[2-9][0-9]{1,}|^[1-9][0-9]{2,})(Gi$)", storage)
docker("pull busybox")
docker("tag busybox localhost:32000/my-busybox")
docker("push localhost:32000/my-busybox")
manifest = TEMPLATES / "bbox-local.yaml"
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("busybox", "default", "running")
output = kubectl("describe po busybox")
assert "localhost:32000/my-busybox" in output
kubectl("delete -f {}".format(manifest))
def validate_forward():
"""
Validate ports are forwarded
"""
manifest = TEMPLATES / "nginx-pod.yaml"
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("", "default", "running", label="app=nginx")
os.system("killall kubectl")
os.system("/snap/bin/microk8s.kubectl port-forward pod/nginx 5123:80 &")
attempt = 10
while attempt >= 0:
try:
resp = requests.get("http://localhost:5123")
if resp.status_code == 200:
break
except requests.RequestException:
pass
attempt -= 1
time.sleep(2)
assert resp.status_code == 200
os.system("killall kubectl")
def validate_metrics_server():
"""
Validate the metrics server works
"""
wait_for_pod_state("", "kube-system", "running", label="k8s-app=metrics-server")
attempt = 30
while attempt > 0:
try:
output = kubectl("get --raw /apis/metrics.k8s.io/v1beta1/pods")
if "PodMetricsList" in output:
break
except subprocess.CalledProcessError:
pass
time.sleep(10)
attempt -= 1
assert attempt > 0
def validate_prometheus():
"""
Validate the prometheus operator
"""
if platform.machine() != "x86_64":
print("Prometheus tests are only relevant in x86 architectures")
return
wait_for_pod_state("prometheus-k8s-0", "monitoring", "running", timeout_insec=1200)
wait_for_pod_state(
"alertmanager-main-0", "monitoring", "running", timeout_insec=1200
)
def validate_rbac():
"""
Validate RBAC is actually on
"""
output = kubectl(
"auth can-i --as=system:serviceaccount:default:default view pod", err_out="no"
)
assert "no" in output
output = kubectl("auth can-i --as=admin --as-group=system:masters view pod")
assert "yes" in output
def validate_metallb_config(ip_ranges="192.168.0.105"):
"""
Validate Metallb
"""
if platform.machine() != "x86_64":
print("Metallb tests are only relevant in x86 architectures")
return
out = kubectl("get configmap config -n metallb-system -o jsonpath='{.data.config}'")
for ip_range in ip_ranges.split(","):
assert ip_range in out
def validate_coredns_config(ip_ranges="8.8.8.8,1.1.1.1"):
"""
Validate dns
"""
out = kubectl("get configmap coredns -n kube-system -o jsonpath='{.data.Corefile}'")
expected_forward_val = "forward ."
for ip_range in ip_ranges.split(","):
expected_forward_val = expected_forward_val + " " + ip_range
assert expected_forward_val in out
def validate_mayastor():
"""
Validate mayastor. Waits for the mayastor control plane to come up,
then ensures that we can create a test pod with a PVC.
"""
wait_for_pod_state("", "mayastor", "running", label="app=mayastor")
manifest = TEMPLATES / "mayastor-pvc.yaml"
kubectl("apply -f {}".format(manifest))
wait_for_pod_state("mayastor-test-pod", "default", "running")
attempt = 50
while attempt >= 0:
output = kubectl("get pvc")
if "Bound" in output:
break
time.sleep(2)
attempt -= 1
kubectl("delete -f {}".format(manifest))
def validate_cert_manager():
"""
Validate cert-manager. Wait for cert-manager deployment to come up,
then deploys a custom ingress and waits for the certificate to become ready.
"""
wait_for_pod_state(
"", "cert-manager", "running", label="app.kubernetes.io/name=cert-manager"
)
manifest = TEMPLATES / "cert-manager-aio-test.yaml"
kubectl("apply -f {}".format(manifest))
kubectl("wait cert/mock-ingress-tls --for=condition=ready=true")
kubectl("delete -f {}".format(manifest))
|
the-stack_0_21799 | import sys
import os
import re
for old_file_name in os.listdir('templatr'):
with open('templatr/{}'.format(old_file_name), 'r') as old_file:
f = old_file.read()
f = re.sub('## Base16 (.+)', r'scheme: "\1"', f)
f = re.sub('# Author: (.+)', r'author: "\1"', f)
f = re.sub('set \$base(..) #(......)', r'base\1: "\2"', f)
f = re.sub('#.*\n', r'', f)
new_file_name = re.sub('\.config$', '.yaml', old_file_name)
new_file_name = re.sub('^base16-', '', new_file_name)
with open('schemes/{}'.format(new_file_name), 'w') as new_file:
new_file.write(f)
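# Rough before/after sketch, inferred from the regexes above rather than from the
# actual template files: an input such as templatr/base16-ocean.config containing
#
#   ## Base16 Ocean
#   # Author: Chris Kempson
#   set $base00 #2b303b
#
# would be rewritten to schemes/ocean.yaml containing
#
#   scheme: "Ocean"
#   author: "Chris Kempson"
#   base00: "2b303b"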
|
the-stack_0_21802 | import numpy as np
from flask import Flask, request, jsonify, render_template
import tensorflow as tf
from tensorflow import keras
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Needed for the hvd.DistributedOptimizer reference in the custom_objects mapping
# used by load_model below; the tf.keras flavor of Horovod is assumed here.
import horovod.tensorflow.keras as hvd
app = Flask(__name__)
## Load the model
# model = pickle.load(open('model.pkl', 'rb'))
# model = tf.keras.models.load_model('hotel_review_model.h5')
model = tf.keras.models.load_model('hotel_review_model.h5', custom_objects={
'Adam': lambda **kwargs: hvd.DistributedOptimizer(keras.optimizers.Adam(**kwargs))
})
# with open('model.pkl', 'rb') as f:
# model = pickle.load(f)
## Load the tokenizer
with open('tokenizer.pkl', 'rb') as f:
tokenizer = pickle.load(f)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
int_features = [request.form['review']]
final_features = request.form['review']
sample_sequences = tokenizer.texts_to_sequences(int_features)
fakes_padded = pad_sequences(sample_sequences, padding='post', maxlen=50)
output = model.predict(fakes_padded)
print(type(output))
if output>0.6:
sentiment_pred = 'Positive review'
elif output<0.4:
sentiment_pred = 'Negative review'
else:
sentiment_pred = 'Neutral review'
for x in range(len(int_features)):
print(int_features[x])
print(output[x])
print('\n')
return render_template('index.html', prediction_text='Predicted Sentiment {}'.format(output), text=final_features, sentiment=sentiment_pred)
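# Example request against /predict (a sketch: it assumes the app is served on the
# default Flask development port and uses the "review" form field read above):
#
#   curl -X POST -d "review=The room was spotless and the staff were lovely" \
#        http://127.0.0.1:5000/predict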
@app.route('/results',methods=['POST'])
def results():
data = request.get_json(force=True)
prediction = model.predict([np.array(list(data.values()))])
    output = prediction[0]
    # Convert the numpy array to a plain list so it is JSON serializable
    return jsonify(output.tolist())
if __name__ == "__main__":
app.run(debug=True) |
the-stack_0_21803 | from requests import get
from selectorlib import Extractor
def _walgreens(walgreens_url):
extractor = Extractor.from_yaml_string("""
name:
css: 'h1.product-name span.wag-text-black'
xpath: null
type: Text
dollars:
css: 'span.price__contain span.product__price span'
xpath: null
type: Text
cents:
css: 'span.price__contain sup:nth-of-type(2)'
xpath: null
type: Text
""")
headers = {
'authority': 'www.walgreens.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'dnt': '1',
'upgrade-insecure-requests': '1',
'user-agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',
'accept':
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'none',
'sec-fetch-mode': 'navigate',
'sec-fetch-dest': 'document',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'
}
website = get(walgreens_url, headers=headers)
productdata = extractor.extract(website.text)
price = str(productdata["dollars"]) + "." + str(productdata["cents"])
    if productdata["dollars"] is None and productdata["cents"] is None:
productdata["price"] = None
else:
del productdata["dollars"]
del productdata["cents"]
productdata["price"] = price
return productdata
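if __name__ == "__main__":
    # Minimal usage sketch; the URL below is a placeholder product page, not a
    # verified Walgreens link.
    sample_url = "https://www.walgreens.com/store/c/example-product/ID=300400000-product"
    print(_walgreens(sample_url))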
|
the-stack_0_21805 | #LeetCode problem 200: Number of Islands
class Solution:
def check(self,grid,nodesVisited,row,col,m,n):
return (row>=0 and row<m and col>=0 and col<n and grid[row][col]=="1" and nodesVisited[row][col]==0)
def dfs(self,grid,nodesVisited,row,col,m,n):
a=[-1,1,0,0]
b=[0,0,1,-1]
nodesVisited[row][col]=1
for k in range(4):
if self.check(grid,nodesVisited,row+a[k],col+b[k],m,n):
self.dfs(grid,nodesVisited,row+a[k],col+b[k],m,n)
def numIslands(self, grid: List[List[str]]) -> int:
nodesVisited=[[0 for i in range(len(grid[0]))] for i in range(len(grid))]
count=0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j]=="0":
continue
elif grid[i][j]=="1" and nodesVisited[i][j]==0:
count+=1
self.dfs(grid,nodesVisited,i,j,len(grid),len(grid[0]))
return count |
the-stack_0_21808 | """Sample Wiki syntax extension plugin."""
from genshi.builder import tag
from trac.core import *
from trac.util.text import shorten_line
from trac.versioncontrol.api import NoSuchChangeset, RepositoryManager
from trac.versioncontrol.web_ui import ChangesetModule
from trac.wiki.api import IWikiSyntaxProvider
revision = "$Rev: 9156 $"
url = "$URL: https://svn.edgewall.org/repos/trac/trunk/sample-plugins/revision_links.py $"
class RevisionLinks(Component):
"""Adds a few more ways to refer to changesets."""
implements(IWikiSyntaxProvider)
KEYWORDS = ['[Rr]ev(?:ision)?', '[Cc]hangeset']
# IWikiSyntaxProvider methods
def get_wiki_syntax(self):
def revlink(f, match, fullmatch):
elts = match.split()
rev = elts[1] # ignore keyword
reponame = ''
if len(elts) > 2: # reponame specified
reponame = elts[-1]
return self._format_revision_link(f, 'revision', reponame, rev, rev,
fullmatch)
yield (r"!?(?:%s)\s+%s(?:\s+in\s+\w+)?" %
("|".join(self.KEYWORDS), ChangesetModule.CHANGESET_ID), revlink)
def get_link_resolvers(self):
def resolverev(f, ns, rev, label, fullmatch):
return self._format_revision_link(f, ns, '', rev, label, fullmatch)
yield ('revision', resolverev)
def _format_revision_link(self, formatter, ns, reponame, rev, label,
fullmatch=None):
rev, params, fragment = formatter.split_link(rev)
try:
repos = RepositoryManager(self.env).get_repository(reponame)
if repos:
changeset = repos.get_changeset(rev)
return tag.a(label, class_="changeset",
title=shorten_line(changeset.message),
href=(formatter.href.changeset(rev) +
params + fragment))
except NoSuchChangeset:
pass
return tag.a(label, class_="missing changeset", rel="nofollow",
href=formatter.href.changeset(rev))
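# Illustrative wiki markup handled by this provider, derived from the KEYWORDS and
# ChangesetModule.CHANGESET_ID patterns above (not an exhaustive list):
#
#   rev 123                -> changeset 123 in the default repository
#   Changeset 456 in docs  -> changeset 456 in the "docs" repository
#   revision:789           -> resolved through the "revision" link resolver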
|
the-stack_0_21809 | # base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# Materia scheme by Defman21
base00 = "#263238"
base01 = "#2C393F"
base02 = "#37474F"
base03 = "#707880"
base04 = "#C9CCD3"
base05 = "#CDD3DE"
base06 = "#D5DBE5"
base07 = "#FFFFFF"
base08 = "#EC5F67"
base09 = "#EA9560"
base0A = "#FFCC00"
base0B = "#8BD649"
base0C = "#80CBC4"
base0D = "#89DDFF"
base0E = "#82AAFF"
base0F = "#EC5F67"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base01
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base01
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base0A
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base0A
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base0A
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base08
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Background color of disabled items in the context menu.
c.colors.contextmenu.disabled.bg = base01
# Foreground color of disabled items in the context menu.
c.colors.contextmenu.disabled.fg = base04
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.bg = base00
# Foreground color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.fg = base05
# Background color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.bg = base0A
#Foreground color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.fg = base01
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base0A
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base01
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base01
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0C
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base07
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base07
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base05
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base00
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base05
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base0E
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base00
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base05
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base00
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base05
# Background color for webpages if unset (or empty to use the theme's
# color).
# c.colors.webpage.bg = base00
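# To use this theme (assuming it is saved as, e.g., ~/.config/qutebrowser/theme.py),
# source it from config.py:
#
#   config.source('theme.py')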
|
the-stack_0_21812 | import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import LeakyReLU
import numpy as np
tf.enable_eager_execution()
VERBOSE = 2
#Datasets
train_dataset_location = "wdbc_train_small.csv"
test_dataset_location = "wdbc.csv"
#Read training data and convert target to one-hot for use with categorical_cross_entropy per documentation
dataset = np.loadtxt(open(train_dataset_location), delimiter=',')
X_train = dataset[:,0:30].astype(np.float64)
Y_train = dataset[:,30].astype(int)
Y_train = tf.keras.utils.to_categorical(Y_train)
#Custom loss function
def my_cat_crossentropy(target,output,from_logits=False,axis=-1):
return tf.nn.softmax_cross_entropy_with_logits_v2(labels=target,logits=output)
my_batch_size = 20
#Model defintion:
my_model = Sequential()
my_model.add(Dense(15, input_dim=30, kernel_initializer='glorot_normal'))
my_model.add(Dropout(0.1))
my_model.add(Dense(2, kernel_initializer='glorot_normal',kernel_regularizer=tf.keras.regularizers.l2(l=0.01)))
#Training model using multi-stage optimiser:
#Stage 1
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.005,beta1=0.85,beta2=0.95), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=100, verbose=VERBOSE, shuffle=True)
#Stage 2
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.001,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=150, verbose=VERBOSE, shuffle=True)# new
#Stage 3
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.0005,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=200, verbose=VERBOSE, shuffle=True)# new
#Stage 4
my_model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.0001,beta1=0.9,beta2=0.99), loss=my_cat_crossentropy, metrics=['accuracy'])
my_model.fit(x=X_train, y=Y_train, batch_size=my_batch_size, epochs=250, verbose=VERBOSE, shuffle=True)# new
#Evaluate model on training data
print("evaluation on training data", my_model.evaluate(x=X_train, y=Y_train, batch_size=my_batch_size))
# Read testing data and convert target to one-hot for use with categorical cross entropy per documentation
ds = np.loadtxt(open(test_dataset_location),delimiter=',')
X_test = ds[:,0:30].astype(np.float64)
Y_test = ds[:,30].astype(int)
Y_test = tf.keras.utils.to_categorical(Y_test)
#Evaluate model on test data
print("evaluation on test data", my_model.evaluate(x=X_test, y=Y_test, batch_size=my_batch_size))
|
the-stack_0_21814 | import os
import sys
import tempfile
import pytest
import mxnet as mx
import numpy as np
from numpy.testing import assert_almost_equal
from gluonnlp.data.loading import NumpyDataset, DatasetLoader
from gluonnlp.data.sampler import SplitSampler, FixedBucketSampler
mx.npx.set_np()
def prepare_dataset(filename, allow_pickle=False):
return NumpyDataset(filename[0], allow_pickle=allow_pickle)
def prepare_bucket_sampler(dataset, batch_size, shuffle=False, num_buckets=1):
lengths = dataset.transform(lambda x: len(x))
sampler = FixedBucketSampler(lengths,
batch_size=batch_size,
num_buckets=num_buckets,
ratio=0,
shuffle=shuffle)
return sampler
@pytest.mark.skipif(sys.version_info >= (3, 8),
reason='The test fails everytime in python3.8 due to the issues'
' in MXNet: '
'https://github.com/apache/incubator-mxnet/issues/17782, '
'https://github.com/apache/incubator-mxnet/issues/17774')
def test_dataset_loader():
with tempfile.TemporaryDirectory() as root:
num_files = 5
for i in range(num_files):
np.save(os.path.join(root, 'part_{}.npy'.format(i)),
np.random.uniform(size=(100, 20)))
data = os.path.join(root)
split_sampler = SplitSampler(num_files, num_parts=1, part_index=0, shuffle=False)
dataset_params = {'allow_pickle': True}
sampler_params = {'batch_size': 2}
all_data = np.load(os.path.join(data, 'part_{}.npy'.format(0)))
for i in range(1, num_files):
all_data = np.concatenate((all_data,
np.load(os.path.join(data, 'part_{}.npy'.format(i)))))
for num_dataset_workers in [1, 2]:
for num_batch_workers in [1, 2]:
dataloader = DatasetLoader(os.path.join(data, '*.npy'),
file_sampler=split_sampler,
dataset_fn=prepare_dataset,
dataset_params=dataset_params,
batch_sampler_fn=prepare_bucket_sampler,
batch_sampler_params=sampler_params,
num_dataset_workers=num_dataset_workers,
num_batch_workers=num_batch_workers,
pin_memory=True,
circle_length=1)
for i, x in enumerate(dataloader):
assert_almost_equal(x.asnumpy(), all_data[i * 2:(i + 1) * 2])
# test cache
split_sampler = SplitSampler(1, num_parts=1, part_index=0,
repeat=2, shuffle=False)
X = np.load(os.path.join(data, 'part_{}.npy'.format(0)))
X = np.concatenate((X, X))
for num_dataset_workers in [1]:
for num_batch_workers in [1]:
dataloader = DatasetLoader(os.path.join(data, 'part_0.npy'),
file_sampler=split_sampler,
dataset_fn=prepare_dataset,
dataset_params=dataset_params,
batch_sampler_fn=prepare_bucket_sampler,
batch_sampler_params=sampler_params,
num_dataset_workers=num_dataset_workers,
num_batch_workers=num_batch_workers,
pin_memory=True,
circle_length=1,
dataset_cached=True,
num_max_dataset_cached=1)
for i, x in enumerate(dataloader):
assert_almost_equal(x.asnumpy(), X[i * 2:(i + 1) * 2])
|