the-stack_0_17360
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.execution_engine import ExecutionEngine, PandasExecutionEngine
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...data_asset.util import parse_result_format
from ...render.renderer.renderer import renderer
from ...render.types import RenderedStringTemplateContent
from ...render.util import (
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from ..expectation import (
ColumnMapExpectation,
Expectation,
InvalidExpectationConfigurationError,
TableExpectation,
_format_map_output,
)
from ..registry import extract_metrics
class ExpectTableRowCountToEqual(TableExpectation):
"""Expect the number of rows to equal a value.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.validator.validator.Validator.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of rows.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
"""
metric_dependencies = ("table.row_count",)
success_keys = ("value",)
default_kwarg_values = {
"value": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
"meta": None,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
value = configuration.kwargs.get("value")
try:
assert value is not None, "An expected row count must be provided"
            if not isinstance(value, (int, dict)):
                raise ValueError(
                    "Provided row count must be an integer (or an Evaluation Parameter dict)"
                )
if isinstance(value, dict):
assert (
"$PARAMETER" in value
), 'Evaluation Parameter dict for value kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["value", "row_condition", "condition_parser"],
)
template_str = "Must have exactly $value rows."
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = (
conditional_template_str
+ ", then "
+ template_str[0].lower()
+ template_str[1:]
)
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
expected_table_row_count = self.get_success_kwargs().get("value")
actual_table_row_count = metrics.get("table.row_count")
return {
"success": actual_table_row_count == expected_table_row_count,
"result": {"observed_value": actual_table_row_count},
}
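# A minimal usage sketch (hedged; it assumes a configured great_expectations Validator
# named `validator`, and the names below are illustrative, not taken from this file):
#
#     result = validator.expect_table_row_count_to_equal(value=3)
#     assert result.success
#
# The `value` kwarg may also be an evaluation-parameter dict such as
# {"$PARAMETER": "expected_row_count"}, which is what validate_configuration checks for above.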
the-stack_0_17362
"""
Miscellaneous Helpers for NetworkX.
These are not imported into the base networkx namespace but
can be accessed, for example, as
>>> import networkx
>>> networkx.utils.make_list_of_ints({1, 2, 3})
[1, 2, 3]
>>> networkx.utils.arbitrary_element({5, 1, 7}) # doctest: +SKIP
1
"""
from collections import defaultdict, deque
from collections.abc import Iterable, Iterator, Sized
import warnings
import sys
import uuid
from itertools import tee, chain
import networkx as nx
np = nx.lazy_import("numpy")
__all__ = [
"is_string_like",
"iterable",
"empty_generator",
"flatten",
"make_list_of_ints",
"is_list_of_ints",
"make_str",
"generate_unique_node",
"default_opener",
"dict_to_numpy_array",
"dict_to_numpy_array1",
"dict_to_numpy_array2",
"is_iterator",
"arbitrary_element",
"consume",
"pairwise",
"groups",
"to_tuple",
"create_random_state",
"create_py_random_state",
"PythonRandomInterface",
"nodes_equal",
"edges_equal",
"graphs_equal",
]
# some cookbook stuff
# used in deciding whether something is a bunch of nodes, edges, etc.
# see G.add_nodes and others in Graph Class in networkx/base.py
def is_string_like(obj): # from John Hunter, types-free version
"""Check if obj is string.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = (
"is_string_like is deprecated and will be removed in 3.0."
"Use isinstance(obj, str) instead."
)
warnings.warn(msg, DeprecationWarning)
return isinstance(obj, str)
def iterable(obj):
"""Return True if obj is iterable with a well-defined len().
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = (
"iterable is deprecated and will be removed in 3.0."
"Use isinstance(obj, (collections.abc.Iterable, collections.abc.Sized)) instead."
)
warnings.warn(msg, DeprecationWarning)
if hasattr(obj, "__iter__"):
return True
try:
len(obj)
    except TypeError:
return False
return True
def empty_generator():
"""Return a generator with no members.
.. deprecated:: 2.6
"""
warnings.warn(
"empty_generator is deprecated and will be removed in v3.0.", DeprecationWarning
)
return (i for i in ())
def flatten(obj, result=None):
"""Return flattened version of (possibly nested) iterable object."""
if not isinstance(obj, (Iterable, Sized)) or isinstance(obj, str):
return obj
if result is None:
result = []
for item in obj:
if not isinstance(item, (Iterable, Sized)) or isinstance(item, str):
result.append(item)
else:
flatten(item, result)
return tuple(result)
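# Hedged doctest-style illustration of flatten (the values are illustrative):
#
#     >>> flatten([1, [2, (3, 4)], 5])
#     (1, 2, 3, 4, 5)
#     >>> flatten("abc")   # strings are treated as atomic and returned unchanged
#     'abc'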
def make_list_of_ints(sequence):
"""Return list of ints from sequence of integral numbers.
All elements of the sequence must satisfy int(element) == element
or a ValueError is raised. Sequence is iterated through once.
If sequence is a list, the non-int values are replaced with ints.
So, no new list is created
"""
if not isinstance(sequence, list):
result = []
for i in sequence:
errmsg = f"sequence is not all integers: {i}"
try:
ii = int(i)
except ValueError:
raise nx.NetworkXError(errmsg) from None
if ii != i:
raise nx.NetworkXError(errmsg)
result.append(ii)
return result
# original sequence is a list... in-place conversion to ints
for indx, i in enumerate(sequence):
errmsg = f"sequence is not all integers: {i}"
if isinstance(i, int):
continue
try:
ii = int(i)
except ValueError:
raise nx.NetworkXError(errmsg) from None
if ii != i:
raise nx.NetworkXError(errmsg)
sequence[indx] = ii
return sequence
def is_list_of_ints(intlist):
"""Return True if list is a list of ints.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = (
"is_list_of_ints is deprecated and will be removed in 3.0."
"See also: ``networkx.utils.make_list_of_ints.``"
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
if not isinstance(intlist, list):
return False
for i in intlist:
if not isinstance(i, int):
return False
return True
def make_str(x):
"""Returns the string representation of t.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = "make_str is deprecated and will be removed in 3.0. Use str instead."
warnings.warn(msg, DeprecationWarning)
return str(x)
def generate_unique_node():
"""Generate a unique node label.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
msg = "generate_unique_node is deprecated and will be removed in 3.0. Use uuid.uuid4 instead."
warnings.warn(msg, DeprecationWarning)
return str(uuid.uuid4())
def default_opener(filename):
"""Opens `filename` using system's default program.
.. deprecated:: 2.6
default_opener is deprecated and will be removed in version 3.0.
Consider an image processing library to open images, such as Pillow::
from PIL import Image
Image.open(filename).show()
Parameters
----------
filename : str
The path of the file to be opened.
"""
warnings.warn(
"default_opener is deprecated and will be removed in version 3.0. ",
DeprecationWarning,
)
from subprocess import call
cmds = {
"darwin": ["open"],
"linux": ["xdg-open"],
"linux2": ["xdg-open"],
"win32": ["cmd.exe", "/C", "start", ""],
}
cmd = cmds[sys.platform] + [filename]
call(cmd)
def dict_to_numpy_array(d, mapping=None):
"""Convert a dictionary of dictionaries to a numpy array
with optional mapping."""
try:
return dict_to_numpy_array2(d, mapping)
except (AttributeError, TypeError):
# AttributeError is when no mapping was provided and v.keys() fails.
# TypeError is when a mapping was provided and d[k1][k2] fails.
return dict_to_numpy_array1(d, mapping)
def dict_to_numpy_array2(d, mapping=None):
"""Convert a dictionary of dictionaries to a 2d numpy array
with optional mapping.
"""
if mapping is None:
s = set(d.keys())
for k, v in d.items():
s.update(v.keys())
mapping = dict(zip(s, range(len(s))))
n = len(mapping)
a = np.zeros((n, n))
for k1, i in mapping.items():
for k2, j in mapping.items():
try:
a[i, j] = d[k1][k2]
except KeyError:
pass
return a
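# Hedged illustration of dict_to_numpy_array2 with an explicit mapping
# (the input dict is illustrative):
#
#     >>> dict_to_numpy_array2({"a": {"b": 2}, "b": {"a": 1}}, mapping={"a": 0, "b": 1})
#     array([[0., 2.],
#            [1., 0.]])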
def dict_to_numpy_array1(d, mapping=None):
"""Convert a dictionary of numbers to a 1d numpy array
with optional mapping.
"""
if mapping is None:
s = set(d.keys())
mapping = dict(zip(s, range(len(s))))
n = len(mapping)
a = np.zeros(n)
    for k1, i in mapping.items():
        a[i] = d[k1]
return a
def is_iterator(obj):
"""Returns True if and only if the given object is an iterator object.
.. deprecated:: 2.6.0
Deprecated in favor of ``isinstance(obj, collections.abc.Iterator)``
"""
msg = (
"is_iterator is deprecated and will be removed in version 3.0. "
"Use ``isinstance(obj, collections.abc.Iterator)`` instead."
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
has_next_attr = hasattr(obj, "__next__") or hasattr(obj, "next")
return iter(obj) is obj and has_next_attr
def arbitrary_element(iterable):
"""Returns an arbitrary element of `iterable` without removing it.
This is most useful for "peeking" at an arbitrary element of a set,
but can be used for any list, dictionary, etc., as well.
Parameters
----------
iterable : `abc.collections.Iterable` instance
Any object that implements ``__iter__``, e.g. set, dict, list, tuple,
etc.
Returns
-------
The object that results from ``next(iter(iterable))``
Raises
------
ValueError
If `iterable` is an iterator (because the current implementation of
this function would consume an element from the iterator).
Examples
--------
Arbitrary elements from common Iterable objects:
>>> nx.utils.arbitrary_element([1, 2, 3]) # list
1
>>> nx.utils.arbitrary_element((1, 2, 3)) # tuple
1
>>> nx.utils.arbitrary_element({1, 2, 3}) # set
1
>>> d = {k: v for k, v in zip([1, 2, 3], [3, 2, 1])}
>>> nx.utils.arbitrary_element(d) # dict_keys
1
>>> nx.utils.arbitrary_element(d.values()) # dict values
3
`str` is also an Iterable:
>>> nx.utils.arbitrary_element("hello")
'h'
:exc:`ValueError` is raised if `iterable` is an iterator:
>>> iterator = iter([1, 2, 3]) # Iterator, *not* Iterable
>>> nx.utils.arbitrary_element(iterator)
Traceback (most recent call last):
...
ValueError: cannot return an arbitrary item from an iterator
Notes
-----
This function does not return a *random* element. If `iterable` is
ordered, sequential calls will return the same value::
>>> l = [1, 2, 3]
>>> nx.utils.arbitrary_element(l)
1
>>> nx.utils.arbitrary_element(l)
1
"""
if isinstance(iterable, Iterator):
raise ValueError("cannot return an arbitrary item from an iterator")
# Another possible implementation is ``for x in iterable: return x``.
return next(iter(iterable))
# Recipe from the itertools documentation.
def consume(iterator):
"""Consume the iterator entirely.
.. deprecated:: 2.6
This is deprecated and will be removed in NetworkX v3.0.
"""
# Feed the entire iterator into a zero-length deque.
msg = (
"consume is deprecated and will be removed in version 3.0. "
"Use ``collections.deque(iterator, maxlen=0)`` instead."
)
warnings.warn(msg, DeprecationWarning, stacklevel=2)
deque(iterator, maxlen=0)
# Recipe from the itertools documentation.
def pairwise(iterable, cyclic=False):
"s -> (s0, s1), (s1, s2), (s2, s3), ..."
a, b = tee(iterable)
first = next(b, None)
if cyclic is True:
return zip(a, chain(b, (first,)))
return zip(a, b)
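# Hedged doctest-style illustration of pairwise (the values are illustrative):
#
#     >>> list(pairwise([1, 2, 3, 4]))
#     [(1, 2), (2, 3), (3, 4)]
#     >>> list(pairwise([1, 2, 3, 4], cyclic=True))
#     [(1, 2), (2, 3), (3, 4), (4, 1)]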
def groups(many_to_one):
"""Converts a many-to-one mapping into a one-to-many mapping.
`many_to_one` must be a dictionary whose keys and values are all
:term:`hashable`.
The return value is a dictionary mapping values from `many_to_one`
to sets of keys from `many_to_one` that have that value.
Examples
--------
>>> from networkx.utils import groups
>>> many_to_one = {"a": 1, "b": 1, "c": 2, "d": 3, "e": 3}
>>> groups(many_to_one) # doctest: +SKIP
{1: {'a', 'b'}, 2: {'c'}, 3: {'e', 'd'}}
"""
one_to_many = defaultdict(set)
for v, k in many_to_one.items():
one_to_many[k].add(v)
return dict(one_to_many)
def to_tuple(x):
"""Converts lists to tuples.
Examples
--------
>>> from networkx.utils import to_tuple
>>> a_list = [1, 2, [1, 4]]
>>> to_tuple(a_list)
(1, 2, (1, 4))
"""
if not isinstance(x, (tuple, list)):
return x
return tuple(map(to_tuple, x))
def create_random_state(random_state=None):
"""Returns a numpy.random.RandomState or numpy.random.Generator instance
depending on input.
Parameters
----------
random_state : int or NumPy RandomState or Generator instance, optional (default=None)
If int, return a numpy.random.RandomState instance set with seed=int.
if `numpy.random.RandomState` instance, return it.
if `numpy.random.Generator` instance, return it.
if None or numpy.random, return the global random number generator used
by numpy.random.
"""
if random_state is None or random_state is np.random:
return np.random.mtrand._rand
if isinstance(random_state, np.random.RandomState):
return random_state
if isinstance(random_state, int):
return np.random.RandomState(random_state)
if isinstance(random_state, np.random.Generator):
return random_state
msg = (
f"{random_state} cannot be used to create a numpy.random.RandomState or\n"
"numpy.random.Generator instance"
)
raise ValueError(msg)
class PythonRandomInterface:
def __init__(self, rng=None):
try:
import numpy as np
except ImportError:
msg = "numpy not found, only random.random available."
warnings.warn(msg, ImportWarning)
if rng is None:
self._rng = np.random.mtrand._rand
else:
self._rng = rng
def random(self):
return self._rng.random()
def uniform(self, a, b):
return a + (b - a) * self._rng.random()
def randrange(self, a, b=None):
if isinstance(self._rng, np.random.Generator):
return self._rng.integers(a, b)
return self._rng.randint(a, b)
# NOTE: the numpy implementations of `choice` don't support strings, so
# this cannot be replaced with self._rng.choice
def choice(self, seq):
if isinstance(self._rng, np.random.Generator):
idx = self._rng.integers(0, len(seq))
else:
idx = self._rng.randint(0, len(seq))
return seq[idx]
def gauss(self, mu, sigma):
return self._rng.normal(mu, sigma)
def shuffle(self, seq):
return self._rng.shuffle(seq)
# Some methods don't match API for numpy RandomState.
# Commented out versions are not used by NetworkX
def sample(self, seq, k):
return self._rng.choice(list(seq), size=(k,), replace=False)
def randint(self, a, b):
if isinstance(self._rng, np.random.Generator):
return self._rng.integers(a, b + 1)
return self._rng.randint(a, b + 1)
# exponential as expovariate with 1/argument,
def expovariate(self, scale):
return self._rng.exponential(1 / scale)
# pareto as paretovariate with 1/argument,
def paretovariate(self, shape):
return self._rng.pareto(shape)
# weibull as weibullvariate multiplied by beta,
# def weibullvariate(self, alpha, beta):
# return self._rng.weibull(alpha) * beta
#
# def triangular(self, low, high, mode):
# return self._rng.triangular(low, mode, high)
#
# def choices(self, seq, weights=None, cum_weights=None, k=1):
# return self._rng.choice(seq
def create_py_random_state(random_state=None):
"""Returns a random.Random instance depending on input.
Parameters
----------
random_state : int or random number generator or None (default=None)
If int, return a random.Random instance set with seed=int.
if random.Random instance, return it.
if None or the `random` package, return the global random number
generator used by `random`.
if np.random package, return the global numpy random number
generator wrapped in a PythonRandomInterface class.
if np.random.RandomState instance, return it wrapped in
PythonRandomInterface
if a PythonRandomInterface instance, return it
"""
import random
try:
import numpy as np
if random_state is np.random:
return PythonRandomInterface(np.random.mtrand._rand)
if isinstance(random_state, np.random.RandomState):
return PythonRandomInterface(random_state)
if isinstance(random_state, PythonRandomInterface):
return random_state
except ImportError:
pass
if random_state is None or random_state is random:
return random._inst
if isinstance(random_state, random.Random):
return random_state
if isinstance(random_state, int):
return random.Random(random_state)
msg = f"{random_state} cannot be used to generate a random.Random instance"
raise ValueError(msg)
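# Hedged illustration of create_py_random_state (assumes `import random` and
# `import numpy as np` at the call site; the seed is illustrative):
#
#     >>> isinstance(create_py_random_state(42), random.Random)
#     True
#     >>> isinstance(create_py_random_state(np.random.RandomState(42)), PythonRandomInterface)
#     True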
def nodes_equal(nodes1, nodes2):
"""Check if nodes are equal.
Equality here means equal as Python objects.
Node data must match if included.
The order of nodes is not relevant.
Parameters
----------
nodes1, nodes2 : iterables of nodes, or (node, datadict) tuples
Returns
-------
bool
True if nodes are equal, False otherwise.
"""
nlist1 = list(nodes1)
nlist2 = list(nodes2)
try:
d1 = dict(nlist1)
d2 = dict(nlist2)
except (ValueError, TypeError):
d1 = dict.fromkeys(nlist1)
d2 = dict.fromkeys(nlist2)
return d1 == d2
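# Hedged illustration of nodes_equal (the values are illustrative):
#
#     >>> nodes_equal([1, 2, 3], [3, 2, 1])
#     True
#     >>> nodes_equal([(1, {"color": "red"})], [(1, {"color": "blue"})])
#     False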
def edges_equal(edges1, edges2):
"""Check if edges are equal.
Equality here means equal as Python objects.
Edge data must match if included.
The order of the edges is not relevant.
Parameters
----------
edges1, edges2 : iterables of with u, v nodes as
edge tuples (u, v), or
edge tuples with data dicts (u, v, d), or
edge tuples with keys and data dicts (u, v, k, d)
Returns
-------
bool
True if edges are equal, False otherwise.
"""
from collections import defaultdict
d1 = defaultdict(dict)
d2 = defaultdict(dict)
c1 = 0
for c1, e in enumerate(edges1):
u, v = e[0], e[1]
data = [e[2:]]
if v in d1[u]:
data = d1[u][v] + data
d1[u][v] = data
d1[v][u] = data
c2 = 0
for c2, e in enumerate(edges2):
u, v = e[0], e[1]
data = [e[2:]]
if v in d2[u]:
data = d2[u][v] + data
d2[u][v] = data
d2[v][u] = data
if c1 != c2:
return False
# can check one direction because lengths are the same.
for n, nbrdict in d1.items():
for nbr, datalist in nbrdict.items():
if n not in d2:
return False
if nbr not in d2[n]:
return False
d2datalist = d2[n][nbr]
for data in datalist:
if datalist.count(data) != d2datalist.count(data):
return False
return True
def graphs_equal(graph1, graph2):
"""Check if graphs are equal.
Equality here means equal as Python objects (not isomorphism).
Node, edge and graph data must match.
Parameters
----------
graph1, graph2 : graph
Returns
-------
bool
True if graphs are equal, False otherwise.
"""
return (
graph1.adj == graph2.adj
and graph1.nodes == graph2.nodes
and graph1.graph == graph2.graph
)
the-stack_0_17363
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
#
"""Userbot module for managing events. One of the main components of the userbot."""
import inspect
import re
import sys
from asyncio import create_subprocess_shell as asyncsubshell
from asyncio import subprocess as asyncsub
from pathlib import Path
from time import gmtime, strftime
from traceback import format_exc
from telethon import events
from AyiinXd import CMD_HANDLER, CMD_LIST, DEFAULT, DEVS, AYIIN2, AYIIN3, AYIIN4, AYIIN5, AYIIN6, AYIIN7, AYIIN8, AYIIN9, AYIIN10, bot
def ayiin_cmd(pattern=None, command=None, **args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
args.get("allow_sudo", False)
if pattern is not None:
if pattern.startswith(r"\#"):
args["pattern"] = re.compile(pattern)
elif pattern.startswith(r"^"):
args["pattern"] = re.compile(pattern)
cmd = pattern.replace("$", "").replace("^", "").replace("\\", "")
try:
CMD_LIST[file_test].append(cmd)
except BaseException:
CMD_LIST.update({file_test: [cmd]})
else:
if len(CMD_HANDLER) == 2:
catreg = "^" + CMD_HANDLER
reg = CMD_HANDLER[1]
elif len(CMD_HANDLER) == 1:
catreg = "^\\" + CMD_HANDLER
reg = CMD_HANDLER
args["pattern"] = re.compile(catreg + pattern)
if command is not None:
cmd = reg + command
else:
cmd = (
(reg +
pattern).replace(
"$",
"").replace(
"\\",
"").replace(
"^",
""))
try:
CMD_LIST[file_test].append(cmd)
except BaseException:
CMD_LIST.update({file_test: [cmd]})
if "allow_edited_updates" in args and args["allow_edited_updates"]:
del args["allow_edited_updates"]
return events.NewMessage(**args)
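# Hedged usage sketch (the handler and pattern below are illustrative, not taken
# from this repository). ayiin_cmd() returns a Telethon events.NewMessage builder,
# so it would typically be passed to a client's .on(...) registration:
#
#     @bot.on(ayiin_cmd(pattern="ping$", outgoing=True))
#     async def _(event):
#         await event.edit("pong")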
def command(**args):
args["func"] = lambda e: e.via_bot_id is None
stack = inspect.stack()
previous_stack_frame = stack[1]
file_test = Path(previous_stack_frame.filename)
file_test = file_test.stem.replace(".py", "")
pattern = args.get("pattern")
    allow_edited_updates = args.pop("allow_edited_updates", False)
    trigger_on_fwd = args.pop("trigger_on_fwd", False)
    groups_only = args.pop("groups_only", False)
    args["incoming"] = args.get("incoming", False)
    args["outgoing"] = True
if bool(args["incoming"]):
args["outgoing"] = False
try:
if pattern is not None and not pattern.startswith("(?i)"):
args["pattern"] = "(?i)" + pattern
except BaseException:
pass
reg = re.compile("(.*)")
if pattern is not None:
try:
cmd = re.search(reg, pattern)
try:
cmd = cmd.group(1).replace(
"$",
"").replace(
"\\",
"").replace(
"^",
"")
except BaseException:
pass
try:
CMD_LIST[file_test].append(cmd)
except BaseException:
CMD_LIST.update({file_test: [cmd]})
except BaseException:
pass
    def decorator(func):
        async def wrapper(check):
            if check.edit_date and check.is_channel and not check.is_group:
                # Ignore edits that take place in channels.
                return
            if not trigger_on_fwd and check.fwd_from:
                return
            if groups_only and not check.is_group:
                await check.respond("`I don't think this is a group.`")
                return
            await func(check)
        if allow_edited_updates:
            bot.add_event_handler(wrapper, events.MessageEdited(**args))
        bot.add_event_handler(wrapper, events.NewMessage(**args))
        return func
    return decorator
def register(**args):
"""Register a new event."""
pattern = args.get("pattern")
disable_edited = args.get("disable_edited", False)
ignore_unsafe = args.get("ignore_unsafe", False)
unsafe_pattern = r"^[^/!#@\$A-Za-z]"
groups_only = args.get("groups_only", False)
trigger_on_fwd = args.get("trigger_on_fwd", False)
disable_errors = args.get("disable_errors", False)
insecure = args.get("insecure", False)
args.get("sudo", False)
args.get("own", False)
if pattern is not None and not pattern.startswith("(?i)"):
args["pattern"] = "(?i)" + pattern
if "disable_edited" in args:
del args["disable_edited"]
if "sudo" in args:
del args["sudo"]
args["incoming"] = True
args["from_users"] = DEVS
if "ignore_unsafe" in args:
del args["ignore_unsafe"]
if "groups_only" in args:
del args["groups_only"]
if "disable_errors" in args:
del args["disable_errors"]
if "trigger_on_fwd" in args:
del args["trigger_on_fwd"]
if "own" in args:
del args["own"]
args["incoming"] = True
args["from_users"] = DEFAULT
if "insecure" in args:
del args["insecure"]
if pattern and not ignore_unsafe:
args["pattern"] = pattern.replace("^.", unsafe_pattern, 1)
def decorator(func):
async def wrapper(check):
if check.edit_date and check.is_channel and not check.is_group:
# Messages sent in channels can be edited by other users.
# Ignore edits that take place in channels.
return
if not trigger_on_fwd and check.fwd_from:
return
if groups_only and not check.is_group:
await check.respond("`I don't think this is a group.`")
return
if check.via_bot_id and not insecure and check.out:
return
try:
await func(check)
except events.StopPropagation:
raise events.StopPropagation
except KeyboardInterrupt:
pass
except BaseException:
# Check if we have to disable it.
# If not silence the log spam on the console,
# with a dumb except.
if not disable_errors:
date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
text = "**✘ AYIIN-USERBOT ERROR REPORT ✘**\n\n"
link = "[Group Support](https://t.me/AyiinXdSupport)"
text += "Jika mau, Anda bisa melaporkan error ini, "
text += f"Cukup forward saja pesan ini ke {link}.\n\n"
ftext = "========== DISCLAIMER =========="
ftext += "\nFile ini HANYA diupload di sini,"
ftext += "\nkami hanya mencatat fakta error dan tanggal,"
ftext += "\nkami menghormati privasi Anda."
ftext += "\nJika mau, Anda bisa melaporkan error ini,"
ftext += "\ncukup forward saja pesan ini ke @AyiinXdSupport"
ftext += "\n================================\n\n"
ftext += "--------BEGIN USERBOT TRACEBACK LOG--------\n"
ftext += "\nTanggal : " + date
ftext += "\nChat ID : " + str(check.chat_id)
ftext += "\nUser ID : " + str(check.sender_id)
ftext += "\n\nEvent Trigger:\n"
ftext += str(check.text)
ftext += "\n\nTraceback info:\n"
ftext += str(format_exc())
ftext += "\n\nError text:\n"
ftext += str(sys.exc_info()[1])
ftext += "\n\n--------END USERBOT TRACEBACK LOG--------"
command = 'git log --pretty=format:"%an: %s" -10'
ftext += "\n\n\n10 commits Terakhir:\n"
process = await asyncsubshell(
command, stdout=asyncsub.PIPE, stderr=asyncsub.PIPE
)
stdout, stderr = await process.communicate()
result = str(stdout.decode().strip()) + \
str(stderr.decode().strip())
ftext += result
with open("error.log", "w+") as file:
file.write(ftext)
if bot:
if not disable_edited:
bot.add_event_handler(wrapper, events.MessageEdited(**args))
bot.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN2:
if not disable_edited:
AYIIN2.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN2.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN3:
if not disable_edited:
AYIIN3.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN3.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN4:
if not disable_edited:
AYIIN4.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN4.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN5:
if not disable_edited:
AYIIN5.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN5.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN6:
if not disable_edited:
AYIIN6.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN6.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN7:
if not disable_edited:
AYIIN7.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN7.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN8:
if not disable_edited:
AYIIN8.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN8.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN9:
if not disable_edited:
AYIIN9.add_event_handler(wrapper, events.MessageEdited(**args))
AYIIN9.add_event_handler(wrapper, events.NewMessage(**args))
if AYIIN10:
if not disable_edited:
AYIIN10.add_event_handler(
wrapper, events.MessageEdited(**args))
AYIIN10.add_event_handler(wrapper, events.NewMessage(**args))
return wrapper
return decorator
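# Hedged usage sketch for register() (pattern and reply text are illustrative):
#
#     @register(outgoing=True, pattern=r"^.ping$")
#     async def ping(event):
#         await event.edit("pong")
#
# The decorator registers the wrapped coroutine on every configured client
# (bot, AYIIN2 ... AYIIN10) for both new and edited messages, unless
# disable_edited=True is passed.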
the-stack_0_17365
""" 1) Stock_TSA - Approach: TSA models a yearly growth rate, combined with probabilistic model."""
# Disclaimer: There have been several attempts to predict financial markets and stock prices using time series analysis. Many of them were not successful!
# Neither trading nor investment decisions should be influenced by this repository and the code, which is built only to introduce and demonstrate a methodology for time series modeling.
# No responsibility is taken for correctness or completeness of historic, current or future data, models and / or predictions!
#-----------------------------------------------------------------------------------------------------------------------------------
__author__ = "Christian Simonis"
__copyright__ = "Copyright 2021"
__version__ = "1.1"
__maintainer__ = "Christian Simonis"
__email__ = "[email protected]"
__status__ = "work in progress"
# Approach: TSA models a yearly return rate, combined with a probabilistic model.
# While the general growth rate of the stock or index is described in a domain model,
# especially non-efficient artifacts are modeled in a probabilistic way,
# including these parts that the domain model is not capable of describing.
# Assumptions rely on the course Financial Markets by Robert Shiller.
# Information links (no promotion), see sources:
# https://www.coursera.org/learn/financial-markets-global (no promotion)
# and https://en.wikipedia.org/wiki/Brownian_model_of_financial_markets
#-----------------------------------------------------------------------------------------------------------------------------------
# Name Version License
# FinQuant 0.2.2 MIT License, Copyright (C) 2019 Frank Milthaler:https://github.com/fmilthaler/FinQuant/blob/master/LICENSE.txt
# numpy 1.19.5 BSD, Copyright (c) 2005-2020, NumPy Developers: https://numpy.org/doc/stable/license.html#:~:text=Copyright%20(c)%202005%2D2020%2C%20NumPy%20Developers.&text=THIS%20SOFTWARE%20IS%20PROVIDED%20BY,A%20PARTICULAR%20PURPOSE%20ARE%20DISCLAIMED.
# yfinance 0.1.59 Apache License, Version 2.0, Copyright (c) January 2004, Ran Aroussi: https://github.com/ranaroussi/yfinance
# matplotlib 3.4.2 Python Software Foundation License, Copyright (c) 2002 - 2012 John Hunter, Darren Dale, Eric Firing, Michael Droettboom and the Matplotlib development team; 2012 - 2021 The Matplotlib development team: https://matplotlib.org/stable/users/license.html
# scikit-learn 0.23.1 BSD 3-Clause License, Copyright (c) 2007-2021 The scikit-learn developers: https://github.com/scikit-learn/scikit-learn/blob/main/COPYING
# pandas 1.2.4 BSD 3-Clause License Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team: https://github.com/pandas-dev/pandas/blob/master/LICENSE
# seaborn 0.11.1 BSD 3-Clause "New" or "Revised" License, Copyright (c) 2012-2021, Michael L. Waskom: https://github.com/mwaskom/seaborn/blob/master/LICENSE
# scipy 1.5.2 BSD 3-Clause "New" or "Revised" License, Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers: https://github.com/scipy/scipy/blob/master/LICENSE.txt
# neuralprophet 0.2.7 MIT License, Copyright (c) 2020 Oskar Triebe: https://github.com/ourownstory/neural_prophet/blob/master/LICENSE
#-----------------------------------------------------------------------------------------------------------------------------------
import numpy as np
from finquant.portfolio import build_portfolio, EfficientFrontier
import yfinance as yfin
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (RBF, WhiteKernel, Matern, RationalQuadratic,
ExpSineSquared, DotProduct,
ConstantKernel)
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
import pandas as pd
import seaborn as sns
from scipy.optimize import curve_fit
import warnings # https://docs.python.org/3/library/warnings.html
import random # https://docs.python.org/3/library/random.html
import datetime as dt # https://docs.python.org/3/library/datetime.html
#-----------------------------------------------------------------------------------------------------------------------------------
# Hint: No responsibility is taken for correctness or completeness of historic, current or future data, models and / or predictions
#-----------------------------------------------------------------------------------------------------------------------------------
# Class definition TSA
#-----------------------------------------------------------------------------------------------------------------------------------
class Stock_Analysis:
"""The purpose of the class Stock_Analysis is:
- to model and predict the time series behavior
- to visualize the model results
"""
#Initialization
def __init__(self):
"""initial call"""
#Get data via YFIN API
def obtain_timeseries(self,Stock_Name, start):
#--------------------------------------------------------
""" obtain timeseries for stocks
e.g. --> obtain_timeseries("AAPL","2018-07-20")
Input:
Stock_Name: Name of stock, e.g. "AAPL"
start: Start data, from which stock data should be downloaded, e.g. "2018-07-20"
Output:
Time: Time data as numpy.ndarray
Stock: Stock data as numpy.ndarray
Time_idx: Time for user visualization: Raw time data as pandas.core.indexes.datetimes.DatetimeIndex
Class:
DF: Dataframe, consisting of time and closing price information
"""
#download with Yahoo Finance API
if hasattr(self,"end") == False:
stocks = yfin.download(Stock_Name, start) # till most recent value
else:
stocks = yfin.download(Stock_Name, start, end = self.end) # till definition
stocks.columns = stocks.columns.to_flat_index()
#Export time series of stock sequence
Stock = stocks.loc[:, "Close"].to_numpy()
Time = stocks.loc[:, "Close"].index.to_numpy().astype("float")
Time_idx = stocks.loc[:, "Close"].index
self.DF = pd.DataFrame({ 'ds': Time_idx,
'y': stocks.loc[:, "Close"]})
return Time, Stock, Time_idx
#Conduct train / test split based on target by user
def conduct_train_test_split(self,option,split_factor, Time, Stock):
#--------------------------------------------------------
""" predictics, using forecast model, consisting of a domain model and a data-driven model
e.g. --> conduct_train_test_split(1,0.3, np.array([1, 2, 3]))
Input:
option: User choice: #1 = real prediction , 2= backtest
split_factor: Train test split
Time: Time data
Stock: Stock data
Output:
Time_training: Time allocated to Training data
y_training: Labels allocated to Training data
Time_test: Time allocated to Test data
y_test: Labels allocated to Test data
"""
if option == 1: #Option 1) Real forecast
delta_T = Time[1]-Time[0]
Label_span = Time.max()-Time.min()
# Chosing Training data proportional split factor
Time_training = Time.copy()
y_training = Stock.copy()
#take most recent data as test data
Time_test = np.arange(Time.max(),Time.max() + (1-split_factor)*Label_span, delta_T)
y_test = []
else: #Option 2) Simulate real forecast (done in past)
length = len(Time)
till = int(np.round(split_factor*length))
# Chosing Training data proportional split factor
Time_training = Time[0:till]
y_training = Stock[0:till]
#take most recent data as test data
Time_test = Time[till+1:length]
y_test = Stock[till+1:length]
return Time_training, y_training, Time_test, y_test
#domain model for times series description in stock market
def func(self, x, a, c):
#--------------------------------------------------------
""" Domain model to describe exponential behavior
        e.g. --> func(np.array([1, 2, 3]), 7, 9)
        Input:
            x: Input
            a: Scaling factor, multiplied to exp-function
            c: Constant offset parameter
            (The interest parameter b is not an argument; it is taken from the class
            attribute exp_interest if set, otherwise it defaults to 1.07.)
        Output:
            y: Output according to domain model
"""
#User choice, representing market knowledge
if hasattr(self,"exp_interest") == False:
b = 1.07 #interest in the long run, e.g. 1.07 = 7% interest
else:
b = self.exp_interest #otherwise, take class attribute
#Calculation of domain model
y = a * np.exp(b * x) + c
return y
#Forecasting model
def fit_ForecastMdl(self, X,Label,L,N):
#--------------------------------------------------------
""" fits forecast model to data, using a domain model, combined with a data-driven model
e.g. --> fit_ForecastMdl(np.array([[1,4]]).T,np.array([1,5]).T,2,2)
Input:
X: Feature as Input,
Label: Ground Truth as label to be learned (output)
L: Length scale: Hyperparameter of Gaussian process, kernel definition
N: restarts of optimizer: Hyperparameter of Gaussian process fitting
Output:
forecast: Forecast model regression value
sigma: Uncertainty, represented by standard deviation
y: Domain regression value
Class:
reg: Domain regression model (as part of class)
gpr: Gaussian process model (as part of class)
"""
# Domain model, e.g. via exponential approach (alternative linear model in bracket comments)
#Exp function fitting
reg, pcov = curve_fit(self.func, X[:,0], Label) #fit of domain model, Alternative: #reg = LinearRegression().fit(Time_scaled, Label)
#Exp function evaluation
y = self.func(X[:,0], *reg) #evaluation of domain model, Alternative: #y = reg.predict(Time_scaled) #linear function
#Calculation of Residuum
res = Label.copy() - y.copy() #exp function
sigma_est = np.std(res)*15 #safety margin
#Definition of Machine Learning model to learn residuum in supervised manner
kernel = 1.0 * RBF(length_scale=L, length_scale_bounds=(L*1e-1, L*1e1)) + 1e3*WhiteKernel(noise_level=1e2*sigma_est, noise_level_bounds=(1e1*sigma_est, 1e2*sigma_est)) # Alternative: #kernel = 1.0 * RationalQuadratic(length_scale=L) + WhiteKernel(noise_level=0, noise_level_bounds=(1e-8, sigma_est))
gpr = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=N,alpha = 0.5)
gpr.fit(X, res)
#Fit of Machine Learning model
GP_pred, sigma = gpr.predict(X, return_std=True)
#Combination of results
forecast = GP_pred + y
self.gpr = gpr # data-driven (probabilistic) model
self.reg = reg # domain model
return forecast, sigma, y
#Prediction function
def pred_ForecastMdl(self, X):
#--------------------------------------------------------
""" predictics, using forecast model, consisting of a domain model and a data-driven model
e.g. --> forecast, sigma, y = fit_ForecastMdl(np.array([[1,4]]).T,np.array([1,5]).T,2,2); pred_ForecastMdl(np.array([[1,1.2]]).T)
Input:
X: Feature as Input,
reg: Domain regression model
gpr: Gaussian process model
Output:
forecast_pred: Predicted forecast model regression value
sigma_pred: Predicted uncertainty, represented by standard deviation
y_pred: Predicted domain regression value
"""
#predict with domain model
y_pred = self.func(X[:,0], *self.reg) #exp function, Alternative: #y_pred = reg.predict(Time_scaled) # linear function
#predict with data-driven model
GP_pred, sigma_pred = self.gpr.predict(X, return_std=True)
#Combine predictions
forecast_pred = GP_pred + y_pred
return forecast_pred, sigma_pred, y_pred
#Visualization
def vis_results(self,Time_training,forecast,sigma,Time_test,forecast_future,sigma_future, Time,Stock, Stock_Name):
#--------------------------------------------------------
""" visualizes results of forecast model, consisting of a domain model and a data-driven model
e.g. --> runfile('RUN_Stock-Forecast.py')
Input:
Time_training: Time allocated to Training data
forecast: Forecast model regression value
sigma: Uncertainty, represented by standard deviation
Time_test: Time allocated to Test data
forecast_future: Predicted forecast model regression value
sigma_future: Predicted uncertainty, represented by standard deviation
Time: Time data as numpy.ndarray
Stock: Stock data as numpy.ndarray
            Stock_Name: Name of Stock or Index
"""
#Fit & Prediction visualization of TSA (Time series analysis) approach
plt.style.use("seaborn")
plt.plot(Time_training,forecast,'b-',linewidth=3, label = 'Model Fit')
plt.fill(np.concatenate([Time_training, Time_training[::-1]]),np.concatenate([forecast - 3 * sigma,(forecast + 3 * sigma)[::-1]]),
            alpha=.3, fc='y', ec='None', label='99.7% confidence interval for training')
plt.plot(Time_test,forecast_future,'k-.',linewidth=2, label = 'Forecast with Prediction Model')
plt.fill(np.concatenate([Time_test, Time_test[::-1]]),np.concatenate([forecast_future - 3 * sigma_future,(forecast_future + 3 * sigma_future)[::-1]]),
            alpha=.2, fc='g', ec='None', label='99.7% confidence interval for prediction')
plt.fill(np.concatenate([Time_test, Time_test[::-1]]),np.concatenate([forecast_future - 1 * sigma_future,(forecast_future + 1 * sigma_future)[::-1]]),
alpha=.5, fc='g', ec='None', label='68% confidence interval for prediction')
plt.scatter(Time,Stock, label = Stock_Name, c="coral")
plt.xlabel('Time', fontsize=16)
plt.ylabel('Closing Price', fontsize=16)
plt.legend(loc='upper left', shadow=False, ncol=1)
return 0
#Optimize portfolio using finquant library
def optimize_pf(self, df_data, nr_mc, risk_free_rate):
#--------------------------------------------------------
""" optimizes portfolio (either historic or predictive) based on defined criteria
e.g. --> runfile('RUN_Pred-optimization.py')
Input:
df_data: Portfolio dataframe to be optimized
nr_mc: Number of samples for Monte Carlo simulation
risk_free_rate: risk free rate
Output:
opt_w: Optimized weights for asset allocation
"""
plt.style.use("seaborn-darkgrid")
# set line width
plt.rcParams["lines.linewidth"] = 2
# set font size for titles
plt.rcParams["axes.titlesize"] = 14
# set font size for labels on axes
plt.rcParams["axes.labelsize"] = 12
# set size of numbers on x-axis
plt.rcParams["xtick.labelsize"] = 10
# set size of numbers on y-axis
plt.rcParams["ytick.labelsize"] = 10
# set figure size
plt.rcParams["figure.figsize"] = (10, 6)
# building a portfolio by providing stock data
pf = build_portfolio(data=df_data)
pf.risk_free_rate = risk_free_rate # risk free rate
print(pf)
pf.properties()
# if needed, change risk free rate and frequency/time window of the portfolio
print("pf.risk_free_rate = {}".format(pf.risk_free_rate))
print("pf.freq = {}".format(pf.freq))
"""
pf.ef_minimum_volatility(verbose=True)
# optimisation for maximum Sharpe ratio
pf.ef_maximum_sharpe_ratio(verbose=True)
# minimum volatility for a given target return of 0.26
pf.ef_efficient_return(0.26, verbose=True)
"""
# optimisation for maximum Sharpe ratio
pf.ef_maximum_sharpe_ratio(verbose=True)
# Monte Carlo portfolios and Efficient Frontier solutions
opt_w, opt_res = pf.mc_optimisation(num_trials=nr_mc)
pf.mc_properties()
pf.mc_plot_results()
# visualization
pf.ef_plot_efrontier()
pf.ef.plot_optimal_portfolios()
pf.plot_stocks()
plt.show()
#provide result
self.optimized_weights = opt_w
self.optimized_weights.head()
return opt_w
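# Hedged end-to-end sketch for Stock_Analysis (ticker, dates and hyperparameters are
# illustrative; the timestamps are scaled with MinMaxScaler first, since the exponential
# domain model would overflow on raw nanosecond timestamps):
#
#     tsa = Stock_Analysis()
#     Time, Stock, Time_idx = tsa.obtain_timeseries("AAPL", "2018-07-20")
#     T_tr, y_tr, T_te, y_te = tsa.conduct_train_test_split(2, 0.8, Time, Stock)
#     scaler = MinMaxScaler()
#     X_tr = scaler.fit_transform(T_tr.reshape(-1, 1))
#     forecast, sigma, y_dom = tsa.fit_ForecastMdl(X_tr, y_tr, L=0.1, N=5)
#     pred, sigma_p, y_p = tsa.pred_ForecastMdl(scaler.transform(T_te.reshape(-1, 1)))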
#-----------------------------------------------------------------------------------------------------------------------------------
the-stack_0_17366
import sys
import argparse
import csv
import sqlite3
import bz2
import gzip
from datetime import datetime
from six import string_types, text_type
if sys.version_info[0] > 2:
read_mode = 'rt'
else:
read_mode = 'rU'
def convert_header(filepath_or_fileobj, dbpath, table, headerspath_or_fileobj=None, compression=None, typespath_or_fileobj=None):
if isinstance(filepath_or_fileobj, string_types):
if compression is None:
fo = open(filepath_or_fileobj, mode=read_mode)
elif compression == 'bz2':
try:
fo = bz2.open(filepath_or_fileobj, mode=read_mode)
except AttributeError:
fo = bz2.BZ2File(filepath_or_fileobj, mode='r')
elif compression == 'gzip':
fo = gzip.open(filepath_or_fileobj, mode=read_mode)
else:
fo = filepath_or_fileobj
try:
dialect = csv.Sniffer().sniff(fo.readline())
except TypeError:
dialect = csv.Sniffer().sniff(str(fo.readline()))
fo.seek(0)
# get the headers
header_given = headerspath_or_fileobj is not None
if header_given:
if isinstance(headerspath_or_fileobj, string_types):
ho = open(headerspath_or_fileobj, mode=read_mode)
else:
ho = headerspath_or_fileobj
header_reader = csv.reader(ho, dialect)
headers = [header.strip() for header in next(header_reader)]
ho.close()
else:
reader = csv.reader(fo, dialect)
headers = [header.strip() for header in next(reader)]
print(headers)
fo.seek(0)
# get the types
if typespath_or_fileobj is not None:
if isinstance(typespath_or_fileobj, string_types):
to = open(typespath_or_fileobj, mode=read_mode)
else:
to = typespath_or_fileobj
type_reader = csv.reader(to, dialect)
types = [_type.strip() for _type in next(type_reader)]
to.close()
else:
# guess types
type_reader = csv.reader(fo, dialect)
if not header_given: next(type_reader)
types = _guess_types(type_reader, len(headers))
print(types)
fo.seek(0)
return
def createCSVSchema(filepath_or_fileobj, dbpath, table, headerspath_or_fileobj=None, compression=None, typespath_or_fileobj=None):
if isinstance(filepath_or_fileobj, string_types):
if compression is None:
fo = open(filepath_or_fileobj, mode=read_mode)
elif compression == 'bz2':
try:
fo = bz2.open(filepath_or_fileobj, mode=read_mode)
except AttributeError:
fo = bz2.BZ2File(filepath_or_fileobj, mode='r')
elif compression == 'gzip':
fo = gzip.open(filepath_or_fileobj, mode=read_mode)
else:
fo = filepath_or_fileobj
try:
dialect = csv.Sniffer().sniff(fo.readline())
except TypeError:
dialect = csv.Sniffer().sniff(str(fo.readline()))
fo.seek(0)
# get the headers
header_given = headerspath_or_fileobj is not None
if header_given:
if isinstance(headerspath_or_fileobj, string_types):
ho = open(headerspath_or_fileobj, mode=read_mode)
else:
ho = headerspath_or_fileobj
header_reader = csv.reader(ho, dialect)
headers = [header.strip() for header in next(header_reader)]
ho.close()
else:
reader = csv.reader(fo, dialect)
headers = [header.strip() for header in next(reader)]
fo.seek(0)
# get the types
if typespath_or_fileobj is not None:
if isinstance(typespath_or_fileobj, string_types):
to = open(typespath_or_fileobj, mode=read_mode)
else:
to = typespath_or_fileobj
type_reader = csv.reader(to, dialect)
types = [_type.strip() for _type in next(type_reader)]
to.close()
else:
# guess types
type_reader = csv.reader(fo, dialect)
if not header_given: next(type_reader)
types = _guess_types(type_reader, len(headers))
fo.seek(0)
# now load data
_columns = ','.join(
['"%s" %s' % (header, _type) for (header,_type) in zip(headers, types)]
)
return headers, types, _columns
def convert(filepath_or_fileobj, dbpath, table, headerspath_or_fileobj=None, compression=None, typespath_or_fileobj=None):
if isinstance(filepath_or_fileobj, string_types):
if compression is None:
fo = open(filepath_or_fileobj, mode=read_mode)
elif compression == 'bz2':
try:
fo = bz2.open(filepath_or_fileobj, mode=read_mode)
except AttributeError:
fo = bz2.BZ2File(filepath_or_fileobj, mode='r')
elif compression == 'gzip':
fo = gzip.open(filepath_or_fileobj, mode=read_mode)
else:
fo = filepath_or_fileobj
try:
dialect = csv.Sniffer().sniff(fo.readline())
except TypeError:
dialect = csv.Sniffer().sniff(str(fo.readline()))
fo.seek(0)
# get the headers
header_given = headerspath_or_fileobj is not None
if header_given:
if isinstance(headerspath_or_fileobj, string_types):
ho = open(headerspath_or_fileobj, mode=read_mode)
else:
ho = headerspath_or_fileobj
header_reader = csv.reader(ho, dialect)
headers = [header.strip() for header in next(header_reader)]
ho.close()
else:
reader = csv.reader(fo, dialect)
headers = [header.strip() for header in next(reader)]
fo.seek(0)
# get the types
if typespath_or_fileobj is not None:
if isinstance(typespath_or_fileobj, string_types):
to = open(typespath_or_fileobj, mode=read_mode)
else:
to = typespath_or_fileobj
type_reader = csv.reader(to, dialect)
types = [_type.strip() for _type in next(type_reader)]
to.close()
else:
# guess types
type_reader = csv.reader(fo, dialect)
if not header_given: next(type_reader)
types = _guess_types(type_reader, len(headers))
fo.seek(0)
# now load data
_columns = ','.join(
['"%s" %s' % (header, _type) for (header,_type) in zip(headers, types)]
)
reader = csv.reader(fo, dialect)
if not header_given: # Skip the header
next(reader)
conn = sqlite3.connect(dbpath)
# shz: fix error with non-ASCII input
conn.text_factory = str
c = conn.cursor()
    try:
        create_query = 'CREATE TABLE %s (%s)' % (table, _columns)
        c.execute(create_query)
    except sqlite3.OperationalError:
        # table already exists
        pass
_insert_tmpl = 'INSERT INTO %s VALUES (%s)' % (table,
','.join(['?']*len(headers)))
line = 0
for row in reader:
line += 1
if len(row) == 0:
continue
# we need to take out commas from int and floats for sqlite to
# recognize them properly ...
try:
row = [
None if x == ''
else float(x.replace(',', '')) if y == 'real'
else int(x) if y == 'integer'
else x for (x,y) in zip(row, types) ]
c.execute(_insert_tmpl, row)
        except ValueError as e:
            # comprehension variables do not leak in Python 3, so report the row and error instead
            print("Unable to convert a value to the guessed column type on line %d: %s" % (line, e), file=sys.stderr)
except Exception as e:
print("Error on line %d: %s" % (line, e), file=sys.stderr)
conn.commit()
c.close()
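# Hedged example of calling convert() from Python rather than the CLI
# (the file paths and table name are illustrative):
#
#     convert("customers.csv", "customers.db", "customers")
#     # gzip-compressed input with an external header file:
#     convert("customers.csv.gz", "customers.db", "customers",
#             headerspath_or_fileobj="headers.csv", compression="gzip")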
def _guess_types(reader, number_of_columns, max_sample_size=100):
'''Guess column types (as for SQLite) of CSV.
:param fileobj: read-only file object for a CSV file.
'''
# we default to text for each field
types = ['text'] * number_of_columns
# order matters
# (order in form of type you want used in case of tie to be last)
options = [
('text', text_type),
('real', float),
('integer', int),
('date', lambda value: datetime.strptime(value, "%Y-%m-%d").date()),
('datetime', lambda value: datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f%z").date())
# 'date',
]
# for each column a set of bins for each type counting successful casts
perresult = {
'integer': 0,
'real': 0,
'text': 0,
'date':0,
'datetime':0
}
results = [ dict(perresult) for x in range(number_of_columns) ]
sample_counts = [ 0 for x in range(number_of_columns) ]
for row_index,row in enumerate(reader):
for column,cell in enumerate(row):
cell = cell.strip()
if len(cell) == 0:
continue
# replace ',' with '' to improve cast accuracy for ints and floats
if(cell.count(',') > 0):
cell = cell.replace(',', '')
if(cell.count('E') == 0):
cell = cell + "E0"
for data_type,cast in options:
try:
cast(cell)
if data_type=='integer' and len(cell)>20:
raise ValueError('too long integer to handle')
results[column][data_type] += 1
sample_counts[column] += 1
except ValueError:
pass
have_max_samples = True
for column,cell in enumerate(row):
if sample_counts[column] < max_sample_size:
have_max_samples = False
if have_max_samples:
break
for column,colresult in enumerate(results):
for _type, _ in options:
if colresult[_type] > 0 and colresult[_type] >= colresult[types[column]]:
types[column] = _type
return types
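# Hedged illustration of _guess_types (the sample data is illustrative). Note that
# because an "E0" suffix is appended before casting, purely integral columns end up
# reported as 'real' rather than 'integer':
#
#     import io, csv
#     rows = csv.reader(io.StringIO("1,a,2.5\n2,b,3.5\n"))
#     _guess_types(rows, 3)   # expected: ['real', 'text', 'real']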
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''
Convert a CSV file to a table in a SQLite database.
The database is created if it does not yet exist.
''')
parser.add_argument('csv_file', type=str, help='Input CSV file path')
parser.add_argument('sqlite_db_file', type=str, help='Output SQLite file')
parser.add_argument('table_name', type=str, nargs='?', help='Name of table to write to in SQLite file', default='data')
parser.add_argument('--headers', type=str, nargs='?', help='Headers are read from this file, if provided.', default=None)
parser.add_argument('--types', type=list, nargs='?', help='Types are read from this file, if provided.', default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument('--bz2', help='Input csv file is compressed using bzip2.', action='store_true')
group.add_argument('--gzip', help='Input csv file is compressed using gzip.', action='store_true')
args = parser.parse_args()
compression = None
if args.bz2:
compression = 'bz2'
elif args.gzip:
compression = 'gzip'
#convert(args.csv_file, args.sqlite_db_file, args.table_name, args.headers, compression, args.types)
convert(args.csv_file, args.sqlite_db_file, args.table_name, args.headers, compression, args.types)
the-stack_0_17369
# C20 and C28 contained extra character "
# I decided to remove this character on both lines
# I assume that it was a mistype
# Tab-separated values should not contain these extra charachters
# please compare data.txt and data2.txt
''' I decided to work with data2.txt because if we had data coming
with an extra character, we should probably ensure that this data is fixed
'''
'''
# please uncomment this section for comparison
with open('data.txt') as tab_data:
print(tab_data.read())
with open('data2.txt') as tab_data2:
print(tab_data2.read())
# end comparison
'''
def main():
create_simple_html()
def create_simple_html():
f = open('data-to-html.html','w')
text_data = open_read_data()
# print(text_data)
# multi-line 'f strings' are used to insert into <pre> element
message = f"""<html>
<head></head>
<body><p>customer data</p>
<pre>{text_data}</pre></body>
</html>"""
f.write(message)
f.close()
def open_read_data():
with open('data2.txt') as customers:
c =customers.read()
# print(c)
# print(type(c))
return c
main()
the-stack_0_17371
"""
Validate serialized SPDXv3 files against schema
"""
import jadn
import json
import os
from urllib.parse import urlparse
SCHEMA = 'Schemas/spdx-v3.jidl'
DATA_DIR = 'Data3'
OUT_DIR = 'Out'
DEFAULT_PROPERTIES = ('specVersion', 'created', 'profile', 'dataLicense')
IRI_LOCATIONS = ('id', 'created/by', '*/elements/*', 'relationship/from', 'relationship/to/*',
'*/originator', 'elementRefs/id', 'annotation/subject')
def expand_iri(context: dict, element_id: str) -> str:
"""
Convert an Element ID in namespace:local form to an IRI
"""
if context:
u = urlparse(element_id)
if u.scheme:
if prefix := context.get('prefixes', {}).get(u.scheme, ''):
return prefix + u.path
return element_id
if element_id not in context.get('local_ids', []):
print(f' Undefined Element: {element_id}')
return context.get('baseIRI', '') + element_id
return element_id
def compress_iri(context: dict, iri: str) -> str:
"""
Convert an Element IRI to namespace:local form
"""
if context:
if base := context.get('baseIRI', ''):
if iri.startswith(base):
return iri.replace(base, '')
for k, v in context.get('prefixes', {}).items():
if iri.startswith(v):
return iri.replace(v, k + ':')
return iri
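# Hedged illustration of the prefix handling in expand_iri / compress_iri
# (the context values are illustrative):
#
#     ctx = {"prefixes": {"ex": "https://acme.example/"}}
#     expand_iri(ctx, "ex:pkg-1")                      # -> "https://acme.example/pkg-1"
#     compress_iri(ctx, "https://acme.example/pkg-1")  # -> "ex:pkg-1"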
def expand_ids(context: dict, element: dict, paths: list) -> None:
"""
Convert all IRIs in element from namespace:local form to absolute IRI
Hardcode IRI locations for now; replace with path-driven dynamic update
"""
etype = element['type']
element.update({'id': expand_iri(context, element['id'])})
if 'created' in element:
element['created']['by'] = [expand_iri(context, k) for k in element['created']['by']]
for t in etype:
if 'elements' in etype[t]:
etype[t]['elements'] = [expand_iri(context, k) for k in etype[t]['elements']]
elif 'originator' in etype[t]:
etype[t]['originator'] = [expand_iri(context, k) for k in etype[t]['originator']]
if 'annotation' in etype:
etype['annotation']['subject'] = expand_iri(context, etype['annotation']['subject'])
if 'relationship' in etype:
etype['relationship']['from'] = expand_iri(context, etype['relationship']['from'])
etype['relationship']['to'] = [expand_iri(context, k) for k in etype['relationship']['to']]
def compress_ids(context: dict, element: dict) -> None:
etype = element['type']
element.update({'id': compress_iri(context, element['id'])})
if 'created' in element:
element['created']['by'] = [compress_iri(context, k) for k in element['created']['by']]
for t in etype:
if 'elements' in etype[t]:
etype[t]['elements'] = [compress_iri(context, k) for k in etype[t]['elements']]
elif 'originator' in etype[t]:
etype[t]['originator'] = [compress_iri(context, k) for k in etype[t]['originator']]
if 'annotation' in etype:
etype['annotation']['subject'] = compress_iri(context, etype['annotation']['subject'])
if 'relationship' in etype:
etype['relationship']['from'] = compress_iri(context, etype['relationship']['from'])
etype['relationship']['to'] = [compress_iri(context, k) for k in etype['relationship']['to']]
def expand_element(context: dict, element: dict) -> dict:
"""
Fill in Element properties from Context
"""
element_x = {'id': ''} # put id first
element_x.update({k: context[k] for k in DEFAULT_PROPERTIES if k in context})
element_x.update(element)
# print(f" {element_x}")
expand_ids(context, element_x, IRI_LOCATIONS)
print(f" {element_x}")
return element_x
def split_element_set(context: dict, element: dict) -> list:
"""
Split an Element + Context into a set of individual Elements
"""
context.update({k: element[k] for k in DEFAULT_PROPERTIES if k in element})
elist = [expand_element(context, element)]
for e in context.get('elementValues', []):
elist.append(expand_element(context, e))
return elist
def join_element_set(context: dict, element_id: str, elements: list) -> dict:
"""
Combine a set of individual Elements into a designated Element, update Context
"""
return
def load_any(path: str) -> Optional[dict]:
fn, ext = os.path.splitext(path)
try:
loader = {
'.jadn': jadn.load,
'.jidl': jadn.convert.jidl_load,
'.html': jadn.convert.html_load
}[ext]
except KeyError:
if os.path.isfile(path):
raise ValueError(f'Unsupported schema format: {path}')
return
return loader(path)
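# For example, load_any('Schemas/spdx-v3.jidl') dispatches on the '.jidl' extension and
# returns the parsed schema dict; an existing file with an unsupported extension raises
# ValueError, and a non-file path returns None.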
def make_dot(context: dict, elist: list, fp: str) -> None:
ex = {e['id']: k for k, e in enumerate(elist, start=1)}
with open(os.path.splitext(fp)[0] + '.dot', 'w') as fx:
fx.write('digraph G {\nnode [fontname=Arial, fontsize=8, shape=box, style=filled, fillcolor=lightskyblue1]\n')
for e in elist:
id = compress_iri(context, e['id'])
# print(f" n{ex[e['id']]}: {id}: {e.get('name', id)}")
fx.write(f"n{ex[e['id']]} [label=\"{id}\\n{e.get('name', '')}\"]\n")
for t in e['type']:
for n in e['type'][t].get('elements', []):
dest = f'n{ex[n]}' if n in ex else f'"{compress_iri(context, n)}"'
fx.write(f" n{ex[e['id']]} -> {dest}\n")
fx.write('}\n')
if __name__ == '__main__':
print(f'Installed JADN version: {jadn.__version__}\n')
os.makedirs(OUT_DIR, exist_ok=True)
s = load_any(SCHEMA)
sc = jadn.codec.Codec(s, verbose_rec=True, verbose_str=True)
for f in os.scandir(DATA_DIR):
print(f.name)
if not f.is_file():
continue
data = json.load(open(f.path))
el = sc.decode('Element', data)
cx = el.pop('context', {})
cx['local_ids'] = [compress_iri(cx, el['id'])] + [compress_iri(cx, ev['id']) for ev in cx.get('elementValues', {})]
elements = split_element_set(cx, el)
make_dot(cx, elements, os.path.join(OUT_DIR, f.name)) |
the-stack_0_17372 | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from typing import Dict, Optional, Tuple, Type, Union
from merlin.models.tf.blocks.core.aggregation import SequenceAggregation, SequenceAggregator
from merlin.models.tf.blocks.core.base import Block, BlockType
from merlin.models.tf.blocks.core.combinators import ParallelBlock, TabularAggregationType
from merlin.models.tf.blocks.core.masking import MaskingBlock, masking_registry
from merlin.models.tf.blocks.core.transformations import AsDenseFeatures
from merlin.models.tf.features.continuous import ContinuousFeatures
from merlin.models.tf.features.embedding import (
ContinuousEmbedding,
EmbeddingFeatures,
EmbeddingOptions,
SequenceEmbeddingFeatures,
)
from merlin.schema import Schema, Tags, TagsType
LOG = logging.getLogger("merlin-models")
def InputBlock(
schema: Schema,
branches: Optional[Dict[str, Block]] = None,
post: Optional[BlockType] = None,
aggregation: Optional[TabularAggregationType] = None,
seq: bool = False,
max_seq_length: Optional[int] = None,
add_continuous_branch: bool = True,
continuous_tags: Optional[Union[TagsType, Tuple[Tags]]] = (Tags.CONTINUOUS,),
continuous_projection: Optional[Block] = None,
add_embedding_branch: bool = True,
embedding_options: EmbeddingOptions = EmbeddingOptions(),
categorical_tags: Optional[Union[TagsType, Tuple[Tags]]] = (Tags.CATEGORICAL,),
sequential_tags: Optional[Union[TagsType, Tuple[Tags]]] = (Tags.SEQUENCE,),
split_sparse: bool = False,
masking: Optional[Union[str, MaskingBlock]] = None,
seq_aggregator: Block = SequenceAggregator(SequenceAggregation.MEAN),
**kwargs,
) -> Block:
"""The entry block of the model to process input features from a schema.
This function creates continuous and embedding layers, and connects them via `ParallelBlock`.
If aggregation argument is not set, it returns a dictionary of multiple tensors
each corresponds to an input feature.
Otherwise, it merges the tensors into one using the aggregation method.
Example usage::
mlp = ml.InputBlock(schema).connect(ml.MLPBlock([64, 32]))
Parameters:
----------
schema: Schema
Schema of the input data. This Schema object will be automatically generated using
[NVTabular](https://nvidia-merlin.github.io/NVTabular/main/Introduction.html).
Next to this, it's also possible to construct it manually.
branches: Dict[str, Block], optional
Dictionary of branches to use inside the InputBlock.
post: Optional[BlockType]
Transformations to apply on the inputs after the module is
called (so **after** `forward`).
Defaults to None
aggregation: Optional[TabularAggregationType]
Aggregation to apply after processing the `forward`-method to output a single Tensor.
Defaults to None
seq: bool
Whether to process inputs for sequential model (returns 3-D tensor)
or not (returns 2-D tensor). Use `seq=True` to treat the sparse (list) features
as sequences (e.g. for sequential recommendation) and `seq=False` to treat sparse
features as multi-hot categorical representations.
Defaults to False
add_continuous_branch: bool
If set, add the branch to process continuous features
Defaults to True
continuous_tags: Optional[Union[TagsType, Tuple[Tags]]]
Tags to filter the continuous features
Defaults to (Tags.CONTINUOUS,)
continuous_projection: Optional[Block]
        If set, concatenate all numerical features and project them using the
specified Block.
Defaults to None
add_embedding_branch: bool
If set, add the branch to process categorical features
Defaults to True
categorical_tags: Optional[Union[TagsType, Tuple[Tags]]]
        Tags to filter the categorical features
Defaults to (Tags.CATEGORICAL,)
sequential_tags: Optional[Union[TagsType, Tuple[Tags]]]
Tags to filter the sparse features
Defaults to (Tags.SEQUENCE,)
split_sparse: Optional[bool]
When True, separate the processing of context (2-D) and sparse features (3-D).
Defaults to False
    masking: Optional[Union[str, MaskingBlock]], optional
If set, Apply masking to the input embeddings and compute masked labels.
Defaults to None
seq_aggregator: Block
If non-sequential model (seq=False):
aggregate the sparse features tensor along the sequence axis.
Defaults to SequenceAggregator('mean')
"""
branches = branches or {}
if split_sparse:
sparse_schema = schema.select_by_tag(sequential_tags)
context_schema = schema.remove_by_tag(sequential_tags)
if not sparse_schema:
raise ValueError(
"Please make sure that schema has features tagged as 'sequence' when"
"`split_context` is set to True"
)
if not aggregation:
LOG.info(
"aggregation is not provided, "
"default `concat` will be used to merge sequential features"
)
aggregation = "concat"
agg = aggregation
sparse_interactions = InputBlock(
sparse_schema,
branches,
post,
aggregation=agg,
seq=True,
max_seq_length=max_seq_length,
add_continuous_branch=add_continuous_branch,
continuous_tags=continuous_tags,
continuous_projection=continuous_projection,
add_embedding_branch=add_embedding_branch,
embedding_options=embedding_options,
categorical_tags=categorical_tags,
split_sparse=False,
)
if masking:
if isinstance(masking, str):
masking = masking_registry.parse(masking)()
sparse_interactions = sparse_interactions.connect(masking)
if not seq:
sparse_interactions = sparse_interactions.connect(seq_aggregator)
if not context_schema:
return sparse_interactions
branches["sparse"] = sparse_interactions
return InputBlock(
context_schema,
branches,
post,
aggregation=agg,
seq=False,
add_continuous_branch=add_continuous_branch,
continuous_tags=continuous_tags,
continuous_projection=continuous_projection,
add_embedding_branch=add_embedding_branch,
embedding_options=embedding_options,
categorical_tags=categorical_tags,
split_sparse=False,
)
if add_continuous_branch and schema.select_by_tag(continuous_tags).column_schemas:
pre = None
if max_seq_length and seq:
pre = AsDenseFeatures(max_seq_length)
branches["continuous"] = ContinuousFeatures.from_schema( # type: ignore
schema,
tags=continuous_tags,
pre=pre,
)
if add_embedding_branch and schema.select_by_tag(categorical_tags).column_schemas:
emb_cls: Type[EmbeddingFeatures] = SequenceEmbeddingFeatures if seq else EmbeddingFeatures
emb_kwargs = {}
if max_seq_length and seq:
emb_kwargs["max_seq_length"] = max_seq_length
branches["categorical"] = emb_cls.from_schema( # type: ignore
schema, tags=categorical_tags, options=embedding_options, **emb_kwargs
)
if continuous_projection:
return ContinuousEmbedding(
ParallelBlock(branches),
continuous_projection,
aggregation=aggregation,
post=post,
name="continuous_projection",
)
return ParallelBlock(branches, aggregation=aggregation, post=post, is_input=True, **kwargs)
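# A minimal sketch of the split-sparse path, assuming an NVTabular-generated `schema`
# with both context and sequence-tagged (list) columns; the names below are illustrative:
#
#   inputs = ml.InputBlock(schema, split_sparse=True, max_seq_length=20, masking="clm")
#   model_body = inputs.connect(ml.MLPBlock([64, 32]))
#
# With seq=False the sequence branch is aggregated by `seq_aggregator` (mean by default)
# before being merged with the context branch.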
|
the-stack_0_17373 | """
Add runway data to station info
Sourced from http://ourairports.com/data/
"""
import csv
import json
stations = json.load(open('stations.json'))
# Add runway data subset to station data
with open('runways.csv') as fin:
runways = csv.reader(fin)
header = True
for runway in runways:
# Skip header row
if header:
header = False
continue
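        # Columns used from the ourairports.com runways.csv (assumed layout):
        #   [2] airport_ident, [3] length_ft, [4] width_ft, [8] le_ident, [14] he_ident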
data = {
'length': int(runway[3]) if runway[3] else 0,
'width': int(runway[4]) if runway[4] else 0,
'ident1': runway[8],
'ident2': runway[14],
}
station = runway[2]
if station in stations:
if 'runways' in stations[station]:
stations[station]['runways'].append(data)
else:
stations[station]['runways'] = [data]
# Sort runways by longest length and add missing nulls
for station in stations:
if 'runways' in stations[station]:
stations[station]['runways'].sort(key=lambda x: x['length'], reverse=True)
else:
stations[station]['runways'] = None
json.dump(stations, open('stations.1.json', 'w'))
|
the-stack_0_17374 | # ECE457A Assignment 2 Maze path finding
# Michael Lin
from maze import maze
# Each node in the maze is represented by an object
import pygame
import math
from queue import PriorityQueue
WIDTH = 700
WIN = pygame.display.set_mode((WIDTH, WIDTH + 100))
pygame.display.set_caption("A* Path Finding Algorithm")
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
PURPLE = (128, 0, 128)
ORANGE = (255, 165, 0)
GREY = (128, 128, 128)
TURQUOISE = (64, 224, 208)
BFS = 0
DFS = 1
ASTAR = 2
class Node:
def __init__(self, row, col, width, total_rows):
self.row = row
self.col = col
self.x = row * width
self.y = col * width
self.color = WHITE
self.neighbors = []
self.width = width
self.total_rows = total_rows
def get_pos(self):
return self.row, self.col
def is_closed(self):
return self.color == RED
def is_open(self):
return self.color == GREEN
def is_barrier(self):
return self.color == BLACK
def is_start(self):
return self.color == ORANGE
def is_end(self):
return self.color == TURQUOISE
def reset(self):
self.color = WHITE
def make_start(self):
self.color = ORANGE
def make_closed(self):
self.color = RED
def make_open(self):
self.color = GREEN
def make_barrier(self):
self.color = BLACK
def make_end(self):
self.color = TURQUOISE
def make_path(self):
self.color = PURPLE
def draw(self, win):
pygame.draw.rect(win, self.color, (self.x, self.y, self.width, self.width))
def update_neighbors(self, grid):
self.neighbors = []
# DOWN
if self.row < self.total_rows - 1 and not grid[self.row + 1][self.col].is_barrier():
self.neighbors.append(grid[self.row + 1][self.col])
if self.row > 0 and not grid[self.row - 1][self.col].is_barrier(): # UP
self.neighbors.append(grid[self.row - 1][self.col])
# RIGHT
if self.col < self.total_rows - 1 and not grid[self.row][self.col + 1].is_barrier():
self.neighbors.append(grid[self.row][self.col + 1])
if self.col > 0 and not grid[self.row][self.col - 1].is_barrier(): # LEFT
self.neighbors.append(grid[self.row][self.col - 1])
def __lt__(self, other):
return False
# pygame's x,y starts at top left
# Maze's co-ordinates start at bottom left
def h(p1, p2):
x1, y1 = p1
x2, y2 = p2
return abs(x1 - x2) + abs(y1 - y2)
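# Worked example: h((2, 13), (23, 5)) = |2 - 23| + |13 - 5| = 21 + 8 = 29 grid steps,
# an admissible Manhattan-distance heuristic on this 4-connected grid.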
def reconstruct_path(came_from, current, draw):
minimumPathCost=0
while current in came_from:
minimumPathCost += 1
current = came_from[current]
current.make_path()
draw()
    return minimumPathCost + 1  # include end node
#Three algorithms used:
#BFS
#DFS
#A* search
#output: complete path, its cost and the number of nodes explored
#bfs uses a queue to store its open set
def algorithm_bfs(draw, grid, start, end):
return False
#dfs uses a stack
def algorithm_dfs(draw, grid, start, end):
count = 0
open_set = []
open_set.append(start)
return False
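# A minimal BFS sketch (assumed to reuse the same bookkeeping as algorithm_astar):
#   from collections import deque
#   queue, came_from, visited = deque([start]), {}, {start}
#   while queue:
#       current = queue.popleft()
#       if current == end:
#           return reconstruct_path(came_from, end, draw), len(visited)
#       for neighbor in current.neighbors:
#           if neighbor not in visited:
#               visited.add(neighbor); came_from[neighbor] = current; queue.append(neighbor)
# DFS differs only in using a plain list as a stack (append/pop from the end),
# which explores one branch fully before backtracking.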
#A* uses a priority queue, and a cost function f = g + h
#h is the heuristic function, using Manhattan distance
def algorithm_astar(draw, grid, start, end):
count = 0
minimumCost_=-1
nodesExplored=-1
open_set = PriorityQueue()#Fringe nodes to be visited
open_set.put((0, count, start))
came_from = {}
g_score = {node: float("inf") for row in grid for node in row}
g_score[start] = 0
f_score = {node: float("inf") for row in grid for node in row}
f_score[start] = h(start.get_pos(), end.get_pos())
open_set_hash = {start}#Visited nodes
while not open_set.empty():
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
current = open_set.get()[2]
open_set_hash.remove(current)
if current == end:
minimumCost_ = reconstruct_path(came_from, end, draw)
nodesExplored=count
end.make_end()
return minimumCost_, nodesExplored
for neighbor in current.neighbors:
temp_g_score = g_score[current] + 1
if temp_g_score < g_score[neighbor]:
came_from[neighbor] = current
g_score[neighbor] = temp_g_score
f_score[neighbor] = temp_g_score + h(neighbor.get_pos(), end.get_pos())
if neighbor not in open_set_hash:
count += 1
open_set.put((f_score[neighbor], count, neighbor))
open_set_hash.add(neighbor)
neighbor.make_open()
draw()
if current != start:
current.make_closed()
return -1, -1
def make_grid(rows, width):
grid = []
gap = width // rows
for i in range(rows):
grid.append([])
for j in range(rows):
node = Node(i, j, gap, rows)
grid[i].append(node)
return grid
def draw_grid(win, rows, width):
gap = width // rows
for i in range(rows):
pygame.draw.line(win, GREY, (0, i * gap), (width, i * gap))
for j in range(rows):
pygame.draw.line(win, GREY, (j * gap, 0), (j * gap, width))
def draw(win, grid, rows, width, pathCost=-1, nodesExplored=-1):
win.fill(WHITE)
for row in grid:
for node in row:
node.draw(win)
draw_grid(win, rows, width)
pygame.font.init()
myFont = pygame.font.Font("./OpenSans-VariableFont_wdth,wght.ttf", 15)
if pathCost != -1:
label = myFont.render("Minimum path cost: " + str(pathCost), 1, BLACK)
WIN.blit(label, (100, WIDTH + 20))
if nodesExplored!=-1:
label2 = myFont.render("Nodes explored: " + str(nodesExplored), 1, BLACK)
WIN.blit(label2, (100, WIDTH + 50))
label3 = myFont.render("Press space to begin search ", 1, BLACK)
WIN.blit(label3, (300, WIDTH + 20))
label4 = myFont.render("Press c to clear board", 1, BLACK)
WIN.blit(label4, (300, WIDTH + 50))
pygame.display.update()
def get_clicked_pos(pos, rows, width):
gap = width // rows
y, x = pos
row = y // gap
col = x // gap
return row, col
def main(win, width):
ROWS = 25
algo = ASTAR
minimumCost = -1
nodesExplored = -1
grid = make_grid(ROWS, width)
# note, pygame uses top left as (0,0) but data is given as bottom left first
# using pygame coordinate as master
start = grid[2][13]
start.make_start()#orange
end = grid[23][5] # E1: 5, 23 E2: 2, 3
end.make_end()#blue
for j in range(ROWS): # col
for i in range(ROWS): # row
i_ = ROWS - 1 - i
node = grid[j][i]
if maze[i_][j] == 1:
node.make_barrier()
run = True
while run:
draw(win, grid, ROWS, width, minimumCost, nodesExplored)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE and start and end:
for row in grid:
for node in row:
node.update_neighbors(grid)
if algo == ASTAR:
minimumCost, nodesExplored= algorithm_astar(lambda: draw(win, grid, ROWS, width), grid, start, end)
elif algo == BFS:
algorithm_bfs(lambda: draw(win, grid, ROWS, width), grid, start, end)
elif algo == DFS:
algorithm_dfs(lambda: draw(win, grid, ROWS, width), grid, start, end)
if event.key == pygame.K_c:
start = None
end = None
minimumCost = -1
nodesExplored = -1
grid = make_grid(ROWS, width)
pygame.quit()
main(WIN, WIDTH)
|
the-stack_0_17376 | # -*- coding: utf-8 -*-
import os
import sys
import platform
import mathics.builtin.system as msystem
import mathics.builtin.datentime as datentime
import mathics.builtin.files_io.filesystem as filesystem
import mathics.builtin.atomic.numbers as numeric
from mathics.core.evaluation import Evaluation
def mathics_system_info(defs):
def eval(name, needs_head=True):
evaled = name().evaluate(evaluation)
if needs_head:
return evaled.head.to_python(string_quotes=False)
else:
return evaled.to_python(string_quotes=False)
evaluation = Evaluation(defs, output=None)
return {
"$Machine": sys.platform,
"$MachineName": platform.uname().node,
"$ProcessID": os.getppid(),
"$ProcessorType": platform.machine(),
"$SystemID": sys.platform,
"$UserName": eval(msystem.UserName),
"$SystemMemory": eval(msystem.SystemMemory),
"MemoryAvailable[]": eval(msystem.MemoryAvailable, needs_head=False),
"$SystemTimeZone": eval(datentime.SystemTimeZone),
"MachinePrecision": eval(numeric.MachinePrecision_),
"$BaseDirectory": eval(filesystem.BaseDirectory),
"$RootDirectory": eval(filesystem.RootDirectory),
"$HomeDirectory": eval(filesystem.HomeDirectory),
"$InstallationDirectory": eval(filesystem.InstallationDirectory),
"$TemporaryDirectory": eval(filesystem.TemporaryDirectory),
}
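# Hypothetical usage (assuming a Definitions object from mathics.core.definitions):
#   from mathics.core.definitions import Definitions
#   info = mathics_system_info(Definitions(add_builtin=True))
#   print(info["$SystemID"], info["MachinePrecision"])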
|
the-stack_0_17378 | import logging
import os, sys
import shutil
from datetime import datetime
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams
from volsim.metrics import *
from volsim.params import *
class Logger(object):
def __init__(self, path:str, params:Params=None, override:bool=False, addNumber:bool=True, addDate:bool=False):
if addDate:
self.path = "runs/" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S_") + path
elif addNumber:
self.path = "runs/%s_%02d" % (path, 0)
else:
self.path = "runs/" + path
if os.path.isdir(self.path):
if override:
shutil.rmtree(self.path)
else:
if addNumber:
num = 1
while os.path.isdir(self.path):
self.path = "runs/%s_%02d" % (path, num)
num += 1
else:
raise ValueError("Model directory already exists!")
os.makedirs(self.path)
shutil.copy("src/training.py", os.path.join(self.path, "training.py"))
self.tfWriter = CustomSummaryWriter(self.path, flush_secs=20)
# hacky reload fix for logging to work properly
import importlib
importlib.reload(logging)
logging.basicConfig(filename=self.path+"/log.txt", format="%(asctime)s %(message)s", level=logging.INFO, datefmt="%H:%M:%S")
logging.info("Path: %s" % self.path)
logging.info("PyTorch Seed: %d" % torch.random.initial_seed())
if params:
logging.info(str(params.asDict()))
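    # Typical flow (sketch, names assumed): logger = Logger("run", params); logger.setup(model, opt, sched, val, test);
    # then saveTrainState(epoch) after each epoch and close() once training finishes.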
def setup(self, model:nn.Module, optimizer:Optimizer, lrScheduler:_LRScheduler, valSplit:dict, testSplit:dict):
self.model = model
self.optimizer = optimizer
self.lrScheduler = lrScheduler
datasetsCor = {}
for split in valSplit:
datasetsCor[split] = ["Multiline", ["datasets/Correlation_" + split]]
for split in testSplit:
datasetsCor[split] = ["Multiline", ["datasets/Correlation_" + split]]
datasetsCor["All (Val)"] = ["Multiline", ["datasets/Correlation_ValAll"]]
datasetsCor["All (Test)"] = ["Multiline", ["datasets/Correlation_TestAll"]]
layout = {
"Training":{
"Correlation": ["Multiline", ["train/Epoch_CorrelationFull"]],
"Correlation (Mean)": ["Margin", ["train/Epoch_CorrelationMean", "train/Epoch_CorrelationMeanLow", "train/Epoch_CorrelationMeanHigh"]],
"Loss": ["Multiline", ["train/Epoch_Loss", "train/Epoch_LossL2", "train/Epoch_LossCorr", "train/Epoch_LossSizeReg", "train/Epoch_LossSlConvReg"]],
},
"Training Batches":{
"Loss (Batch)": ["Multiline", ["train/Batch_Loss", "train/Batch_LossL2", "train/Batch_LossCorr", "train/Batch_LossSlConvReg"]],
"Correlation (Batch)": ["Multiline", ["train/Batch_Correlation"]],
"Correlation (Sample Sliced)": ["Multiline", ["train/Sample_Correlation"]],
},
"Validation":{
"Correlation": ["Multiline", ["val/Epoch_CorrelationFull"]],
"Correlation (Mean)": ["Margin", ["val/Epoch_CorrelationMean", "val/Epoch_CorrelationMeanLow", "val/Epoch_CorrelationMeanHigh"]],
"Distance": ["Margin", ["val/Epoch_Distance", "val/Epoch_DistanceLow", "val/Epoch_DistanceHigh"]],
},
"Validation Batches":{
"Correlation (Batch)": ["Multiline", ["val/Batch_Correlation"]],
},
"Test":{
"Correlation": ["Multiline", ["test/Epoch_CorrelationFull"]],
"Correlation (Mean)": ["Margin", ["test/Epoch_CorrelationMean", "test/Epoch_CorrelationMeanLow", "test/Epoch_CorrelationMeanHigh"]],
"Distance": ["Margin", ["test/Epoch_Distance", "test/Epoch_DistanceLow", "test/Epoch_DistanceHigh"]],
},
"Test Batches":{
"Correlation (Batch)": ["Multiline", ["test/Batch_Correlation"]],
},
"Datasets": datasetsCor,
}
self.tfWriter.add_custom_scalars(layout)
def close(self):
logging.info("\nLog completed.")
logging.shutdown()
self.tfWriter.close()
def saveTrainState(self, epoch:int, milestone:bool=False):
assert (self.model), "No model to save, setup logger first!"
saveDict = {
"epoch" : epoch,
"optimizer" : self.optimizer.state_dict,
"lrScheduler" : self.lrScheduler.state_dict
}
torch.save(saveDict, self.path + "/TrainState.pth")
if milestone:
self.model.save(self.path + "/Epoch%02d.pth" % (epoch), override=True, noPrint=True)
else:
self.model.save(self.path + "/Model.pth", override=True, noPrint=True)
def resumeTrainState(self, epoch:int):
if epoch <= 0:
return
assert (self.model), "No model to load, setup logger first!"
saveDict = torch.load(self.path + "/TrainState.pth")
assert (saveDict["epoch"] == epoch), "Epoch mismatch when loading train state."
        self.model.resume(self.path + "/Model.pth")
self.optimizer.load_state_dict(saveDict["optimizer"])
schedulerState = saveDict.get("lrScheduler", None)
if schedulerState:
self.lrScheduler.load_state_dict(schedulerState)
# Adjust hParam behavior of SummaryWriter to store results in a single folder
# Workaround from:
# https://github.com/pytorch/pytorch/issues/32651#issuecomment-643791116
class CustomSummaryWriter(SummaryWriter):
def add_hparams(self, hparam_dict, metric_dict):
# remove all lists from hParam dict since only int, float, str, bool and torch.Tensor are possible
for key, value in hparam_dict.items():
if type(value) is list:
valueStr = " ".join([str(elem) for elem in value])
hparam_dict[key] = valueStr
elif not type(value) in [int, float, str, bool, torch.Tensor]:
hparam_dict[key] = " "
torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
if type(hparam_dict) is not dict or type(metric_dict) is not dict:
raise TypeError('hparam_dict and metric_dict should be dictionary.')
exp, ssi, sei = hparams(hparam_dict, metric_dict)
logdir = self._get_file_writer().get_logdir()
with SummaryWriter(log_dir=logdir) as w_hp:
w_hp.file_writer.add_summary(exp)
w_hp.file_writer.add_summary(ssi)
w_hp.file_writer.add_summary(sei)
for k, v in metric_dict.items():
w_hp.add_scalar(k, v) |
the-stack_0_17380 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
import torch
import sys
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
DCNForMaskedLM,
AutoTokenizer,
BertTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPinyinIndexLanguageModeling,
HfArgumentParser,
LineByLineTextDataset,
PinyinShuffleLineByLineTextDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization. Leave None if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={
"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_data_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a text file)."}
)
eval_data_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_data_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
line_by_line: bool = field(
default=True,
metadata={
"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
mlm: bool = field(
default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
)
#do_pred: bool = field(
#default=False, metadata={"help": "do predict"}
#)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
block_size: int = field(
default=-1,
metadata={
"help": "Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def get_dataset(args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate=False, shuffle=True, test=False):
if test:
file_path = args.test_data_file
else:
file_path = args.eval_data_file if evaluate else args.train_data_file
if args.line_by_line:
return PinyinShuffleLineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, shuffle=shuffle)
else:
return TextDataset(
tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument."
)
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [
-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(
model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning(
"You are instantiating a new config instance from scratch.")
if model_args.tokenizer_name:
tokenizer = BertTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
tokenizer = BertTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name"
)
if model_args.model_name_or_path:
model = DCNForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
else:
logger.info("Training new model from scratch")
        model = DCNForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm "
"flag (masked language modeling)."
)
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# Get datasets
train_dataset = get_dataset(
data_args, tokenizer=tokenizer, shuffle=True) if training_args.do_train else None
eval_dataset = get_dataset(
data_args, tokenizer=tokenizer, evaluate=True, shuffle=False) if training_args.do_eval or training_args.do_predict else None
data_collator = DataCollatorForPinyinIndexLanguageModeling(
tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
data_args=data_args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
prediction_loss_only=True,
)
# Training
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=model_path)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["eval_loss"])
result = {"perplexity": perplexity}
output_eval_file = os.path.join(
training_args.output_dir, "eval_results_lm.txt")
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
results.update(result)
if training_args.do_predict:
trainer.evaluate_sighan()
return results
def result_predict(sentence_list, tokenizer, model, device, batch_size=50, max_seq_length=180):
eval_examples = []
for i in range(len(sentence_list)):
eval_examples.append(
InputExample(guid="1", text_a=sentence_list[i]))
eval_features = convert_examples_to_features(eval_examples,
max_seq_length,
tokenizer,
no_prefix=False)
sys.stdout.flush()
all_input_ids = torch.tensor([f.input_ids for f in eval_features],
dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in eval_features],
dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
dtype=torch.long)
eval_data = TensorDataset(all_input_ids, all_input_mask,
all_segment_ids)
sys.stdout.flush()
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data,
sampler=eval_sampler,
batch_size=batch_size)
result = []
result_prob = []
i = 0
for input_ids, input_mask, segment_ids in eval_dataloader:
i += 1
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
sys.stdout.flush()
with torch.no_grad():
#logits = self.model(input_ids, segment_ids, input_mask)
logits = model(input_ids, input_mask, segment_ids)[0]
logits = torch.nn.Softmax(dim=-1)(logits)
pred_probs, preds = logits.max(dim=-1)
preds = preds.detach().cpu().numpy()
pred_probs = pred_probs.detach().cpu().numpy()
result.extend(preds.tolist())
result_prob.extend(pred_probs.tolist())
labels = [tokenizer.convert_ids_to_tokens(r)[1:] for r in result]
return labels
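# Hypothetical call, assuming `tokenizer`, `model` and `device` are already set up:
#   corrected = result_predict([chinese_sentence], tokenizer, model, device, batch_size=8)
# Each entry of `corrected` is the list of predicted tokens for one input sentence
# (the leading [CLS] position is dropped by the `[1:]` slice above).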
def convert_examples_to_features(examples,
max_seq_length,
tokenizer,
no_prefix=False):
"""Loads a data file into a list of `InputBatch`s."""
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = example.text_a
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambigiously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
if no_prefix:
tokens = tokens_a
else:
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
sys.stdout.flush()
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
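        # The original pinyin-id lookup is not part of this snippet; as a stand-in we assume
        # all-zero pinyin ids so the padding and length asserts below stay consistent.
        pinyin_ids = [0] * len(input_ids)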
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
pinyin_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(pinyin_ids) == max_seq_length
#label_id = label_map[example.label]
#label_id = float(example.label)
label_id = example.label
"""
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
#logger.info("label: %s (id = %d)" % (example.label, label_id))
"""
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
pinyin_ids=pinyin_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, pinyin_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.pinyin_ids = pinyin_ids
self.label_id = label_id
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
|
the-stack_0_17381 | # IMPORT MODULES
from __future__ import division
import numpy as np
import h5py
from .Tools import timeToIntVec, validate_array_ndim
# DEFINE CLASSES FOR REPRESENTING SIMULATIONS
class GIFnet_Simulation(h5py.File):
"""Represents a GIFnet simulation.
Subclass of h5py.File.
Suggested usage:
sim = GIFnet_Simulation(
'example.hdf5', 'Example simulation',
T = 100., dt = 0.1, no_sweeps = 10,
no_ser_neurons = 10,
no_ser_examples = 3,
no_gaba_neurons = 5,
no_gaba_examples = 3,
propagation_delay = 2.
)
sim.set_connectivity_matrix(connectivity_matrix)
sim.init_ser_examples(**ser_examples)
sim.init_ser_spktrains()
for sweep_no in ser_spktrains.shape[0]:
for cell_no in ser_spktrains.shape[1]:
sim.ser_spktrains[sweep_no, cell_no, :] = ser_spktrains[sweep_no, cell_no, :]
sim.init_gaba_examples(
I = gaba_examples['I'],
V = gaba_examples['V'],
some_channel = gaba_examples['some_channel']
)
sim.init_gaba_spktrains(
spktrains = gaba_spktrains
)
"""
def __init__(
self,
fname,
name=None,
T=None,
dt=None,
no_sweeps=None,
no_ser_neurons=None,
no_ser_examples=None,
no_gaba_neurons=None,
no_gaba_examples=None,
propagation_delay=None,
**kwargs
):
"""Create a new GIFnet_Simulation object.
Inputs:
fname (str)
-- Name of file on disk in which to store
contents of GIFnet_Simulation. (Equivalent
to h5py.File's 'name' argument.)
name (str)
-- Meta-attribute with short description
of experiment.
T (float)
-- Duration of each sweep (ms).
dt (float)
-- Timestep (ms).
no_sweeps (int)
-- Number of sweeps in simulation.
no_ser_neurons, no_gaba_neurons (int)
-- Total number of ser/gaba neurons in population.
Spiketrains of this number of neurons are stored.
no_ser_examples, no_gaba_examples (int)
-- Number of neurons in population for which
full traces are stored.
propagation_delay (float)
-- Delay between GABA spike and start of IPSC
in 5HT neurons (ms).
kwargs
-- Keyword arguments to be passed to h5py.File
initializer.
"""
if kwargs.get('mode', 'a') not in ['r', 'a']:
raise ValueError('\'mode\' must be \'r\' or \'a\'')
super(GIFnet_Simulation, self).__init__(
name=fname, mode=kwargs.pop('mode', 'a'), **kwargs
)
if name is not None:
self.set_name(name)
if T is not None:
self.set_T(T)
if dt is not None:
self.set_dt(dt)
if no_sweeps is not None:
self.set_no_sweeps(no_sweeps)
if no_ser_neurons is not None:
self.set_no_ser_neurons(no_ser_neurons)
if no_ser_examples is not None:
self.set_no_ser_examples(no_ser_examples)
if no_gaba_neurons is not None:
self.set_no_gaba_neurons(no_gaba_neurons)
if no_gaba_examples is not None:
self.set_no_gaba_examples(no_gaba_examples)
if propagation_delay is not None:
self.set_propagation_delay(propagation_delay)
### Getters and setters for meta-attributes.
def get_name(self):
# 'name' is a short description, not a filename.
if 'name' not in self.attrs.keys():
raise KeyError('\'name\' not set.')
else:
return self.attrs['name']
def set_name(self, val):
# 'name' is a short description, not a filename.
self.attrs['name'] = val
def get_no_sweeps(self):
if 'no_sweeps' not in self.attrs.keys():
raise KeyError('\'no_sweeps\' not set.')
else:
return self.attrs['no_sweeps']
def set_no_sweeps(self, val):
self.attrs['no_sweeps'] = val
def get_T(self):
if 'T' not in self.attrs.keys():
raise KeyError('\'T\' not set.')
else:
return self.attrs['T']
def set_T(self, val):
self.attrs['T'] = val
def get_dt(self):
if 'dt' not in self.attrs.keys():
raise KeyError('\'dt\' not set.')
else:
return self.attrs['dt']
def set_dt(self, val):
self.attrs['dt'] = val
def get_no_timesteps(self):
if not ('dt' in self.attrs.keys() and 'T' in self.attrs.keys()):
raise KeyError('\'dt\' and \'T\' must both be set.')
else:
return int(self.get_T() / self.get_dt() + 0.5)
def get_no_ser_neurons(self):
if 'no_ser_neurons' not in self.attrs.keys():
raise KeyError('\'no_ser_neurons\' not set.')
else:
return self.attrs['no_ser_neurons']
def set_no_ser_neurons(self, val):
self.attrs['no_ser_neurons'] = val
def get_no_ser_examples(self):
if 'no_ser_examples' not in self.attrs.keys():
raise KeyError('\'no_ser_examples\' not set.')
else:
return self.attrs['no_ser_examples']
def set_no_ser_examples(self, val):
self.attrs['no_ser_examples'] = val
def get_no_gaba_neurons(self):
if 'no_gaba_neurons' not in self.attrs.keys():
raise KeyError('\'no_gaba_neurons\' not set.')
else:
return self.attrs['no_gaba_neurons']
def set_no_gaba_neurons(self, val):
self.attrs['no_gaba_neurons'] = val
def get_no_gaba_examples(self):
if 'no_gaba_examples' not in self.attrs.keys():
raise KeyError('\'no_gaba_examples\' not set.')
else:
return self.attrs['no_gaba_examples']
def set_no_gaba_examples(self, val):
self.attrs['no_gaba_examples'] = val
def get_propagation_delay(self):
if 'propagation_delay' not in self.attrs.keys():
raise KeyError('\'propagation_delay\' not set.')
else:
return self.attrs['propagation_delay']
def set_propagation_delay(self, val):
self.attrs['propagation_delay'] = val
### Getter and setter for connectivity matrix.
def get_connectivity_matrix(self):
if 'connectivity_matrix' not in self.keys():
raise AttributeError('connectivity_matrix not set.')
else:
return self['connectivity_matrix']
def set_connectivity_matrix(self, arr):
"""Create connectivity matrix for feedforward connections
Inputs:
arr (2D array)
-- 2D array with dimensionality
[no_ser_neurons, no_gaba_neurons] specifyinging
gaba->ser connections.
"""
self._validate_connectivity_matrix_shape(arr)
self.create_dataset(
'connectivity_matrix', data=arr, dtype=np.float32, compression=5
)
# Ensure attributes are up to date.
self.set_no_ser_neurons(arr.shape[0])
self.set_no_gaba_neurons(arr.shape[1])
def _validate_connectivity_matrix_shape(self, connectivity_matrix):
"""Ensure connectivity matrix shape matches existing attributes."""
validate_array_ndim('connectivity matrix', connectivity_matrix, 2)
for attr, axis in zip(['no_ser_neurons', 'no_gaba_neurons'], [0, 1]):
if (
hasattr(self.attrs, attr)
and self.attrs[attr] != np.shape(connectivity_matrix)[axis]
):
raise ValueError(
'Instance `no_ser_neurons`={nser} and `no_gaba_neurons`='
'{ngaba} imply connectivity matrix of size '
'({nser}, {ngaba}), got {cm_shape} instead.'.format(
nser=getattr(self.attrs, 'no_ser_neurons', 'any'),
ngaba=getattr(self.attrs, 'no_gaba_neurons', 'any'),
cm_shape=np.shape(connectivity_matrix),
)
)
### Properties and initializers for recorded signals.
@property
def ser_spktrains(self):
if 'ser' not in self.keys() or 'spktrains' not in self['ser'].keys():
raise AttributeError(
'ser_spktrains must be initialized via init_ser_spktrains '
'first.'
)
else:
return self['ser/spktrains']
def init_ser_spktrains(self, spktrains=None, spktimes=None):
"""Initialize ser spiketrains as an indicator tensor
Save spiketrains as an indicator tensor, starting
from a tensor of spiketrains or list of lists.
Note that both types of input are equivalent, but
at most one should be passed at a time.
If neither spktrains nor spktimes is passed in, an empty
spktrain array is simply created with the correct shape.
ser_pktrains can be written and read via instance
ser_spktrains attribute.
Inputs:
spktrains (3D array, or None)
-- 3D indicator tensor (1 when a spike
occurs, 0 otherwise) with dimensionality
[sweeps, cells, timesteps].
spktimes (nested list of depth == 3, or None)
-- Nested list laid out according to
[sweep][cell][spike_number] with times of
each spike for each cell on each sweep.
"""
if spktimes is not None and spktrains is not None:
raise ValueError(
'Only spktimes or spktrains should be provided, ' 'not both.'
)
sergroup = self.require_group('ser')
sspks = sergroup.create_dataset(
'spktrains',
shape=(
self.get_no_sweeps(),
self.get_no_ser_neurons(),
self.get_no_timesteps(),
),
dtype=np.int8,
compression=5,
)
# Case that spktrains have been provided directly.
if spktrains is not None:
sspks[:, :, :] = spktrains
# Case that nested list of spktimes has been provided.
elif spktimes is not None:
for i in range(len(spktimes)):
for j in range(len(spktimes[0])):
sspks[i, j, :] = timeToIntVec(
spktimes[i][j], self.get_T(), self.get_dt()
)
@property
def ser_examples(self):
if 'ser' not in self.keys() or 'examples' not in self['ser'].keys():
raise AttributeError(
'ser_examples must be initialized via init_ser_examples '
'first.'
)
else:
return self['ser/examples']
def init_ser_examples(
self, I=None, V=None, feedforward_input=None, **kwargs
):
"""Initialize ser example traces
Any inputs set to None will be initialized as empty
arrays.
Inputs:
I (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
input current channel.
V (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
recorded voltage channel.
feedforward_input (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
recorded gaba->ser feedforward_input.
kwargs (3D array or None)
-- Identically-shaped 3D arrays for
any other channels to initialize.
"""
sergroup = self.require_group('ser')
serex = sergroup.require_group('examples')
pairs = kwargs.copy()
pairs.update({'I': I, 'V': V, 'feedforward_input': feedforward_input})
for key, val in pairs.iteritems():
# Initialize with data, if available.
if val is not None:
serex.create_dataset(
key,
data=val,
shape=(
self.get_no_sweeps(),
self.get_no_ser_examples(),
self.get_no_timesteps(),
),
dtype=np.float32,
compression=5,
)
# Initialize empty if no data available.
else:
serex.create_dataset(
key,
fillvalue=0,
shape=(
self.get_no_sweeps(),
self.get_no_ser_examples(),
self.get_no_timesteps(),
),
dtype=np.float32,
compression=5,
)
@property
def gaba_examples(self):
if 'gaba' not in self.keys() or 'examples' not in self['gaba'].keys():
raise AttributeError(
'gaba_examples must be initialized via init_gaba_examples '
'first.'
)
else:
return self['gaba/examples']
def init_gaba_examples(self, I=None, V=None, **kwargs):
"""Initialize gaba example traces
Any inputs set to None will be initialized as empty
arrays.
Inputs:
I (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
input current channel.
V (3D array or None)
-- 3D array with dimensionality
[sweeps, cells, timesteps] to initialize
recorded voltage channel.
kwargs (3D array or None)
-- Identically-shaped 3D arrays for
any other channels to initialize.
"""
gabagroup = self.require_group('gaba')
gabaex = gabagroup.require_group('examples')
pairs = kwargs.copy()
pairs.update({'I': I, 'V': V})
for key, val in pairs.iteritems():
# Initialize with data, if available.
if val is not None:
gabaex.create_dataset(
key,
data=val,
shape=(
self.get_no_sweeps(),
self.get_no_gaba_examples(),
self.get_no_timesteps(),
),
dtype=np.float32,
compression=5,
)
# Initialize empty if no data available.
else:
gabaex.create_dataset(
key,
fillvalue=0,
shape=(
self.get_no_sweeps(),
self.get_no_gaba_examples(),
self.get_no_timesteps(),
),
dtype=np.float32,
compression=5,
)
@property
def gaba_spktrains(self):
if 'gaba' not in self.keys() or 'spktrains' not in self['gaba'].keys():
raise AttributeError(
'gaba_spktrains must be initialized via init_gaba_spktrains '
'first.'
)
else:
return self['gaba/spktrains']
def init_gaba_spktrains(self, spktrains=None, spktimes=None):
"""Initialize gaba spiketrains as an indicator tensor
Save spiketrains as an indicator tensor, starting
from a tensor of spiketrains or list of lists.
Note that both types of input are equivalent, but
only one should be passed at a time.
If neither spktrains nor spktimes is passed in, an empty
spktrain array is simply created with the correct shape.
gaba_pktrains can be written and read via instance
gaba_spktrains attribute.
Inputs:
spktrains (3D array, or None)
-- 3D indicator tensor (1 when a spike
occurs, 0 otherwise) with dimensionality
[sweeps, cells, timesteps].
spktimes (nested list of depth == 3, or None)
-- Nested list laid out according to
[sweep][cell][spike_number] with times of
each spike for each cell on each sweep.
"""
if spktimes is not None and spktrains is not None:
raise ValueError(
'Only spktimes or spktrains should be provided, ' 'not both.'
)
gabagroup = self.require_group('gaba')
gspks = gabagroup.create_dataset(
'spktrains',
shape=(
self.get_no_sweeps(),
self.get_no_gaba_neurons(),
self.get_no_timesteps(),
),
dtype=np.int8,
compression=5,
)
# Case that spktrains have been provided directly.
if spktrains is not None:
gspks[:, :, :] = spktrains
# Case that nested list of spktimes has been provided.
elif spktimes is not None:
for i in range(len(spktimes)):
for j in range(len(spktimes[0])):
gspks[i, j, :] = timeToIntVec(
spktimes[i][j], self.get_T(), self.get_dt()
)
### Data processing and support arrays.
def get_ser_spktimes(self):
"""Get nested list of 5HT neuron spktimes.
Nested list should be indexed according
to [sweep_no][cell_no][spk_no].
"""
spktimes = []
for sweep_no in range(self.get_no_sweeps()):
spktimes_singlesweep = []
for cell_no in range(self.get_no_ser_neurons()):
spktimes_singlesweep.append(
np.where(self.ser_spktrains[sweep_no, cell_no, :] > 0.5)[0]
* self.get_dt()
)
spktimes.append(spktimes_singlesweep)
return spktimes
def get_gaba_spktimes(self):
"""Get nested list of GABA neuron spktimes.
Nested list should be indexed according
to [sweep_no][cell_no][spk_no].
"""
spktimes = []
for sweep_no in range(self.get_no_sweeps()):
spktimes_singlesweep = []
for cell_no in range(self.get_no_gaba_neurons()):
spktimes_singlesweep.append(
np.where(self.gaba_spktrains[sweep_no, cell_no, :] > 0.5)[
0
]
* self.get_dt()
)
spktimes.append(spktimes_singlesweep)
return spktimes
def get_t_vec(self):
"""Return a time support vector (ms).
"""
t_vec = np.arange(0, self.get_T(), self.get_dt())
# Shape checks.
if 'ser' in self.keys() and 'spktrains' in self['ser'].keys():
assert self.ser_spktrains.shape[2] == len(
t_vec
), 'Bad t_vec length ({})'.format(len(t_vec))
if 'gaba' in self.keys() and 'spktrains' in self['gaba'].keys():
assert self.gaba_spktrains.shape[2] == len(
t_vec
), 'Bad t_vec length ({})'.format(len(t_vec))
return t_vec
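    # e.g. T=100. ms and dt=0.1 ms gives a 1000-sample support vector: 0.0, 0.1, ..., 99.9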
def get_ser_examples_supp(self):
"""Get support array for ser_examples.
"""
return np.broadcast_to(
self.get_t_vec(),
self.ser_examples[self.ser_examples.keys()[0]].shape,
)
def get_gaba_examples_supp(self):
"""Get support array for gaba_examples.
"""
return np.broadcast_to(
self.get_t_vec(),
self.gaba_examples[self.gaba_examples.keys()[0]].shape,
)
|
the-stack_0_17382 | # Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import re
import threading
from collections import defaultdict
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common import conf
from azurelinuxagent.common.event import EVENTS_DIRECTORY, TELEMETRY_LOG_EVENT_ID, \
TELEMETRY_LOG_PROVIDER_ID, add_event, WALAEventOperation, add_log_event, get_event_logger
from azurelinuxagent.common.exception import InvalidExtensionEventError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.telemetryevent import TelemetryEventList, TelemetryEvent, TelemetryEventParam, \
GuestAgentGenericLogsSchema
from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
def get_extension_telemetry_handler(protocol_util):
return ExtensionTelemetryHandler(protocol_util)
class ExtensionEventSchema(object): # pylint: disable=R0903
"""
Class for defining the schema for Extension Events.
"""
Version = "Version"
Timestamp = "Timestamp"
TaskName = "TaskName"
EventLevel = "EventLevel"
Message = "Message"
EventPid = "EventPid"
EventTid = "EventTid"
OperationId = "OperationId"
class ProcessExtensionTelemetry(PeriodicOperation):
"""
Periodic operation for collecting and sending extension telemetry events to Wireserver.
"""
_EXTENSION_EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=5)
_EXTENSION_EVENT_FILE_NAME_REGEX = re.compile(r"^(\d+)\.json$", re.IGNORECASE)
# Limits
_MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD = 300
_EXTENSION_EVENT_FILE_MAX_SIZE = 4 * 1024 * 1024 # 4 MB = 4 * 1,048,576 Bytes
_EXTENSION_EVENT_MAX_SIZE = 1024 * 6 # 6Kb or 6144 characters. Limit for the whole event. Prevent oversized events.
_EXTENSION_EVENT_MAX_MSG_LEN = 1024 * 3 # 3Kb or 3072 chars.
_EXTENSION_EVENT_REQUIRED_FIELDS = [attr.lower() for attr in dir(ExtensionEventSchema) if
not callable(getattr(ExtensionEventSchema, attr)) and not attr.startswith("__")]
def __init__(self, protocol_util):
super(ProcessExtensionTelemetry, self).__init__(
name="collect and send extension events",
operation=self._collect_and_send_events,
period=ProcessExtensionTelemetry._EXTENSION_EVENT_COLLECTION_PERIOD)
self._protocol = protocol_util.get_protocol()
def _collect_and_send_events(self):
event_list = self._collect_extension_events()
if len(event_list.events) > 0: # pylint: disable=C1801
self._protocol.report_event(event_list)
def _collect_extension_events(self):
events_list = TelemetryEventList()
extension_handler_with_event_dirs = []
try:
extension_handler_with_event_dirs = self._get_extension_events_dir_with_handler_name(conf.get_ext_log_dir())
if len(extension_handler_with_event_dirs) == 0: # pylint: disable=C1801
                logger.verbose("No extension events directories exist")
return events_list
for extension_handler_with_event_dir in extension_handler_with_event_dirs:
handler_name = extension_handler_with_event_dir[0]
handler_event_dir_path = extension_handler_with_event_dir[1]
self._capture_extension_events(handler_name, handler_event_dir_path, events_list)
except Exception as e: # pylint: disable=C0103
msg = "Unknown error occurred when trying to collect extension events. Error: {0}".format(ustr(e))
add_event(op=WALAEventOperation.ExtensionTelemetryEventProcessing, message=msg, is_success=False)
finally:
            # Always ensure that the events directories are emptied each run,
            # even if we run into an error and don't process them this run.
self._ensure_all_events_directories_empty(extension_handler_with_event_dirs)
return events_list
@staticmethod
def _get_extension_events_dir_with_handler_name(extension_log_dir):
"""
Get the full path to events directory for all extension handlers that have one
:param extension_log_dir: Base log directory for all extensions
:return: A list of full paths of existing events directory for all handlers
"""
extension_handler_with_event_dirs = []
for ext_handler_name in os.listdir(extension_log_dir):
            # Check if it's an extension handler directory
if not os.path.isdir(os.path.join(extension_log_dir, ext_handler_name)) \
or re.match(HANDLER_NAME_PATTERN, ext_handler_name) is None:
continue
# Check if EVENTS_DIRECTORY directory exists
extension_event_dir = os.path.join(extension_log_dir, ext_handler_name, EVENTS_DIRECTORY)
if os.path.exists(extension_event_dir):
extension_handler_with_event_dirs.append((ext_handler_name, extension_event_dir))
return extension_handler_with_event_dirs
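    # Illustration (not part of the original module): given an extension log layout such as
    #   /var/log/azure/Microsoft.CPlat.Core.RunCommandLinux/events/
    #   /var/log/azure/Microsoft.OSTCExtensions.VMAccessForLinux/        (no events/ dir)
    # the helper above would return
    #   [("Microsoft.CPlat.Core.RunCommandLinux",
    #     "/var/log/azure/Microsoft.CPlat.Core.RunCommandLinux/events")]
    # The handler names shown here are examples only.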
def _capture_extension_events(self, handler_name, handler_event_dir_path, events_list):
"""
Capture Extension events and add them to the events_list
:param handler_name: Complete Handler Name. Eg: Microsoft.CPlat.Core.RunCommandLinux
:param handler_event_dir_path: Full path. Eg: '/var/log/azure/Microsoft.CPlat.Core.RunCommandLinux/events'
:param events_list: List of captured extension events
"""
convert_to_mb = lambda x: (1.0 * x)/(1000 * 1000)
# Filter out the files that do not follow the pre-defined EXTENSION_EVENT_FILE_NAME_REGEX
event_files = [event_file for event_file in os.listdir(handler_event_dir_path) if
re.match(self._EXTENSION_EVENT_FILE_NAME_REGEX, event_file) is not None]
# Pick the latest files first, we'll discard older events if len(events) > MAX_EVENT_COUNT
event_files.sort(reverse=True)
captured_extension_events_count = 0
dropped_events_with_error_count = defaultdict(int)
for event_file in event_files:
event_file_path = os.path.join(handler_event_dir_path, event_file)
try:
logger.verbose("Processing event file: {0}", event_file_path)
# We only support _EXTENSION_EVENT_FILE_MAX_SIZE=4Mb max file size
event_file_size = os.stat(event_file_path).st_size
if event_file_size > self._EXTENSION_EVENT_FILE_MAX_SIZE:
msg = "Skipping file: {0} as its size is {1:.2f} Mb > Max size allowed {2:.1f} Mb".format(
event_file_path, convert_to_mb(event_file_size),
convert_to_mb(self._EXTENSION_EVENT_FILE_MAX_SIZE))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
continue
# We support multiple events in a file, read the file and parse events.
parsed_events = self._parse_event_file_and_capture_events(handler_name, event_file_path,
captured_extension_events_count,
dropped_events_with_error_count)
events_list.events.extend(parsed_events)
captured_extension_events_count += len(parsed_events)
# We only allow MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD=300 maximum events per period per handler
if captured_extension_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
msg = "Reached max count for the extension: {0}; Max Limit: {1}. Skipping the rest.".format(
handler_name, self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD)
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
break
except Exception as e: # pylint: disable=C0103
                msg = "Failed to process event file {0}: {1}".format(event_file, ustr(e))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
finally:
os.remove(event_file_path)
if dropped_events_with_error_count is not None and len(dropped_events_with_error_count) > 0: # pylint: disable=C1801
msg = "Dropped events for Extension: {0}; Details:\n\t{1}".format(handler_name, '\n\t'.join(
["Reason: {0}; Dropped Count: {1}".format(k, v) for k, v in dropped_events_with_error_count.items()]))
logger.warn(msg)
add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
if captured_extension_events_count > 0:
logger.info("Collected {0} events for extension: {1}".format(captured_extension_events_count, handler_name))
@staticmethod
def _ensure_all_events_directories_empty(extension_events_directories):
if len(extension_events_directories) == 0: # pylint: disable=C1801
return
for extension_handler_with_event_dir in extension_events_directories:
event_dir_path = extension_handler_with_event_dir[1]
if not os.path.exists(event_dir_path):
return
err = None
# Delete any residue files in the events directory
for residue_file in os.listdir(event_dir_path):
try:
os.remove(os.path.join(event_dir_path, residue_file))
except Exception as e: # pylint: disable=C0103
# Only log the first error once per handler per run if unable to clean off residue files
err = ustr(e) if err is None else err
if err is not None:
logger.error("Failed to completely clear the {0} directory. Exception: {1}", event_dir_path, err)
def _parse_event_file_and_capture_events(self, handler_name, event_file_path, captured_events_count,
dropped_events_with_error_count):
events_list = []
event_file_time = datetime.datetime.fromtimestamp(os.path.getmtime(event_file_path))
# Read event file and decode it properly
with open(event_file_path, "rb") as fd: # pylint: disable=C0103
event_data = fd.read().decode("utf-8")
# Parse the string and get the list of events
events = json.loads(event_data)
# We allow multiple events in a file but there can be an instance where the file only has a single
# JSON event and not a list. Handling that condition too
if not isinstance(events, list):
events = [events]
for event in events:
try:
events_list.append(self._parse_telemetry_event(handler_name, event, event_file_time))
captured_events_count += 1
except InvalidExtensionEventError as e: # pylint: disable=C0103
# These are the errors thrown if there's an error parsing the event. We want to report these back to the
# extension publishers so that they are aware of the issues.
# The error messages are all static messages, we will use this to create a dict and emit an event at the
# end of each run to notify if there were any errors parsing events for the extension
dropped_events_with_error_count[ustr(e)] += 1
except Exception as e: # pylint: disable=C0103
logger.warn("Unable to parse and transmit event, error: {0}".format(e))
if captured_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
break
return events_list
def _parse_telemetry_event(self, handler_name, extension_unparsed_event, event_file_time):
"""
Parse the Json event file and convert it to TelemetryEvent object with the required data.
:return: Complete TelemetryEvent with all required fields filled up properly. Raises if event breaches contract.
"""
extension_event = self._parse_event_and_ensure_it_is_valid(extension_unparsed_event)
# Create a telemetry event, add all common parameters to the event
# and then overwrite all the common params with extension events params if same
event = TelemetryEvent(TELEMETRY_LOG_EVENT_ID, TELEMETRY_LOG_PROVIDER_ID)
event.file_type = "json"
self.add_common_params_to_extension_event(event, event_file_time)
replace_or_add_params = {
GuestAgentGenericLogsSchema.EventName: "{0}-{1}".format(handler_name, extension_event[
ExtensionEventSchema.Version.lower()]),
GuestAgentGenericLogsSchema.CapabilityUsed: extension_event[ExtensionEventSchema.EventLevel.lower()],
GuestAgentGenericLogsSchema.TaskName: extension_event[ExtensionEventSchema.TaskName.lower()],
GuestAgentGenericLogsSchema.Context1: extension_event[ExtensionEventSchema.Message.lower()],
GuestAgentGenericLogsSchema.Context2: extension_event[ExtensionEventSchema.Timestamp.lower()],
GuestAgentGenericLogsSchema.Context3: extension_event[ExtensionEventSchema.OperationId.lower()],
GuestAgentGenericLogsSchema.EventPid: extension_event[ExtensionEventSchema.EventPid.lower()],
GuestAgentGenericLogsSchema.EventTid: extension_event[ExtensionEventSchema.EventTid.lower()]
}
self._replace_or_add_param_in_event(event, replace_or_add_params)
return event
def _parse_event_and_ensure_it_is_valid(self, extension_event):
"""
Parse the Json event from file. Raise InvalidExtensionEventError if the event breaches pre-set contract.
:param extension_event: The json event from file
:return: Verified Json event that qualifies the contract.
"""
clean_string = lambda x: x.strip() if x is not None else x
event_size = 0
key_err_msg = "{0}: {1} not found"
# Convert the dict to all lower keys to avoid schema confusion.
# Only pick the params that we care about and skip the rest.
event = dict((k.lower(), clean_string(v)) for k, v in extension_event.items() if
k.lower() in self._EXTENSION_EVENT_REQUIRED_FIELDS)
# Trim message and only pick the first 3k chars
message_key = ExtensionEventSchema.Message.lower()
if message_key in event:
event[message_key] = event[message_key][:self._EXTENSION_EVENT_MAX_MSG_LEN]
else:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, ExtensionEventSchema.Message))
if event[message_key] is None or len(event[message_key]) == 0: # pylint: disable=C1801
raise InvalidExtensionEventError(
"{0}: {1} should not be empty".format(InvalidExtensionEventError.EmptyMessageError,
ExtensionEventSchema.Message))
for required_key in self._EXTENSION_EVENT_REQUIRED_FIELDS:
            # If any required key is missing from the event, raise
            if required_key not in event:
raise InvalidExtensionEventError(
key_err_msg.format(InvalidExtensionEventError.MissingKeyError, required_key))
# If the event_size > _EXTENSION_EVENT_MAX_SIZE=6k, then raise
if event_size > self._EXTENSION_EVENT_MAX_SIZE:
raise InvalidExtensionEventError(
"{0}: max event size allowed: {1}".format(InvalidExtensionEventError.OversizeEventError,
self._EXTENSION_EVENT_MAX_SIZE))
event_size += len(event[required_key])
return event
@staticmethod
def _replace_or_add_param_in_event(event, replace_or_add_params):
for param in event.parameters:
if param.name in replace_or_add_params:
param.value = replace_or_add_params.pop(param.name)
if not replace_or_add_params:
# All values replaced, return
return
# Add the remaining params to the event
for param_name in replace_or_add_params:
event.parameters.append(TelemetryEventParam(param_name, replace_or_add_params[param_name]))
@staticmethod
def add_common_params_to_extension_event(event, event_time):
reporter = get_event_logger()
reporter.add_common_event_parameters(event, event_time)
class ExtensionTelemetryHandler(ThreadHandlerInterface):
"""
    This Handler takes care of fetching the Extension Telemetry events from the {extension_events_dir} and sends them to
Kusto for advanced debuggability.
"""
_THREAD_NAME = "ExtensionTelemetryHandler"
def __init__(self, protocol_util):
self.protocol_util = protocol_util
self.should_run = True
self.thread = None
@staticmethod
def get_thread_name():
return ExtensionTelemetryHandler._THREAD_NAME
def run(self):
logger.info("Start Extension Telemetry service.")
self.start()
def is_alive(self):
return self.thread is not None and self.thread.is_alive()
def start(self):
self.thread = threading.Thread(target=self.daemon)
self.thread.setDaemon(True)
self.thread.setName(ExtensionTelemetryHandler.get_thread_name())
self.thread.start()
def stop(self):
"""
Stop server communication and join the thread to main thread.
"""
self.should_run = False
if self.is_alive():
self.thread.join()
def stopped(self):
return not self.should_run
def daemon(self):
op = ProcessExtensionTelemetry(self.protocol_util) # pylint: disable=C0103
logger.info("Successfully started the {0} thread".format(self.get_thread_name()))
while not self.stopped():
try:
op.run()
except Exception as e: # pylint: disable=C0103
logger.warn(
"An error occurred in the Telemetry Extension thread main loop; will skip the current iteration.\n{0}",
ustr(e))
finally:
PeriodicOperation.sleep_until_next_operation([op]) |
the-stack_0_17384 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Optional, Sequence, Tuple, Union
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import datasets
from google.cloud.aiplatform.datasets import _datasources
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import schema
from google.cloud.aiplatform import utils
class TimeSeriesDataset(datasets._Dataset):
"""Managed time series dataset resource for Vertex AI"""
_supported_metadata_schema_uris: Optional[Tuple[str]] = (
schema.dataset.metadata.time_series,
)
@classmethod
def create(
cls,
display_name: str,
gcs_source: Optional[Union[str, Sequence[str]]] = None,
bq_source: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
) -> "TimeSeriesDataset":
"""Creates a new time series dataset.
Args:
display_name (str):
Required. The user-defined name of the Dataset.
The name can be up to 128 characters long and can be consist
of any UTF-8 characters.
gcs_source (Union[str, Sequence[str]]):
Google Cloud Storage URI(-s) to the
input file(s). May contain wildcards. For more
information on wildcards, see
https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
examples:
str: "gs://bucket/file.csv"
Sequence[str]: ["gs://bucket/file1.csv", "gs://bucket/file2.csv"]
bq_source (str):
BigQuery URI to the input table.
example:
"bq://project.dataset.table_name"
project (str):
                Project to upload this dataset to. Overrides project set in
aiplatform.init.
location (str):
                Location to upload this dataset to. Overrides location set in
aiplatform.init.
credentials (auth_credentials.Credentials):
                Custom credentials to use to upload this dataset. Overrides
credentials set in aiplatform.init.
request_metadata (Sequence[Tuple[str, str]]):
Strings which should be sent along with the request as metadata.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the dataset. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Dataset and all sub-resources of this Dataset will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
time_series_dataset (TimeSeriesDataset):
Instantiated representation of the managed time series dataset resource.
"""
utils.validate_display_name(display_name)
api_client = cls._instantiate_client(location=location, credentials=credentials)
metadata_schema_uri = schema.dataset.metadata.time_series
datasource = _datasources.create_datasource(
metadata_schema_uri=metadata_schema_uri,
gcs_source=gcs_source,
bq_source=bq_source,
)
return cls._create_and_import(
api_client=api_client,
parent=initializer.global_config.common_location_path(
project=project, location=location
),
display_name=display_name,
metadata_schema_uri=metadata_schema_uri,
datasource=datasource,
project=project or initializer.global_config.project,
location=location or initializer.global_config.location,
credentials=credentials or initializer.global_config.credentials,
request_metadata=request_metadata,
encryption_spec=initializer.global_config.get_encryption_spec(
encryption_spec_key_name=encryption_spec_key_name
),
sync=sync,
)
def import_data(self):
raise NotImplementedError(
f"{self.__class__.__name__} class does not support 'import_data'"
)
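# Illustration (not part of the original module): a minimal usage sketch, assuming the class
# is re-exported as google.cloud.aiplatform.TimeSeriesDataset (the docstring's references to
# aiplatform.init suggest this). Project, location, display name and table are placeholders.
#
#     from google.cloud import aiplatform
#
#     aiplatform.init(project="my-project", location="us-central1")
#     ds = aiplatform.TimeSeriesDataset.create(
#         display_name="my-time-series-dataset",
#         bq_source="bq://my-project.my_dataset.my_table",
#     )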
|
the-stack_0_17391 | import ray
import ray._private.services as services
import ray.worker
import ray._private.profiling as profiling
import ray._private.utils as utils
from ray import ray_constants
from ray.state import GlobalState
from ray._raylet import GcsClientOptions
__all__ = ["free", "global_gc"]
MAX_MESSAGE_LENGTH = ray._config.max_grpc_message_size()
def global_gc():
"""Trigger gc.collect() on all workers in the cluster."""
worker = ray.worker.global_worker
worker.core_worker.global_gc()
def memory_summary(
address=None,
redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
group_by="NODE_ADDRESS",
sort_by="OBJECT_SIZE",
units="B",
line_wrap=True,
stats_only=False,
num_entries=None,
):
from ray.dashboard.memory_utils import memory_summary
address = services.canonicalize_bootstrap_address(address)
state = GlobalState()
options = GcsClientOptions.from_gcs_address(address)
state._initialize_global_state(options)
if stats_only:
return get_store_stats(state)
return memory_summary(
state, group_by, sort_by, line_wrap, units, num_entries
) + get_store_stats(state)
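# Illustration (not part of the original module): a hedged sketch of calling memory_summary()
# from within a running Ray driver; with address=None it resolves the local cluster's
# bootstrap address.
#
#     import ray
#     ray.init()
#     print(memory_summary(stats_only=True))   # aggregate object store stats only
#     print(memory_summary())                  # full breakdown grouped by node address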
def get_store_stats(state, node_manager_address=None, node_manager_port=None):
"""Returns a formatted string describing memory usage in the cluster."""
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
# We can ask any Raylet for the global memory info, that Raylet internally
# asks all nodes in the cluster for memory stats.
if node_manager_address is None or node_manager_port is None:
# We should ask for a raylet that is alive.
raylet = None
for node in state.node_table():
if node["Alive"]:
raylet = node
break
assert raylet is not None, "Every raylet is dead"
raylet_address = "{}:{}".format(
raylet["NodeManagerAddress"], raylet["NodeManagerPort"]
)
else:
raylet_address = "{}:{}".format(node_manager_address, node_manager_port)
channel = utils.init_grpc_channel(
raylet_address,
options=[
("grpc.max_send_message_length", MAX_MESSAGE_LENGTH),
("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
],
)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
reply = stub.FormatGlobalMemoryInfo(
node_manager_pb2.FormatGlobalMemoryInfoRequest(include_memory_info=False),
timeout=30.0,
)
return store_stats_summary(reply)
def node_stats(
node_manager_address=None, node_manager_port=None, include_memory_info=True
):
"""Returns NodeStats object describing memory usage in the cluster."""
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
# We can ask any Raylet for the global memory info.
assert node_manager_address is not None and node_manager_port is not None
raylet_address = "{}:{}".format(node_manager_address, node_manager_port)
channel = utils.init_grpc_channel(
raylet_address,
options=[
("grpc.max_send_message_length", MAX_MESSAGE_LENGTH),
("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
],
)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
node_stats = stub.GetNodeStats(
node_manager_pb2.GetNodeStatsRequest(include_memory_info=include_memory_info),
timeout=30.0,
)
return node_stats
def store_stats_summary(reply):
"""Returns formatted string describing object store stats in all nodes."""
store_summary = "--- Aggregate object store stats across all nodes ---\n"
# TODO(ekl) it would be nice if we could provide a full memory usage
# breakdown by type (e.g., pinned by worker, primary, etc.)
store_summary += (
"Plasma memory usage {} MiB, {} objects, {}% full, {}% "
"needed\n".format(
int(reply.store_stats.object_store_bytes_used / (1024 * 1024)),
reply.store_stats.num_local_objects,
round(
100
* reply.store_stats.object_store_bytes_used
/ reply.store_stats.object_store_bytes_avail,
2,
),
round(
100
* reply.store_stats.object_store_bytes_primary_copy
/ reply.store_stats.object_store_bytes_avail,
2,
),
)
)
if reply.store_stats.object_store_bytes_fallback > 0:
store_summary += "Plasma filesystem mmap usage: {} MiB\n".format(
int(reply.store_stats.object_store_bytes_fallback / (1024 * 1024))
)
if reply.store_stats.spill_time_total_s > 0:
store_summary += (
"Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n".format(
int(reply.store_stats.spilled_bytes_total / (1024 * 1024)),
reply.store_stats.spilled_objects_total,
int(
reply.store_stats.spilled_bytes_total
/ (1024 * 1024)
/ reply.store_stats.spill_time_total_s
),
)
)
if reply.store_stats.restore_time_total_s > 0:
store_summary += (
"Restored {} MiB, {} objects, avg read throughput {} MiB/s\n".format(
int(reply.store_stats.restored_bytes_total / (1024 * 1024)),
reply.store_stats.restored_objects_total,
int(
reply.store_stats.restored_bytes_total
/ (1024 * 1024)
/ reply.store_stats.restore_time_total_s
),
)
)
if reply.store_stats.consumed_bytes > 0:
store_summary += "Objects consumed by Ray tasks: {} MiB.\n".format(
int(reply.store_stats.consumed_bytes / (1024 * 1024))
)
if reply.store_stats.object_pulls_queued:
store_summary += "Object fetches queued, waiting for available memory."
return store_summary
def free(object_refs, local_only=False):
"""Free a list of IDs from the in-process and plasma object stores.
This function is a low-level API which should be used in restricted
scenarios.
    If local_only is false, the request will be sent to all object stores.
This method will not return any value to indicate whether the deletion is
successful or not. This function is an instruction to the object store. If
some of the objects are in use, the object stores will delete them later
when the ref count is down to 0.
Examples:
>>> x_id = f.remote()
>>> ray.get(x_id) # wait for x to be created first
>>> free([x_id]) # unpin & delete x globally
Args:
object_refs (List[ObjectRef]): List of object refs to delete.
local_only (bool): Whether only deleting the list of objects in local
object store or all object stores.
"""
worker = ray.worker.global_worker
if isinstance(object_refs, ray.ObjectRef):
object_refs = [object_refs]
if not isinstance(object_refs, list):
raise TypeError(
"free() expects a list of ObjectRef, got {}".format(type(object_refs))
)
# Make sure that the values are object refs.
for object_ref in object_refs:
if not isinstance(object_ref, ray.ObjectRef):
raise TypeError(
"Attempting to call `free` on the value {}, "
"which is not an ray.ObjectRef.".format(object_ref)
)
worker.check_connected()
with profiling.profile("ray.free"):
if len(object_refs) == 0:
return
worker.core_worker.free_objects(object_refs, local_only)
|
the-stack_0_17394 | """Annotate vibrations with a network."""
import logging
import numpy as np
import defopt
import os
import dss.utils, dss.data, dss.models, dss.event_utils, dss.predict
import scipy.signal as ss
import h5py
from glob import glob
from typing import List
import flammkuchen
# move to cli module
def deepss(data_name: str, save_name: str, model_save_name: str, data_key: str = 'samples',
nb_channels: int = 16,
event_thres: float = 0.5, event_tol: float = 0.01,
           segment_thres: float = 0.5):
    """Annotate vibrations in a recording with a trained network and save the predicted events and segments.
    Args:
        data_name (str): Path to the HDF5 file with the recording to annotate.
        save_name (str): Path of the output file the annotation results are saved to (via flammkuchen).
        model_save_name (str): Stem of the saved model and parameter files to load for prediction.
        data_key (str): Name of the dataset in the data file that holds the samples. Defaults to 'samples'.
        nb_channels (int): Number of channels to take from data file. Defaults to 16.
        event_thres (float): Threshold for detecting events. Defaults to 0.5.
        event_tol (float): Tolerance for event timing, in seconds. Defaults to 0.01 seconds (10ms).
        segment_thres (float): Threshold for detecting segments. Defaults to 0.5.
    Raises:
        ValueError: if data_name or save_name are of unknown type (allowed: wav, h5, zarr, npy/npz)
    """
# load model
logging.info(f'loading parameters for {model_save_name}')
params = dss.utils.load_params(model_save_name)
logging.info(f'loading data for {data_name}')
with h5py.File(data_name, 'r') as f:
x = f[data_key][..., :nb_channels]
samplerate = f.attrs['rate']
# channel_names = f.attrs['analog_chans_in']
logging.info(f' filtering')
sos_bp = ss.butter(5, [50, 1000], 'bandpass', output='sos', fs=samplerate)
x = ss.sosfiltfilt(sos_bp, x, axis=0).astype(np.float16)
logging.info(f' annotating {x.shape[0]/samplerate:1.2f} seconds')
events, segments, class_probabilities = dss.predict.predict(x, model_save_name, params)
logging.info(f' saving results to "{save_name}".')
os.makedirs(os.path.dirname(save_name), exist_ok=True)
event_names = [k for k in events.keys() if k != 'samplerate_Hz']
segment_names = [k for k in segments.keys() if k != 'samplerate_Hz' and k != 'noise']
d = {'class_probabilities': class_probabilities,
'segment_names': segment_names,
'segment_probabilities': [segments[segment_name]['probabilities'] for segment_name in segment_names],
'segment_labels': [segments[segment_name]['samples'] for segment_name in segment_names],
'event_names': event_names,
'event_probabilities': [events[event_name]['probabilities'] for event_name in event_names ],
'event_indices': [events[event_name]['seconds'] * events['samplerate_Hz'] for event_name in event_names ],
'samplerate_Hz': params['samplerate_y_Hz'],
}
flammkuchen.save(save_name, d)
from snakemake.shell import shell
logging.basicConfig(level=logging.INFO)
params = snakemake.params[0][snakemake.rule]
for out in snakemake.output:
deepss(snakemake.input[0], save_name=out, model_save_name=params['modelname'])
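# Illustration (not part of the original script): outside of snakemake, deepss() can be called
# directly; the paths below are placeholders.
#
#     deepss('recordings/session01.h5',
#            save_name='annotations/session01_dss.h5',
#            model_save_name='models/20200101_0000')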
|
the-stack_0_17395 | # Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from PyQt5.QtCore import QTimer
from UM.Application import Application
from UM.Scene.SceneNode import SceneNode
from UM.Scene.Iterator.BreadthFirstIterator import BreadthFirstIterator
from UM.Math.Vector import Vector
from UM.Scene.Selection import Selection
from UM.Preferences import Preferences
from cura.Scene.ConvexHullDecorator import ConvexHullDecorator
from cura.Operations import PlatformPhysicsOperation
from cura.Scene import ZOffsetDecorator
import random # used for list shuffling
class PlatformPhysics:
def __init__(self, controller, volume):
super().__init__()
self._controller = controller
self._controller.getScene().sceneChanged.connect(self._onSceneChanged)
self._controller.toolOperationStarted.connect(self._onToolOperationStarted)
self._controller.toolOperationStopped.connect(self._onToolOperationStopped)
self._build_volume = volume
self._enabled = True
self._change_timer = QTimer()
self._change_timer.setInterval(100)
self._change_timer.setSingleShot(True)
self._change_timer.timeout.connect(self._onChangeTimerFinished)
self._move_factor = 1.1 # By how much should we multiply overlap to calculate a new spot?
self._max_overlap_checks = 10 # How many times should we try to find a new spot per tick?
self._minimum_gap = 2 # It is a minimum distance (in mm) between two models, applicable for small models
Preferences.getInstance().addPreference("physics/automatic_push_free", True)
Preferences.getInstance().addPreference("physics/automatic_drop_down", True)
def _onSceneChanged(self, source):
if not source.getMeshData():
return
self._change_timer.start()
def _onChangeTimerFinished(self):
if not self._enabled:
return
root = self._controller.getScene().getRoot()
# Keep a list of nodes that are moving. We use this so that we don't move two intersecting objects in the
# same direction.
transformed_nodes = []
# We try to shuffle all the nodes to prevent "locked" situations, where iteration B inverts iteration A.
# By shuffling the order of the nodes, this might happen a few times, but at some point it will resolve.
nodes = list(BreadthFirstIterator(root))
# Only check nodes inside build area.
nodes = [node for node in nodes if (hasattr(node, "_outside_buildarea") and not node._outside_buildarea)]
random.shuffle(nodes)
for node in nodes:
if node is root or not isinstance(node, SceneNode) or node.getBoundingBox() is None:
continue
bbox = node.getBoundingBox()
# Move it downwards if bottom is above platform
move_vector = Vector()
if Preferences.getInstance().getValue("physics/automatic_drop_down") and not (node.getParent() and node.getParent().callDecoration("isGroup") or node.getParent() != root) and node.isEnabled(): #If an object is grouped, don't move it down
z_offset = node.callDecoration("getZOffset") if node.getDecorator(ZOffsetDecorator.ZOffsetDecorator) else 0
move_vector = move_vector.set(y = -bbox.bottom + z_offset)
# If there is no convex hull for the node, start calculating it and continue.
if not node.getDecorator(ConvexHullDecorator):
node.addDecorator(ConvexHullDecorator())
# only push away objects if this node is a printing mesh
if not node.callDecoration("isNonPrintingMesh") and Preferences.getInstance().getValue("physics/automatic_push_free"):
# Check for collisions between convex hulls
for other_node in BreadthFirstIterator(root):
# Ignore root, ourselves and anything that is not a normal SceneNode.
if other_node is root or not issubclass(type(other_node), SceneNode) or other_node is node or other_node.callDecoration("getBuildPlateNumber") != node.callDecoration("getBuildPlateNumber"):
continue
# Ignore collisions of a group with it's own children
if other_node in node.getAllChildren() or node in other_node.getAllChildren():
continue
# Ignore collisions within a group
if other_node.getParent() and node.getParent() and (other_node.getParent().callDecoration("isGroup") is not None or node.getParent().callDecoration("isGroup") is not None):
continue
# Ignore nodes that do not have the right properties set.
if not other_node.callDecoration("getConvexHull") or not other_node.getBoundingBox():
continue
if other_node in transformed_nodes:
continue # Other node is already moving, wait for next pass.
if other_node.callDecoration("isNonPrintingMesh"):
continue
overlap = (0, 0) # Start loop with no overlap
current_overlap_checks = 0
# Continue to check the overlap until we no longer find one.
while overlap and current_overlap_checks < self._max_overlap_checks:
current_overlap_checks += 1
head_hull = node.callDecoration("getConvexHullHead")
if head_hull: # One at a time intersection.
overlap = head_hull.translate(move_vector.x, move_vector.z).intersectsPolygon(other_node.callDecoration("getConvexHull"))
if not overlap:
other_head_hull = other_node.callDecoration("getConvexHullHead")
if other_head_hull:
overlap = node.callDecoration("getConvexHull").translate(move_vector.x, move_vector.z).intersectsPolygon(other_head_hull)
if overlap:
# Moving ensured that overlap was still there. Try anew!
move_vector = move_vector.set(x = move_vector.x + overlap[0] * self._move_factor,
z = move_vector.z + overlap[1] * self._move_factor)
else:
# Moving ensured that overlap was still there. Try anew!
move_vector = move_vector.set(x = move_vector.x + overlap[0] * self._move_factor,
z = move_vector.z + overlap[1] * self._move_factor)
else:
own_convex_hull = node.callDecoration("getConvexHull")
other_convex_hull = other_node.callDecoration("getConvexHull")
if own_convex_hull and other_convex_hull:
overlap = own_convex_hull.translate(move_vector.x, move_vector.z).intersectsPolygon(other_convex_hull)
if overlap: # Moving ensured that overlap was still there. Try anew!
temp_move_vector = move_vector.set(x = move_vector.x + overlap[0] * self._move_factor,
z = move_vector.z + overlap[1] * self._move_factor)
# if the distance between two models less than 2mm then try to find a new factor
if abs(temp_move_vector.x - overlap[0]) < self._minimum_gap and abs(temp_move_vector.y - overlap[1]) < self._minimum_gap:
temp_x_factor = (abs(overlap[0]) + self._minimum_gap) / overlap[0] if overlap[0] != 0 else 0 # find x move_factor, like (3.4 + 2) / 3.4 = 1.58
temp_y_factor = (abs(overlap[1]) + self._minimum_gap) / overlap[1] if overlap[1] != 0 else 0 # find y move_factor
temp_scale_factor = temp_x_factor if abs(temp_x_factor) > abs(temp_y_factor) else temp_y_factor
move_vector = move_vector.set(x = move_vector.x + overlap[0] * temp_scale_factor,
z = move_vector.z + overlap[1] * temp_scale_factor)
else:
move_vector = temp_move_vector
else:
# This can happen in some cases if the object is not yet done with being loaded.
# Simply waiting for the next tick seems to resolve this correctly.
overlap = None
if not Vector.Null.equals(move_vector, epsilon = 1e-5):
transformed_nodes.append(node)
op = PlatformPhysicsOperation.PlatformPhysicsOperation(node, move_vector)
op.push()
# After moving, we have to evaluate the boundary checks for nodes
build_volume = Application.getInstance().getBuildVolume()
build_volume.updateNodeBoundaryCheck()
def _onToolOperationStarted(self, tool):
self._enabled = False
def _onToolOperationStopped(self, tool):
# Selection tool should not trigger an update.
if tool.getPluginId() == "SelectionTool":
return
if tool.getPluginId() == "TranslateTool":
for node in Selection.getAllSelectedObjects():
if node.getBoundingBox().bottom < 0:
if not node.getDecorator(ZOffsetDecorator.ZOffsetDecorator):
node.addDecorator(ZOffsetDecorator.ZOffsetDecorator())
node.callDecoration("setZOffset", node.getBoundingBox().bottom)
else:
if node.getDecorator(ZOffsetDecorator.ZOffsetDecorator):
node.removeDecorator(ZOffsetDecorator.ZOffsetDecorator)
self._enabled = True
self._onChangeTimerFinished()
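    # Worked example (not part of the original plugin) of the push-free arithmetic in
    # _onChangeTimerFinished above: with an overlap of (3.4, 0) mm and _move_factor = 1.1,
    # the candidate move is 3.4 * 1.1 = 3.74 mm. Because the resulting gap would be smaller
    # than _minimum_gap = 2 mm, the factor is recomputed as (3.4 + 2) / 3.4 ~= 1.59, giving
    # a move of about 5.4 mm along that axis instead.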
|
the-stack_0_17396 | import json
import pytz
import re
from collections import OrderedDict, defaultdict
from django.contrib import messages
from django.http import Http404, HttpResponse, JsonResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.urls import RegexURLResolver, Resolver404
from django.utils.translation import ugettext_lazy as _
from couchdbkit import ResourceConflict, ResourceNotFound
from dimagi.utils.web import json_response
from corehq import privileges, toggles
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.dbaccessors import get_app
from corehq.apps.app_manager.decorators import (
safe_cached_download,
safe_download,
)
from corehq.apps.app_manager.exceptions import (
AppManagerException,
FormNotFoundException,
ModuleNotFoundException,
)
from corehq.apps.app_manager.models import Application
from corehq.apps.app_manager.tasks import autogenerate_build
from corehq.apps.app_manager.util import (
add_odk_profile_after_build,
get_latest_enabled_versions_per_profile,
)
from corehq.apps.app_manager.views.utils import back_to_main, get_langs
from corehq.apps.builds.jadjar import convert_XML_To_J2ME
from corehq.apps.hqmedia.views import DownloadMultimediaZip
from corehq.util.soft_assert import soft_assert
from corehq.util.timezones.conversions import ServerTime
from corehq.util.view_utils import set_file_download
BAD_BUILD_MESSAGE = _("Sorry: this build is invalid. Try deleting it and rebuilding. "
"If error persists, please report an issue")
def _get_build_profile_id(request):
profile = request.GET.get('profile')
if profile in request.app.build_profiles:
return profile
else:
return None
@safe_download
def download_odk_profile(request, domain, app_id):
"""
See ApplicationBase.create_profile
"""
if not request.app.copy_of:
username = request.GET.get('username', 'unknown user')
autogenerate_build(request.app, username)
else:
request._always_allow_browser_caching = True
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_profile(is_odk=True, build_profile_id=profile),
content_type="commcare/profile"
)
@safe_download
def download_odk_media_profile(request, domain, app_id):
if not request.app.copy_of:
username = request.GET.get('username', 'unknown user')
autogenerate_build(request.app, username)
else:
request._always_allow_browser_caching = True
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_profile(is_odk=True, with_media=True, build_profile_id=profile),
content_type="commcare/profile"
)
@safe_cached_download
def download_suite(request, domain, app_id):
"""
See Application.create_suite
"""
if not request.app.copy_of:
request.app.set_form_versions()
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_suite(build_profile_id=profile)
)
@safe_cached_download
def download_media_suite(request, domain, app_id):
"""
See Application.create_media_suite
"""
if not request.app.copy_of:
request.app.set_media_versions()
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_media_suite(build_profile_id=profile)
)
@safe_cached_download
def download_app_strings(request, domain, app_id, lang):
"""
See Application.create_app_strings
"""
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_app_strings(lang, build_profile_id=profile)
)
@safe_cached_download
def download_xform(request, domain, app_id, module_id, form_id):
"""
See Application.fetch_xform
"""
profile = _get_build_profile_id(request)
try:
return HttpResponse(
request.app.fetch_xform(module_id, form_id, build_profile_id=profile)
)
except (IndexError, ModuleNotFoundException):
raise Http404()
except AppManagerException:
form_unique_id = request.app.get_module(module_id).get_form(form_id).unique_id
response = validate_form_for_build(request, domain, app_id, form_unique_id, ajax=False)
response.status_code = 404
return response
@safe_cached_download
def download_jad(request, domain, app_id):
"""
See ApplicationBase.create_jadjar_from_build_files
"""
app = request.app
if not app.copy_of:
app.set_media_versions()
jad, _ = app.create_jadjar_from_build_files()
try:
response = HttpResponse(jad)
except Exception:
messages.error(request, BAD_BUILD_MESSAGE)
return back_to_main(request, domain, app_id=app_id)
set_file_download(response, "CommCare.jad")
response["Content-Type"] = "text/vnd.sun.j2me.app-descriptor"
response["Content-Length"] = len(jad)
return response
@safe_cached_download
def download_jar(request, domain, app_id):
"""
See ApplicationBase.create_jadjar_from_build_files
This is the only view that will actually be called
in the process of downloading a complete CommCare.jar
build (i.e. over the air to a phone).
"""
response = HttpResponse(content_type="application/java-archive")
app = request.app
if not app.copy_of:
app.set_media_versions()
_, jar = app.create_jadjar_from_build_files()
set_file_download(response, 'CommCare.jar')
response['Content-Length'] = len(jar)
try:
response.write(jar)
except Exception:
messages.error(request, BAD_BUILD_MESSAGE)
return back_to_main(request, domain, app_id=app_id)
return response
@safe_cached_download
def download_raw_jar(request, domain, app_id):
"""
See ApplicationBase.fetch_jar
"""
response = HttpResponse(
request.app.fetch_jar()
)
response['Content-Type'] = "application/java-archive"
return response
class DownloadCCZ(DownloadMultimediaZip):
name = 'download_ccz'
compress_zip = True
include_index_files = True
@property
def zip_name(self):
return 'commcare_v{}.ccz'.format(self.app.version)
def check_before_zipping(self):
if self.app.is_remote_app():
self.include_multimedia_files = False
super(DownloadCCZ, self).check_before_zipping()
@safe_cached_download
def download_file(request, domain, app_id, path):
download_target_version = request.GET.get('download_target_version') == 'true'
if download_target_version:
parts = path.split('.')
assert len(parts) == 2
target = Application.get(app_id).commcare_flavor
assert target != 'none'
path = parts[0] + '-' + target + '.' + parts[1]
if path == "app.json":
return JsonResponse(request.app.to_json())
content_type_map = {
'ccpr': 'commcare/profile',
'jad': 'text/vnd.sun.j2me.app-descriptor',
'jar': 'application/java-archive',
'xml': 'application/xml',
'txt': 'text/plain',
}
try:
content_type = content_type_map[path.split('.')[-1]]
except KeyError:
content_type = None
response = HttpResponse(content_type=content_type)
if request.GET.get('download') == 'true':
response['Content-Disposition'] = "attachment; filename={}".format(path)
build_profile_id = _get_build_profile_id(request)
build_profile_access = domain_has_privilege(domain, privileges.BUILD_PROFILES)
if path in ('CommCare.jad', 'CommCare.jar'):
set_file_download(response, path)
full_path = path
elif build_profile_id and build_profile_id in request.app.build_profiles and build_profile_access:
full_path = 'files/%s/%s' % (build_profile_id, path)
else:
full_path = 'files/%s' % path
def resolve_path(path):
return RegexURLResolver(
r'^', 'corehq.apps.app_manager.download_urls').resolve(path)
def create_build_files(build_profile_id=None):
request.app.create_build_files(build_profile_id=build_profile_id)
request.app.save()
def create_build_files_if_necessary_handling_conflicts(is_retry=False):
try:
try:
# look for file guaranteed to exist if profile is created
request.app.fetch_attachment('files/{id}/profile.xml'.format(id=build_profile_id))
except ResourceNotFound:
create_build_files(build_profile_id)
except ResourceConflict:
if is_retry:
raise
request.app = Application.get(request.app.get_id)
create_build_files_if_necessary_handling_conflicts(True)
try:
assert request.app.copy_of
# create build files for default profile if they were not created during initial build
# or for language profiles for which build files have not been created yet
try:
payload = request.app.fetch_attachment(full_path)
except ResourceNotFound:
if not build_profile_id:
create_build_files()
elif build_profile_id in request.app.build_profiles and build_profile_access:
create_build_files_if_necessary_handling_conflicts()
else:
raise
payload = request.app.fetch_attachment(full_path)
if path in ['profile.xml', 'media_profile.xml']:
payload = convert_XML_To_J2ME(payload, path, request.app.use_j2me_endpoint)
response.write(payload)
if path in ['profile.ccpr', 'media_profile.ccpr'] and request.app.last_released:
last_released = request.app.last_released.replace(microsecond=0) # mobile doesn't want microseconds
last_released = ServerTime(last_released).user_time(pytz.UTC).done().isoformat()
response['X-CommCareHQ-AppReleasedOn'] = last_released
response['Content-Length'] = len(response.content)
return response
except (ResourceNotFound, AssertionError):
if request.app.copy_of:
if request.META.get('HTTP_USER_AGENT') == 'bitlybot':
raise Http404()
elif path == 'profile.ccpr':
# legacy: should patch build to add odk profile
# which wasn't made on build for a long time
add_odk_profile_after_build(request.app)
request.app.save()
return download_file(request, domain, app_id, path)
elif path in ('CommCare.jad', 'CommCare.jar'):
if not request.app.build_spec.supports_j2me():
raise Http404()
request.app.create_jadjar_from_build_files(save=True)
try:
request.app.save(increment_version=False)
except ResourceConflict:
# Likely that somebody tried to download the jad and jar
# files for the first time simultaneously.
pass
return download_file(request, domain, app_id, path)
else:
try:
resolve_path(path)
except Resolver404:
# ok this was just a url that doesn't exist
pass
else:
# this resource should exist but doesn't
_assert = soft_assert('@'.join(['jschweers', 'dimagi.com']))
_assert(False, 'Expected build resource %s not found' % path)
raise Http404()
try:
callback, callback_args, callback_kwargs = resolve_path(path)
except Resolver404:
raise Http404()
return callback(request, domain, app_id, *callback_args, **callback_kwargs)
@safe_download
def download_profile(request, domain, app_id):
"""
See ApplicationBase.create_profile
"""
if not request.app.copy_of:
username = request.GET.get('username', 'unknown user')
autogenerate_build(request.app, username)
else:
request._always_allow_browser_caching = True
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_profile(build_profile_id=profile)
)
@safe_download
def download_media_profile(request, domain, app_id):
if not request.app.copy_of:
username = request.GET.get('username', 'unknown user')
autogenerate_build(request.app, username)
else:
request._always_allow_browser_caching = True
profile = _get_build_profile_id(request)
return HttpResponse(
request.app.create_profile(with_media=True, build_profile_id=profile)
)
@safe_cached_download
def download_practice_user_restore(request, domain, app_id):
if not request.app.copy_of:
autogenerate_build(request.app, request.user.username)
return HttpResponse(
request.app.create_practice_user_restore()
)
@safe_download
def download_index(request, domain, app_id):
"""
    A landing page, mostly for debugging, that has links to the jad and jar as well as
    to all the resource files that will end up zipped into the jar.
"""
files = defaultdict(list)
try:
for file_ in source_files(request.app):
form_filename = re.search(r'modules-(\d+)\/forms-(\d+)', file_[0])
if form_filename:
module_id, form_id = form_filename.groups()
module = request.app.get_module(module_id)
form = module.get_form(form_id)
section_name = "m{} - {}".format(
module_id,
", ".join(["({}) {}".format(lang, name)
for lang, name in module.name.items()])
)
files[section_name].append({
'name': file_[0],
'source': file_[1],
'readable_name': "f{} - {}".format(
form_id,
", ".join(["({}) {}".format(lang, name)
for lang, name in form.name.items()])
),
})
else:
files[None].append({
'name': file_[0],
'source': file_[1],
'readable_name': None,
})
except Exception:
messages.error(
request,
_(
"We were unable to get your files "
"because your Application has errors. "
"Please click <strong>Make New Version</strong> "
"for feedback on how to fix these errors."
),
extra_tags='html'
)
enabled_build_profiles = []
latest_enabled_build_profiles = {}
if request.app.is_released and toggles.RELEASE_BUILDS_PER_PROFILE.enabled(domain):
latest_enabled_build_profiles = get_latest_enabled_versions_per_profile(request.app.copy_of)
enabled_build_profiles = [_id for _id, version in latest_enabled_build_profiles.items()
if version == request.app.version]
return render(request, "app_manager/download_index.html", {
'app': request.app,
'files': OrderedDict(sorted(files.items(), key=lambda x: x[0] or '')),
'supports_j2me': request.app.build_spec.supports_j2me(),
'enabled_build_profiles': enabled_build_profiles,
'latest_enabled_build_profiles': latest_enabled_build_profiles,
})
def validate_form_for_build(request, domain, app_id, form_unique_id, ajax=True):
app = get_app(domain, app_id)
try:
form = app.get_form(form_unique_id)
except FormNotFoundException:
# this can happen if you delete the form from another page
raise Http404()
errors = form.validate_for_build()
lang, langs = get_langs(request, app)
if ajax and "blank form" in [error.get('type') for error in errors]:
response_html = ""
else:
response_html = render_to_string("app_manager/partials/build_errors.html", {
'app': app,
'build_errors': errors,
'not_actual_build': True,
'domain': domain,
'langs': langs,
})
if ajax:
return json_response({
'error_html': response_html,
})
else:
return HttpResponse(response_html)
def download_index_files(app, build_profile_id=None):
if app.copy_of:
prefix = 'files/'
if build_profile_id is not None:
prefix += build_profile_id + '/'
needed_for_CCZ = lambda path: path.startswith(prefix)
else:
profiles = set(app.build_profiles)
needed_for_CCZ = lambda path: (path.startswith(prefix) and
path.split('/')[1] not in profiles)
        if (prefix + 'profile.ccpr') not in app.blobs:
            # profile hasn't been built yet
app.create_build_files(build_profile_id=build_profile_id)
app.save()
files = [(path[len(prefix):], app.fetch_attachment(path))
for path in app.blobs if needed_for_CCZ(path)]
else:
files = list(app.create_all_files().items())
files = [
(name, build_file if isinstance(build_file, str) else build_file.decode('utf-8'))
for (name, build_file) in files
]
return sorted(files)
def source_files(app):
"""
Return the app's source files, including the app json.
Return format is a list of tuples where the first item in the tuple is a
file name and the second is the file contents.
"""
if not app.copy_of:
app.set_media_versions()
files = download_index_files(app)
app_json = json.dumps(
app.to_json(), sort_keys=True, indent=4, separators=(',', ': ')
)
files.append(
("app.json", app_json)
)
return sorted(files)
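# Illustration (not part of the original module): source_files() returns (name, contents)
# tuples, e.g. [("app.json", "{...}"), ("files/profile.ccpr", "<profile .../>"), ...], so a
# debug helper could dump them to disk roughly like this (requires `import os`, which this
# module does not otherwise import; the target directory is a placeholder):
#
#     for name, contents in source_files(app):
#         target = os.path.join("/tmp/app-src", name)
#         os.makedirs(os.path.dirname(target), exist_ok=True)
#         with open(target, "w") as f:
#             f.write(contents)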
|
the-stack_0_17398 | """
Base class for the reward priority queues
Copyright (c) 2021 Elix, Inc.
"""
import random
from functools import total_ordering
from itertools import product
from typing import Any, List, Optional, Tuple
import numpy as np
@total_ordering
class StorageElement:
def __init__(
self,
smile: str,
score: float,
expert_id: Optional[int] = None,
):
self.smile = smile
self.score = score
self.expert_id = expert_id
def __eq__(self, other):
return np.isclose(self.score, other.score)
def __lt__(self, other):
return self.score < other.score
def __hash__(self):
return hash(self.smile)
class MaxRewardPriorityMemory:
def __init__(
self,
) -> None:
self.elements: List[StorageElement] = []
def __len__(self) -> int:
return len(self.elements)
def add_list(
self,
smiles: List[str],
scores: List[float],
expert_id: Optional[int] = None,
) -> None:
new_elements = [
StorageElement(
smile=smile,
score=score,
expert_id=expert_id,
)
for smile, score in zip(smiles, scores)
]
self.elements.extend(new_elements)
self.elements = list(set(self.elements))
def get_elements(
self,
) -> Tuple[List[str], List[float], List[Any]]:
return unravel_elements(self.elements)
def squeeze_by_rank(self, top_k: int) -> None:
top_k = min(top_k, len(self.elements))
self.elements = sorted(self.elements, reverse=True)[:top_k]
def sample_batch(self, batch_size: int) -> Tuple[List[str], List[float], List[Any]]:
sampled_elements = random.choices(population=self.elements, k=batch_size)
return unravel_elements(sampled_elements)
def unravel_elements(
elements: List[StorageElement],
) -> Tuple[List[str], List[float], List[Any]]:
return tuple( # type: ignore
map(
list,
zip(
*[
(element.smile, element.score, element.expert_id)
for element in elements
]
),
)
)
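# Illustration (not part of the original module): a minimal usage sketch of the memory in a
# generate-and-score loop; the SMILES strings and scores are made up.
#
#     memory = MaxRewardPriorityMemory()
#     memory.add_list(smiles=['CCO', 'c1ccccc1'], scores=[0.42, 0.77], expert_id=0)
#     memory.squeeze_by_rank(top_k=1)                    # keep only the best-scoring element
#     smiles, scores, expert_ids = memory.get_elements()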
|
the-stack_0_17399 | import numpy as np
import cgen as c
from sympy import Or, Max, Not
from devito.data import FULL
from devito.ir import (DummyEq, Conditional, Dereference, Expression, ExpressionBundle,
List, Prodder, ParallelIteration, ParallelBlock, While,
FindSymbols, FindNodes, Return, COLLAPSED, VECTORIZED, Transformer,
IsPerfectIteration, retrieve_iteration_tree, filter_iterations)
from devito.symbolics import CondEq, DefFunction, INT
from devito.parameters import configuration
from devito.passes.iet.engine import iet_pass
from devito.tools import as_tuple, is_integer, prod
from devito.types import PointerArray, Symbol, NThreadsMixin
__all__ = ['Ompizer', 'OpenMPIteration', 'ParallelTree']
def ncores():
return configuration['platform'].cores_physical
def nhyperthreads():
return configuration['platform'].threads_per_core
class OpenMPRegion(ParallelBlock):
def __init__(self, body, private=None):
# Normalize and sanity-check input. A bit ugly, but it makes everything
# much simpler to manage and reconstruct
body = as_tuple(body)
assert len(body) == 1
body = body[0]
assert body.is_List
if isinstance(body, ParallelTree):
partree = body
elif body.is_List:
assert len(body.body) == 1 and isinstance(body.body[0], ParallelTree)
assert len(body.footer) == 0
partree = body.body[0]
partree = partree._rebuild(prefix=(List(header=body.header,
body=partree.prefix)))
header = OpenMPRegion._make_header(partree.nthreads, private)
super(OpenMPRegion, self).__init__(header=header, body=partree)
@property
def partree(self):
return self.body[0]
@property
def root(self):
return self.partree.root
@property
def nthreads(self):
return self.partree.nthreads
@classmethod
def _make_header(cls, nthreads, private=None):
private = ('private(%s)' % ','.join(private)) if private else ''
return c.Pragma('omp parallel num_threads(%s) %s' % (nthreads.name, private))
class OpenMPIteration(ParallelIteration):
def __init__(self, *args, **kwargs):
pragmas, kwargs = self._make_header(**kwargs)
properties = as_tuple(kwargs.pop('properties', None))
properties += (COLLAPSED(kwargs.get('ncollapse', 1)),)
self.schedule = kwargs.pop('schedule', None)
self.parallel = kwargs.pop('parallel', False)
self.ncollapse = kwargs.pop('ncollapse', None)
self.chunk_size = kwargs.pop('chunk_size', None)
self.nthreads = kwargs.pop('nthreads', None)
self.reduction = kwargs.pop('reduction', None)
super(OpenMPIteration, self).__init__(*args, pragmas=pragmas,
properties=properties, **kwargs)
@classmethod
def _make_header(cls, **kwargs):
kwargs.pop('pragmas', None)
construct = cls._make_construct(**kwargs)
clauses = cls._make_clauses(**kwargs)
header = c.Pragma(' '.join([construct] + clauses))
return (header,), kwargs
@classmethod
def _make_construct(cls, parallel=False, **kwargs):
if parallel:
return 'omp parallel for'
else:
return 'omp for'
@classmethod
def _make_clauses(cls, ncollapse=None, chunk_size=None, nthreads=None,
reduction=None, schedule=None, **kwargs):
clauses = []
clauses.append('collapse(%d)' % (ncollapse or 1))
if chunk_size is not False:
clauses.append('schedule(%s,%s)' % (schedule or 'dynamic',
chunk_size or 1))
if nthreads:
clauses.append('num_threads(%s)' % nthreads)
if reduction:
args = []
for i in reduction:
if i.is_Indexed:
f = i.function
bounds = []
for k, d in zip(i.indices, f.dimensions):
if k.is_Number:
bounds.append('[%s]' % k)
else:
# OpenMP expects a range as input of reduction,
# such as reduction(+:f[0:f_vec->size[1]])
bounds.append('[0:%s]' % f._C_get_field(FULL, d).size)
args.append('%s%s' % (i.name, ''.join(bounds)))
else:
args.append(str(i))
clauses.append('reduction(+:%s)' % ','.join(args))
return clauses
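# Illustration (not part of the original module): with, e.g., ncollapse=2, chunk_size=1 and
# parallel=True, the construct and clauses assembled above render to a pragma such as
#
#     #pragma omp parallel for collapse(2) schedule(dynamic,1)
#
# and a reduction over a scalar Symbol `s` would additionally append `reduction(+:s)`.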
class ParallelTree(List):
"""
    This class groups together a parallel for-loop with some setup
    statements, for example:
.. code-block:: C
int chunk_size = ...
#pragma omp ... schedule(..., chunk_size)
for (int i = ...)
{
...
}
"""
_traversable = ['prefix', 'body']
def __init__(self, prefix, body, nthreads=None):
# Normalize and sanity-check input
body = as_tuple(body)
assert len(body) == 1 and body[0].is_Iteration
self.prefix = as_tuple(prefix)
self.nthreads = nthreads
super(ParallelTree, self).__init__(body=body)
def __getattr__(self, name):
if 'body' in self.__dict__:
# During unpickling, `__setattr__` calls `__getattr__(..., 'body')`,
# which would cause infinite recursion if we didn't check whether
# 'body' is present or not
return getattr(self.body[0], name)
raise AttributeError
@property
def functions(self):
return as_tuple(self.nthreads)
@property
def root(self):
return self.body[0]
class ThreadedProdder(Conditional, Prodder):
_traversable = []
def __init__(self, prodder):
# Atomic-ize any single-thread Prodders in the parallel tree
condition = CondEq(Ompizer.lang['thread-num'], 0)
# Prod within a while loop until all communications have completed
# In other words, the thread delegated to prodding is entrapped for as long
# as it's required
prod_until = Not(DefFunction(prodder.name, [i.name for i in prodder.arguments]))
then_body = List(header=c.Comment('Entrap thread until comms have completed'),
body=While(prod_until))
Conditional.__init__(self, condition, then_body)
Prodder.__init__(self, prodder.name, prodder.arguments, periodic=prodder.periodic)
class Ompizer(object):
lang = {
'simd-for': c.Pragma('omp simd'),
'simd-for-aligned': lambda i, j: c.Pragma('omp simd aligned(%s:%d)' % (i, j)),
'atomic': c.Pragma('omp atomic update'),
'thread-num': DefFunction('omp_get_thread_num')
}
"""
Shortcuts for the OpenMP language.
"""
_Region = OpenMPRegion
_Iteration = OpenMPIteration
def __init__(self, sregistry, options, key=None):
"""
Parameters
----------
sregistry : SymbolRegistry
The symbol registry, to quickly access the special symbols that may
appear in the IET (e.g., `sregistry.threadid`, `sregistry.nthreads`).
options : dict
The optimization options. Accepted: ['par-collapse-ncores',
'par-collapse-work', 'par-chunk-nonaffine', 'par-dynamic-work', 'par-nested']
* 'par-collapse-ncores': use a collapse clause if the number of
available physical cores is greater than this threshold.
* 'par-collapse-work': use a collapse clause if the trip count of the
collapsable Iterations is statically known to exceed this threshold.
* 'par-chunk-nonaffine': coefficient to adjust the chunk size in
non-affine parallel Iterations.
* 'par-dynamic-work': use dynamic scheduling if the operation count per
iteration exceeds this threshold. Otherwise, use static scheduling.
* 'par-nested': nested parallelism if the number of hyperthreads per core
is greater than this threshold.
key : callable, optional
Return True if an Iteration can be parallelized, False otherwise.
"""
self.sregistry = sregistry
self.collapse_ncores = options['par-collapse-ncores']
self.collapse_work = options['par-collapse-work']
self.chunk_nonaffine = options['par-chunk-nonaffine']
self.dynamic_work = options['par-dynamic-work']
self.nested = options['par-nested']
if key is not None:
self.key = key
else:
self.key = lambda i: i.is_ParallelRelaxed and not i.is_Vectorized
@property
def nthreads(self):
return self.sregistry.nthreads
@property
def nthreads_nested(self):
return self.sregistry.nthreads_nested
@property
def nthreads_nonaffine(self):
return self.sregistry.nthreads_nonaffine
@property
def threadid(self):
return self.sregistry.threadid
def _find_collapsable(self, root, candidates):
collapsable = []
if ncores() >= self.collapse_ncores:
for n, i in enumerate(candidates[1:], 1):
# The Iteration nest [root, ..., i] must be perfect
if not IsPerfectIteration(depth=i).visit(root):
break
# The OpenMP specification forbids collapsed loops to use iteration
# variables in initializer expressions. E.g., the following is forbidden:
#
# #pragma omp ... collapse(2)
# for (i = ... )
# for (j = i ...)
# ...
#
# Here, we make sure this won't happen
if any(j.dim in i.symbolic_min.free_symbols for j in candidates[:n]):
break
# Also, we do not want to collapse vectorizable Iterations
if i.is_Vectorized:
break
# Would there be enough work per parallel iteration?
nested = candidates[n+1:]
if nested:
try:
work = prod([int(j.dim.symbolic_size) for j in nested])
if work < self.collapse_work:
break
except TypeError:
pass
collapsable.append(i)
return collapsable
@classmethod
def _make_tid(cls, tid):
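        # Emits an initializer such as `int tid = omp_get_thread_num();`
        # (the exact C type comes from `tid._C_typedata`).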
return c.Initializer(c.Value(tid._C_typedata, tid.name), cls.lang['thread-num'])
def _make_reductions(self, partree, collapsed):
if not any(i.is_ParallelAtomic for i in collapsed):
return partree
# Collect expressions inducing reductions
exprs = FindNodes(Expression).visit(partree)
exprs = [i for i in exprs if i.is_Increment and not i.is_ForeignExpression]
reduction = [i.output for i in exprs]
if (all(i.is_Affine for i in collapsed)
or all(not i.is_Indexed for i in reduction)):
# Introduce reduction clause
mapper = {partree.root: partree.root._rebuild(reduction=reduction)}
else:
# Introduce one `omp atomic` pragma for each increment
mapper = {i: i._rebuild(pragmas=self.lang['atomic']) for i in exprs}
partree = Transformer(mapper).visit(partree)
return partree
def _make_threaded_prodders(self, partree):
mapper = {i: ThreadedProdder(i) for i in FindNodes(Prodder).visit(partree)}
partree = Transformer(mapper).visit(partree)
return partree
def _make_partree(self, candidates, nthreads=None):
"""Parallelize the `candidates` Iterations attaching suitable OpenMP pragmas."""
assert candidates
root = candidates[0]
# Get the collapsable Iterations
collapsable = self._find_collapsable(root, candidates)
ncollapse = 1 + len(collapsable)
# Prepare to build a ParallelTree
if all(i.is_Affine for i in candidates):
bundles = FindNodes(ExpressionBundle).visit(root)
sops = sum(i.ops for i in bundles)
if sops >= self.dynamic_work:
schedule = 'dynamic'
else:
schedule = 'static'
if nthreads is None:
# pragma omp for ... schedule(..., 1)
nthreads = self.nthreads
body = OpenMPIteration(schedule=schedule, ncollapse=ncollapse,
**root.args)
else:
# pragma omp parallel for ... schedule(..., 1)
body = OpenMPIteration(schedule=schedule, parallel=True,
ncollapse=ncollapse, nthreads=nthreads,
**root.args)
prefix = []
else:
# pragma omp for ... schedule(..., expr)
assert nthreads is None
nthreads = self.nthreads_nonaffine
chunk_size = Symbol(name='chunk_size')
body = OpenMPIteration(ncollapse=ncollapse, chunk_size=chunk_size,
**root.args)
niters = prod([root.symbolic_size] + [j.symbolic_size for j in collapsable])
value = INT(Max(niters / (nthreads*self.chunk_nonaffine), 1))
prefix = [Expression(DummyEq(chunk_size, value, dtype=np.int32))]
# Create a ParallelTree
partree = ParallelTree(prefix, body, nthreads=nthreads)
collapsed = [partree] + collapsable
return root, partree, collapsed
def _make_parregion(self, partree, parrays):
arrays = [i for i in FindSymbols().visit(partree) if i.is_Array]
# Detect thread-private arrays on the heap and "map" them to shared
# vector-expanded (one entry per thread) Arrays
heap_private = [i for i in arrays if i._mem_heap and i._mem_local]
heap_globals = []
for i in heap_private:
if i in parrays:
pi = parrays[i]
else:
pi = parrays.setdefault(i, PointerArray(name=self.sregistry.make_name(),
dimensions=(self.threadid,),
array=i))
heap_globals.append(Dereference(i, pi))
if heap_globals:
prefix = List(header=self._make_tid(self.threadid),
body=heap_globals + list(partree.prefix),
footer=c.Line())
partree = partree._rebuild(prefix=prefix)
return self._Region(partree)
def _make_guard(self, partree, collapsed):
# Do not enter the parallel region if the step increment is 0; this
# would raise a `Floating point exception (core dumped)` in some OpenMP
# implementations. Note that using an OpenMP `if` clause won't work
cond = [CondEq(i.step, 0) for i in collapsed if isinstance(i.step, Symbol)]
cond = Or(*cond)
if cond != False: # noqa: `cond` may be a sympy.False which would be == False
partree = List(body=[Conditional(cond, Return()), partree])
return partree
def _make_nested_partree(self, partree):
# Apply heuristic
if nhyperthreads() <= self.nested:
return partree
# Note: there might be multiple sub-trees amenable to nested parallelism,
# hence we loop over all of them
#
# for (i = ... ) // outer parallelism
# for (j0 = ...) // first source of nested parallelism
# ...
# for (j1 = ...) // second source of nested parallelism
# ...
mapper = {}
for tree in retrieve_iteration_tree(partree):
outer = tree[:partree.ncollapsed]
inner = tree[partree.ncollapsed:]
# Heuristic: nested parallelism is applied only if the top nested
# parallel Iteration iterates *within* the top outer parallel Iteration
# (i.e., the outer is a loop over blocks, while the nested is a loop
# within a block)
candidates = []
for i in inner:
if self.key(i) and any(is_integer(j.step-i.symbolic_size) for j in outer):
candidates.append(i)
elif candidates:
# If there's at least one candidate but `i` doesn't honor the
# heuristic above, then we break, as the candidates must be
# perfectly nested
break
if not candidates:
continue
# Introduce nested parallelism
subroot, subpartree, _ = self._make_partree(candidates, self.nthreads_nested)
mapper[subroot] = subpartree
partree = Transformer(mapper).visit(partree)
return partree
def _make_parallel(self, iet):
mapper = {}
parrays = {}
for tree in retrieve_iteration_tree(iet):
# Get the omp-parallelizable Iterations in `tree`
candidates = filter_iterations(tree, key=self.key)
if not candidates:
continue
# Outer parallelism
root, partree, collapsed = self._make_partree(candidates)
if partree is None or root in mapper:
continue
# Nested parallelism
partree = self._make_nested_partree(partree)
# Handle reductions
partree = self._make_reductions(partree, collapsed)
# Atomicize and optimize single-thread prodders
partree = self._make_threaded_prodders(partree)
# Wrap within a parallel region, declaring private and shared variables
parregion = self._make_parregion(partree, parrays)
# Protect the parallel region in case of 0-valued step increments
parregion = self._make_guard(parregion, collapsed)
mapper[root] = parregion
iet = Transformer(mapper).visit(iet)
# The new arguments introduced by this pass
args = [i for i in FindSymbols().visit(iet) if isinstance(i, (NThreadsMixin))]
for n in FindNodes(Dereference).visit(iet):
args.extend([(n.array, True), n.parray])
return iet, {'args': args, 'includes': ['omp.h']}
@iet_pass
def make_parallel(self, iet):
"""
Create a new IET with shared-memory parallelism via OpenMP pragmas.
"""
return self._make_parallel(iet)
@iet_pass
def make_simd(self, iet, **kwargs):
"""
Create a new IET with SIMD parallelism via OpenMP pragmas.
"""
simd_reg_size = kwargs.pop('simd_reg_size')
mapper = {}
for tree in retrieve_iteration_tree(iet):
candidates = [i for i in tree if i.is_Parallel]
# As long as there's an outer level of parallelism, the innermost
# PARALLEL Iteration gets vectorized
if len(candidates) < 2:
continue
candidate = candidates[-1]
# Construct OpenMP SIMD pragma
aligned = [j for j in FindSymbols('symbolics').visit(candidate)
if j.is_DiscreteFunction]
if aligned:
simd = self.lang['simd-for-aligned']
simd = as_tuple(simd(','.join([j.name for j in aligned]),
simd_reg_size))
else:
simd = as_tuple(self.lang['simd-for'])
pragmas = candidate.pragmas + simd
# Add VECTORIZED property
properties = list(candidate.properties) + [VECTORIZED]
mapper[candidate] = candidate._rebuild(pragmas=pragmas, properties=properties)
iet = Transformer(mapper).visit(iet)
return iet, {}
|
the-stack_0_17400 | import validator.BaseValidator as BaseValidator
from builtins import str
from validate_email import validate_email
class EmailValidator(BaseValidator.BaseValidator):
message = "Value is not correct email address"
def validate(self, value):
#possible null values
if value is None:
return True
value = super(EmailValidator, self).validate(value)
if type(value) is str:
return validate_email(value)
return False
def __init__(self, params):
super(EmailValidator, self).__init__(params)
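# Illustrative usage (not part of the original module; assumes BaseValidator
# accepts a plain params dict):
#   validator = EmailValidator({})
#   validator.validate('user@example.com')   # True (syntactically valid address)
#   validator.validate('not-an-email')       # False
#   validator.validate(None)                 # True (null values are allowed)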
|
the-stack_0_17403 | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import numpy as np
import time
import pycocotools.mask as mask_util
import paddlex.utils.logging as logging
from paddlex.utils import is_pic
from .det_metrics.coco_utils import loadRes
def visualize_detection(image,
result,
threshold=0.5,
save_dir='./',
color=None):
"""
Visualize bbox and mask results
"""
if isinstance(image, np.ndarray):
image_name = str(int(time.time() * 1000)) + '.jpg'
else:
image_name = os.path.split(image)[-1]
image = cv2.imread(image)
image = draw_bbox_mask(image, result, threshold=threshold, color_map=color)
if save_dir is not None:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
out_path = os.path.join(save_dir, 'visualize_{}'.format(image_name))
cv2.imwrite(out_path, image)
logging.info('The visualized result is saved at {}'.format(out_path))
else:
return image
def visualize_segmentation(image,
result,
weight=0.6,
save_dir='./',
color=None):
"""
    Convert the segmentation result to a color image and save the blended image.
Args:
image: the path of origin image
result: the predict result of image
        weight: the blending weight for the original image; the pseudo-color result is weighted by (1 - weight)
save_dir: the directory for saving visual image
color: the list of a BGR-mode color for each label.
"""
label_map = result['label_map']
color_map = get_color_map_list(256)
if color is not None:
for i in range(len(color) // 3):
color_map[i] = color[i * 3:(i + 1) * 3]
color_map = np.array(color_map).astype("uint8")
# Use OpenCV LUT for color mapping
c1 = cv2.LUT(label_map, color_map[:, 0])
c2 = cv2.LUT(label_map, color_map[:, 1])
c3 = cv2.LUT(label_map, color_map[:, 2])
pseudo_img = np.dstack((c1, c2, c3))
if isinstance(image, np.ndarray):
im = image
image_name = str(int(time.time() * 1000)) + '.jpg'
if image.shape[2] != 3:
logging.info(
"The image is not 3-channel array, so predicted label map is shown as a pseudo color image."
)
weight = 0.
else:
image_name = os.path.split(image)[-1]
if not is_pic(image):
logging.info(
"The image cannot be opened by opencv, so predicted label map is shown as a pseudo color image."
)
image_name = image_name.split('.')[0] + '.jpg'
weight = 0.
else:
im = cv2.imread(image)
if abs(weight) < 1e-5:
vis_result = pseudo_img
else:
vis_result = cv2.addWeighted(im, weight,
pseudo_img.astype(im.dtype), 1 - weight,
0)
if save_dir is not None:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
out_path = os.path.join(save_dir, 'visualize_{}'.format(image_name))
cv2.imwrite(out_path, vis_result)
logging.info('The visualized result is saved as {}'.format(out_path))
else:
return vis_result
def get_color_map_list(num_classes):
""" Returns the color map for visualizing the segmentation mask,
        which can support an arbitrary number of classes.
Args:
num_classes: Number of classes
Returns:
The color map
"""
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
# expand an array of boxes by a given scale.
def expand_boxes(boxes, scale):
"""
"""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
def clip_bbox(bbox):
xmin = max(min(bbox[0], 1.), 0.)
ymin = max(min(bbox[1], 1.), 0.)
xmax = max(min(bbox[2], 1.), 0.)
ymax = max(min(bbox[3], 1.), 0.)
return xmin, ymin, xmax, ymax
def draw_bbox_mask(image, results, threshold=0.5, color_map=None):
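    """Draw bounding boxes, instance masks and score labels of `results` onto `image`."""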
_SMALL_OBJECT_AREA_THRESH = 1000
height, width = image.shape[:2]
default_font_scale = max(np.sqrt(height * width) // 900, .5)
linewidth = max(default_font_scale / 40, 2)
labels = list()
for dt in results:
if dt['category'] not in labels:
labels.append(dt['category'])
if color_map is None:
color_map = get_color_map_list(len(labels) + 2)[2:]
else:
color_map = np.asarray(color_map)
if color_map.shape[0] != len(labels) or color_map.shape[1] != 3:
raise Exception(
"The shape for color_map is required to be {}x3, but recieved shape is {}x{}.".
format(len(labels), color_map.shape))
if np.max(color_map) > 255 or np.min(color_map) < 0:
raise ValueError(
" The values in color_map should be within 0-255 range.")
keep_results = []
areas = []
for dt in results:
cname, bbox, score = dt['category'], dt['bbox'], dt['score']
if score < threshold:
continue
keep_results.append(dt)
areas.append(bbox[2] * bbox[3])
areas = np.asarray(areas)
sorted_idxs = np.argsort(-areas).tolist()
keep_results = [keep_results[k]
for k in sorted_idxs] if keep_results else []
for dt in keep_results:
cname, bbox, score = dt['category'], dt['bbox'], dt['score']
bbox = list(map(int, bbox))
xmin, ymin, w, h = bbox
xmax = xmin + w
ymax = ymin + h
color = tuple(color_map[labels.index(cname)])
# draw bbox
image = cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color,
linewidth)
# draw mask
if 'mask' in dt:
mask = mask_util.decode(dt['mask']) * 255
image = image.astype('float32')
alpha = .7
w_ratio = .4
            # `np.int` was removed in NumPy 1.24; plain `int` is the equivalent alias
            color_mask = np.asarray(color, dtype=int)
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
image[idx[0], idx[1], :] *= 1.0 - alpha
image[idx[0], idx[1], :] += alpha * color_mask
image = image.astype("uint8")
contours = cv2.findContours(
mask.astype("uint8"), cv2.RETR_CCOMP,
cv2.CHAIN_APPROX_NONE)[-2]
image = cv2.drawContours(
image,
contours,
contourIdx=-1,
color=color,
thickness=1,
lineType=cv2.LINE_AA)
# draw label
text_pos = (xmin, ymin)
instance_area = w * h
if (instance_area < _SMALL_OBJECT_AREA_THRESH or h < 40):
if ymin >= height - 5:
text_pos = (xmin, ymin)
else:
text_pos = (xmin, ymax)
height_ratio = h / np.sqrt(height * width)
font_scale = (np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2,
2) * 0.5 * default_font_scale)
text = "{} {:.2f}".format(cname, score)
(tw, th), baseline = cv2.getTextSize(
text,
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=font_scale,
thickness=1)
image = cv2.rectangle(
image,
text_pos, (text_pos[0] + tw, text_pos[1] + th + baseline),
color=color,
thickness=-1)
image = cv2.putText(
image,
text, (text_pos[0], text_pos[1] + th),
fontFace=cv2.FONT_HERSHEY_DUPLEX,
fontScale=font_scale,
color=(255, 255, 255),
thickness=1,
lineType=cv2.LINE_AA)
return image
def draw_pr_curve(eval_details_file=None,
gt=None,
pred_bbox=None,
pred_mask=None,
iou_thresh=0.5,
save_dir='./'):
if eval_details_file is not None:
import json
with open(eval_details_file, 'r') as f:
eval_details = json.load(f)
pred_bbox = eval_details['bbox']
if 'mask' in eval_details:
pred_mask = eval_details['mask']
gt = eval_details['gt']
if gt is None or pred_bbox is None:
raise Exception(
"gt/pred_bbox/pred_mask is None now, please set right eval_details_file or gt/pred_bbox/pred_mask."
)
if pred_bbox is not None and len(pred_bbox) == 0:
raise Exception("There is no predicted bbox.")
if pred_mask is not None and len(pred_mask) == 0:
raise Exception("There is no predicted mask.")
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
coco = COCO()
coco.dataset = gt
coco.createIndex()
def _summarize(coco_gt, ap=1, iouThr=None, areaRng='all', maxDets=100):
p = coco_gt.params
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = coco_gt.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, :, aind, mind]
else:
# dimension of recall: [TxKxAxM]
s = coco_gt.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
return mean_s
def cal_pr(coco_gt, coco_dt, iou_thresh, save_dir, style='bbox'):
coco_dt = loadRes(coco_gt, coco_dt)
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.params.iouThrs = np.linspace(
iou_thresh, iou_thresh, 1, endpoint=True)
coco_eval.evaluate()
coco_eval.accumulate()
stats = _summarize(coco_eval, iouThr=iou_thresh)
catIds = coco_gt.getCatIds()
if len(catIds) != coco_eval.eval['precision'].shape[2]:
raise Exception(
"The category number must be same as the third dimension of precisions."
)
x = np.arange(0.0, 1.01, 0.01)
color_map = get_color_map_list(256)[1:256]
plt.subplot(1, 2, 1)
plt.title(style + " precision-recall IoU={}".format(iou_thresh))
plt.xlabel("recall")
plt.ylabel("precision")
plt.xlim(0, 1.01)
plt.ylim(0, 1.01)
plt.grid(linestyle='--', linewidth=1)
plt.plot([0, 1], [0, 1], 'r--', linewidth=1)
my_x_ticks = np.arange(0, 1.01, 0.1)
my_y_ticks = np.arange(0, 1.01, 0.1)
plt.xticks(my_x_ticks, fontsize=5)
plt.yticks(my_y_ticks, fontsize=5)
for idx, catId in enumerate(catIds):
pr_array = coco_eval.eval['precision'][0, :, idx, 0, 2]
precision = pr_array[pr_array > -1]
ap = np.mean(precision) if precision.size else float('nan')
nm = coco_gt.loadCats(catId)[0]['name'] + ' AP={:0.2f}'.format(
float(ap * 100))
color = tuple(color_map[idx])
color = [float(c) / 255 for c in color]
color.append(0.75)
plt.plot(x, pr_array, color=color, label=nm, linewidth=1)
plt.legend(loc="lower left", fontsize=5)
plt.subplot(1, 2, 2)
plt.title(style + " score-recall IoU={}".format(iou_thresh))
plt.xlabel('recall')
plt.ylabel('score')
plt.xlim(0, 1.01)
plt.ylim(0, 1.01)
plt.grid(linestyle='--', linewidth=1)
plt.xticks(my_x_ticks, fontsize=5)
plt.yticks(my_y_ticks, fontsize=5)
for idx, catId in enumerate(catIds):
nm = coco_gt.loadCats(catId)[0]['name']
sr_array = coco_eval.eval['scores'][0, :, idx, 0, 2]
color = tuple(color_map[idx])
color = [float(c) / 255 for c in color]
color.append(0.75)
plt.plot(x, sr_array, color=color, label=nm, linewidth=1)
plt.legend(loc="lower left", fontsize=5)
plt.savefig(
os.path.join(
save_dir,
"./{}_pr_curve(iou-{}).png".format(style, iou_thresh)),
dpi=800)
plt.close()
if not os.path.exists(save_dir):
os.makedirs(save_dir)
cal_pr(coco, pred_bbox, iou_thresh, save_dir, style='bbox')
if pred_mask is not None:
cal_pr(coco, pred_mask, iou_thresh, save_dir, style='segm')
|
the-stack_0_17404 | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
# noinspection SpellCheckingInspection
setuptools.setup(
name='hellpy',
license='MIT',
version='1.1.0',
python_requires=">=3.6",
author='Manan (mentix02)',
long_description=long_description,
description='A connector for HellDB.',
author_email='[email protected]',
packages=['hellpy', 'hellpy.structures'],
url='https://github.com/helldatabase/hellpy',
long_description_content_type='text/markdown',
classifiers=[
"Topic :: Database",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
],
)
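# Typical packaging workflow for this file (illustrative, not part of the
# original script):
#   python setup.py sdist bdist_wheel
#   twine upload dist/*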
|
the-stack_0_17406 | import sys
ros_path = '/opt/ros/kinetic/lib/python2.7/dist-packages'
if ros_path in sys.path:
sys.path.remove(ros_path)
import cv2
cap = cv2.VideoCapture(0)
count = 0
while cap.isOpened():
ret, frame = cap.read()
frame = cv2.resize(frame, (1024, 768))
cv2.imshow("capture", frame)
key = cv2.waitKey(1) & 0xFF
    if key == ord('s'):
        # Save the current frame; note that cv2.imwrite does not create
        # directories, so './image/' must already exist.
        cv2.imwrite('./image/' + str(count) + '.jpg', frame)
        count = count + 1
    if key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows() |
the-stack_0_17408 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest(BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3], redirect_stderr=True)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 500)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
assert_equal(self.nodes[1].getbalance(), 500)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 210 TGIF from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 1000 TGIF in block rewards plus fees, but
# minus the 210 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 1000-210)
assert_equal(self.nodes[2].getbalance(), 210)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True, False, True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True, False, True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 1000)
assert_equal(self.nodes[2].getbalance("from1"), 1000-210)
# Send 100 TGIF normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.00001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('100'))
# Send 100 TGIF with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 100 TGIF
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('100')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 100 TGIF with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, False, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3], redirect_stderr=True))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
        # tx should be added to balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
cbAddr = self.nodes[1].getnewaddress()
blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
self.sync_all()
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(cbTxId)
# check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label, s)
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
# disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
# '-salvagewallet',
]
chainlimit = 6
for m in maintenance:
print("check " + m)
stop_nodes(self.nodes)
# set lower ancestor limit for later
self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# ==Check that wallet prefers to use coins that don't exceed mempool limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
self.nodes[0].generate(1)
node0_balance = self.nodes[0].getbalance()
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
signedtx = self.nodes[0].signrawtransaction(rawtx)
singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for i in range(chainlimit*2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
assert_equal(len(txid_list), chainlimit*2)
# Without walletrejectlongchains, we will still generate a txid
# The tx will be stored in the wallet but not accepted to the mempool
extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))
assert(extra_txid not in self.nodes[0].getrawmempool())
assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()])
self.nodes[0].abandontransaction(extra_txid)
total_txs = len(self.nodes[0].listtransactions("*",99999))
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
stop_node(self.nodes[0],0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])
# wait for loadmempool
timeout = 10
while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
time.sleep(0.5)
timeout -= 0.5
assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_message(JSONRPCException, "mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
if __name__ == '__main__':
WalletTest().main()
|
the-stack_0_17409 | from collections import OrderedDict
import math
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
def load_weights_sequential(target, source_state):
new_dict = OrderedDict()
for (k1, v1), (k2, v2) in zip(target.state_dict().items(), source_state.items()):
new_dict[k1] = v2
target.load_state_dict(new_dict)
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, dilation=dilation,
padding=dilation, bias=False)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers=(3, 4, 23, 3)):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False)
)
layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x_3 = self.layer3(x)
x = self.layer4(x_3)
return x, x_3
def resnet18(pretrained=False):
model = ResNet(BasicBlock, [2, 2, 2, 2])
return model
def resnet34(pretrained=False):
model = ResNet(BasicBlock, [3, 4, 6, 3])
return model
def resnet50(pretrained=False):
model = ResNet(Bottleneck, [3, 4, 6, 3])
return model
def resnet101(pretrained=False):
model = ResNet(Bottleneck, [3, 4, 23, 3])
return model
def resnet152(pretrained=False):
model = ResNet(Bottleneck, [3, 8, 36, 3])
return model |
the-stack_0_17410 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related part of the Keras engine.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import values as ds_values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import callbacks as callbacks_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
from tensorflow.python.keras.engine import compile_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine import network
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer as lso
from tensorflow.python.keras.saving.saved_model import model_serialization
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils import version_utils
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.profiler import trace
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import keras_export
_keras_api_gauge = monitoring.BoolGauge('/tensorflow/api/keras',
'keras api usage', 'method')
def enable_multi_worker(method):
"""Decorator that handles running `method` with multi-worker strategy."""
def _method_wrapper(self, *args, **kwargs):
if not self._in_multi_worker_mode(): # pylint: disable=protected-access
return method(self, *args, **kwargs)
# Running inside `run_distribute_coordinator` already.
if dc_context.get_current_worker_context():
return method(self, *args, **kwargs)
return dc.run_distribute_coordinator(
lambda _: method(self, *args, **kwargs),
self.distribute_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
return tf_decorator.make_decorator(
target=method, decorator_func=_method_wrapper)
def disable_multi_worker(method):
"""Decorator that disallows multi-worker use of `method`."""
def _method_wrapper(self, *args, **kwargs):
if self._in_multi_worker_mode(): # pylint: disable=protected-access
raise ValueError('{} is not supported in multi-worker mode.'.format(
method.__name__))
return method(self, *args, **kwargs)
return tf_decorator.make_decorator(
target=method, decorator_func=_method_wrapper)
@keras_export('keras.Model', 'keras.models.Model')
class Model(network.Network, version_utils.ModelVersionSelector):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
Once the model is created, you can config the model with losses and metrics
with `model.compile()`, train the model with `model.fit()`, or use the model
to do prediction with `model.predict()`.
  Check out the [guide](https://www.tensorflow.org/guide/keras/overview) for
  additional details.
"""
_TF_MODULE_IGNORED_PROPERTIES = frozenset(
itertools.chain(('_train_counter', '_test_counter', '_predict_counter',
'_steps_per_execution'),
network.Network._TF_MODULE_IGNORED_PROPERTIES)) # pylint: disable=protected-access
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
_keras_api_gauge.get_cell('model').set(True)
# Model must be created under scope of DistStrat it will be trained with.
if ds_context.has_strategy():
self._distribution_strategy = ds_context.get_strategy()
else:
self._distribution_strategy = None
# Defaults to value of `tf.config.experimental_functions_run_eagerly`.
self._run_eagerly = None
self.stop_training = False
# Initialize cache attrs.
self._reset_compile_cache()
# Fault-tolerance handler. Set in `ModelCheckpoint`.
self._training_state = None
self.history = None
# These objects are used in the default `Model.compile`. They are not
# guaranteed to be set after `Model.compile` is called, as users can
# override compile with custom logic.
self.compiled_loss = None
self.compiled_metrics = None
self._init_batch_counters()
@trackable.no_automatic_dependency_tracking
def _init_batch_counters(self):
# Untracked Variables, used to keep track of mini-batches seen in `fit`,
# `evaluate`, and `predict`.
agg = variables.VariableAggregationV2.ONLY_FIRST_REPLICA
self._train_counter = variables.Variable(0, dtype='int64', aggregation=agg)
self._test_counter = variables.Variable(0, dtype='int64', aggregation=agg)
self._predict_counter = variables.Variable(
0, dtype='int64', aggregation=agg)
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
with self.distribute_strategy.scope():
return super(Model, self).get_weights()
def load_weights(self, filepath, by_name=False, skip_mismatch=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
If `by_name` is False weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to `save_weights`).
by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
skip_mismatch: Boolean, whether to skip loading of layers where there is
a mismatch in the number of weights, or a mismatch in the shape of
the weight (only valid when `by_name=True`).
Returns:
When loading a weight file in TensorFlow format, returns the same status
object as `tf.train.Checkpoint.restore`. When graph building, restore
ops are run automatically as soon as the network is built (on first call
for user-defined classes inheriting from `Model`, immediately if it is
already built).
When loading weights in HDF5 format, returns `None`.
Raises:
ImportError: If h5py is not available and the weight file is in HDF5
format.
ValueError: If `skip_mismatch` is set to `True` when `by_name` is
`False`.
"""
if dist_utils.is_tpu_strategy(self._distribution_strategy):
if (self._distribution_strategy.extended.steps_per_run > 1 and
(not network._is_hdf5_filepath(filepath))): # pylint: disable=protected-access
raise ValueError('Load weights is not yet supported with TPUStrategy '
'with steps_per_run greater than 1.')
return super(Model, self).load_weights(filepath, by_name, skip_mismatch)
def compile(self,
optimizer='rmsprop',
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
run_eagerly=None,
**kwargs):
"""Configures the model for training.
Arguments:
optimizer: String (name of optimizer) or optimizer instance. See
`tf.keras.optimizers`.
loss: String (name of objective function), objective function or
`tf.keras.losses.Loss` instance. See `tf.keras.losses`. An objective
function is any callable with the signature `loss = fn(y_true,
y_pred)`, where y_true = ground truth values with shape =
`[batch_size, d0, .. dN]`, except sparse loss functions such as sparse
categorical crossentropy where shape = `[batch_size, d0, .. dN-1]`.
y_pred = predicted values with shape = `[batch_size, d0, .. dN]`. It
returns a weighted loss float tensor. If a custom `Loss` instance is
used and reduction is set to NONE, return value has the shape
[batch_size, d0, .. dN-1] ie. per-sample or per-timestep loss values;
otherwise, it is a scalar. If the model has multiple outputs, you can
use a different loss on each output by passing a dictionary or a list
of losses. The loss value that will be minimized by the model will
then be the sum of all individual losses.
metrics: List of metrics to be evaluated by the model during training
and testing. Each of this can be a string (name of a built-in
function), function or a `tf.keras.metrics.Metric` instance. See
`tf.keras.metrics`. Typically you will use `metrics=['accuracy']`. A
function is any callable with the signature `result = fn(y_true,
y_pred)`. To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary, such as
`metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`.
You can also pass a list (len = len(outputs)) of lists of metrics
such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or
`metrics=['accuracy', ['accuracy', 'mse']]`. When you pass the
strings 'accuracy' or 'acc', we convert this to one of
`tf.keras.metrics.BinaryAccuracy`,
`tf.keras.metrics.CategoricalAccuracy`,
`tf.keras.metrics.SparseCategoricalAccuracy` based on the loss
function used and the model output shape. We do a similar
conversion for the strings 'crossentropy' and 'ce' as well.
loss_weights: Optional list or dictionary specifying scalar coefficients
(Python floats) to weight the loss contributions of different model
outputs. The loss value that will be minimized by the model will then
be the *weighted sum* of all individual losses, weighted by the
`loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping to the model's
outputs. If a dict, it is expected to map output names (strings)
to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise sample weighting (2D
weights), set this to `"temporal"`. `None` defaults to sample-wise
weights (1D). If the model has multiple outputs, you can use a
different `sample_weight_mode` on each output by passing a dictionary
or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted by
sample_weight or class_weight during training and testing.
run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s
logic will not be wrapped in a `tf.function`. Recommended to leave
this as `None` unless your `Model` cannot be run inside a
`tf.function`.
**kwargs: Any additional arguments. Supported arguments:
`experimental_steps_per_execution`: Int. The number of batches to
run during each `tf.function` call. Running multiple batches
inside a single `tf.function` call can greatly improve performance
on TPUs or small models with a large Python overhead. Note that if
this value is set to `N`, `Callback.on_batch` methods will only be
called every `N` batches. This currently defaults to `1`. At most,
one full epoch will be run each execution. If a number larger than
the size of the epoch is passed, the execution will be truncated
to the size of the epoch.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
_keras_api_gauge.get_cell('compile').set(True)
with self.distribute_strategy.scope():
self._validate_compile(optimizer, metrics, **kwargs)
self._run_eagerly = kwargs.pop('run_eagerly', None)
self.optimizer = self._get_optimizer(optimizer)
self.compiled_loss = compile_utils.LossesContainer(
loss, loss_weights, output_names=self.output_names)
self.compiled_metrics = compile_utils.MetricsContainer(
metrics, weighted_metrics, output_names=self.output_names)
experimental_steps_per_execution = kwargs.pop(
'experimental_steps_per_execution', 1)
self._configure_steps_per_execution(experimental_steps_per_execution)
# Initializes attrs that are reset each time `compile` is called.
self._reset_compile_cache()
self._is_compiled = True
self.loss = loss or {} # Backwards compat.
def _get_optimizer(self, optimizer):
"""Wraps `optimizer` in `LossScaleOptimizer` if necessary."""
def _get_single_optimizer(opt):
opt = optimizers.get(opt)
if (self._dtype_policy.loss_scale is not None and
not isinstance(opt, lso.LossScaleOptimizer)):
opt = lso.LossScaleOptimizer(opt, self._dtype_policy.loss_scale)
return opt
return nest.map_structure(_get_single_optimizer, optimizer)
@trackable.no_automatic_dependency_tracking
def _reset_compile_cache(self):
self.train_function = None
self.test_function = None
self.predict_function = None
# Used to cache `trainable` attr of `Layer`s for `fit`.
self._compiled_trainable_state = self._get_trainable_state()
@trackable.no_automatic_dependency_tracking
def _configure_steps_per_execution(self, steps_per_execution):
self._steps_per_execution = variables.Variable(
steps_per_execution,
dtype='int64',
aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
@property
def metrics(self):
"""Returns the model's metrics added using `compile`, `add_metric` APIs."""
metrics = []
if self._is_compiled:
# TODO(omalleyt): Track `LossesContainer` and `MetricsContainer` objects
# so that attr names are not load-bearing.
if self.compiled_loss is not None:
metrics += self.compiled_loss.metrics
if self.compiled_metrics is not None:
metrics += self.compiled_metrics.metrics
all_layers = self._gather_unique_layers()
for l in all_layers:
metrics.extend(l._metrics) # pylint: disable=protected-access
return metrics
@property
def metrics_names(self):
"""Returns the model's display labels for all outputs."""
# This property includes all output names including `loss` and per-output
# losses for backward compatibility.
return [m.name for m in self.metrics]
@property
def distribute_strategy(self):
"""The `tf.distribute.Strategy` this model was created under."""
return self._distribution_strategy or ds_context.get_strategy()
@property
def run_eagerly(self):
"""Settable attribute indicating whether the model should run eagerly.
Running eagerly means that your model will be run step by step,
like Python code. Your model might run slower, but it should become easier
for you to debug it by stepping into individual layer calls.
By default, we will attempt to compile your model to a static graph to
deliver the best execution performance.
Returns:
Boolean, whether the model should run eagerly.
"""
if self._run_eagerly is True and not context.executing_eagerly():
raise ValueError('You can only set `run_eagerly=True` if eager execution '
'is enabled.')
if not self.dynamic:
if self._run_eagerly is None:
# Respect `tf.config.experimental_run_functions_eagerly` unless
# `run_eagerly` was explicitly passed to `compile`.
return def_function.RUN_FUNCTIONS_EAGERLY
else:
return self._run_eagerly
else:
if not context.executing_eagerly():
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution (layers '
'constructed with `dynamic=True`). '
'You must enable eager execution with '
'`tf.enable_eager_execution()`.')
if self._run_eagerly is False:
# TODO(fchollet): consider using py_func to enable this.
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution (layers '
'constructed with `dynamic=True`). '
'You cannot set `run_eagerly=False`.')
return context.executing_eagerly()
@run_eagerly.setter
def run_eagerly(self, value):
self._run_eagerly = value
def train_step(self, data):
"""The logic for one training step.
This method can be overridden to support custom training logic.
This method is called by `Model._make_train_function`.
This method should contain the mathematical logic for one step of training.
This typically includes the forward pass, loss calculation, backpropagation,
and metric updates.
Configuration details for *how* this logic is run (e.g. `tf.function` and
`tf.distribute.Strategy` settings), should be left to
`Model._make_train_function`, which can also be overridden.
Arguments:
data: A nested structure of `Tensor`s.
Returns:
A `dict` containing values that will be passed to
`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
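Example of overriding this method in a subclass (a sketch that mirrors the
default behavior and assumes `data` unpacks to a plain `(x, y)` tuple):
```python
class CustomModel(tf.keras.Model):

  def train_step(self, data):
    x, y = data  # Assumes no sample weights are provided.
    with tf.GradientTape() as tape:
      y_pred = self(x, training=True)
      loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
    gradients = tape.gradient(loss, self.trainable_variables)
    self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
    self.compiled_metrics.update_state(y, y_pred)
    return {m.name: m.result() for m in self.metrics}
```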
"""
# These are the only transformations `Model.fit` applies to user-input
# data when a `tf.data.Dataset` is provided. These utilities will be exposed
# publicly.
data = data_adapter.expand_1d(data)
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
with backprop.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
# For custom training steps, users can just write:
# trainable_variables = self.trainable_variables
# gradients = tape.gradient(loss, trainable_variables)
# self.optimizer.apply_gradients(zip(gradients, trainable_variables))
# The _minimize call does a few extra steps unnecessary in most cases,
# such as loss scaling and gradient clipping.
_minimize(self.distribute_strategy, tape, self.optimizer, loss,
self.trainable_variables)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
def make_train_function(self):
"""Creates a function that executes one step of training.
This method can be overridden to support custom training logic.
This method is called by `Model.fit` and `Model.train_on_batch`.
Typically, this method directly controls `tf.function` and
`tf.distribute.Strategy` settings, and delegates the actual training
logic to `Model._train_step`.
This function is cached the first time `Model.fit` or
`Model.train_on_batch` is called. The cache is cleared whenever
`Model.compile` is called.
Returns:
Function. The function created by this method should accept a
`tf.data.Iterator`, and return a `dict` containing values that will
be passed to `tf.keras.Callbacks.on_train_batch_end`, such as
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
if self.train_function is not None:
return self.train_function
def step_function(model, iterator):
"""Runs a single training step."""
def run_step(data):
outputs = model.train_step(data)
# Ensure counter is updated only if `train_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
model._train_counter.assign_add(1) # pylint: disable=protected-access
return outputs
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(
outputs, self.distribute_strategy, reduction='first')
write_scalar_summaries(outputs, step=model._train_counter) # pylint: disable=protected-access
return outputs
if self._steps_per_execution.numpy().item() == 1:
def train_function(iterator):
"""Runs a training execution with one step."""
return step_function(self, iterator)
else:
def train_function(iterator):
"""Runs a training execution with multiple steps."""
outputs = step_function(self, iterator)
for _ in math_ops.range(self._steps_per_execution - 1):
outputs = step_function(self, iterator)
return outputs
if not self.run_eagerly:
train_function = def_function.function(
train_function, experimental_relax_shapes=True)
self.train_function = train_function
return self.train_function
@enable_multi_worker
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A generator or `keras.utils.Sequence` returning `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
A more detailed description of unpacking behavior for iterator types
(Dataset, generator, Sequence) is given below.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, generator,
or `keras.utils.Sequence` instance, `y` should
not be specified (since targets will be obtained from `x`).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of datasets, generators, or `keras.utils.Sequence` instances
(since they generate batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
Note that the progress bar is not particularly useful when
logged to a file, so verbose=2 is recommended when not running
interactively (eg, in a production environment).
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See `tf.keras.callbacks`.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset, generator or
`keras.utils.Sequence` instance.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data. Thus, note the fact
that the validation loss of data provided using `validation_split`
or `validation_data` is not affected by regularization layers like
noise and dropout.
`validation_data` will override `validation_split`.
`validation_data` could be:
- tuple `(x_val, y_val)` of Numpy arrays or tensors
- tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
- dataset
For the first two cases, `batch_size` must be provided.
For the last case, `validation_steps` could be provided.
Note that `validation_data` does not support all the data types that
are supported in `x`, eg, dict, generator or `keras.utils.Sequence`.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch'). This argument is ignored
when `x` is a generator. 'batch' is a special option for dealing
with the limitations of HDF5 data; it shuffles in batch-sized
chunks. Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, generator, or
`keras.utils.Sequence` instance, instead provide the sample_weights
as the third element of `x`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined. If x is a
`tf.data` dataset, and 'steps_per_epoch'
is None, the epoch will run until the input dataset is exhausted.
When passing an infinitely repeating dataset, you must specify the
`steps_per_epoch` argument. This argument is not supported with
array inputs.
validation_steps: Only relevant if `validation_data` is provided and
is a `tf.data` dataset. Total number of steps (batches of
samples) to draw before stopping when performing validation
at the end of every epoch. If 'validation_steps' is None, validation
will run until the `validation_data` dataset is exhausted. In the
case of an infinitely repeated dataset, it will run into an
infinite loop. If 'validation_steps' is specified and only part of
the dataset will be consumed, the evaluation will start from the
beginning of the dataset at each epoch. This ensures that the same
validation samples are used every time.
validation_batch_size: Integer or `None`.
Number of samples per validation batch.
If unspecified, will default to `batch_size`.
Do not specify the `validation_batch_size` if your data is in the
form of datasets, generators, or `keras.utils.Sequence` instances
(since they generate batches).
validation_freq: Only relevant if validation data is provided. Integer
or `collections_abc.Container` instance (e.g. list, tuple, etc.).
If an integer, specifies how many training epochs to run before a
new validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up
when using process-based threading. If unspecified, `workers`
will default to 1. If 0, will execute the generator on the main
thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
**kwargs: Used for backwards compatibility.
Unpacking behavior for iterator-like inputs:
A common pattern is to pass a tf.data.Dataset, generator, or
tf.keras.utils.Sequence to the `x` argument of fit, which will in fact
yield not only features (x) but optionally targets (y) and sample weights.
Keras requires that the output of such iterator-likes be unambiguous. The
iterator should return a tuple of length 1, 2, or 3, where the optional
second and third elements will be used for y and sample_weight
respectively. Any other type provided will be wrapped in a length one
tuple, effectively treating everything as 'x'. When yielding dicts, they
should still adhere to the top-level tuple structure.
e.g. `({"x0": x0, "x1": x1}, y)`. Keras will not attempt to separate
features, targets, and weights from the keys of a single dict.
A notable unsupported data type is the namedtuple. The reason is that
it behaves like both an ordered datatype (tuple) and a mapping
datatype (dict). So given a namedtuple of the form:
`namedtuple("example_tuple", ["y", "x"])`
it is ambiguous whether to reverse the order of the elements when
interpreting the value. Even worse is a tuple of the form:
`namedtuple("other_tuple", ["x", "y", "z"])`
where it is unclear if the tuple was intended to be unpacked into x, y,
and sample_weight or passed through as a single element to `x`. As a
result the data processing code will simply raise a ValueError if it
encounters a namedtuple. (Along with instructions to remedy the issue.)
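For example, a dataset yielding dict features alongside targets can be built
and passed to `fit` as follows (a sketch; `x0_array`, `x1_array` and `y_array`
are placeholder NumPy arrays, and the model is assumed to have inputs named
"x0" and "x1"):
```python
dataset = tf.data.Dataset.from_tensor_slices(
    ({"x0": x0_array, "x1": x1_array}, y_array)).batch(32)
model.fit(dataset, epochs=10)
```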
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
"""
_keras_api_gauge.get_cell('fit').set(True)
# Legacy graph support is contained in `training_v1.Model`.
version_utils.disallow_legacy_graph('Model', 'fit')
self._assert_compile_was_called()
self._check_call_args('fit')
if validation_split:
# Create the validation data using the training data. Only supported for
# `Tensor` and `NumPy` input.
(x, y, sample_weight), validation_data = (
data_adapter.train_validation_split((x, y, sample_weight),
validation_split=validation_split,
shuffle=False))
with self.distribute_strategy.scope(), \
training_utils.RespectCompiledTrainableState(self):
# Creates a `tf.data.Dataset` and handles batch and epoch iteration.
data_handler = data_adapter.DataHandler(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
initial_epoch=initial_epoch,
epochs=epochs,
shuffle=shuffle,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
model=self,
steps_per_execution=self._steps_per_execution)
# Container that configures and calls `tf.keras.Callback`s.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
model=self,
verbose=verbose,
epochs=epochs,
steps=data_handler.inferred_steps)
self.stop_training = False
train_function = self.make_train_function()
self._train_counter.assign(0)
callbacks.on_train_begin()
# Handle fault-tolerance for multi-worker.
# TODO(omalleyt): Fix the ordering issues that mean this has to
# happen after `callbacks.on_train_begin`.
data_handler._initial_epoch = ( # pylint: disable=protected-access
self._maybe_load_initial_epoch_from_ckpt(initial_epoch))
for epoch, iterator in data_handler.enumerate_epochs():
self.reset_metrics()
callbacks.on_epoch_begin(epoch)
with data_handler.catch_stop_iteration():
for step in data_handler.steps():
with trace.Trace(
'TraceContext',
graph_type='train',
epoch_num=epoch,
step_num=step,
batch_size=batch_size):
callbacks.on_train_batch_begin(step)
tmp_logs = train_function(iterator)
if data_handler.should_sync:
context.async_wait()
logs = tmp_logs # No error, now safe to assign to logs.
end_step = step + data_handler.step_increment
callbacks.on_train_batch_end(end_step, logs)
epoch_logs = copy.copy(logs)
# Run validation.
if validation_data and self._should_eval(epoch, validation_freq):
val_x, val_y, val_sample_weight = (
data_adapter.unpack_x_y_sample_weight(validation_data))
val_logs = self.evaluate(
x=val_x,
y=val_y,
sample_weight=val_sample_weight,
batch_size=validation_batch_size or batch_size,
steps=validation_steps,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
return_dict=True)
val_logs = {'val_' + name: val for name, val in val_logs.items()}
epoch_logs.update(val_logs)
callbacks.on_epoch_end(epoch, epoch_logs)
if self.stop_training:
break
callbacks.on_train_end()
return self.history
def test_step(self, data):
"""The logic for one evaluation step.
This method can be overridden to support custom evaluation logic.
This method is called by `Model._make_test_function`.
This function should contain the mathematical logic for one step of
evaluation.
This typically includes the forward pass, loss calculation, and metrics
updates.
Configuration details for *how* this logic is run (e.g. `tf.function` and
`tf.distribute.Strategy` settings), should be left to
`Model._make_test_function`, which can also be overridden.
Arguments:
data: A nested structure of `Tensor`s.
Returns:
A `dict` containing values that will be passed to
`tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
values of the `Model`'s metrics are returned.
"""
data = data_adapter.expand_1d(data)
x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
y_pred = self(x, training=False)
# Updates stateful loss metrics.
self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
def make_test_function(self):
"""Creates a function that executes one step of evaluation.
This method can be overridden to support custom evaluation logic.
This method is called by `Model.evaluate` and `Model.test_on_batch`.
Typically, this method directly controls `tf.function` and
`tf.distribute.Strategy` settings, and delegates the actual evaluation
logic to `Model._test_step`.
This function is cached the first time `Model.evaluate` or
`Model.test_on_batch` is called. The cache is cleared whenever
`Model.compile` is called.
Returns:
Function. The function created by this method should accept a
`tf.data.Iterator`, and return a `dict` containing values that will
be passed to `tf.keras.Callbacks.on_test_batch_end`.
"""
if self.test_function is not None:
return self.test_function
def step_function(model, iterator):
"""Runs a single evaluation step."""
def run_step(data):
outputs = model.test_step(data)
# Ensure counter is updated only if `test_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
model._test_counter.assign_add(1) # pylint: disable=protected-access
return outputs
data = next(iterator)
outputs = model.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(
outputs, self.distribute_strategy, reduction='first')
return outputs
if self._steps_per_execution.numpy().item() == 1:
def test_function(iterator):
"""Runs an evaluation execution with one step."""
return step_function(self, iterator)
else:
def test_function(iterator):
"""Runs an evaluation execution with multiple steps."""
outputs = step_function(self, iterator)
for _ in math_ops.range(self._steps_per_execution - 1):
outputs = step_function(self, iterator)
return outputs
if not self.run_eagerly:
test_function = def_function.function(
test_function, experimental_relax_shapes=True)
self.test_function = test_function
return self.test_function
@enable_multi_worker
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
return_dict=False):
"""Returns the loss value & metrics values for the model in test mode.
Computation is done in batches.
Arguments:
x: Input data. It could be: - A Numpy array (or array-like), or a list
of arrays (in case the model has multiple inputs). - A TensorFlow
tensor, or a list of tensors (in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors, if
the model has named inputs. - A `tf.data` dataset. - A generator or
`keras.utils.Sequence` instance. A more detailed description of
unpacking behavior for iterator types (Dataset, generator, Sequence)
is given in the `Unpacking behavior for iterator-like inputs` section
of `Model.fit`.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely). If
`x` is a dataset, generator or `keras.utils.Sequence` instance, `y`
should not be specified (since targets will be obtained from the
iterator/dataset).
batch_size: Integer or `None`. Number of samples per gradient update. If
unspecified, `batch_size` will default to 32. Do not specify the
`batch_size` if your data is in the form of a dataset, generators,
or `keras.utils.Sequence` instances (since they generate batches).
verbose: 0 or 1. Verbosity mode. 0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for the test samples,
used for weighting the loss function. You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples), or in the case of
temporal data, you can pass a 2D array with shape `(samples,
sequence_length)`, to apply a different weight to every timestep
of every sample. In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is
not supported when `x` is a dataset, instead pass sample weights
as the third element of `x`.
steps: Integer or `None`. Total number of steps (batches of samples)
before declaring the evaluation round finished. Ignored with the
default value of `None`. If x is a `tf.data` dataset and `steps` is
None, 'evaluate' will run until the dataset is exhausted. This
argument is not supported with array inputs.
callbacks: List of `keras.callbacks.Callback` instances. List of
callbacks to apply during evaluation. See
[callbacks](/api_docs/python/tf/keras/callbacks).
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue. If unspecified,
`max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using process-based
threading. If unspecified, `workers` will default to 1. If 0, will
execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to the
generator as they can't be passed easily to children processes.
return_dict: If `True`, loss and metric results are returned as a dict,
with each key being the name of the metric. If `False`, they are
returned as a list.
See the discussion of `Unpacking behavior for iterator-like inputs` for
`Model.fit`.
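Example (a sketch; `x_test` and `y_test` are placeholder arrays, and the
model is assumed to have been compiled with a single metric):
```python
loss, acc = model.evaluate(x_test, y_test, batch_size=128, verbose=0)
```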
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
"""
_keras_api_gauge.get_cell('evaluate').set(True)
version_utils.disallow_legacy_graph('Model', 'evaluate')
self._assert_compile_was_called()
self._check_call_args('evaluate')
with self.distribute_strategy.scope():
# Creates a `tf.data.Dataset` and handles batch and epoch iteration.
data_handler = data_adapter.DataHandler(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
initial_epoch=0,
epochs=1,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
model=self,
steps_per_execution=self._steps_per_execution)
# Container that configures and calls `tf.keras.Callback`s.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
model=self,
verbose=verbose,
epochs=1,
steps=data_handler.inferred_steps)
test_function = self.make_test_function()
self._test_counter.assign(0)
callbacks.on_test_begin()
for _, iterator in data_handler.enumerate_epochs(): # Single epoch.
self.reset_metrics()
with data_handler.catch_stop_iteration():
for step in data_handler.steps():
with trace.Trace('TraceContext', graph_type='test', step_num=step):
callbacks.on_test_batch_begin(step)
tmp_logs = test_function(iterator)
if data_handler.should_sync:
context.async_wait()
logs = tmp_logs # No error, now safe to assign to logs.
end_step = step + data_handler.step_increment
callbacks.on_test_batch_end(end_step, logs)
callbacks.on_test_end()
logs = tf_utils.to_numpy_or_python_type(logs)
if return_dict:
return logs
else:
results = [logs.get(name, None) for name in self.metrics_names]
if len(results) == 1:
return results[0]
return results
def predict_step(self, data):
"""The logic for one inference step.
This method can be overridden to support custom inference logic.
This method is called by `Model._make_predict_function`.
This method should contain the mathematical logic for one step of inference.
This typically includes the forward pass.
Configuration details for *how* this logic is run (e.g. `tf.function` and
`tf.distribute.Strategy` settings), should be left to
`Model._make_predict_function`, which can also be overridden.
Arguments:
data: A nested structure of `Tensor`s.
Returns:
The result of one inference step, typically the output of calling the
`Model` on data.
"""
data = data_adapter.expand_1d(data)
x, _, _ = data_adapter.unpack_x_y_sample_weight(data)
return self(x, training=False)
def make_predict_function(self):
"""Creates a function that executes one step of inference.
This method can be overridden to support custom inference logic.
This method is called by `Model.predict` and `Model.predict_on_batch`.
Typically, this method directly controls `tf.function` and
`tf.distribute.Strategy` settings, and delegates the actual evaluation
logic to `Model._predict_step`.
This function is cached the first time `Model.predict` or
`Model.predict_on_batch` is called. The cache is cleared whenever
`Model.compile` is called.
Returns:
Function. The function created by this method should accept a
`tf.data.Iterator`, and return the outputs of the `Model`.
"""
if self.predict_function is not None:
return self.predict_function
def predict_function(iterator):
"""Runs one call to `self.predict_function`."""
def run_step(data):
outputs = self.predict_step(data)
# Ensure counter is updated only if `predict_step` succeeds.
with ops.control_dependencies(_minimum_control_deps(outputs)):
self._predict_counter.assign_add(1)
return outputs
data = next(iterator)
outputs = self.distribute_strategy.run(run_step, args=(data,))
outputs = reduce_per_replica(
outputs, self.distribute_strategy, reduction='concat')
return outputs
if not self.run_eagerly:
predict_function = def_function.function(
predict_function, experimental_relax_shapes=True)
self.predict_function = predict_function
return self.predict_function
@disable_multi_worker
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Generates output predictions for the input samples.
Computation is done in batches. This method is designed for performance in
large scale inputs. For small amount of inputs that fit in one batch,
directly using `__call__` is recommended for faster execution, e.g.,
`model(x)`, or `model(x, training=False)` if you have layers such as
`tf.keras.layers.BatchNormalization` that behave differently during
inference. Also, note the fact that test loss is not affected by
regularization layers like noise and dropout.
Arguments:
x: Input samples. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
A more detailed description of unpacking behavior for iterator types
(Dataset, generator, Sequence) is given in the `Unpacking behavior
for iterator-like inputs` section of `Model.fit`.
batch_size: Integer or `None`.
Number of samples per batch.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of dataset, generators, or `keras.utils.Sequence` instances
(since they generate batches).
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`. If x is a `tf.data`
dataset and `steps` is None, `predict` will
run until the input dataset is exhausted.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during prediction.
See [callbacks](/api_docs/python/tf/keras/callbacks).
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
See the discussion of `Unpacking behavior for iterator-like inputs` for
`Model.fit`. Note that Model.predict uses the same interpretation rules as
`Model.fit` and `Model.evaluate`, so inputs must be unambiguous for all
three methods.
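Example (a sketch; `x_test` is a placeholder array):
```python
predictions = model.predict(x_test, batch_size=128)  # One row per sample.
```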
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
"""
print("=============================================test")
_keras_api_gauge.get_cell('predict').set(True)
version_utils.disallow_legacy_graph('Model', 'predict')
self._check_call_args('predict')
outputs = None
with self.distribute_strategy.scope():
# Creates a `tf.data.Dataset` and handles batch and epoch iteration.
data_handler = data_adapter.DataHandler(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
initial_epoch=0,
epochs=1,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
model=self)
# Container that configures and calls `tf.keras.Callback`s.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_history=True,
add_progbar=verbose != 0,
model=self,
verbose=verbose,
epochs=1,
steps=data_handler.inferred_steps)
predict_function = self.make_predict_function()
self._predict_counter.assign(0)
callbacks.on_predict_begin()
for _, iterator in data_handler.enumerate_epochs(): # Single epoch.
with data_handler.catch_stop_iteration():
for step in data_handler.steps():
callbacks.on_predict_batch_begin(step)
tmp_batch_outputs = predict_function(iterator)
if data_handler.should_sync:
context.async_wait()
batch_outputs = tmp_batch_outputs # No error, now safe to assign.
if outputs is None:
outputs = nest.map_structure(lambda batch_output: [batch_output],
batch_outputs)
else:
nest.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs, batch_outputs)
callbacks.on_predict_batch_end(step, {'outputs': batch_outputs})
callbacks.on_predict_end()
all_outputs = nest.map_structure_up_to(batch_outputs, concat, outputs)
return tf_utils.to_numpy_or_python_type(all_outputs)
def reset_metrics(self):
"""Resets the state of metrics."""
for m in self.metrics:
m.reset_states()
def train_on_batch(self,
x,
y=None,
sample_weight=None,
class_weight=None,
reset_metrics=True,
return_dict=False):
"""Runs a single gradient update on a single batch of data.
Arguments:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of
every sample. In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
class_weight: Optional dictionary mapping class indices (integers) to a
weight (float) to apply to the model's loss for the samples from this
class during training. This can be useful to tell the model to "pay
more attention" to samples from an under-represented class.
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
return_dict: If `True`, loss and metric results are returned as a dict,
with each key being the name of the metric. If `False`, they are
returned as a list.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
self._assert_compile_was_called()
self._check_call_args('train_on_batch')
with self.distribute_strategy.scope(), \
training_utils.RespectCompiledTrainableState(self):
iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x,
y, sample_weight,
class_weight)
train_function = self.make_train_function()
logs = train_function(iterator)
if reset_metrics:
self.reset_metrics()
logs = tf_utils.to_numpy_or_python_type(logs)
if return_dict:
return logs
else:
results = [logs.get(name, None) for name in self.metrics_names]
if len(results) == 1:
return results[0]
return results
def test_on_batch(self,
x,
y=None,
sample_weight=None,
reset_metrics=True,
return_dict=False):
"""Test the model on a single batch of samples.
Arguments:
x: Input data. It could be: - A Numpy array (or array-like), or a list
of arrays (in case the model has multiple inputs). - A TensorFlow
tensor, or a list of tensors (in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors, if
the model has named inputs.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of
every sample. In this case you should make sure to specify
sample_weight_mode="temporal" in compile().
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
return_dict: If `True`, loss and metric results are returned as a dict,
with each key being the name of the metric. If `False`, they are
returned as a list.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
"""
self._assert_compile_was_called()
self._check_call_args('test_on_batch')
with self.distribute_strategy.scope():
iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x,
y, sample_weight)
test_function = self.make_test_function()
logs = test_function(iterator)
if reset_metrics:
self.reset_metrics()
logs = tf_utils.to_numpy_or_python_type(logs)
if return_dict:
return logs
else:
results = [logs.get(name, None) for name in self.metrics_names]
if len(results) == 1:
return results[0]
return results
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Arguments:
x: Input data. It could be: - A Numpy array (or array-like), or a list
of arrays (in case the model has multiple inputs). - A TensorFlow
tensor, or a list of tensors (in case the model has multiple inputs).
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
"""
self._check_call_args('predict_on_batch')
with self.distribute_strategy.scope():
iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x)
predict_function = self.make_predict_function()
outputs = predict_function(iterator)
return tf_utils.to_numpy_or_python_type(outputs)
@deprecation.deprecated(
None, 'Please use Model.fit, which supports generators.')
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
validation_freq=1,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
DEPRECATED:
`Model.fit` now supports generators, so there is no longer any need to use
this endpoint.
"""
_keras_api_gauge.get_cell('fit_generator').set(True)
return self.fit(
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
validation_freq=validation_freq,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
@deprecation.deprecated(
None, 'Please use Model.evaluate, which supports generators.')
def evaluate_generator(self,
generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
DEPRECATED:
`Model.evaluate` now supports generators, so there is no longer any need
to use this endpoint.
"""
_keras_api_gauge.get_cell('evaluate_generator').set(True)
self._check_call_args('evaluate_generator')
return self.evaluate(
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
callbacks=callbacks)
@deprecation.deprecated(
None, 'Please use Model.predict, which supports generators.')
def predict_generator(self,
generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
DEPRECATED:
`Model.predict` now supports generators, so there is no longer any need
to use this endpoint.
"""
_keras_api_gauge.get_cell('predict_generator').set(True)
return self.predict(
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
callbacks=callbacks)
def _check_call_args(self, method_name):
"""Check that `call` has only one positional arg."""
# Always allow first arg, regardless of arg name.
fullargspec = self._call_full_argspec
if fullargspec.defaults:
positional_args = fullargspec.args[:-len(fullargspec.defaults)]
else:
positional_args = fullargspec.args
if 'training' in positional_args:
positional_args.remove('training')
# self and first arg can be positional.
if len(positional_args) > 2:
extra_args = positional_args[2:]
raise ValueError(
'Models passed to `' + method_name + '` can only have `training` '
'and the first argument in `call` as positional arguments, '
'found: ' + str(extra_args) + '.')
def _validate_compile(self, optimizer, metrics, **kwargs):
"""Performs validation checks for the default `compile`."""
if any(
isinstance(opt, optimizers.Optimizer)
for opt in nest.flatten(optimizer)):
raise ValueError(
'`tf.compat.v1.keras` Optimizer (', optimizer, ') is '
'not supported when eager execution is enabled. Use a '
'`tf.keras` Optimizer instead, or disable eager '
'execution.')
kwargs.pop('cloning', None) # Legacy DistStrat argument, never used.
kwargs.pop('experimental_run_tf_function', None) # Always `True`.
if kwargs.pop('distribute', None) is not None:
raise ValueError(
'Distribute argument in compile is not available in TF 2.0 please '
'create the model under the distribution strategy scope.')
if kwargs.pop('target_tensors', None) is not None:
raise ValueError(
'target_tensors argument is not supported when executing eagerly.')
invalid_kwargs = set(kwargs) - {'experimental_steps_per_execution'}
if invalid_kwargs:
raise TypeError('Invalid keyword argument(s) in `compile`: %s' %
(invalid_kwargs,))
# Model must be created and compiled with the same DistStrat.
if self.built and ds_context.has_strategy():
strategy = ds_context.get_strategy()
for v in self.variables:
if not strategy.extended.variable_created_in_scope(v):
raise ValueError(
'Variable (%s) was not created in the distribution strategy '
'scope of (%s). It is most likely because some layers, the model, or '
'the optimizer were created outside the distribution '
'strategy scope. Try to make sure your code looks similar '
'to the following.\n'
'with strategy.scope():\n'
' model=_create_model()\n'
' model.compile(...)' % (v, strategy))
# Model metrics must be created in the same distribution strategy scope
# as the model.
strategy = self._get_distribution_strategy()
for metric in nest.flatten(metrics):
for v in getattr(metric, 'variables', []):
if not strategy.extended.variable_created_in_scope(v):
raise ValueError(
'Metric (%s) passed to model.compile was created inside of a '
'different distribution strategy scope than the model. All '
'metrics must be created in the same distribution strategy '
'scope as the model (in this case %s). If you pass in a string '
'identifier for a metric to compile the metric will '
'automatically be created in the correct distribution '
'strategy scope.' % (metric, strategy)
)
def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch):
"""Maybe load initial epoch from ckpt considering possible worker recovery.
Refer to tensorflow/python/keras/distribute/multi_worker_training_state.py
for more information.
Arguments:
initial_epoch: The original `initial_epoch` the user passes in to `fit()`.
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the epoch the training is supposed to continue
at. Otherwise, return the `initial_epoch` the user passes in.
"""
if self._training_state is not None:
return self._training_state.maybe_load_initial_epoch_from_ckpt(
initial_epoch, mode=ModeKeys.TRAIN)
return initial_epoch
def _assert_compile_was_called(self):
# Checks whether `compile` has been called. If it has been called,
# then the optimizer is set. This is different from whether the
# model is compiled
# (i.e. whether the model is built and its inputs/outputs are set).
if not self._is_compiled:
raise RuntimeError('You must compile your model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
def _set_inputs(self, inputs, outputs=None, training=None):
"""This method is for compat with Modelv1. Only inputs are needed here."""
self._set_save_spec(inputs)
@property
def _trackable_saved_model_saver(self):
return model_serialization.ModelSavedModelSaver(self)
def _list_functions_for_serialization(self, serialization_cache):
# SavedModel needs to ignore the execution functions.
train_function = self.train_function
test_function = self.test_function
predict_function = self.predict_function
self.train_function = None
self.test_function = None
self.predict_function = None
functions = super(
Model, self)._list_functions_for_serialization(serialization_cache)
self.train_function = train_function
self.test_function = test_function
self.predict_function = predict_function
return functions
def _should_eval(self, epoch, validation_freq):
epoch = epoch + 1 # one-index the user-facing epoch.
if isinstance(validation_freq, int):
return epoch % validation_freq == 0
elif isinstance(validation_freq, list):
return epoch in validation_freq
else:
raise ValueError('Expected `validation_freq` to be a list or int.')
######################################################################
# Functions below exist only as v1 / v2 compatibility shims.
######################################################################
def _get_compile_args(self):
"""Used for saving or cloning a Model."""
self._assert_compile_was_called()
# pylint: disable=protected-access
compile_args = {
'optimizer': self.optimizer,
'loss': self.compiled_loss._user_losses,
'metrics': self.compiled_metrics._user_metrics,
'weighted_metrics': self.compiled_metrics._user_weighted_metrics,
'loss_weights': self.compiled_loss._user_loss_weights,
'sample_weight_mode': None,
}
# pylint: enable=protected-access
return compile_args
def _get_callback_model(self):
return self
def _in_multi_worker_mode(self):
return self.distribute_strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
def _get_distribution_strategy(self):
return self.distribute_strategy
@property
def _compile_was_called(self):
return self._is_compiled
def reduce_per_replica(values, strategy, reduction='first'):
"""Reduce PerReplica objects.
Arguments:
values: Structure of `PerReplica` objects or `Tensor`s. `Tensor`s are
returned as-is.
strategy: `tf.distribute.Strategy` object.
reduction: One of 'first', 'concat'.
Returns:
Structure of `Tensor`s.
"""
def _reduce(v):
"""Reduce a single `PerReplica` object."""
if not isinstance(v, ds_values.PerReplica):
return v
elif reduction == 'first':
return strategy.unwrap(v)[0]
elif reduction == 'concat':
if _is_tpu_multi_host(strategy):
return _tpu_multi_host_concat(v, strategy)
else:
return concat(strategy.unwrap(v))
else:
raise ValueError('`reduction` must be "first" or "concat".')
return nest.map_structure(_reduce, values)
def concat(tensors, axis=0):
"""Concats `tensor`s along `axis`."""
if isinstance(tensors[0], sparse_tensor.SparseTensor):
return sparse_ops.sparse_concat_v2(axis=axis, sp_inputs=tensors)
if isinstance(tensors[0], ragged_tensor.RaggedTensor):
return ragged_concat_ops.concat(tensors, axis=axis)
return array_ops.concat(tensors, axis=axis)
def _is_tpu_multi_host(strategy):
return (dist_utils.is_tpu_strategy(strategy) and
strategy.extended.num_hosts > 1)
def _tpu_multi_host_concat(v, strategy):
"""Correctly order TPU PerReplica objects."""
replicas = strategy.unwrap(v)
# When distributed datasets are created from Tensors / NumPy,
# TPUStrategy.experimental_distribute_dataset shards data in
# (Replica, Host) order, and TPUStrategy.unwrap returns it in
# (Host, Replica) order.
# TODO(b/150317897): Figure out long-term plan here.
num_replicas_per_host = strategy.extended.num_replicas_per_host
ordered_replicas = []
for replica_id in range(num_replicas_per_host):
ordered_replicas += replicas[replica_id::num_replicas_per_host]
return concat(ordered_replicas)
def _minimize(strategy, tape, optimizer, loss, trainable_variables):
"""Minimizes loss for one step by updating `trainable_variables`.
This is roughly equivalent to
```python
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
```
However, this function also applies gradient clipping and loss scaling if the
optimizer is a LossScaleOptimizer.
Args:
strategy: `tf.distribute.Strategy`.
tape: A gradient tape. The loss must have been computed under this tape.
optimizer: The optimizer used to minimize the loss.
loss: The loss tensor.
trainable_variables: The variables that will be updated in order to minimize
the loss.
"""
with tape:
if isinstance(optimizer, lso.LossScaleOptimizer):
loss = optimizer.get_scaled_loss(loss)
gradients = tape.gradient(loss, trainable_variables)
# Whether to aggregate gradients outside of optimizer. This requires support
# of the optimizer and doesn't work with ParameterServerStrategy and
# CentralStorageStrategy.
aggregate_grads_outside_optimizer = (
optimizer._HAS_AGGREGATE_GRAD and # pylint: disable=protected-access
not isinstance(strategy.extended,
parameter_server_strategy.ParameterServerStrategyExtended))
if aggregate_grads_outside_optimizer:
# We aggregate gradients before unscaling them, in case a subclass of
# LossScaleOptimizer all-reduces in fp16. All-reducing in fp16 can only be
# done on scaled gradients, not unscaled gradients, for numeric stability.
gradients = optimizer._aggregate_gradients(zip(gradients, # pylint: disable=protected-access
trainable_variables))
if isinstance(optimizer, lso.LossScaleOptimizer):
gradients = optimizer.get_unscaled_gradients(gradients)
gradients = optimizer._clip_gradients(gradients) # pylint: disable=protected-access
if trainable_variables:
if aggregate_grads_outside_optimizer:
optimizer.apply_gradients(
zip(gradients, trainable_variables),
experimental_aggregate_gradients=False)
else:
optimizer.apply_gradients(zip(gradients, trainable_variables))
def _is_scalar(x):
return isinstance(x, (ops.Tensor, variables.Variable)) and x.shape.rank == 0
def write_scalar_summaries(logs, step):
for name, value in logs.items():
if _is_scalar(value):
summary_ops_v2.scalar('batch_' + name, value, step=step)
def _minimum_control_deps(outputs):
"""Returns the minimum control dependencies to ensure step succeeded."""
if context.executing_eagerly():
return [] # Control dependencies not needed.
outputs = nest.flatten(outputs, expand_composites=True)
for out in outputs:
# Variables can't be control dependencies.
if not isinstance(out, variables.Variable):
return [out] # Return first Tensor or Op from outputs.
return [] # No viable Tensor or Op to use for control deps.
|
the-stack_0_17412 | # PPO (Percentage Price Oscillator)
# https://school.stockcharts.com/doku.php?id=technical_indicators:price_oscillators_ppo
# The Percentage Price Oscillator (PPO) is a momentum oscillator that measures
# the difference between two moving averages as a percentage of the larger
# moving average.
# Arguments:
# close(pandas.Series): dataset 'Price' column.
# window_slow(int): n period long-term.
# window_fast(int): n period short-term.
# window_sign(int): n period for the signal.
# fillna(bool): if True, fill nan values.
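# A minimal usage sketch (the DataFrame `df` and its 'Close' column are
# assumptions for illustration, not part of this module):
#
#   indicator = PercentagePriceOscillator(close=df['Close'], window_slow=26,
#                                         window_fast=12, window_sign=9)
#   df['ppo'] = indicator.ppo()
#   df['ppo_signal'] = indicator.ppo_signal()
#   df['ppo_hist'] = indicator.ppo_hist()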
import pandas as pd
from _utilities import IndicatorMixin, _ema
class PercentagePriceOscillator(IndicatorMixin):
def __init__(
self,
close: pd.Series,
window_slow: int = 26,
window_fast: int = 12,
window_sign: int = 9,
fillna: bool = False,
):
self._close = close
self._window_slow = window_slow
self._window_fast = window_fast
self._window_sign = window_sign
self._fillna = fillna
self._run()
def _run(self):
_emafast = _ema(self._close, self._window_fast, self._fillna)
_emaslow = _ema(self._close, self._window_slow, self._fillna)
self._ppo = ((_emafast - _emaslow) / _emaslow) * 100
self._ppo_signal = _ema(self._ppo, self._window_sign, self._fillna)
self._ppo_hist = self._ppo - self._ppo_signal
def ppo(self):
ppo_series = self._check_fillna(self._ppo, value=0)
return pd.Series(ppo_series, name=f"PPO_{self._window_fast}_{self._window_slow}")
def ppo_signal(self):
ppo_signal_series = self._check_fillna(self._ppo_signal, value=0)
return pd.Series(ppo_signal_series, name=f"PPO_sign_{self._window_fast}_{self._window_slow}")
def ppo_hist(self):
ppo_hist_series = self._check_fillna(self._ppo_hist, value=0)
return pd.Series(ppo_hist_series, name=f"PPO_hist_{self._window_fast}_{self._window_slow}") |
the-stack_0_17414 | from decoder import Parser
from extract_training_data import FeatureExtractor
from conll_reader import conll_reader
import sys
def compare_parser(target, predict):
target_unlabeled = set((d.id,d.head) for d in target.deprels.values())
target_labeled = set((d.id,d.head,d.deprel) for d in target.deprels.values())
predict_unlabeled = set((d.id,d.head) for d in predict.deprels.values())
predict_labeled = set((d.id,d.head,d.deprel) for d in predict.deprels.values())
labeled_correct = len(predict_labeled.intersection(target_labeled))
unlabeled_correct = len(predict_unlabeled.intersection(target_unlabeled))
num_words = len(predict_labeled)
return labeled_correct, unlabeled_correct, num_words
if __name__ == "__main__":
WORD_VOCAB_FILE = 'data/words.vocab'
POS_VOCAB_FILE = 'data/pos.vocab'
try:
word_vocab_f = open(WORD_VOCAB_FILE,'r')
pos_vocab_f = open(POS_VOCAB_FILE,'r')
except FileNotFoundError:
print("Could not find vocabulary files {} and {}".format(WORD_VOCAB_FILE, POS_VOCAB_FILE))
sys.exit(1)
extractor = FeatureExtractor(word_vocab_f, pos_vocab_f)
parser = Parser(extractor, sys.argv[1])
total_labeled_correct = 0
total_unlabeled_correct = 0
total_words = 0
las_list = []
uas_list = []
count = 0
with open(sys.argv[2],'r') as in_file:
print("Evaluating. (Each . represents 100 test dependency trees)")
for dtree in conll_reader(in_file):
words = dtree.words()
pos = dtree.pos()
predict = parser.parse_sentence(words, pos)
labeled_correct, unlabeled_correct, num_words = compare_parser(dtree, predict)
las_s = labeled_correct / float(num_words)
uas_s = unlabeled_correct / float(num_words)
las_list.append(las_s)
uas_list.append(uas_s)
total_labeled_correct += labeled_correct
total_unlabeled_correct += unlabeled_correct
total_words += num_words
count +=1
if count % 100 == 0:
print(".",end="")
sys.stdout.flush()
print()
las_micro = total_labeled_correct / float(total_words)
uas_micro = total_unlabeled_correct / float(total_words)
las_macro = sum(las_list) / len(las_list)
uas_macro = sum(uas_list) / len(uas_list)
print("{} sentence.\n".format(len(las_list)))
print("Micro Avg. Labeled Attachment Score: {}".format(las_micro))
print("Micro Avg. Unlabeled Attachment Score: {}\n".format(uas_micro))
print("Macro Avg. Labeled Attachment Score: {}".format(las_macro))
print("Macro Avg. Unlabeled Attachment Score: {}".format(uas_macro))
|
the-stack_0_17419 | import os
from pint import UnitRegistry
# load up the registry to be used everywhere
ureg = UnitRegistry()
# add currency as a type of unit since it is not part of the default
ureg.define('usd = [currency]')
Q_ = ureg.Quantity
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
CREDIT = "CREDIT"
CHECKING = "CHECKING"
VALID_ACCT_TYPES = [CREDIT, CHECKING]
DEBT_RATIO = "DEBT/CREDIT RATIO HIGH"
OVERDRAFT = "CHECKING ACCOUNT OVERDRAFT"
OVERCREDIT = "BALANCE HIGHER THAN CREDIT LIMIT"
ISSUE_NOTES = {DEBT_RATIO: "FICO recommends keeping a debt to limit ratio of under 25%",
OVERDRAFT: "Spent more money than you have in this checking account",
OVERCREDIT: "Credit card balance exceeds your limit."}
FOREVER_RECURRING = "recurring payment like forever"
|
the-stack_0_17420 | import logging
from abc import ABC
from abc import abstractmethod
from typing import Tuple
import transformers
import tensorflow as tf
from tensorflow.keras import layers
logger = logging.getLogger('absa.model')
class ABSClassifier(tf.keras.Model, ABC):
"""
The model's aim is to classify the sentiment. The model contains the
fine-tuned language model, which holds most parameters. The classifier
itself is a tiny linear layer on top of a language model.
We use the BERT language model, because we can benefit from the BERT's
next-sentence prediction and formulate the task as the sequence-pair
classification. Each example is described as one sequence in the format:
"[CLS] text subtokens [SEP] aspect subtokens [SEP]". The relation between
the text and aspect is encoded into the CLS token. The classifier just
makes a linear transformation of the final special CLS token representation.
The pipeline applies the softmax to get distribution over sentiment classes.
    A note on how to train the model. We start with the original BERT version
    as a basis, and we divide the training into two stages. Firstly, because
    BERT is pretrained on dry Wikipedia texts, we wish to bias the language
    model towards more informal language or a specific domain. To do so, we
    select texts close to the target domain and do the self-supervised
    **language model** post-training. The routine is the same as for the
    pre-training, but we need to carefully set up the optimization parameters.
    Secondly, we do regular supervised training. We train the whole model
    using a labeled dataset to classify the sentiment.
Please note that the package contains the submodule `absa.training`. You
can find there complete routines to tune or train either the language
model or the classifier. Check out examples on the package website.
References:
[BERT: Pre-training of Deep Bidirectional Transformers for Language
Understanding](https://arxiv.org/abs/1810.04805)
[Utilizing BERT for Aspect-Based Sentiment Analysis via Constructing
Auxiliary Sentence](http://arxiv.org/abs/1903.09588)
[BERT Post-Training for Review Reading Comprehension and Aspect-based
Sentiment Analysis](http://arxiv.org/abs/1904.02232)
[Adapt or Get Left Behind: Domain Adaptation through BERT Language
Model Finetuning for Aspect-Target Sentiment Classification]
(http://arxiv.org/abs/1908.11860)
"""
@abstractmethod
def call(
self,
token_ids: tf.Tensor,
attention_mask: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
training: bool = False,
**bert_kwargs
) -> Tuple[tf.Tensor, Tuple[tf.Tensor, ...], Tuple[tf.Tensor, ...]]:
"""
Perform the sentiment classification. We formulate the task as the
sequence-pair classification. Each example is described as one
sequence in the format:
"[CLS] text subtokens [SEP] aspect subtokens [SEP]".
Parameters
----------
token_ids
Indices of input sequence subtokens in the vocabulary.
attention_mask
Bool mask used to avoid performing attention on padding token
indices in a batch (this is not related with masks from the
language modeling task).
token_type_ids
Segment token indices to indicate first and second portions
of the inputs, zeros and ones.
training
Whether to activate a dropout (True) during training or
to de-activate them (False) for evaluation.
bert_kwargs
Auxiliary parameters which we forward directly to
the **transformers** language model implementation.
Returns
-------
logits
The classifier final outputs.
hidden_states
Tuple of tensors: one for the output of the embeddings and one
for the output of each layer.
attentions
Tuple of tensors: Attentions weights after the attention softmax,
used to compute the weighted average in the self-attention heads.
"""
def force_to_return_details(kwargs: dict):
""" Force a model to output attentions and hidden states due to the fixed
definition of the output batch (the well-defined interface). """
condition = not kwargs.get('output_attentions', False) or \
not kwargs.get('output_hidden_states', False)
if condition:
logger.info('Model should output attentions and hidden states.')
kwargs['output_attentions'] = True
kwargs['output_hidden_states'] = True
class BertABSCConfig(transformers.BertConfig):
def __init__(self, num_polarities: int = 3, **kwargs):
force_to_return_details(kwargs)
super().__init__(**kwargs)
self.num_polarities = num_polarities
class BertABSClassifier(ABSClassifier, transformers.TFBertPreTrainedModel):
def __init__(self, config: BertABSCConfig, **kwargs):
super().__init__(config, **kwargs)
self.bert = transformers.TFBertMainLayer(
config, name="bert")
initializer = transformers.modeling_tf_utils.get_initializer(
config.initializer_range)
self.dropout = layers.Dropout(config.hidden_dropout_prob)
self.classifier = layers.Dense(
config.num_polarities,
kernel_initializer=initializer,
name='classifier'
)
def call(
self,
token_ids: tf.Tensor,
attention_mask: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
training: bool = False,
**bert_kwargs
) -> Tuple[tf.Tensor, Tuple[tf.Tensor, ...], Tuple[tf.Tensor, ...]]:
outputs = self.bert(
inputs=token_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
training=training,
**bert_kwargs
)
sequence_output, pooled_output, hidden_states, attentions = outputs
pooled_output = self.dropout(pooled_output, training=training)
logits = self.classifier(pooled_output)
return logits, hidden_states, attentions
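if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module). It assumes the
    # `bert-base-uncased` weights are available locally or downloadable; the classifier
    # head is randomly initialized here, so the scores are meaningless until the model
    # has been fine-tuned on labeled aspect-sentiment data.
    tokenizer = transformers.BertTokenizer.from_pretrained("bert-base-uncased")
    config = BertABSCConfig.from_pretrained("bert-base-uncased", num_polarities=3)
    model = BertABSClassifier.from_pretrained("bert-base-uncased", config=config)
    # One sequence pair: "[CLS] text [SEP] aspect [SEP]".
    encoded = tokenizer("The pizza was great but the service was slow.", "service",
                        return_tensors="tf")
    logits, hidden_states, attentions = model(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        token_type_ids=encoded["token_type_ids"],
    )
    print(tf.nn.softmax(logits, axis=-1).numpy())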
|
the-stack_0_17421 | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import hashlib
import logging
import os
import subprocess
from dataclasses import dataclass
from enum import Enum
from textwrap import dedent
from typing import Iterable, Sequence
from pants.core.subsystems import python_bootstrap
from pants.core.subsystems.python_bootstrap import PythonBootstrap
from pants.engine.collection import DeduplicatedCollection
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.fs import CreateDigest, FileContent
from pants.engine.internals.native_engine import Digest
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.process import FallibleProcessResult, Process, ProcessCacheScope, ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.util.logging import LogLevel
from pants.util.meta import frozen_after_init
from pants.util.ordered_set import OrderedSet
from pants.util.strutil import create_path_env_var, pluralize
logger = logging.getLogger(__name__)
# -------------------------------------------------------------------------------------------
# `BinaryPath` types
# -------------------------------------------------------------------------------------------
# TODO(#14492): This should be configurable via `[system-binaries]` subsystem, likely per-binary.
SEARCH_PATHS = ("/usr/bin", "/bin", "/usr/local/bin")
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPath:
path: str
fingerprint: str
def __init__(self, path: str, fingerprint: str | None = None) -> None:
self.path = path
self.fingerprint = self._fingerprint() if fingerprint is None else fingerprint
@staticmethod
def _fingerprint(content: bytes | bytearray | memoryview | None = None) -> str:
hasher = hashlib.sha256() if content is None else hashlib.sha256(content)
return hasher.hexdigest()
@classmethod
def fingerprinted(
cls, path: str, representative_content: bytes | bytearray | memoryview
) -> BinaryPath:
return cls(path, fingerprint=cls._fingerprint(representative_content))
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPathTest:
args: tuple[str, ...]
fingerprint_stdout: bool
def __init__(self, args: Iterable[str], fingerprint_stdout: bool = True) -> None:
self.args = tuple(args)
self.fingerprint_stdout = fingerprint_stdout
class SearchPath(DeduplicatedCollection[str]):
"""The search path for binaries; i.e.: the $PATH."""
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPathRequest:
"""Request to find a binary of a given name.
If `check_file_entries` is `True` a BinaryPathRequest will consider any entries in the
`search_path` that are file paths in addition to traditional directory paths.
If a `test` is specified all binaries that are found will be executed with the test args and
only those binaries whose test executions exit with return code 0 will be retained.
Additionally, if test execution includes stdout content, that will be used to fingerprint the
binary path so that upgrades and downgrades can be detected. A reasonable test for many programs
might be `BinaryPathTest(args=["--version"])` since it will both ensure the program runs and
also produce stdout text that changes upon upgrade or downgrade of the binary at the discovered
path.
"""
search_path: SearchPath
binary_name: str
check_file_entries: bool
test: BinaryPathTest | None
def __init__(
self,
*,
search_path: Iterable[str],
binary_name: str,
check_file_entries: bool = False,
test: BinaryPathTest | None = None,
) -> None:
self.search_path = SearchPath(search_path)
self.binary_name = binary_name
self.check_file_entries = check_file_entries
self.test = test
@frozen_after_init
@dataclass(unsafe_hash=True)
class BinaryPaths(EngineAwareReturnType):
binary_name: str
paths: tuple[BinaryPath, ...]
def __init__(self, binary_name: str, paths: Iterable[BinaryPath] | None = None):
self.binary_name = binary_name
self.paths = tuple(OrderedSet(paths) if paths else ())
def message(self) -> str:
if not self.paths:
return f"failed to find {self.binary_name}"
found_msg = f"found {self.binary_name} at {self.paths[0]}"
if len(self.paths) > 1:
found_msg = f"{found_msg} and {pluralize(len(self.paths) - 1, 'other location')}"
return found_msg
@property
def first_path(self) -> BinaryPath | None:
"""Return the first path to the binary that was discovered, if any."""
return next(iter(self.paths), None)
def first_path_or_raise(self, request: BinaryPathRequest, *, rationale: str) -> BinaryPath:
"""Return the first path to the binary that was discovered, if any."""
first_path = self.first_path
if not first_path:
raise BinaryNotFoundError.from_request(request, rationale=rationale)
return first_path
class BinaryNotFoundError(EnvironmentError):
@classmethod
def from_request(
cls,
request: BinaryPathRequest,
*,
rationale: str | None = None,
alternative_solution: str | None = None,
) -> BinaryNotFoundError:
"""When no binary is found via `BinaryPaths`, and it is not recoverable.
:param rationale: A short description of why this binary is needed, e.g.
"download the tools Pants needs" or "run Python programs".
:param alternative_solution: A description of what else users can do to fix the issue,
beyond installing the program. For example, "Alternatively, you can set the option
`--python-bootstrap-search-path` to change the paths searched."
"""
msg = (
f"Cannot find `{request.binary_name}` on `{sorted(request.search_path)}`. Please "
"ensure that it is installed"
)
msg += f" so that Pants can {rationale}." if rationale else "."
if alternative_solution:
msg += f"\n\n{alternative_solution}"
return BinaryNotFoundError(msg)
# -------------------------------------------------------------------------------------------
# Binary shims
# Creates a Digest with a shim for each requested binary in a directory suitable for PATH.
# -------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class BinaryShimsRequest:
"""Request to create shims for one or more system binaries."""
output_directory: str
rationale: str = dataclasses.field(compare=False)
# Create shims for provided binary paths
paths: tuple[BinaryPath, ...] = tuple()
# Create shims for the provided binary names after looking up the paths.
requests: tuple[BinaryPathRequest, ...] = tuple()
@classmethod
def for_binaries(
cls, *names: str, rationale: str, output_directory: str, search_path: Sequence[str]
) -> BinaryShimsRequest:
return cls(
requests=tuple(
BinaryPathRequest(binary_name=binary_name, search_path=search_path)
for binary_name in names
),
rationale=rationale,
output_directory=output_directory,
)
@classmethod
def for_paths(
cls, *paths: BinaryPath, rationale: str, output_directory: str
) -> BinaryShimsRequest:
return cls(paths=paths, rationale=rationale, output_directory=output_directory)
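# For example (illustrative values), a rule that needs `ln` and `cp` available on PATH
# in its sandbox could request:
#   BinaryShimsRequest.for_binaries(
#       "ln", "cp",
#       rationale="link and copy files",
#       output_directory="__shims",
#       search_path=SEARCH_PATHS,
#   )
# and then prepend the resulting BinaryShims.bin_directory to the Process PATH.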
@dataclass(frozen=True)
class BinaryShims:
"""The shims created for a BinaryShimsRequest is placed in `bin_directory` of the `digest`.
The purpose of these shims is so that a Process may be executed with `bin_directory` added to
PATH so that the binaries are available for execution.
    The alternative is to add the directories hosting the binaries directly, but that would also
    make many more unrelated binaries executable from PATH, leaking into the sandbox
    unnecessarily.
"""
bin_directory: str
digest: Digest
# -------------------------------------------------------------------------------------------
# Binaries
# -------------------------------------------------------------------------------------------
class BashBinary(BinaryPath):
"""The `bash` binary."""
DEFAULT_SEARCH_PATH = SearchPath(("/usr/bin", "/bin", "/usr/local/bin"))
@dataclass(frozen=True)
class BashBinaryRequest:
search_path: SearchPath = BashBinary.DEFAULT_SEARCH_PATH
class PythonBinary(BinaryPath):
"""A Python3 interpreter for use by `@rule` code as an alternative to BashBinary scripts.
Python is usable for `@rule` scripting independently of `pants.backend.python`, but currently
thirdparty dependencies are not supported, because PEX lives in that backend.
TODO: Consider extracting PEX out into the core in order to support thirdparty dependencies.
"""
# Note that updating this will impact the `archive` target defined in `core/target_types.py`.
class ArchiveFormat(Enum):
TAR = "tar"
TGZ = "tar.gz"
TBZ2 = "tar.bz2"
TXZ = "tar.xz"
ZIP = "zip"
class ZipBinary(BinaryPath):
def create_archive_argv(
self, output_filename: str, input_files: Sequence[str]
) -> tuple[str, ...]:
return (self.path, output_filename, *input_files)
class UnzipBinary(BinaryPath):
def extract_archive_argv(self, archive_path: str, extract_path: str) -> tuple[str, ...]:
# Note that the `output_dir` does not need to already exist.
# The caller should validate that it's a valid `.zip` file.
return (self.path, archive_path, "-d", extract_path)
@dataclass(frozen=True)
class GunzipBinary:
python: PythonBinary
def extract_archive_argv(self, archive_path: str, extract_path: str) -> tuple[str, ...]:
archive_name = os.path.basename(archive_path)
dest_file_name = os.path.splitext(archive_name)[0]
dest_path = os.path.join(extract_path, dest_file_name)
script = dedent(
f"""
import gzip
import shutil
with gzip.GzipFile(filename={archive_path!r}, mode="rb") as source:
with open({dest_path!r}, "wb") as dest:
shutil.copyfileobj(source, dest)
"""
)
return (self.python.path, "-c", script)
class TarBinary(BinaryPath):
def create_archive_argv(
self, output_filename: str, input_files: Sequence[str], tar_format: ArchiveFormat
) -> tuple[str, ...]:
# Note that the parent directory for the output_filename must already exist.
#
# We do not use `-a` (auto-set compression) because it does not work with older tar
# versions. Not all tar implementations will support these compression formats - in that
# case, the user will need to choose a different format.
compression = {ArchiveFormat.TGZ: "z", ArchiveFormat.TBZ2: "j", ArchiveFormat.TXZ: "J"}.get(
tar_format, ""
)
return (self.path, f"c{compression}f", output_filename, *input_files)
def extract_archive_argv(
self, archive_path: str, extract_path: str, *, archive_suffix: str
) -> tuple[str, ...]:
# Note that the `output_dir` must already exist.
# The caller should validate that it's a valid `.tar` file.
prog_args = ("-Ilz4",) if archive_suffix == ".tar.lz4" else ()
return (self.path, *prog_args, "-xf", archive_path, "-C", extract_path)
class MkdirBinary(BinaryPath):
pass
class ChmodBinary(BinaryPath):
pass
class DiffBinary(BinaryPath):
pass
class ReadlinkBinary(BinaryPath):
pass
class GitBinaryException(Exception):
pass
class GitBinary(BinaryPath):
def _invoke_unsandboxed(self, cmd: list[str]) -> str:
"""Invoke the given git command, _without_ the sandboxing provided by the `Process` API.
This API is for internal use only: users should prefer to consume methods of the
`GitWorktree` class.
"""
cmd = [self.path, *cmd]
self._log_call(cmd)
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
# Binary DNE or is not executable
cmd_str = " ".join(cmd)
raise GitBinaryException(f"Failed to execute command {cmd_str}: {e!r}")
out, err = process.communicate()
self._check_result(cmd, process.returncode, err.decode())
return out.decode().strip()
def _check_result(
self, cmd: Iterable[str], result: int, failure_msg: str | None = None
) -> None:
if result != 0:
cmd_str = " ".join(cmd)
raise GitBinaryException(failure_msg or f"{cmd_str} failed with exit code {result}")
def _log_call(self, cmd: Iterable[str]) -> None:
logger.debug("Executing: " + " ".join(cmd))
# -------------------------------------------------------------------------------------------
# Binaries Rules
# -------------------------------------------------------------------------------------------
@rule
async def create_binary_shims(
binary_shims_request: BinaryShimsRequest,
bash: BashBinary,
mkdir: MkdirBinary,
chmod: ChmodBinary,
) -> BinaryShims:
"""Creates a bin directory with shims for all requested binaries.
Useful as input digest for a Process to setup a `bin` directory for PATH.
"""
paths = binary_shims_request.paths
requests = binary_shims_request.requests
if requests:
all_binary_paths = await MultiGet(
Get(BinaryPaths, BinaryPathRequest, request) for request in requests
)
first_paths = tuple(
binary_paths.first_path_or_raise(request, rationale=binary_shims_request.rationale)
for binary_paths, request in zip(all_binary_paths, requests)
)
paths += first_paths
all_paths = (binary.path for binary in paths)
bin_relpath = binary_shims_request.output_directory
script = ";".join(
(
f"{mkdir.path} -p {bin_relpath}",
*(
" && ".join(
[
(
# The `printf` cmd is a bash builtin, so always available.
f"printf '{_create_shim(bash.path, binary_path)}'"
f" > '{bin_relpath}/{os.path.basename(binary_path)}'"
),
f"{chmod.path} +x '{bin_relpath}/{os.path.basename(binary_path)}'",
]
)
for binary_path in all_paths
),
)
)
result = await Get(
ProcessResult,
Process(
argv=(bash.path, "-c", script),
description=f"Setup binary shims so that Pants can {binary_shims_request.rationale}.",
output_directories=(bin_relpath,),
level=LogLevel.DEBUG,
),
)
return BinaryShims(bin_relpath, result.output_digest)
def _create_shim(bash: str, binary: str) -> str:
"""The binary shim script to be placed in the output directory for the digest."""
return dedent(
f"""\
#!{bash}
exec "{binary}" "$@"
"""
)
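# For example (with illustrative paths), _create_shim("/bin/bash", "/usr/bin/tar")
# produces the two-line script:
#   #!/bin/bash
#   exec "/usr/bin/tar" "$@"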
@rule(desc="Finding the `bash` binary", level=LogLevel.DEBUG)
async def find_bash(bash_request: BashBinaryRequest) -> BashBinary:
request = BinaryPathRequest(
binary_name="bash",
search_path=bash_request.search_path,
test=BinaryPathTest(args=["--version"]),
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path
if not first_path:
raise BinaryNotFoundError.from_request(request)
return BashBinary(first_path.path, first_path.fingerprint)
@rule
async def get_bash() -> BashBinary:
# Expose bash to external consumers.
return await Get(BashBinary, BashBinaryRequest())
@rule
async def find_binary(request: BinaryPathRequest) -> BinaryPaths:
# If we are not already locating bash, recurse to locate bash to use it as an absolute path in
# our shebang. This avoids mixing locations that we would search for bash into the search paths
# of the request we are servicing.
    # TODO(#10769): Replace this script with a statically linked native binary so we don't
    # depend on /bin/bash being available on the Process host.
if request.binary_name == "bash":
shebang = "#!/usr/bin/env bash"
else:
bash = await Get(BashBinary, BashBinaryRequest())
shebang = f"#!{bash.path}"
script_path = "./find_binary.sh"
script_header = dedent(
f"""\
{shebang}
set -euox pipefail
CHECK_FILE_ENTRIES={'1' if request.check_file_entries else ''}
"""
)
script_body = dedent(
"""\
for path in ${PATH//:/ }; do
if [[ -d "${path}" ]]; then
# Handle traditional directory PATH element.
maybe_exe="${path}/$1"
elif [[ -n "${CHECK_FILE_ENTRIES}" ]]; then
# Handle PATH elements that are filenames to allow for precise selection.
maybe_exe="${path}"
else
maybe_exe=
fi
if [[ "$1" == "${maybe_exe##*/}" && -f "${maybe_exe}" && -x "${maybe_exe}" ]]
then
echo "${maybe_exe}"
fi
done
"""
)
script_content = script_header + script_body
script_digest = await Get(
Digest,
CreateDigest([FileContent(script_path, script_content.encode(), is_executable=True)]),
)
# Some subtle notes about executing this script:
#
# - We run the script with `ProcessResult` instead of `FallibleProcessResult` so that we
# can catch bugs in the script itself, given an earlier silent failure.
# - We set `ProcessCacheScope.PER_RESTART_SUCCESSFUL` to force re-run since any binary found
# on the host system today could be gone tomorrow. Ideally we'd only do this for local
# processes since all known remoting configurations include a static container image as
# part of their cache key which automatically avoids this problem. See #10769 for a
# solution that is less of a tradeoff.
search_path = create_path_env_var(request.search_path)
result = await Get(
ProcessResult,
Process(
description=f"Searching for `{request.binary_name}` on PATH={search_path}",
level=LogLevel.DEBUG,
input_digest=script_digest,
argv=[script_path, request.binary_name],
env={"PATH": search_path},
cache_scope=ProcessCacheScope.PER_RESTART_SUCCESSFUL,
),
)
binary_paths = BinaryPaths(binary_name=request.binary_name)
found_paths = result.stdout.decode().splitlines()
if not request.test:
return dataclasses.replace(binary_paths, paths=[BinaryPath(path) for path in found_paths])
results = await MultiGet(
Get(
FallibleProcessResult,
Process(
description=f"Test binary {path}.",
level=LogLevel.DEBUG,
argv=[path, *request.test.args],
# NB: Since a failure is a valid result for this script, we always cache it for
# `pantsd`'s lifetime, regardless of success or failure.
cache_scope=ProcessCacheScope.PER_RESTART_ALWAYS,
),
)
for path in found_paths
)
return dataclasses.replace(
binary_paths,
paths=[
(
BinaryPath.fingerprinted(path, result.stdout)
if request.test.fingerprint_stdout
else BinaryPath(path, result.stdout.decode())
)
for path, result in zip(found_paths, results)
if result.exit_code == 0
],
)
@rule(desc="Finding a `python` binary", level=LogLevel.TRACE)
async def find_python(python_bootstrap: PythonBootstrap) -> PythonBinary:
# PEX files are compatible with bootstrapping via Python 2.7 or Python 3.5+, but we select 3.6+
# for maximum compatibility with internal scripts.
interpreter_search_paths = python_bootstrap.interpreter_search_paths()
all_python_binary_paths = await MultiGet(
Get(
BinaryPaths,
BinaryPathRequest(
search_path=interpreter_search_paths,
binary_name=binary_name,
check_file_entries=True,
test=BinaryPathTest(
args=[
"-c",
# N.B.: The following code snippet must be compatible with Python 3.6+.
#
# We hash the underlying Python interpreter executable to ensure we detect
# changes in the real interpreter that might otherwise be masked by Pyenv
# shim scripts found on the search path. Naively, just printing out the full
# version_info would be enough, but that does not account for supported abi
# changes (e.g.: a pyenv switch from a py27mu interpreter to a py27m
# interpreter.)
#
# When hashing, we pick 8192 for efficiency of reads and fingerprint updates
# (writes) since it's a common OS buffer size and an even multiple of the
# hash block size.
dedent(
"""\
import sys
major, minor = sys.version_info[:2]
if not (major == 3 and minor >= 6):
sys.exit(1)
import hashlib
hasher = hashlib.sha256()
with open(sys.executable, "rb") as fp:
for chunk in iter(lambda: fp.read(8192), b""):
hasher.update(chunk)
sys.stdout.write(hasher.hexdigest())
"""
),
],
fingerprint_stdout=False, # We already emit a usable fingerprint to stdout.
),
),
)
for binary_name in python_bootstrap.interpreter_names
)
for binary_paths in all_python_binary_paths:
path = binary_paths.first_path
if path:
return PythonBinary(
path=path.path,
fingerprint=path.fingerprint,
)
raise BinaryNotFoundError(
"Was not able to locate a Python interpreter to execute rule code.\n"
"Please ensure that Python is available in one of the locations identified by "
"`[python-bootstrap] search_path`, which currently expands to:\n"
f" {interpreter_search_paths}"
)
@rule(desc="Finding the `zip` binary", level=LogLevel.DEBUG)
async def find_zip() -> ZipBinary:
request = BinaryPathRequest(
binary_name="zip", search_path=SEARCH_PATHS, test=BinaryPathTest(args=["-v"])
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(request, rationale="create `.zip` archives")
return ZipBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `unzip` binary", level=LogLevel.DEBUG)
async def find_unzip() -> UnzipBinary:
request = BinaryPathRequest(
binary_name="unzip", search_path=SEARCH_PATHS, test=BinaryPathTest(args=["-v"])
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(
request, rationale="download the tools Pants needs to run"
)
return UnzipBinary(first_path.path, first_path.fingerprint)
@rule
def find_gunzip(python: PythonBinary) -> GunzipBinary:
return GunzipBinary(python)
@rule(desc="Finding the `tar` binary", level=LogLevel.DEBUG)
async def find_tar() -> TarBinary:
request = BinaryPathRequest(
binary_name="tar", search_path=SEARCH_PATHS, test=BinaryPathTest(args=["--version"])
)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(
request, rationale="download the tools Pants needs to run"
)
return TarBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `mkdir` binary", level=LogLevel.DEBUG)
async def find_mkdir() -> MkdirBinary:
request = BinaryPathRequest(binary_name="mkdir", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(request, rationale="create directories")
return MkdirBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `chmod` binary", level=LogLevel.DEBUG)
async def find_chmod() -> ChmodBinary:
request = BinaryPathRequest(binary_name="chmod", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(
request, rationale="change file modes or Access Control Lists"
)
return ChmodBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `diff` binary", level=LogLevel.DEBUG)
async def find_diff() -> DiffBinary:
request = BinaryPathRequest(binary_name="diff", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(request, rationale="compare files line by line")
return DiffBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `readlink` binary", level=LogLevel.DEBUG)
async def find_readlink() -> ReadlinkBinary:
request = BinaryPathRequest(binary_name="readlink", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(request, rationale="defererence symlinks")
return ReadlinkBinary(first_path.path, first_path.fingerprint)
@rule(desc="Finding the `git` binary", level=LogLevel.DEBUG)
async def find_git() -> GitBinary:
request = BinaryPathRequest(binary_name="git", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path_or_raise(
request, rationale="track changes to files in your build environment"
)
return GitBinary(first_path.path, first_path.fingerprint)
# -------------------------------------------------------------------------------------------
# Rules for lazy requests
# TODO(#12946): Get rid of this when it becomes possible to use `Get()` with only one arg.
# -------------------------------------------------------------------------------------------
class ZipBinaryRequest:
pass
class UnzipBinaryRequest:
pass
class GunzipBinaryRequest:
pass
class TarBinaryRequest:
pass
class MkdirBinaryRequest:
pass
class ChmodBinaryRequest:
pass
class DiffBinaryRequest:
pass
class ReadlinkBinaryRequest:
pass
class GitBinaryRequest:
pass
@rule
async def find_zip_wrapper(_: ZipBinaryRequest, zip_binary: ZipBinary) -> ZipBinary:
return zip_binary
@rule
async def find_unzip_wrapper(_: UnzipBinaryRequest, unzip_binary: UnzipBinary) -> UnzipBinary:
return unzip_binary
@rule
async def find_gunzip_wrapper(_: GunzipBinaryRequest, gunzip: GunzipBinary) -> GunzipBinary:
return gunzip
@rule
async def find_tar_wrapper(_: TarBinaryRequest, tar_binary: TarBinary) -> TarBinary:
return tar_binary
@rule
async def find_mkdir_wrapper(_: MkdirBinaryRequest, mkdir_binary: MkdirBinary) -> MkdirBinary:
return mkdir_binary
@rule
async def find_readlink_wrapper(
_: ReadlinkBinaryRequest, readlink_binary: ReadlinkBinary
) -> ReadlinkBinary:
return readlink_binary
@rule
async def find_chmod_wrapper(_: ChmodBinaryRequest, chmod_binary: ChmodBinary) -> ChmodBinary:
return chmod_binary
@rule
async def find_diff_wrapper(_: DiffBinaryRequest, diff_binary: DiffBinary) -> DiffBinary:
return diff_binary
@rule
async def find_git_wrapper(_: GitBinaryRequest, git_binary: GitBinary) -> GitBinary:
return git_binary
def rules():
return [*collect_rules(), *python_bootstrap.rules()]
# -------------------------------------------------------------------------------------------
# Rules for fallible binaries
# -------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class MaybeGitBinary:
git_binary: GitBinary | None = None
@rule(desc="Finding the `git` binary", level=LogLevel.DEBUG)
async def maybe_find_git() -> MaybeGitBinary:
request = BinaryPathRequest(binary_name="git", search_path=SEARCH_PATHS)
paths = await Get(BinaryPaths, BinaryPathRequest, request)
first_path = paths.first_path
if not first_path:
return MaybeGitBinary()
return MaybeGitBinary(GitBinary(first_path.path, first_path.fingerprint))
class MaybeGitBinaryRequest:
pass
@rule
async def maybe_find_git_wrapper(
_: MaybeGitBinaryRequest, maybe_git_binary: MaybeGitBinary
) -> MaybeGitBinary:
return maybe_git_binary
|
the-stack_0_17422 |
# Filename: zbnode.py
# Description: This contains all the values, code (and static data structures) needed to act as a ZigBee node
# and to communicate with AlertMe nodes
from pyalertme.node import Node
import time
import threading
from xbee import ZigBee
import copy
import struct
import pprint
# ZigBee Addressing
BROADCAST_LONG = b'\x00\x00\x00\x00\x00\x00\xff\xff'
BROADCAST_SHORT = b'\xff\xfe'
# ZigBee Profile IDs
PROFILE_ID_ZDP = b'\x00\x00' # ZigBee Device Profile
PROFILE_ID_HA = b'\x01\x04' # HA Device Profile
PROFILE_ID_LL = b'\xc0\x5e' # Light Link Profile
PROFILE_ID_ALERTME = b'\xc2\x16' # AlertMe Private Profile
# ZigBee Endpoints
ENDPOINT_ZDO = b'\x00' # ZigBee Device Objects Endpoint
ENDPOINT_ALERTME = b'\x02' # AlertMe / Iris Endpoint
# ZDP Status
ZDP_STATUS_OK = b'\x00'
ZDP_STATUS_INVALID = b'\x80'
ZDP_STATUS_NOT_FOUND = b'\x81'
# ZDO Clusters
# See:
# http://ftp1.digi.com/support/images/APP_NOTE_XBee_ZigBee_Device_Profile.pdf
# http://www.cel.com/pdf/misc/zic09_zdp_api.pdf
CLUSTER_ID_ZDO_NWK_ADDR_REQ = b'\x00\x00' # Network (16-bit) Address Request
CLUSTER_ID_ZDO_NWK_ADDR_RSP = b'\x80\x00' # Network (16-bit) Address Response
CLUSTER_ID_ZDO_SIMPLE_DESC_REQ = b'\x00\x04' # Simple Descriptor Request
CLUSTER_ID_ZDO_ACTIVE_EP_REQ = b'\x00\x05' # Active Endpoints Request
CLUSTER_ID_ZDO_ACTIVE_EP_RSP = b'\x80\x05' # Active Endpoints Response
CLUSTER_ID_ZDO_MATCH_DESC_REQ = b'\x00\x06' # Match Descriptor Request
CLUSTER_ID_ZDO_MATCH_DESC_RSP = b'\x80\x06' # Match Descriptor Response
CLUSTER_ID_ZDO_END_DEVICE_ANNCE = b'\x00\x13' # End Device Announce
CLUSTER_ID_ZDO_MGMT_RTG_REQ = b'\x00\x32' # Management Routing Request
CLUSTER_ID_ZDO_MGMT_RTG_RSP = b'\x80\x32' # Management Routing Response (seen in outputs as x802 but the '2' char is really 0x32 printed as a char)
CLUSTER_ID_ZDO_MGMT_PERMIT_JOIN_REQ = b'\x00\x36' # Permit Join Request Request
CLUSTER_ID_ZDO_MGMT_NETWORK_UPDATE = b'\x80\x36' # Management Network Update (seen in outputs as x806 but the '6' is really 0x36
# AlertMe Clusters
# See:
# http://www.desert-home.com/2015/06/hacking-into-iris-door-sensor-part-4.html
CLUSTER_ID_AM_SWITCH = b'\x00\xee' # SmartPlug Switch Cluster
CLUSTER_ID_AM_POWER = b'\x00\xef' # Power Information
CLUSTER_ID_AM_STATUS = b'\x00\xf0' # Device Status
CLUSTER_ID_AM_TAMPER = b'\x00\xf2' # Device Tamper Cluster
CLUSTER_ID_AM_BUTTON = b'\x00\xf3' # Keyfob / Button
CLUSTER_ID_AM_DISCOVERY = b'\x00\xf6' # Device Discovery
CLUSTER_ID_AM_SECURITY = b'\x05\x00' # Security
# AlertMe Cluster Commands
# Security IasZoneCluster commands cluster b'\x05\x00' = 1280
CLUSTER_CMD_AM_SEC_STATUS_CHANGE = b'\x00' # Security Event (Sensors)
CLUSTER_CMD_AM_SEC_ENROLL_REQ = b'\x01' #
# AmGeneralCluster commands cluster b'\x00\xf0' = 240
CLUSTER_CMD_AM_SET_RTC_CMD = b'\x00' # SET_RTC_CMD = 0
CLUSTER_CMD_AM_RTC_CMD_REQ = b'\x80' # REQUEST_RTC_CMD = 128
CLUSTER_CMD_AM_LIFESIGN_CMD = b'\xfb' # LIFESIGN_CMD = 251
CLUSTER_CMD_AM_SET_MODE_CMD = b'\xfa' # SET_MODE_CMD = 250
CLUSTER_CMD_AM_STOP_POLLING_CMD = b'\xfd' # STOP_POLLING_CMD = 253
DEVICE_MODE_NORMAL_OPS = 0
DEVICE_MODE_RANGE_TEST = 1
DEVICE_MODE_TEST = 2
DEVICE_MODE_SEEKING = 3
DEVICE_MODE_IDLE = 4
DEVICE_MODE_QUIESCENT = 5
DEVICE_MODE_OPT_NONE = 0
DEVICE_MODE_OPT_SET_HNF = 1
DEVICE_MODE_OPT_CLEAR_HNF = 2
# AmPowerCtrlCluster commands cluster b'\x00\xee' = 238
CLUSTER_CMD_AM_STATE_REQ = b'\x01' # CMD_SET_OPERATING_MODE = 1 # State Request (SmartPlug)
CLUSTER_CMD_AM_STATE_CHANGE = b'\x02' # CMD_SET_RELAY_STATE = 2 # Change State (SmartPlug)
CLUSTER_CMD_AM_STATE_REPORT_REQ = b'\x03' # CMD_REQUEST_REPORT = 3
CLUSTER_CMD_AM_STATE_RESP = b'\x80' # CMD_STATUS_REPORT = 128 # Switch Status Update
# AmPowerMonCluster commands cluster b'\x00\xef = 239
CLUSTER_CMD_AM_PWR_SET_REPT_PARAMS = b'\x00' # CMD_SET_REPT_PARAMS = 0
CLUSTER_CMD_AM_PWR_REQUEST_REPORT = b'\x03' # CMD_REQUEST_REPORT = 3
CLUSTER_CMD_AM_PWR_SET_REPORT_RATE = b'\x04' # CMD_SET_REPORT_RATE= 4
CLUSTER_CMD_AM_PWR_DEMAND = b'\x81' # CMD_POWER_REPORT = 129 # Power Demand Update
CLUSTER_CMD_AM_PWR_CONSUMPTION = b'\x82' # CMD_ENERGY_REPORT = 130 #Power Consumption & Uptime Update
CLUSTER_CMD_AM_PWD_BATCH_POWER_REPORT = b'\x84' # CMD_BATCH_POWER_REPORT = 132
CLUSTER_CMD_AM_PWD_BATCH_ENERGY_REPORT = b'\x85' # CMD_BATCH_ENERGY_REPORT = 133
CLUSTER_CMD_AM_PWD_POWER_ENERGY_REPORT = b'\x86' # CMD_POWER_ENERGY_REPORT = 134
CLUSTER_CMD_AM_PWD_BATCH_POWER_ENERGY_REPORT = b'\x87' # CMD_BATCH_POWER_ENERGY_REPORT = 135
CLUSTER_CMD_AM_PWR_UNKNOWN = b'\x86' # Unknown British Gas Power Meter Update
# AmMaintenanceCluster commands cluster b'\x00\xf6' = 246
CLUSTER_CMD_AM_MAINT_HELLO_REQ = b'\xfc' # HELLO_WORLD_REQ = 252
CLUSTER_CMD_AM_MAINT_HELLO_RESP = b'\xfe' # HELLO_WORLD_RESP = 254
CLUSTER_CMD_AM_MAINT_RANGE_TEST_REQ = b'\xfd' # RANGE_TEST_SEND_CMD = 253
CLUSTER_CMD_AM_MAINT_RANGE_TEST_RESP = b'\xfd' # RANGE_TEST_RECV_CMD = 253
CLUSTER_CMD_AM_MODE_REQ = b'\xfa' # Mode Change Request
CLUSTER_CMD_AM_STATUS = b'\xfb' # Status Update
CLUSTER_CMD_AM_VERSION_REQ = b'\xfc' # Version Information Request
CLUSTER_CMD_AM_RSSI = b'\xfd' # RSSI Range Test Update
CLUSTER_CMD_AM_VERSION_RESP = b'\xfe' # Version Information Response
# At the moment it is not clear if or how the following dictionary will be used.
# It is here to describe the relationship between Cluster ID and Cmd.
# One day it may be used by the parse_message() function and linked with the parse_xxxxx() functions.
alertme_cluster_cmds = {
CLUSTER_ID_AM_SWITCH: {
CLUSTER_CMD_AM_STATE_CHANGE: "Relay State Change (SmartPlug)",
CLUSTER_CMD_AM_STATE_RESP: "Switch Status Update"
},
CLUSTER_ID_AM_POWER: {
CLUSTER_CMD_AM_PWR_DEMAND: "Power Demand Update",
CLUSTER_CMD_AM_PWR_CONSUMPTION: "Power Consumption & Uptime Update",
CLUSTER_CMD_AM_PWR_UNKNOWN: "Unknown British Gas Power Meter Update"
},
CLUSTER_ID_AM_STATUS: {
CLUSTER_CMD_AM_MODE_REQ: "Mode Change Request",
CLUSTER_CMD_AM_STATUS: "Status Update"
},
CLUSTER_ID_AM_TAMPER: {},
CLUSTER_ID_AM_BUTTON: {},
CLUSTER_ID_AM_DISCOVERY: {
CLUSTER_CMD_AM_RSSI: "RSSI Range Test Update",
CLUSTER_CMD_AM_VERSION_REQ: "Version Information Request",
CLUSTER_CMD_AM_VERSION_RESP: "Version Information Response"
},
CLUSTER_ID_AM_SECURITY: {
CLUSTER_CMD_AM_SEC_ENROLL_REQ: "Security Command"
}
}
# This messages dict holds the skeleton for the various ZDO and AlertMe messages.
# It is used in conjunction with generate_message() to generate the messages.
# Those with a lambda in the data key make use of the generate_xxxx() functions
# to generate the data based on the parameters passed.
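# For example (illustrative), generate_message('version_info_request') on a ZBNode
# returns a frame dict ready to pass to send_message(), roughly:
#   {'profile': b'\xc2\x16',        # PROFILE_ID_ALERTME
#    'cluster': b'\x00\xf6',        # CLUSTER_ID_AM_DISCOVERY
#    'src_endpoint': b'\x02',
#    'dest_endpoint': b'\x02',
#    'data': b'\x11\x00\xfc'}       # preamble + Version Information Request command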
messages = {
'version_info_request': {
'name': 'Version Info Request',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_DISCOVERY,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_version_info_request(params)
}
},
'version_info_update': {
'name': 'Version Info Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_DISCOVERY,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_version_info_update(params)
},
'expected_params': ['hwMajorVersion', 'hwMinorVersion', 'type', 'manu_string', 'manu_date']
},
'range_update': {
'name': 'Range Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_DISCOVERY,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_range_update(params)
},
'expected_params': ['rssi']
},
'switch_state_request': {
'name': 'Relay State Request',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_SWITCH,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_switch_state_request(params)
},
'expected_params': ['switch_state']
},
'switch_state_update': {
'name': 'Relay State Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_SWITCH,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_switch_state_update(params)
},
'expected_params': ['switch_state']
},
'mode_change_request': {
'name': 'Mode Change Request',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_STATUS,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_mode_change_request(params)
},
'expected_params': ['mode']
},
'status_update': {
'name': 'Status Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_STATUS,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_status_update(params)
}
},
'power_demand_update': {
'name': 'Power Demand Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_POWER,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_power_demand_update(params)
},
'expected_params': ['power_demand']
},
'power_consumption_update': {
'name': 'Power Consumption Update',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_POWER,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_power_consumption_update(params)
},
'expected_params': ['power_consumption', 'up_time']
},
'button_press': {
'name': 'Button Press',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_BUTTON,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_button_press(params)
},
'params': ['button_state', 'counter']
},
'security_init': {
'name': 'Security Initialization',
'frame': {
'profile': PROFILE_ID_ALERTME,
'cluster': CLUSTER_ID_AM_SECURITY,
'src_endpoint': ENDPOINT_ALERTME,
'dest_endpoint': ENDPOINT_ALERTME,
'data': lambda self, params: self.generate_security_init(params)
}
},
'active_endpoints_request': {
'name': 'Active Endpoints Request',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_ACTIVE_EP_REQ,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': lambda self, params: self.generate_active_endpoints_request(params)
},
'expected_params': ['zdo_sequence', 'addr_short']
},
'match_descriptor_request': {
'name': 'Match Descriptor Request',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_MATCH_DESC_REQ,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': lambda self, params: self.generate_match_descriptor_request(params)
},
'expected_params': ['zdo_sequence', 'addr_short', 'profile_id', 'in_cluster_list', 'out_cluster_list']
},
'match_descriptor_response': {
'name': 'Match Descriptor Response',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_MATCH_DESC_RSP,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': lambda self, params: self.generate_match_descriptor_response(params)
},
'expected_params': ['zdo_sequence', 'addr_short', 'endpoint_list']
},
'routing_table_request': {
'name': 'Management Routing Table Request',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_MGMT_RTG_REQ,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': b'\x12\x01'
}
},
'permit_join_request': {
'name': 'Management Permit Join Request',
'frame': {
'profile': PROFILE_ID_ZDP,
'cluster': CLUSTER_ID_ZDO_MGMT_PERMIT_JOIN_REQ,
'src_endpoint': ENDPOINT_ZDO,
'dest_endpoint': ENDPOINT_ZDO,
'data': b'\xff\x00'
}
}
}
class ZBNode(Node):
"""
ZigBee Node object.
"""
def __init__(self, serial, callback=None):
"""
ZigBee Node Constructor.
:param serial: Serial Object
:param callback: Optional
"""
Node.__init__(self, callback)
# Type Info
self.type = 'ZBNode'
self.hwMajorVersion = 123
self.hwMinorVersion = 45
self.manu_string = 'PyAlertMe'
self.manu_date = '2017-01-01'
# Start up Serial and ZigBee
self._serial = serial
self._xbee = ZigBee(ser=self._serial, callback=self.receive_message, error_callback=self.xbee_error, escaped=True)
# My addresses
self.addr_long = None
self.addr_short = None
# Fire off messages to discover own addresses
self._addr_long_list = [b'', b'']
self.read_addresses()
# Scheduler Thread
self._started = True
self._schedule_thread = threading.Thread(target=self._schedule_loop)
self._schedule_interval = 2
self._schedule_thread.start()
self.endpoint_list = [ENDPOINT_ZDO, ENDPOINT_ALERTME]
def _schedule_loop(self):
"""
        Continual updates thread. Calls the _schedule_event() function at
        intervals set in self._schedule_interval.
"""
while self._started:
if self.associated:
self._schedule_event()
# The following for loop is being used in place of a simple:
# time.sleep(self._schedule_interval)
# This is done so we can interrupt the thread quicker and
# Unit tests finish faster.
for i in range(self._schedule_interval * 10):
if self._started:
time.sleep(0.1)
def _schedule_event(self):
"""
The _schedule_event() function is called by the _schedule_loop()
thread function called at regular intervals.
Stub, to be overwritten by ZBHub or ZBDevice.
"""
self._logger.debug('[STUB] schedule_event: Continual Update')
def halt(self):
"""
Halt Device.
Close XBee and Serial.
:return:
"""
self._started = False # This should kill the updates thread
self._schedule_thread.join() # Wait for updates thread to finish
self._xbee.halt()
self._serial.close()
def generate_message(self, message_id, params=None):
"""
Generate Message.
:param message_id: Message ID
:return:
"""
self._logger.debug('Generating message %s' % message_id)
if message_id in messages.keys():
# Take a deep copy of the message
message = copy.deepcopy(messages[message_id])
if params:
# If we have manually been provided any params then use these
if 'expected_params' in message.keys():
expected_params = sorted(message['expected_params'])
provided_params = sorted(params.keys())
missing_params = sorted(set(expected_params).difference(set(provided_params)))
# We need to check if there are any missing
if len(missing_params) > 0:
raise Exception("Missing Parameters: %s" % missing_params)
else:
# Otherwise attempt to auto calculate params from the device object
params = {}
if 'expected_params' in message.keys():
for param in message['expected_params']:
params[param] = self.get_attribute(param)
# If 'data' is a lambda, then call it and replace with the return value
data = message['frame']['data']
if callable(message['frame']['data']):
message['frame']['data'] = data(self, params)
# Return processed message
return message['frame']
else:
raise Exception("Message '%s' does not exist" % message_id)
def list_messages(self):
"""
List messages.
:return:
"""
actions = {}
for message_id, message in messages.items():
actions[message_id] = {'name': message['name']}
if 'expected_params' in message.keys():
actions[message_id]['expected_params'] = message['expected_params']
return actions
def xbee_error(self, error):
"""
On XBee error this function is called.
:param error:
:return:
"""
self._logger.critical('XBee Error: %s', error)
def read_addresses(self):
"""
Work out own address.
"""
self._logger.debug('Requesting own addresses')
self._xbee.send('at', command='MY')
time.sleep(0.05)
self._xbee.send('at', command='SH')
time.sleep(0.05)
self._xbee.send('at', command='SL')
time.sleep(0.05)
def send_message(self, message, dest_addr_long, dest_addr_short):
"""
Send message to XBee.
:param message: Dict message
        :param dest_addr_long: 64-bit Long Address
:param dest_addr_short: 16-bit Short Address
:return:
"""
# Tack on destination addresses
message['dest_addr_long'] = dest_addr_long
message['dest_addr'] = dest_addr_short
self._logger.debug('Sending Message: %s', message)
self._xbee.send('tx_explicit', **message)
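    # For example (illustrative): a permit-join request can be broadcast to the whole
    # PAN with:
    #   node.send_message(node.generate_message('permit_join_request'),
    #                     BROADCAST_LONG, BROADCAST_SHORT)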
def receive_message(self, message):
"""
Receive message from XBee.
Parse incoming message.
Process parsed result.
:param message: Dict of message
:return:
"""
ret = self.parse_message(message)
if message['id'] == 'rx_explicit':
source_addr_long = message['source_addr_long']
source_addr_short = message['source_addr']
# Send any replies which may need sending
for reply in ret['replies']:
message_id = reply['message_id']
if 'params' in reply.keys():
params = reply['params']
else:
params = {}
reply = self.generate_message(message_id, params)
self.send_message(reply, source_addr_long, source_addr_short)
time.sleep(0.5)
# Update any attributes which may need updating
self.process_message(source_addr_long, source_addr_short, ret['attributes'])
def parse_message(self, message):
"""
Parse ZigBee message. Work out any attribute changes and reply messages.
:param message: Dict of message
:return:
"""
self._logger.debug('Received Message: %s ', message)
try:
nicestring = ' '.join(('%#04x' % ord(c) for c in message['rf_data']))
self._logger.debug('RF_data: %s ', nicestring)
except:
self._logger.debug('no RF_data')
try:
nicestring = ' '.join(('%#04x' % ord(c) for c in message['cluster']))
self._logger.debug('Cluster: %s ', nicestring)
except:
self._logger.debug('Issue with decoding cluster')
attributes = {}
replies = []
# AT Packets
if message['id'] == 'at_response':
if message['command'] == 'MY':
self.addr_short = message['parameter']
if message['command'] == 'SH':
self._addr_long_list[0] = message['parameter']
if message['command'] == 'SL':
self._addr_long_list[1] = message['parameter']
# If we have worked out both the High and Low addresses then calculate the full addr_long
if self._addr_long_list[0] and self._addr_long_list[1]:
self.addr_long = b''.join(self._addr_long_list)
# ZigBee Explicit Packets
if message['id'] == 'rx_explicit':
source_addr_long = message['source_addr_long']
source_addr_short = message['source_addr']
profile_id = message['profile']
cluster_id = message['cluster']
if profile_id == PROFILE_ID_ZDP:
# ZigBee Device Profile ID
self._logger.debug('Received ZigBee Device Profile Packet')
zdo_sequence = message['rf_data'][0:1]
if cluster_id == CLUSTER_ID_ZDO_NWK_ADDR_REQ:
# Network (16-bit) Address Request
self._logger.debug('Received Network (16-bit) Address Request')
elif cluster_id == CLUSTER_ID_ZDO_NWK_ADDR_RSP:
# Network (16-bit) Address Response
self._logger.debug('Received Network (16-bit) Address Response')
elif cluster_id == CLUSTER_ID_ZDO_MGMT_RTG_REQ:
# Management Routing Table Request
self._logger.debug('Received Management Routing Table Request')
elif cluster_id == CLUSTER_ID_ZDO_MGMT_RTG_RSP:
# Management Routing Response
self._logger.debug('Received Management Routing Response')
elif cluster_id == CLUSTER_ID_ZDO_SIMPLE_DESC_REQ:
# Simple Descriptor Request
self._logger.debug('Received Simple Descriptor Request')
elif cluster_id == CLUSTER_ID_ZDO_ACTIVE_EP_REQ: #0x0005
# Active Endpoint Request
self._logger.debug('Received Active Endpoint Request')
elif cluster_id == CLUSTER_ID_ZDO_ACTIVE_EP_RSP: #0x8005
# Active Endpoints Response
# This message tells us what the device can do, but it isn't
# constructed correctly to match what the switch can do according
                    # to the spec. This is another message that gets its response
# after we receive the Match Descriptor below.
self._logger.debug('Received Active Endpoint Response')
elif cluster_id == CLUSTER_ID_ZDO_MATCH_DESC_REQ: #0x0006
# Match Descriptor Request
self._logger.debug('Received Match Descriptor Request')
# This is the point where we finally respond to the switch.
# A couple of messages are sent to cause the switch to join with
# the controller at a network level and to cause it to regard
# this controller as valid.
# First send the Match Descriptor Response
params = {
'zdo_sequence': zdo_sequence,
'addr_short': source_addr_short,
'endpoint_list': self.endpoint_list
}
replies.append({'message_id': 'match_descriptor_response', 'params': params})
elif cluster_id == CLUSTER_ID_ZDO_MATCH_DESC_RSP:
# Match Descriptor Response
self._logger.debug('Received Match Descriptor Response')
elif cluster_id == CLUSTER_ID_ZDO_END_DEVICE_ANNCE: #0x0013
# Device Announce Message
self._logger.debug('Received Device Announce Message')
# This will tell me the address of the new thing,
# so we're going to send an Active Endpoint Request.
params = {
'zdo_sequence': zdo_sequence,
'addr_short': source_addr_short
}
replies.append({'message_id': 'active_endpoints_request', 'params': params})
elif cluster_id == CLUSTER_ID_ZDO_MGMT_NETWORK_UPDATE:
# Management Network Update Notify
self._logger.debug('Received Management Network Update Notify')
else:
self._logger.error('Unrecognised Cluster ID: %r', cluster_id)
elif profile_id == PROFILE_ID_ALERTME:
# AlertMe Profile ID
self._logger.debug('Received AlertMe Specific Profile Packet')
cluster_cmd = message['rf_data'][2:3]
if cluster_id == CLUSTER_ID_AM_SWITCH:
if cluster_cmd == CLUSTER_CMD_AM_STATE_REQ:
# Switch State Request
# b'\x11\x00\x01\x01'
self._logger.debug('Received Switch State Request')
replies.append({'message_id': 'switch_state_update'})
elif cluster_cmd == CLUSTER_CMD_AM_STATE_RESP:
self._logger.debug('Received Switch State Update')
attributes = self.parse_switch_state_update(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_STATE_CHANGE:
# Switch Change State
# b'\x11\x00\x02\x01\x01' On
# b'\x11\x00\x02\x00\x01' Off
self._logger.debug('Received Switch State Change')
attributes = self.parse_switch_state_request(message['rf_data'])
replies.append({'message_id': 'switch_state_update'})
else:
self._logger.error('Unrecognised Cluster Command: %r', cluster_cmd)
elif cluster_id == CLUSTER_ID_AM_POWER:
if cluster_cmd == CLUSTER_CMD_AM_PWR_DEMAND:
self._logger.debug('Received Power Demand Update')
attributes = self.parse_power_demand(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_PWR_CONSUMPTION:
self._logger.debug('Received Power Consumption & Uptime Update')
attributes = self.parse_power_consumption(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_PWR_UNKNOWN:
self._logger.debug('Unknown Power Update')
attributes = self.parse_power_unknown(message['rf_data'])
else:
self._logger.error('Unrecognised Cluster Command: %r', cluster_cmd)
elif cluster_id == CLUSTER_ID_AM_TAMPER:
self._logger.debug('Received Tamper Switch Triggered')
attributes = self.parse_tamper_state(message['rf_data'])
elif cluster_id == CLUSTER_ID_AM_BUTTON:
self._logger.debug('Received Button Pressed')
attributes = self.parse_button_press(message['rf_data'])
elif cluster_id == CLUSTER_ID_AM_SECURITY:
self._logger.debug('Received Security Event')
# Security Cluster
# When the device first connects, it comes up in a state that
# needs initialization, this command seems to take care of that.
# So, look at the value of the data and send the command.
if message['rf_data'][3:7] == b'\x15\x00\x39\x10':
replies.append({'message_id': 'security_init'})
attributes = self.parse_security_state(message['rf_data'])
elif cluster_id == CLUSTER_ID_AM_DISCOVERY:
if cluster_cmd == CLUSTER_CMD_AM_RSSI:
self._logger.debug('Received RSSI Range Update')
attributes = self.parse_range_info_update(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_VERSION_RESP:
self._logger.debug('Received Version Information')
attributes = self.parse_version_info_update(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_VERSION_REQ:
# b'\x11\x00\xfc\x00\x01'
self._logger.debug('Received Version Request')
replies.append({'message_id': 'version_info_update'})
else:
self._logger.error('Unrecognised Cluster Command: %r', cluster_cmd)
elif cluster_id == CLUSTER_ID_AM_STATUS:
if cluster_cmd == CLUSTER_CMD_AM_STATUS:
self._logger.debug('Received Status Update')
attributes = self.parse_status_update(message['rf_data'])
elif cluster_cmd == CLUSTER_CMD_AM_MODE_REQ:
self._logger.debug('Received Mode Change Request')
mode_cmd = message['rf_data'][3] + message['rf_data'][4]
mode = 'normal'
if mode_cmd == b'\x00\x01':
# Normal
# b'\x11\x00\xfa\x00\x01'
self._logger.debug('Normal Mode')
mode = 'normal'
elif mode_cmd == b'\x01\x01':
# Range Test
# b'\x11\x00\xfa\x01\x01'
self._logger.debug('Range Test Mode')
mode = 'range'
elif mode_cmd == b'\x02\x01':
# Locked
# b'\x11\x00\xfa\x02\x01'
self._logger.debug('Locked Mode')
mode = 'locked'
elif mode_cmd == b'\x03\x01':
# Silent
# b'\x11\x00\xfa\x03\x01'
self._logger.debug('Silent Mode')
mode = 'silent'
attributes = {'mode': mode}
else:
self._logger.error('Unrecognised Cluster Command: %r', cluster_cmd)
elif profile_id == PROFILE_ID_HA:
# HA Profile ID
self._logger.debug('Received HA Profile Packet')
else:
self._logger.error('Unrecognised Profile ID: %r', profile_id)
return {'attributes': attributes, 'replies': replies}
def process_message(self, addr_long, addr_short, attributes):
"""
Process after message received. Stub, to be overwritten by ZBHub or ZBDevice.
        :param addr_long: Long Address
        :param addr_short: Short Address
:param attributes: Dict of message
:return:
"""
self._logger.debug('[STUB] process_message: %s', attributes)
def generate_version_info_request(self, params=None):
"""
Generate Version Info Request.
This message is sent FROM the Hub TO the SmartPlug requesting version information.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Version Information Request (b'\xfc')
:param params: Parameter dictionary (none required)
:return: Message data
"""
preamble = b'\x11\x00'
cluster_cmd = CLUSTER_CMD_AM_VERSION_REQ
payload = b'' # No data required in request
data = preamble + cluster_cmd + payload
return data
def generate_version_info_update(self, params):
"""
Generate Version Info Update.
This message is sent TO the Hub FROM the SmartPlug advertising version information.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Version Information Response (b'\xfe')
Unknown 17 Unknown Values TBC. There may be more interesting stuff in here?
HW Version 2 Hardware Version
Type Info Variable Type Information (b'AlertMe.com\nSmartPlug\n2013-09-26')
:param params: Parameter dictionary of version info
:return: Message data
"""
preamble = b'\x09\x71' # b'\tq'
cluster_cmd = CLUSTER_CMD_AM_VERSION_RESP
payload = b'\x48\x41' + b'\xd2\x1b\x19\x00\x00\x6f\x0d\x00' + b'\x39\x10' \
+ struct.pack('<HBBBB', 7, 1, 28, params['hwMinorVersion'], params['hwMajorVersion']) \
+ struct.pack('B', len(params['manu_string'])) \
+ params['manu_string'] \
+ struct.pack('B', len(params['type'])) \
+ params['type'] \
+ struct.pack('B', len(params['manu_date'])) \
+ params['manu_date']
data = preamble + cluster_cmd + payload
return data
def parse_version_info_update(self, data):
"""
Process message, parse for version information:
Version, Type, Manufacturer, Date
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Version Information Response (b'\xfe')
NodeID 2 unsigned short (H)
EUI64Str 8 8x Char (8s)
mfgID 2 unsigned short (H)
DeviceType 2 unsigned short (H)
        AppRelease 1 unsigned integer (B)
        AppVersion 1 unsigned integer (B)
        HWMinor 1 unsigned integer (B)
        HWMajor 1 unsigned integer (B)
Type Info Variable Type Information (b'AlertMe.com\nSmartPlug\n2013-09-26')
:param data: Message data
:return: Parameter dictionary of version info
"""
ret = dict()
ret['nodeId'], Eui64str, ret['mfgId'], ret['deviceType'], ret['appRelease'], ret['appVersion'], ret['hwMinorVersion'], ret['hwMajorVersion'] = struct.unpack('<H8sHHBBBB', data[3:21])
        # In ZCL strings the first byte is the length of that string field, followed by further string fields
ret['manu_string'], message = self.getZclString(data[21:])
ret['type'], message = self.getZclString(message)
ret['manu_date'], message = self.getZclString(message)
return ret
def generate_range_update(self, params):
"""
Generate range message.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - RSSI Range Test Update (b'\xfd')
RSSI Value 1 RSSI Range Test Value
Unknown 1 ???
:param params: Parameter dictionary of RSSI value
:return: Message data
"""
preamble = b'\x09\x2b' # b'\t+'
cluster_cmd = CLUSTER_CMD_AM_RSSI
payload = struct.pack('B 1x', params['rssi'])
data = preamble + cluster_cmd + payload
return data
def parse_range_info_update(self, data):
"""
Process message, parse for RSSI range test value.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - RSSI Range Test Update (b'\xfd')
RSSI Value 1 RSSI Range Test Value
Unknown 1 ???
:param data: Message data
:return: Parameter dictionary of RSSI value
"""
values = dict(zip(
('cluster_cmd', 'rssi'),
struct.unpack('< 2x s B 1x', data)
))
rssi = values['rssi']
return {'rssi': rssi}
def generate_power_demand_update(self, params):
"""
Generate Power Demand Update message data.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Power Demand Update (b'\x81')
Power Value 2 Power Demand Value (kW)
:param params: Parameter dictionary of power demand value
:return: Message data
"""
preamble = b'\x09\x6a' # b'\tj'
cluster_cmd = CLUSTER_CMD_AM_PWR_DEMAND
payload = struct.pack('H', params['power_demand'])
data = preamble + cluster_cmd + payload
return data
def generate_power_consumption_update(self, params):
"""
Power Consumption & Uptime Update.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Power Consumption & Uptime Update (b'\x82')
Power Value 4 Power Consumption Value (kWh)
Up Time 4 Up Time Value (seconds)
Unknown 1 ???
:return: Message
"""
params = {
'power_consumption': 19973,
'up_time': 33207
}
# At the moment this just generates a hard coded message.
# Also see parse_power_consumption().
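        # A parameterised version would presumably mirror parse_power_consumption()
        # and its '< 2x s I I 1x' layout - a sketch, not wired in:
        #   data = b'\x09\x6e' + CLUSTER_CMD_AM_PWR_CONSUMPTION \
        #          + struct.pack('<II', params['power_consumption'], params['up_time']) \
        #          + b'\x01'  # trailing byte unknown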
data = b'\tn\x82\x05N\x00\x00\xb7\x81\x00\x00\x01'
return data
def parse_power_demand(self, data):
"""
Process message, parse for power demand value.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Power Demand Update (b'\x81')
Power Value 2 Power Demand Value (kW)
Examples:
b'\tj\x81\x00\x00' {'PowerDemand': 0}
b'\tj\x81%\x00' {'PowerDemand': 37}
b'\tj\x81\x16\x00' {'PowerDemand': 22}
:param data: Message data
:return: Parameter dictionary of power demand value
"""
ret = dict(zip(
('cluster_cmd', 'power_demand'),
struct.unpack('< 2x s H', data)
))
del ret['cluster_cmd']
return ret
def parse_power_unknown(self, data):
"""
Parse unknown power message seen from British Gas (AlertMe) power monitor.
Could this be the same or merged with parse_power_demand() or parse_power_consumption()?
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC (b'\t\x00')
Cluster Command 1 Cluster Command - Unknown Power (b'\x86')
Unknown 11 ?? TODO Work out what power values this message contains!
Examples:
b'\t\x00\x86\x00\x00\x00\x00\x00\x00/\x00\x00\x00\x00' = 0
b'\t\x00\x86\x91\x012"\x00\x00M\x00\x00\x00\x00' = ?
b'\t\x00\x86F\x01{\xc9\x02\x007\x02\x00\x00\x00' = ?
:param data: Message data
:return: Parameter dictionary of power demand value
"""
value = struct.unpack('<H', data[3:5])[0] # TBC
return {'power_demand': value}
def parse_power_consumption(self, data):
"""
Process message, parse for power consumption value.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Power Consumption & Uptime Update (b'\x82')
Power Value 4 Power Consumption Value (kWh)
Up Time 4 Up Time Value (seconds)
Unknown 1 ???
:param data: Message data
:return: Parameter dictionary of usage stats
"""
ret = dict(zip(
('cluster_cmd', 'power_consumption', 'up_time'),
struct.unpack('< 2x s I I 1x', data)
))
del ret['cluster_cmd']
return ret
def generate_mode_change_request(self, params=None):
"""
Generate Mode Change Request.
Available Modes: 'Normal', 'RangeTest', 'Locked', 'Silent'
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Mode Change Request (b'\xfa')
Mode 2 Requested Mode (1: Normal, 257: Range Test, 513: Locked, 769: Silent)
:param params: Parameter dictionary of requested mode
:return: Message data
"""
preamble = b'\x11\x00'
cluster_cmd = CLUSTER_CMD_AM_MODE_REQ
payload = b'\x00\x01' # Default normal if no mode
if not params:
mode = 'normal'
else:
mode = params['mode']
if mode == 'normal':
payload = b'\x00\x01'
elif mode == 'range':
payload = b'\x01\x01'
elif mode == 'locked':
payload = b'\x02\x01'
elif mode == 'silent':
payload = b'\x03\x01'
elif mode == 'idle':
payload = b'\x04\x01'
else:
self._logger.error('Invalid mode request %s', mode)
data = preamble + cluster_cmd + payload
return data
def generate_switch_state_request(self, params):
"""
Generate Switch State Change request data.
This message is sent FROM the Hub TO the SmartPlug requesting state change.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Change State (SmartPlug) (b'\x01' / b'\x02')
Requested Relay State 2* b'\x01' = Check Only, b'\x01\x01' = On, b'\x00\x01' = Off
* Size = 1 if check only
:param params: Parameter dictionary of switch relay state
:return: Message data
"""
preamble = b'\x11\x00'
if params['switch_state'] != '':
cluster_cmd = CLUSTER_CMD_AM_STATE_CHANGE
if int(params['switch_state']) == 1:
payload = b'\x01\x01' # On
else:
payload = b'\x00\x01' # Off
else:
# Check Only
cluster_cmd = CLUSTER_CMD_AM_STATE_REQ
payload = b'\x01'
data = preamble + cluster_cmd + payload
return data
def parse_switch_state_request(self, data):
"""
Process message, parse for switch relay state change request.
This message is sent FROM the Hub TO the SmartPlug requesting state change.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Change State (SmartPlug) (b'\x02')
Requested Relay State 2 b'\x01\x01' = On, b'\x00\x01' = Off
:param data: Message data
:return: Parameter dictionary of switch relay state
"""
# Parse Switch State Request
if data == b'\x11\x00\x02\x01\x01':
return {'switch_state': 1}
elif data == b'\x11\x00\x02\x00\x01':
return {'switch_state': 0}
else:
self._logger.error('Unknown State Request')
def generate_switch_state_update(self, params):
"""
Generate Switch State update message data.
This message is sent TO the Hub FROM the SmartPlug advertising state change.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Switch Status Update (b'\x80')
Relay State 2 b'\x07\x01' = On, b'\x06\x00' = Off
:param params: Parameter dictionary of switch relay state
:return: Message data
"""
preamble = b'\x09\x68' # b'\th'
cluster_cmd = CLUSTER_CMD_AM_STATE_RESP
payload = b'\x07\x01' if params['switch_state'] else b'\x06\x00'
data = preamble + cluster_cmd + payload
return data
def parse_switch_state_update(self, data):
"""
Process message, parse for switch status.
This message is sent TO the Hub FROM the SmartPlug advertising state change.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC
Cluster Command 1 Cluster Command - Switch Status Update (b'\x80')
Relay State 2 b'\x07\x01' = On, b'\x06\x00' = Off
Examples:
b'\th\x80\x07\x01'
b'\th\x80\x06\x00'
:param data: Message data
:return: Parameter dictionary of switch status
"""
values = struct.unpack('< 2x b b b', data)
if values[2] & 0x01:
return {'switch_state': 1}
else:
return {'switch_state': 0}
def generate_button_press(self, params=None):
"""
Button Press Update.
Field Name Size Description
---------- ---- -----------
Preamble 1 Unknown Preamble TBC (b'\t')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Button State 1 Button State (b'\x01' = On, b'\x00' = Off)
Unknown 1 ??? (b'\x00')
Unknown 1 ??? (b'\x01')
Counter 2 Counter (milliseconds) (b'X\xf4')
Unknown 2 ??? (b'\x00\x00')
:return: Message
"""
params = {
'button_state': 1,
'counter': 62552
}
# At the moment this just generates a hard coded message.
# Also see parse_button_press().
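        # A parameterised version would presumably follow the field layout above
        # - a sketch, not wired in:
        #   data = b'\t\x00' + struct.pack('B', params['button_state']) \
        #          + b'\x00\x01' + struct.pack('<H', params['counter']) + b'\x00\x00'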
data = b'\t\x00\x01\x00\x01X\xf4\x00\x00'
return data
def parse_button_press(self, data):
"""
Process message, parse for button press status.
Field Name Size Description
---------- ---- -----------
Preamble 1 Unknown Preamble TBC (b'\t')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Button State 1 Button State (b'\x01' = On, b'\x00' = Off)
Unknown 1 ??? (b'\x00')
Unknown 1 ??? (b'\x01', b'\x02')
Counter 2 Counter (milliseconds) (b'\xbf\xc3', b\x12\xca)
Unknown 2 ??? (b'\x00\x00')
Examples:
b'\t\x00\x00\x00\x02\xbf\xc3\x00\x00' {'State': 0, 'Counter': 50111}
b'\t\x00\x01\x00\x01\x12\xca\x00\x00' {'State': 1, 'Counter': 51730}
:param data: Message data
:return: Parameter dictionary of button status
"""
ret = {}
if ord(data[2]) == 0x00:
ret['button_state'] = 0
elif ord(data[2]) == 0x01:
ret['button_state'] = 1
ret['counter'] = struct.unpack('<H', data[5:7])[0]
return ret
def parse_tamper_state(self, data):
"""
Process message, parse for Tamper Switch State Change.
Field Name Size Description
---------- ---- -----------
Preamble 1 Unknown Preamble TBC (b'\t')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Unknown 1 ??? (b'\x00', b'\x01')
Tamper State 1 Tamper State (b'\x01' = Closed, b'\x02' = Open)
Counter 2 Counter (milliseconds) (b'\xe8\xa6')
Unknown 2 ??? (b'\x00\x00')
Examples:
b'\t\x00\x00\x02\xe8\xa6\x00\x00' {'Counter': 42728, 'TamperState': 1}
b'\t\x00\x01\x01+\xab\x00\x00' {'Counter': 43819, 'TamperState': 0}
:param data: Message data
:return: Parameter dictionary of tamper status
"""
ret = {}
if ord(data[3]) == 0x02:
ret['tamper_state'] = 1 # Open
else:
ret['tamper_state'] = 0 # Closed
ret['counter'] = struct.unpack('<H', data[4:6])[0]
return ret
def parse_security_state(self, data):
"""
Process message, parse for security state.
TODO: Is this the SAME AS parse_tamper_state!?!
Field Name Size Description
---------- ---- -----------
Preamble 1 Unknown Preamble TBC (b'\t')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Unknown 1 ??? (b'\x00')
        Security State 1 Security States Bitfield (b'\00', b'\01', b'\04', b'\05')
Unknown 2 ??? (b'\x00\x00')
Examples:
b'\t\x00\x00\x00\x00\x00' {'TriggerState': 0, 'TamperState': 0}
b'\t\x00\x00\x01\x00\x00' {'TriggerState': 1, 'TamperState': 0}
b'\t\x00\x00\x04\x00\x00' {'TriggerState': 0, 'TamperState': 1}
b'\t\x00\x00\x05\x00\x00' {'TriggerState': 1, 'TamperState': 1}
:param data: Message data
:return: Parameter dictionary of security state
"""
ret = {}
        # The security state is in byte [3] and is a bitfield:
        # bit 0 (0x01) is the magnetic reed switch state
        # bit 2 (0x04) is the tamper switch state
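        # e.g. b'\x05' = 0b101 -> trigger_state = 1 and tamper_state = 1,
        # matching the docstring examples above.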
state = ord(data[3])
if state & 0x01:
ret['trigger_state'] = 1 # Open
else:
ret['trigger_state'] = 0 # Closed
if state & 0x04:
ret['tamper_state'] = 1 # Open
else:
ret['tamper_state'] = 0 # Closed
return ret
def generate_security_init(self, params=None):
"""
Generate Security Initialisation.
Keeps security devices joined?
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC (b'\x11\x80')
Cluster Command 1 Cluster Command - Security Event (b'\x00')
Unknown 2 ??? (b'\x00\x05')
:param params: Parameter dictionary (none required)
:return: Message data
"""
preamble = b'\x11\x80'
cluster_cmd = CLUSTER_CMD_AM_SEC_STATUS_CHANGE
payload = b'\x00\x05'
data = preamble + cluster_cmd + payload
return data
def parse_status_update(self, data):
"""
Process message, parse for status update.
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC (b'\t\x89')
Cluster Command 1 Cluster Command - Status Update (b'\xfb')
Type 1 b'\x1b' Clamp, b'\x1c' Switch, b'\x1d' Key Fob, b'\x1e', b'\x1f' Door
Counter 4 Counter (b'\xdb2\x00\x00')
TempFahrenheit 2 Temperature (Fahrenheit) (b'\xf0\x0b')
Unknown 6 ??? (b'na\xd3\xff\x03\x00')
Examples:
b'\t\x89\xfb\x1d\xdb2\x00\x00\xf0\x0bna\xd3\xff\x03\x00' {'Temperature': 87.008, 'Counter': 13019}
b'\t\r\xfb\x1f<\xf1\x08\x02/\x10D\x02\xcf\xff\x01\x00' {'Temperature': 106.574, 'TriggerState': 0, 'TamperState': 1}
:param data: Message data
:return: Parameter dictionary of state
"""
ret = {}
_type = data[3]
if _type == b'\x1b':
# Power Clamp
# Unknown
pass
elif _type == b'\x1c':
# Power Switch
# Unknown
pass
elif _type == b'\x1d':
# Key Fob
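            # Raw value is a signed 16-bit temperature in hundredths of a degree
            # Celsius; convert to Fahrenheit.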
ret['temperature'] = float(struct.unpack("<h", data[8:10])[0]) / 100.0 * 1.8 + 32
ret['counter'] = struct.unpack('<I', data[4:8])[0]
elif _type == b'\x1e' or _type == b'\x1f':
# Door Sensor
ret['temperature'] = float(struct.unpack("<h", data[8:10])[0]) / 100.0 * 1.8 + 32
if ord(data[-1]) & 0x01 == 1:
ret['trigger_state'] = 1 # Open
else:
ret['trigger_state'] = 0 # Closed
if ord(data[-1]) & 0x02 == 0:
ret['tamper_state'] = 1 # Open
else:
ret['tamper_state'] = 0 # Closed
else:
self._logger.error('Unrecognised Device Status %r %r', _type, data)
return ret
def generate_status_update(self, params):
"""
Generate Status Update
Field Name Size Description
---------- ---- -----------
Preamble 2 Unknown Preamble TBC b'\t\r'
Cluster Command 1 Cluster Command - Status Update (b'\xfb')
Type 1 b'\x1b' Clamp, b'\x1c' Switch, b'\x1d' Key Fob, b'\x1e', b'\x1f' Door
Counter 4 Counter
TempFahrenheit 2 Temperature (Fahrenheit)
Unknown 6 ???
:return: Message
"""
params = {
'trigger_state': 0,
'temperature': 106.574,
'tamper_state': 1
}
# At the moment this just generates a hard coded message.
# The below is just one type of status update, see parse_status_update() for more.
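        # A parameterised version would need the inverse of the temperature
        # conversion used in parse_status_update(), e.g. (sketch):
        #   raw_temp = int(round((params['temperature'] - 32) / 1.8 * 100))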
data = b'\t\r\xfb\x1f<\xf1\x08\x02/\x10D\x02\xcf\xff\x01\x00'
return data
def generate_active_endpoints_request(self, params):
"""
Generate Active Endpoints Request.
The active endpoint request needs the short address of the device
in the payload. Remember, it needs to be little endian (backwards)
The first byte in the payload is simply a number to identify the message
the response will have the same number in it.
Field Name Size Description
---------- ---- -----------
Sequence 1 Frame Sequence
Network Address 2 16-bit address of a device in the network whose active endpoint list being requested.
:param params:
:return: Message data
Example:
b'\xaa\x9f\x88'
"""
zdo_sequence = params['zdo_sequence'] # b'\xaa'
net_addr = params['addr_short'][1] + params['addr_short'][0] # b'\x9f\x88'
data = zdo_sequence + net_addr
return data
def generate_match_descriptor_request(self, params):
"""
Generate Match Descriptor Request.
Broadcast or unicast transmission used to discover the device(s) that supports
a specified profile ID and/or clusters.
Field Name Size Description
---------- ---- -----------
Sequence 1 Frame Sequence
Network Address 2 16-bit address of a device in the network whose power descriptor is being requested.
Profile ID 2 Profile ID to be matched at the destination.
Number of Input Clusters 1 The number of input clusters in the In Cluster List for matching. Set to 0 if no clusters supplied.
Input Cluster List 2* List of input cluster IDs to be used for matching.
Number of Output Clusters 1 The number of output clusters in the Output Cluster List for matching. Set to 0 if no clusters supplied.
Output Cluster List 2* List of output cluster IDs to be used for matching.
* Number of Input Clusters
Example:
b'\x01\xfd\xff\x16\xc2\x00\x01\xf0\x00'
:param params:
:return: Message data
"""
zdo_sequence = params['zdo_sequence'] # b'\x01'
net_addr = params['addr_short'][1] + params['addr_short'][0] # b'\xfd\xff'
profile_id = params['profile_id'][1] + params['profile_id'][0] # b'\x16\xc2' PROFILE_ID_ALERTME (reversed)
        num_input_clusters = struct.pack('B', len(params['in_cluster_list']) // 2) # b'\x00'
input_cluster_list = params['in_cluster_list'] # b''
        num_output_clusters = struct.pack('B', len(params['out_cluster_list']) // 2) # b'\x01'
output_cluster_list = params['out_cluster_list'][1] + params['out_cluster_list'][0] # b'\xf0\x00' CLUSTER_ID_AM_STATUS (reversed)
# TODO Finish this off! At the moment this does not support multiple clusters, it just supports one!
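        # Multi-cluster support would presumably append each 16-bit cluster ID
        # little-endian - a sketch, assuming the cluster lists were passed as
        # sequences of 2-byte IDs (hypothetical parameter shape):
        #   input_cluster_list = b''.join(c[::-1] for c in in_clusters)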
data = zdo_sequence + net_addr + profile_id + num_input_clusters + input_cluster_list + num_output_clusters + output_cluster_list
return data
def generate_match_descriptor_response(self, params):
"""
Generate Match Descriptor Response.
If a descriptor match is found on the device, this response contains a list of endpoints that
support the request criteria.
Field Name Size Description
---------- ---- -----------
Sequence 1 Frame Sequence
Status 1 Response Status
Network Address 2 Indicates the 16-bit address of the responding device.
Length 1 The number of endpoints on the remote device that match the request criteria.
Match List Variable List of endpoints on the remote that match the request criteria.
Example:
b'\x01\x00\x00\xe1\x02\x00\x02'
:param params:
:return: Message data
"""
zdo_sequence = params['zdo_sequence'] # b'\x04'
        status = ZDP_STATUS_OK # b'\x00'
net_addr = params['addr_short'][1] + params['addr_short'][0] # b'\x00\x00'
length = struct.pack('B', len(params['endpoint_list'])) # b'\x02'
match_list = b''.join(params['endpoint_list']) # b'\x00\x02'
data = zdo_sequence + status + net_addr + length + match_list
return data
    def getZclString(self, message):
        """
        Parse a length-prefixed ZCL string from the front of a message.
        The first byte is the string length, followed by the string itself.
        :param message: Message data starting with a ZCL string
        :return: Tuple of (string, remaining message data)
        """
        zclStringLength = ord(message[0])
        zclString = message[1:1 + zclStringLength]
        remainder = message[1 + zclStringLength:]
        return zclString, remainder
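    # Consecutive ZCL strings can be unpacked by chaining calls, as done in
    # parse_version_info_update() above, e.g.:
    #   manu_string, rest = self.getZclString(data[21:])
    #   type_info, rest = self.getZclString(rest)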
|
the-stack_0_17430 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class Email(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'anchor_allow_white_space_in_characters': 'str',
'anchor_allow_white_space_in_characters_metadata': 'PropertyMetadata',
'anchor_case_sensitive': 'str',
'anchor_case_sensitive_metadata': 'PropertyMetadata',
'anchor_horizontal_alignment': 'str',
'anchor_horizontal_alignment_metadata': 'PropertyMetadata',
'anchor_ignore_if_not_present': 'str',
'anchor_ignore_if_not_present_metadata': 'PropertyMetadata',
'anchor_match_whole_word': 'str',
'anchor_match_whole_word_metadata': 'PropertyMetadata',
'anchor_string': 'str',
'anchor_string_metadata': 'PropertyMetadata',
'anchor_tab_processor_version': 'str',
'anchor_tab_processor_version_metadata': 'PropertyMetadata',
'anchor_units': 'str',
'anchor_units_metadata': 'PropertyMetadata',
'anchor_x_offset': 'str',
'anchor_x_offset_metadata': 'PropertyMetadata',
'anchor_y_offset': 'str',
'anchor_y_offset_metadata': 'PropertyMetadata',
'bold': 'str',
'bold_metadata': 'PropertyMetadata',
'conceal_value_on_document': 'str',
'conceal_value_on_document_metadata': 'PropertyMetadata',
'conditional_parent_label': 'str',
'conditional_parent_label_metadata': 'PropertyMetadata',
'conditional_parent_value': 'str',
'conditional_parent_value_metadata': 'PropertyMetadata',
'custom_tab_id': 'str',
'custom_tab_id_metadata': 'PropertyMetadata',
'disable_auto_size': 'str',
'disable_auto_size_metadata': 'PropertyMetadata',
'document_id': 'str',
'document_id_metadata': 'PropertyMetadata',
'error_details': 'ErrorDetails',
'font': 'str',
'font_color': 'str',
'font_color_metadata': 'PropertyMetadata',
'font_metadata': 'PropertyMetadata',
'font_size': 'str',
'font_size_metadata': 'PropertyMetadata',
'form_order': 'str',
'form_order_metadata': 'PropertyMetadata',
'form_page_label': 'str',
'form_page_label_metadata': 'PropertyMetadata',
'form_page_number': 'str',
'form_page_number_metadata': 'PropertyMetadata',
'height': 'str',
'height_metadata': 'PropertyMetadata',
'italic': 'str',
'italic_metadata': 'PropertyMetadata',
'locale_policy': 'LocalePolicyTab',
'locked': 'str',
'locked_metadata': 'PropertyMetadata',
'max_length': 'str',
'max_length_metadata': 'PropertyMetadata',
'merge_field': 'MergeField',
'merge_field_xml': 'str',
'name': 'str',
'name_metadata': 'PropertyMetadata',
'original_value': 'str',
'original_value_metadata': 'PropertyMetadata',
'page_number': 'str',
'page_number_metadata': 'PropertyMetadata',
'recipient_id': 'str',
'recipient_id_guid': 'str',
'recipient_id_guid_metadata': 'PropertyMetadata',
'recipient_id_metadata': 'PropertyMetadata',
'require_all': 'str',
'require_all_metadata': 'PropertyMetadata',
'required': 'str',
'required_metadata': 'PropertyMetadata',
'require_initial_on_shared_change': 'str',
'require_initial_on_shared_change_metadata': 'PropertyMetadata',
'sender_required': 'str',
'sender_required_metadata': 'PropertyMetadata',
'shared': 'str',
'shared_metadata': 'PropertyMetadata',
'share_to_recipients': 'str',
'share_to_recipients_metadata': 'PropertyMetadata',
'smart_contract_information': 'SmartContractInformation',
'source': 'str',
'status': 'str',
'status_metadata': 'PropertyMetadata',
'tab_group_labels': 'list[str]',
'tab_group_labels_metadata': 'PropertyMetadata',
'tab_id': 'str',
'tab_id_metadata': 'PropertyMetadata',
'tab_label': 'str',
'tab_label_metadata': 'PropertyMetadata',
'tab_order': 'str',
'tab_order_metadata': 'PropertyMetadata',
'tab_type': 'str',
'tab_type_metadata': 'PropertyMetadata',
'template_locked': 'str',
'template_locked_metadata': 'PropertyMetadata',
'template_required': 'str',
'template_required_metadata': 'PropertyMetadata',
'tooltip': 'str',
'tool_tip_metadata': 'PropertyMetadata',
'underline': 'str',
'underline_metadata': 'PropertyMetadata',
'validation_message': 'str',
'validation_message_metadata': 'PropertyMetadata',
'validation_pattern': 'str',
'validation_pattern_metadata': 'PropertyMetadata',
'value': 'str',
'value_metadata': 'PropertyMetadata',
'width': 'str',
'width_metadata': 'PropertyMetadata',
'x_position': 'str',
'x_position_metadata': 'PropertyMetadata',
'y_position': 'str',
'y_position_metadata': 'PropertyMetadata'
}
attribute_map = {
'anchor_allow_white_space_in_characters': 'anchorAllowWhiteSpaceInCharacters',
'anchor_allow_white_space_in_characters_metadata': 'anchorAllowWhiteSpaceInCharactersMetadata',
'anchor_case_sensitive': 'anchorCaseSensitive',
'anchor_case_sensitive_metadata': 'anchorCaseSensitiveMetadata',
'anchor_horizontal_alignment': 'anchorHorizontalAlignment',
'anchor_horizontal_alignment_metadata': 'anchorHorizontalAlignmentMetadata',
'anchor_ignore_if_not_present': 'anchorIgnoreIfNotPresent',
'anchor_ignore_if_not_present_metadata': 'anchorIgnoreIfNotPresentMetadata',
'anchor_match_whole_word': 'anchorMatchWholeWord',
'anchor_match_whole_word_metadata': 'anchorMatchWholeWordMetadata',
'anchor_string': 'anchorString',
'anchor_string_metadata': 'anchorStringMetadata',
'anchor_tab_processor_version': 'anchorTabProcessorVersion',
'anchor_tab_processor_version_metadata': 'anchorTabProcessorVersionMetadata',
'anchor_units': 'anchorUnits',
'anchor_units_metadata': 'anchorUnitsMetadata',
'anchor_x_offset': 'anchorXOffset',
'anchor_x_offset_metadata': 'anchorXOffsetMetadata',
'anchor_y_offset': 'anchorYOffset',
'anchor_y_offset_metadata': 'anchorYOffsetMetadata',
'bold': 'bold',
'bold_metadata': 'boldMetadata',
'conceal_value_on_document': 'concealValueOnDocument',
'conceal_value_on_document_metadata': 'concealValueOnDocumentMetadata',
'conditional_parent_label': 'conditionalParentLabel',
'conditional_parent_label_metadata': 'conditionalParentLabelMetadata',
'conditional_parent_value': 'conditionalParentValue',
'conditional_parent_value_metadata': 'conditionalParentValueMetadata',
'custom_tab_id': 'customTabId',
'custom_tab_id_metadata': 'customTabIdMetadata',
'disable_auto_size': 'disableAutoSize',
'disable_auto_size_metadata': 'disableAutoSizeMetadata',
'document_id': 'documentId',
'document_id_metadata': 'documentIdMetadata',
'error_details': 'errorDetails',
'font': 'font',
'font_color': 'fontColor',
'font_color_metadata': 'fontColorMetadata',
'font_metadata': 'fontMetadata',
'font_size': 'fontSize',
'font_size_metadata': 'fontSizeMetadata',
'form_order': 'formOrder',
'form_order_metadata': 'formOrderMetadata',
'form_page_label': 'formPageLabel',
'form_page_label_metadata': 'formPageLabelMetadata',
'form_page_number': 'formPageNumber',
'form_page_number_metadata': 'formPageNumberMetadata',
'height': 'height',
'height_metadata': 'heightMetadata',
'italic': 'italic',
'italic_metadata': 'italicMetadata',
'locale_policy': 'localePolicy',
'locked': 'locked',
'locked_metadata': 'lockedMetadata',
'max_length': 'maxLength',
'max_length_metadata': 'maxLengthMetadata',
'merge_field': 'mergeField',
'merge_field_xml': 'mergeFieldXml',
'name': 'name',
'name_metadata': 'nameMetadata',
'original_value': 'originalValue',
'original_value_metadata': 'originalValueMetadata',
'page_number': 'pageNumber',
'page_number_metadata': 'pageNumberMetadata',
'recipient_id': 'recipientId',
'recipient_id_guid': 'recipientIdGuid',
'recipient_id_guid_metadata': 'recipientIdGuidMetadata',
'recipient_id_metadata': 'recipientIdMetadata',
'require_all': 'requireAll',
'require_all_metadata': 'requireAllMetadata',
'required': 'required',
'required_metadata': 'requiredMetadata',
'require_initial_on_shared_change': 'requireInitialOnSharedChange',
'require_initial_on_shared_change_metadata': 'requireInitialOnSharedChangeMetadata',
'sender_required': 'senderRequired',
'sender_required_metadata': 'senderRequiredMetadata',
'shared': 'shared',
'shared_metadata': 'sharedMetadata',
'share_to_recipients': 'shareToRecipients',
'share_to_recipients_metadata': 'shareToRecipientsMetadata',
'smart_contract_information': 'smartContractInformation',
'source': 'source',
'status': 'status',
'status_metadata': 'statusMetadata',
'tab_group_labels': 'tabGroupLabels',
'tab_group_labels_metadata': 'tabGroupLabelsMetadata',
'tab_id': 'tabId',
'tab_id_metadata': 'tabIdMetadata',
'tab_label': 'tabLabel',
'tab_label_metadata': 'tabLabelMetadata',
'tab_order': 'tabOrder',
'tab_order_metadata': 'tabOrderMetadata',
'tab_type': 'tabType',
'tab_type_metadata': 'tabTypeMetadata',
'template_locked': 'templateLocked',
'template_locked_metadata': 'templateLockedMetadata',
'template_required': 'templateRequired',
'template_required_metadata': 'templateRequiredMetadata',
'tooltip': 'tooltip',
'tool_tip_metadata': 'toolTipMetadata',
'underline': 'underline',
'underline_metadata': 'underlineMetadata',
'validation_message': 'validationMessage',
'validation_message_metadata': 'validationMessageMetadata',
'validation_pattern': 'validationPattern',
'validation_pattern_metadata': 'validationPatternMetadata',
'value': 'value',
'value_metadata': 'valueMetadata',
'width': 'width',
'width_metadata': 'widthMetadata',
'x_position': 'xPosition',
'x_position_metadata': 'xPositionMetadata',
'y_position': 'yPosition',
'y_position_metadata': 'yPositionMetadata'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""Email - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._anchor_allow_white_space_in_characters = None
self._anchor_allow_white_space_in_characters_metadata = None
self._anchor_case_sensitive = None
self._anchor_case_sensitive_metadata = None
self._anchor_horizontal_alignment = None
self._anchor_horizontal_alignment_metadata = None
self._anchor_ignore_if_not_present = None
self._anchor_ignore_if_not_present_metadata = None
self._anchor_match_whole_word = None
self._anchor_match_whole_word_metadata = None
self._anchor_string = None
self._anchor_string_metadata = None
self._anchor_tab_processor_version = None
self._anchor_tab_processor_version_metadata = None
self._anchor_units = None
self._anchor_units_metadata = None
self._anchor_x_offset = None
self._anchor_x_offset_metadata = None
self._anchor_y_offset = None
self._anchor_y_offset_metadata = None
self._bold = None
self._bold_metadata = None
self._conceal_value_on_document = None
self._conceal_value_on_document_metadata = None
self._conditional_parent_label = None
self._conditional_parent_label_metadata = None
self._conditional_parent_value = None
self._conditional_parent_value_metadata = None
self._custom_tab_id = None
self._custom_tab_id_metadata = None
self._disable_auto_size = None
self._disable_auto_size_metadata = None
self._document_id = None
self._document_id_metadata = None
self._error_details = None
self._font = None
self._font_color = None
self._font_color_metadata = None
self._font_metadata = None
self._font_size = None
self._font_size_metadata = None
self._form_order = None
self._form_order_metadata = None
self._form_page_label = None
self._form_page_label_metadata = None
self._form_page_number = None
self._form_page_number_metadata = None
self._height = None
self._height_metadata = None
self._italic = None
self._italic_metadata = None
self._locale_policy = None
self._locked = None
self._locked_metadata = None
self._max_length = None
self._max_length_metadata = None
self._merge_field = None
self._merge_field_xml = None
self._name = None
self._name_metadata = None
self._original_value = None
self._original_value_metadata = None
self._page_number = None
self._page_number_metadata = None
self._recipient_id = None
self._recipient_id_guid = None
self._recipient_id_guid_metadata = None
self._recipient_id_metadata = None
self._require_all = None
self._require_all_metadata = None
self._required = None
self._required_metadata = None
self._require_initial_on_shared_change = None
self._require_initial_on_shared_change_metadata = None
self._sender_required = None
self._sender_required_metadata = None
self._shared = None
self._shared_metadata = None
self._share_to_recipients = None
self._share_to_recipients_metadata = None
self._smart_contract_information = None
self._source = None
self._status = None
self._status_metadata = None
self._tab_group_labels = None
self._tab_group_labels_metadata = None
self._tab_id = None
self._tab_id_metadata = None
self._tab_label = None
self._tab_label_metadata = None
self._tab_order = None
self._tab_order_metadata = None
self._tab_type = None
self._tab_type_metadata = None
self._template_locked = None
self._template_locked_metadata = None
self._template_required = None
self._template_required_metadata = None
self._tooltip = None
self._tool_tip_metadata = None
self._underline = None
self._underline_metadata = None
self._validation_message = None
self._validation_message_metadata = None
self._validation_pattern = None
self._validation_pattern_metadata = None
self._value = None
self._value_metadata = None
self._width = None
self._width_metadata = None
self._x_position = None
self._x_position_metadata = None
self._y_position = None
self._y_position_metadata = None
self.discriminator = None
setattr(self, "_{}".format('anchor_allow_white_space_in_characters'), kwargs.get('anchor_allow_white_space_in_characters', None))
setattr(self, "_{}".format('anchor_allow_white_space_in_characters_metadata'), kwargs.get('anchor_allow_white_space_in_characters_metadata', None))
setattr(self, "_{}".format('anchor_case_sensitive'), kwargs.get('anchor_case_sensitive', None))
setattr(self, "_{}".format('anchor_case_sensitive_metadata'), kwargs.get('anchor_case_sensitive_metadata', None))
setattr(self, "_{}".format('anchor_horizontal_alignment'), kwargs.get('anchor_horizontal_alignment', None))
setattr(self, "_{}".format('anchor_horizontal_alignment_metadata'), kwargs.get('anchor_horizontal_alignment_metadata', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present'), kwargs.get('anchor_ignore_if_not_present', None))
setattr(self, "_{}".format('anchor_ignore_if_not_present_metadata'), kwargs.get('anchor_ignore_if_not_present_metadata', None))
setattr(self, "_{}".format('anchor_match_whole_word'), kwargs.get('anchor_match_whole_word', None))
setattr(self, "_{}".format('anchor_match_whole_word_metadata'), kwargs.get('anchor_match_whole_word_metadata', None))
setattr(self, "_{}".format('anchor_string'), kwargs.get('anchor_string', None))
setattr(self, "_{}".format('anchor_string_metadata'), kwargs.get('anchor_string_metadata', None))
setattr(self, "_{}".format('anchor_tab_processor_version'), kwargs.get('anchor_tab_processor_version', None))
setattr(self, "_{}".format('anchor_tab_processor_version_metadata'), kwargs.get('anchor_tab_processor_version_metadata', None))
setattr(self, "_{}".format('anchor_units'), kwargs.get('anchor_units', None))
setattr(self, "_{}".format('anchor_units_metadata'), kwargs.get('anchor_units_metadata', None))
setattr(self, "_{}".format('anchor_x_offset'), kwargs.get('anchor_x_offset', None))
setattr(self, "_{}".format('anchor_x_offset_metadata'), kwargs.get('anchor_x_offset_metadata', None))
setattr(self, "_{}".format('anchor_y_offset'), kwargs.get('anchor_y_offset', None))
setattr(self, "_{}".format('anchor_y_offset_metadata'), kwargs.get('anchor_y_offset_metadata', None))
setattr(self, "_{}".format('bold'), kwargs.get('bold', None))
setattr(self, "_{}".format('bold_metadata'), kwargs.get('bold_metadata', None))
setattr(self, "_{}".format('conceal_value_on_document'), kwargs.get('conceal_value_on_document', None))
setattr(self, "_{}".format('conceal_value_on_document_metadata'), kwargs.get('conceal_value_on_document_metadata', None))
setattr(self, "_{}".format('conditional_parent_label'), kwargs.get('conditional_parent_label', None))
setattr(self, "_{}".format('conditional_parent_label_metadata'), kwargs.get('conditional_parent_label_metadata', None))
setattr(self, "_{}".format('conditional_parent_value'), kwargs.get('conditional_parent_value', None))
setattr(self, "_{}".format('conditional_parent_value_metadata'), kwargs.get('conditional_parent_value_metadata', None))
setattr(self, "_{}".format('custom_tab_id'), kwargs.get('custom_tab_id', None))
setattr(self, "_{}".format('custom_tab_id_metadata'), kwargs.get('custom_tab_id_metadata', None))
setattr(self, "_{}".format('disable_auto_size'), kwargs.get('disable_auto_size', None))
setattr(self, "_{}".format('disable_auto_size_metadata'), kwargs.get('disable_auto_size_metadata', None))
setattr(self, "_{}".format('document_id'), kwargs.get('document_id', None))
setattr(self, "_{}".format('document_id_metadata'), kwargs.get('document_id_metadata', None))
setattr(self, "_{}".format('error_details'), kwargs.get('error_details', None))
setattr(self, "_{}".format('font'), kwargs.get('font', None))
setattr(self, "_{}".format('font_color'), kwargs.get('font_color', None))
setattr(self, "_{}".format('font_color_metadata'), kwargs.get('font_color_metadata', None))
setattr(self, "_{}".format('font_metadata'), kwargs.get('font_metadata', None))
setattr(self, "_{}".format('font_size'), kwargs.get('font_size', None))
setattr(self, "_{}".format('font_size_metadata'), kwargs.get('font_size_metadata', None))
setattr(self, "_{}".format('form_order'), kwargs.get('form_order', None))
setattr(self, "_{}".format('form_order_metadata'), kwargs.get('form_order_metadata', None))
setattr(self, "_{}".format('form_page_label'), kwargs.get('form_page_label', None))
setattr(self, "_{}".format('form_page_label_metadata'), kwargs.get('form_page_label_metadata', None))
setattr(self, "_{}".format('form_page_number'), kwargs.get('form_page_number', None))
setattr(self, "_{}".format('form_page_number_metadata'), kwargs.get('form_page_number_metadata', None))
setattr(self, "_{}".format('height'), kwargs.get('height', None))
setattr(self, "_{}".format('height_metadata'), kwargs.get('height_metadata', None))
setattr(self, "_{}".format('italic'), kwargs.get('italic', None))
setattr(self, "_{}".format('italic_metadata'), kwargs.get('italic_metadata', None))
setattr(self, "_{}".format('locale_policy'), kwargs.get('locale_policy', None))
setattr(self, "_{}".format('locked'), kwargs.get('locked', None))
setattr(self, "_{}".format('locked_metadata'), kwargs.get('locked_metadata', None))
setattr(self, "_{}".format('max_length'), kwargs.get('max_length', None))
setattr(self, "_{}".format('max_length_metadata'), kwargs.get('max_length_metadata', None))
setattr(self, "_{}".format('merge_field'), kwargs.get('merge_field', None))
setattr(self, "_{}".format('merge_field_xml'), kwargs.get('merge_field_xml', None))
setattr(self, "_{}".format('name'), kwargs.get('name', None))
setattr(self, "_{}".format('name_metadata'), kwargs.get('name_metadata', None))
setattr(self, "_{}".format('original_value'), kwargs.get('original_value', None))
setattr(self, "_{}".format('original_value_metadata'), kwargs.get('original_value_metadata', None))
setattr(self, "_{}".format('page_number'), kwargs.get('page_number', None))
setattr(self, "_{}".format('page_number_metadata'), kwargs.get('page_number_metadata', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('recipient_id_guid'), kwargs.get('recipient_id_guid', None))
setattr(self, "_{}".format('recipient_id_guid_metadata'), kwargs.get('recipient_id_guid_metadata', None))
setattr(self, "_{}".format('recipient_id_metadata'), kwargs.get('recipient_id_metadata', None))
setattr(self, "_{}".format('require_all'), kwargs.get('require_all', None))
setattr(self, "_{}".format('require_all_metadata'), kwargs.get('require_all_metadata', None))
setattr(self, "_{}".format('required'), kwargs.get('required', None))
setattr(self, "_{}".format('required_metadata'), kwargs.get('required_metadata', None))
setattr(self, "_{}".format('require_initial_on_shared_change'), kwargs.get('require_initial_on_shared_change', None))
setattr(self, "_{}".format('require_initial_on_shared_change_metadata'), kwargs.get('require_initial_on_shared_change_metadata', None))
setattr(self, "_{}".format('sender_required'), kwargs.get('sender_required', None))
setattr(self, "_{}".format('sender_required_metadata'), kwargs.get('sender_required_metadata', None))
setattr(self, "_{}".format('shared'), kwargs.get('shared', None))
setattr(self, "_{}".format('shared_metadata'), kwargs.get('shared_metadata', None))
setattr(self, "_{}".format('share_to_recipients'), kwargs.get('share_to_recipients', None))
setattr(self, "_{}".format('share_to_recipients_metadata'), kwargs.get('share_to_recipients_metadata', None))
setattr(self, "_{}".format('smart_contract_information'), kwargs.get('smart_contract_information', None))
setattr(self, "_{}".format('source'), kwargs.get('source', None))
setattr(self, "_{}".format('status'), kwargs.get('status', None))
setattr(self, "_{}".format('status_metadata'), kwargs.get('status_metadata', None))
setattr(self, "_{}".format('tab_group_labels'), kwargs.get('tab_group_labels', None))
setattr(self, "_{}".format('tab_group_labels_metadata'), kwargs.get('tab_group_labels_metadata', None))
setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
setattr(self, "_{}".format('tab_id_metadata'), kwargs.get('tab_id_metadata', None))
setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None))
setattr(self, "_{}".format('tab_label_metadata'), kwargs.get('tab_label_metadata', None))
setattr(self, "_{}".format('tab_order'), kwargs.get('tab_order', None))
setattr(self, "_{}".format('tab_order_metadata'), kwargs.get('tab_order_metadata', None))
setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None))
setattr(self, "_{}".format('tab_type_metadata'), kwargs.get('tab_type_metadata', None))
setattr(self, "_{}".format('template_locked'), kwargs.get('template_locked', None))
setattr(self, "_{}".format('template_locked_metadata'), kwargs.get('template_locked_metadata', None))
setattr(self, "_{}".format('template_required'), kwargs.get('template_required', None))
setattr(self, "_{}".format('template_required_metadata'), kwargs.get('template_required_metadata', None))
setattr(self, "_{}".format('tooltip'), kwargs.get('tooltip', None))
setattr(self, "_{}".format('tool_tip_metadata'), kwargs.get('tool_tip_metadata', None))
setattr(self, "_{}".format('underline'), kwargs.get('underline', None))
setattr(self, "_{}".format('underline_metadata'), kwargs.get('underline_metadata', None))
setattr(self, "_{}".format('validation_message'), kwargs.get('validation_message', None))
setattr(self, "_{}".format('validation_message_metadata'), kwargs.get('validation_message_metadata', None))
setattr(self, "_{}".format('validation_pattern'), kwargs.get('validation_pattern', None))
setattr(self, "_{}".format('validation_pattern_metadata'), kwargs.get('validation_pattern_metadata', None))
setattr(self, "_{}".format('value'), kwargs.get('value', None))
setattr(self, "_{}".format('value_metadata'), kwargs.get('value_metadata', None))
setattr(self, "_{}".format('width'), kwargs.get('width', None))
setattr(self, "_{}".format('width_metadata'), kwargs.get('width_metadata', None))
setattr(self, "_{}".format('x_position'), kwargs.get('x_position', None))
setattr(self, "_{}".format('x_position_metadata'), kwargs.get('x_position_metadata', None))
setattr(self, "_{}".format('y_position'), kwargs.get('y_position', None))
setattr(self, "_{}".format('y_position_metadata'), kwargs.get('y_position_metadata', None))
@property
def anchor_allow_white_space_in_characters(self):
"""Gets the anchor_allow_white_space_in_characters of this Email. # noqa: E501
# noqa: E501
:return: The anchor_allow_white_space_in_characters of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_allow_white_space_in_characters
@anchor_allow_white_space_in_characters.setter
def anchor_allow_white_space_in_characters(self, anchor_allow_white_space_in_characters):
"""Sets the anchor_allow_white_space_in_characters of this Email.
# noqa: E501
:param anchor_allow_white_space_in_characters: The anchor_allow_white_space_in_characters of this Email. # noqa: E501
:type: str
"""
self._anchor_allow_white_space_in_characters = anchor_allow_white_space_in_characters
@property
def anchor_allow_white_space_in_characters_metadata(self):
"""Gets the anchor_allow_white_space_in_characters_metadata of this Email. # noqa: E501
:return: The anchor_allow_white_space_in_characters_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_allow_white_space_in_characters_metadata
@anchor_allow_white_space_in_characters_metadata.setter
def anchor_allow_white_space_in_characters_metadata(self, anchor_allow_white_space_in_characters_metadata):
"""Sets the anchor_allow_white_space_in_characters_metadata of this Email.
:param anchor_allow_white_space_in_characters_metadata: The anchor_allow_white_space_in_characters_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_allow_white_space_in_characters_metadata = anchor_allow_white_space_in_characters_metadata
@property
def anchor_case_sensitive(self):
"""Gets the anchor_case_sensitive of this Email. # noqa: E501
When set to **true**, the anchor string does not consider case when matching strings in the document. The default value is **true**. # noqa: E501
:return: The anchor_case_sensitive of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_case_sensitive
@anchor_case_sensitive.setter
def anchor_case_sensitive(self, anchor_case_sensitive):
"""Sets the anchor_case_sensitive of this Email.
When set to **true**, the anchor string does not consider case when matching strings in the document. The default value is **true**. # noqa: E501
:param anchor_case_sensitive: The anchor_case_sensitive of this Email. # noqa: E501
:type: str
"""
self._anchor_case_sensitive = anchor_case_sensitive
@property
def anchor_case_sensitive_metadata(self):
"""Gets the anchor_case_sensitive_metadata of this Email. # noqa: E501
:return: The anchor_case_sensitive_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_case_sensitive_metadata
@anchor_case_sensitive_metadata.setter
def anchor_case_sensitive_metadata(self, anchor_case_sensitive_metadata):
"""Sets the anchor_case_sensitive_metadata of this Email.
:param anchor_case_sensitive_metadata: The anchor_case_sensitive_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_case_sensitive_metadata = anchor_case_sensitive_metadata
@property
def anchor_horizontal_alignment(self):
"""Gets the anchor_horizontal_alignment of this Email. # noqa: E501
Specifies the alignment of anchor tabs with anchor strings. Possible values are **left** or **right**. The default value is **left**. # noqa: E501
:return: The anchor_horizontal_alignment of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_horizontal_alignment
@anchor_horizontal_alignment.setter
def anchor_horizontal_alignment(self, anchor_horizontal_alignment):
"""Sets the anchor_horizontal_alignment of this Email.
Specifies the alignment of anchor tabs with anchor strings. Possible values are **left** or **right**. The default value is **left**. # noqa: E501
:param anchor_horizontal_alignment: The anchor_horizontal_alignment of this Email. # noqa: E501
:type: str
"""
self._anchor_horizontal_alignment = anchor_horizontal_alignment
@property
def anchor_horizontal_alignment_metadata(self):
"""Gets the anchor_horizontal_alignment_metadata of this Email. # noqa: E501
:return: The anchor_horizontal_alignment_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_horizontal_alignment_metadata
@anchor_horizontal_alignment_metadata.setter
def anchor_horizontal_alignment_metadata(self, anchor_horizontal_alignment_metadata):
"""Sets the anchor_horizontal_alignment_metadata of this Email.
:param anchor_horizontal_alignment_metadata: The anchor_horizontal_alignment_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_horizontal_alignment_metadata = anchor_horizontal_alignment_metadata
@property
def anchor_ignore_if_not_present(self):
"""Gets the anchor_ignore_if_not_present of this Email. # noqa: E501
When set to **true**, this tab is ignored if anchorString is not found in the document. # noqa: E501
:return: The anchor_ignore_if_not_present of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_ignore_if_not_present
@anchor_ignore_if_not_present.setter
def anchor_ignore_if_not_present(self, anchor_ignore_if_not_present):
"""Sets the anchor_ignore_if_not_present of this Email.
When set to **true**, this tab is ignored if anchorString is not found in the document. # noqa: E501
:param anchor_ignore_if_not_present: The anchor_ignore_if_not_present of this Email. # noqa: E501
:type: str
"""
self._anchor_ignore_if_not_present = anchor_ignore_if_not_present
@property
def anchor_ignore_if_not_present_metadata(self):
"""Gets the anchor_ignore_if_not_present_metadata of this Email. # noqa: E501
:return: The anchor_ignore_if_not_present_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_ignore_if_not_present_metadata
@anchor_ignore_if_not_present_metadata.setter
def anchor_ignore_if_not_present_metadata(self, anchor_ignore_if_not_present_metadata):
"""Sets the anchor_ignore_if_not_present_metadata of this Email.
:param anchor_ignore_if_not_present_metadata: The anchor_ignore_if_not_present_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_ignore_if_not_present_metadata = anchor_ignore_if_not_present_metadata
@property
def anchor_match_whole_word(self):
"""Gets the anchor_match_whole_word of this Email. # noqa: E501
When set to **true**, the anchor string in this tab matches whole words only (strings embedded in other strings are ignored.) The default value is **true**. # noqa: E501
:return: The anchor_match_whole_word of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_match_whole_word
@anchor_match_whole_word.setter
def anchor_match_whole_word(self, anchor_match_whole_word):
"""Sets the anchor_match_whole_word of this Email.
When set to **true**, the anchor string in this tab matches whole words only (strings embedded in other strings are ignored.) The default value is **true**. # noqa: E501
:param anchor_match_whole_word: The anchor_match_whole_word of this Email. # noqa: E501
:type: str
"""
self._anchor_match_whole_word = anchor_match_whole_word
@property
def anchor_match_whole_word_metadata(self):
"""Gets the anchor_match_whole_word_metadata of this Email. # noqa: E501
:return: The anchor_match_whole_word_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_match_whole_word_metadata
@anchor_match_whole_word_metadata.setter
def anchor_match_whole_word_metadata(self, anchor_match_whole_word_metadata):
"""Sets the anchor_match_whole_word_metadata of this Email.
:param anchor_match_whole_word_metadata: The anchor_match_whole_word_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_match_whole_word_metadata = anchor_match_whole_word_metadata
@property
def anchor_string(self):
"""Gets the anchor_string of this Email. # noqa: E501
Anchor text information for a radio button. # noqa: E501
:return: The anchor_string of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_string
@anchor_string.setter
def anchor_string(self, anchor_string):
"""Sets the anchor_string of this Email.
Anchor text information for a radio button. # noqa: E501
:param anchor_string: The anchor_string of this Email. # noqa: E501
:type: str
"""
self._anchor_string = anchor_string
@property
def anchor_string_metadata(self):
"""Gets the anchor_string_metadata of this Email. # noqa: E501
:return: The anchor_string_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_string_metadata
@anchor_string_metadata.setter
def anchor_string_metadata(self, anchor_string_metadata):
"""Sets the anchor_string_metadata of this Email.
:param anchor_string_metadata: The anchor_string_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_string_metadata = anchor_string_metadata
@property
def anchor_tab_processor_version(self):
"""Gets the anchor_tab_processor_version of this Email. # noqa: E501
# noqa: E501
:return: The anchor_tab_processor_version of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_tab_processor_version
@anchor_tab_processor_version.setter
def anchor_tab_processor_version(self, anchor_tab_processor_version):
"""Sets the anchor_tab_processor_version of this Email.
# noqa: E501
:param anchor_tab_processor_version: The anchor_tab_processor_version of this Email. # noqa: E501
:type: str
"""
self._anchor_tab_processor_version = anchor_tab_processor_version
@property
def anchor_tab_processor_version_metadata(self):
"""Gets the anchor_tab_processor_version_metadata of this Email. # noqa: E501
:return: The anchor_tab_processor_version_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_tab_processor_version_metadata
@anchor_tab_processor_version_metadata.setter
def anchor_tab_processor_version_metadata(self, anchor_tab_processor_version_metadata):
"""Sets the anchor_tab_processor_version_metadata of this Email.
:param anchor_tab_processor_version_metadata: The anchor_tab_processor_version_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_tab_processor_version_metadata = anchor_tab_processor_version_metadata
@property
def anchor_units(self):
"""Gets the anchor_units of this Email. # noqa: E501
Specifies units of the X and Y offset. Units could be pixels, millimeters, centimeters, or inches. # noqa: E501
:return: The anchor_units of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_units
@anchor_units.setter
def anchor_units(self, anchor_units):
"""Sets the anchor_units of this Email.
Specifies units of the X and Y offset. Units could be pixels, millimeters, centimeters, or inches. # noqa: E501
:param anchor_units: The anchor_units of this Email. # noqa: E501
:type: str
"""
self._anchor_units = anchor_units
@property
def anchor_units_metadata(self):
"""Gets the anchor_units_metadata of this Email. # noqa: E501
:return: The anchor_units_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_units_metadata
@anchor_units_metadata.setter
def anchor_units_metadata(self, anchor_units_metadata):
"""Sets the anchor_units_metadata of this Email.
:param anchor_units_metadata: The anchor_units_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_units_metadata = anchor_units_metadata
@property
def anchor_x_offset(self):
"""Gets the anchor_x_offset of this Email. # noqa: E501
Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString. # noqa: E501
:return: The anchor_x_offset of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_x_offset
@anchor_x_offset.setter
def anchor_x_offset(self, anchor_x_offset):
"""Sets the anchor_x_offset of this Email.
Specifies the X axis location of the tab, in anchorUnits, relative to the anchorString. # noqa: E501
:param anchor_x_offset: The anchor_x_offset of this Email. # noqa: E501
:type: str
"""
self._anchor_x_offset = anchor_x_offset
@property
def anchor_x_offset_metadata(self):
"""Gets the anchor_x_offset_metadata of this Email. # noqa: E501
:return: The anchor_x_offset_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_x_offset_metadata
@anchor_x_offset_metadata.setter
def anchor_x_offset_metadata(self, anchor_x_offset_metadata):
"""Sets the anchor_x_offset_metadata of this Email.
:param anchor_x_offset_metadata: The anchor_x_offset_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_x_offset_metadata = anchor_x_offset_metadata
@property
def anchor_y_offset(self):
"""Gets the anchor_y_offset of this Email. # noqa: E501
Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString. # noqa: E501
:return: The anchor_y_offset of this Email. # noqa: E501
:rtype: str
"""
return self._anchor_y_offset
@anchor_y_offset.setter
def anchor_y_offset(self, anchor_y_offset):
"""Sets the anchor_y_offset of this Email.
Specifies the Y axis location of the tab, in anchorUnits, relative to the anchorString. # noqa: E501
:param anchor_y_offset: The anchor_y_offset of this Email. # noqa: E501
:type: str
"""
self._anchor_y_offset = anchor_y_offset
@property
def anchor_y_offset_metadata(self):
"""Gets the anchor_y_offset_metadata of this Email. # noqa: E501
:return: The anchor_y_offset_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._anchor_y_offset_metadata
@anchor_y_offset_metadata.setter
def anchor_y_offset_metadata(self, anchor_y_offset_metadata):
"""Sets the anchor_y_offset_metadata of this Email.
:param anchor_y_offset_metadata: The anchor_y_offset_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._anchor_y_offset_metadata = anchor_y_offset_metadata
@property
def bold(self):
"""Gets the bold of this Email. # noqa: E501
When set to **true**, the information in the tab is bold. # noqa: E501
:return: The bold of this Email. # noqa: E501
:rtype: str
"""
return self._bold
@bold.setter
def bold(self, bold):
"""Sets the bold of this Email.
When set to **true**, the information in the tab is bold. # noqa: E501
:param bold: The bold of this Email. # noqa: E501
:type: str
"""
self._bold = bold
@property
def bold_metadata(self):
"""Gets the bold_metadata of this Email. # noqa: E501
:return: The bold_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._bold_metadata
@bold_metadata.setter
def bold_metadata(self, bold_metadata):
"""Sets the bold_metadata of this Email.
:param bold_metadata: The bold_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._bold_metadata = bold_metadata
@property
def conceal_value_on_document(self):
"""Gets the conceal_value_on_document of this Email. # noqa: E501
When set to **true**, the field appears normally while the recipient is adding or modifying the information in the field, but the data is not visible (the characters are hidden by asterisks) to any other signer or the sender. When an envelope is completed the information is available to the sender through the Form Data link in the DocuSign Console. This setting applies only to text boxes and does not affect list boxes, radio buttons, or check boxes. # noqa: E501
:return: The conceal_value_on_document of this Email. # noqa: E501
:rtype: str
"""
return self._conceal_value_on_document
@conceal_value_on_document.setter
def conceal_value_on_document(self, conceal_value_on_document):
"""Sets the conceal_value_on_document of this Email.
When set to **true**, the field appears normally while the recipient is adding or modifying the information in the field, but the data is not visible (the characters are hidden by asterisks) to any other signer or the sender. When an envelope is completed the information is available to the sender through the Form Data link in the DocuSign Console. This setting applies only to text boxes and does not affect list boxes, radio buttons, or check boxes. # noqa: E501
:param conceal_value_on_document: The conceal_value_on_document of this Email. # noqa: E501
:type: str
"""
self._conceal_value_on_document = conceal_value_on_document
@property
def conceal_value_on_document_metadata(self):
"""Gets the conceal_value_on_document_metadata of this Email. # noqa: E501
:return: The conceal_value_on_document_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._conceal_value_on_document_metadata
@conceal_value_on_document_metadata.setter
def conceal_value_on_document_metadata(self, conceal_value_on_document_metadata):
"""Sets the conceal_value_on_document_metadata of this Email.
:param conceal_value_on_document_metadata: The conceal_value_on_document_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._conceal_value_on_document_metadata = conceal_value_on_document_metadata
@property
def conditional_parent_label(self):
"""Gets the conditional_parent_label of this Email. # noqa: E501
For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility. # noqa: E501
:return: The conditional_parent_label of this Email. # noqa: E501
:rtype: str
"""
return self._conditional_parent_label
@conditional_parent_label.setter
def conditional_parent_label(self, conditional_parent_label):
"""Sets the conditional_parent_label of this Email.
For conditional fields this is the TabLabel of the parent tab that controls this tab's visibility. # noqa: E501
:param conditional_parent_label: The conditional_parent_label of this Email. # noqa: E501
:type: str
"""
self._conditional_parent_label = conditional_parent_label
@property
def conditional_parent_label_metadata(self):
"""Gets the conditional_parent_label_metadata of this Email. # noqa: E501
:return: The conditional_parent_label_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._conditional_parent_label_metadata
@conditional_parent_label_metadata.setter
def conditional_parent_label_metadata(self, conditional_parent_label_metadata):
"""Sets the conditional_parent_label_metadata of this Email.
:param conditional_parent_label_metadata: The conditional_parent_label_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._conditional_parent_label_metadata = conditional_parent_label_metadata
@property
def conditional_parent_value(self):
"""Gets the conditional_parent_value of this Email. # noqa: E501
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial use \"on\" as the value to show that the parent tab is active. # noqa: E501
:return: The conditional_parent_value of this Email. # noqa: E501
:rtype: str
"""
return self._conditional_parent_value
@conditional_parent_value.setter
def conditional_parent_value(self, conditional_parent_value):
"""Sets the conditional_parent_value of this Email.
For conditional fields, this is the value of the parent tab that controls the tab's visibility. If the parent tab is a Checkbox, Radio button, Optional Signature, or Optional Initial use \"on\" as the value to show that the parent tab is active. # noqa: E501
:param conditional_parent_value: The conditional_parent_value of this Email. # noqa: E501
:type: str
"""
self._conditional_parent_value = conditional_parent_value
@property
def conditional_parent_value_metadata(self):
"""Gets the conditional_parent_value_metadata of this Email. # noqa: E501
:return: The conditional_parent_value_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._conditional_parent_value_metadata
@conditional_parent_value_metadata.setter
def conditional_parent_value_metadata(self, conditional_parent_value_metadata):
"""Sets the conditional_parent_value_metadata of this Email.
:param conditional_parent_value_metadata: The conditional_parent_value_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._conditional_parent_value_metadata = conditional_parent_value_metadata
@property
def custom_tab_id(self):
"""Gets the custom_tab_id of this Email. # noqa: E501
The DocuSign generated custom tab ID for the custom tab to be applied. This can only be used when adding new tabs for a recipient. When used, the new tab inherits all the custom tab properties. # noqa: E501
:return: The custom_tab_id of this Email. # noqa: E501
:rtype: str
"""
return self._custom_tab_id
@custom_tab_id.setter
def custom_tab_id(self, custom_tab_id):
"""Sets the custom_tab_id of this Email.
The DocuSign generated custom tab ID for the custom tab to be applied. This can only be used when adding new tabs for a recipient. When used, the new tab inherits all the custom tab properties. # noqa: E501
:param custom_tab_id: The custom_tab_id of this Email. # noqa: E501
:type: str
"""
self._custom_tab_id = custom_tab_id
@property
def custom_tab_id_metadata(self):
"""Gets the custom_tab_id_metadata of this Email. # noqa: E501
:return: The custom_tab_id_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._custom_tab_id_metadata
@custom_tab_id_metadata.setter
def custom_tab_id_metadata(self, custom_tab_id_metadata):
"""Sets the custom_tab_id_metadata of this Email.
:param custom_tab_id_metadata: The custom_tab_id_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._custom_tab_id_metadata = custom_tab_id_metadata
@property
def disable_auto_size(self):
"""Gets the disable_auto_size of this Email. # noqa: E501
When set to **true**, disables the auto sizing of single line text boxes in the signing screen when the signer enters data. If disabled, users will only be able to enter as much data as the text box can hold. By default this is false. This property only affects single line text boxes. # noqa: E501
:return: The disable_auto_size of this Email. # noqa: E501
:rtype: str
"""
return self._disable_auto_size
@disable_auto_size.setter
def disable_auto_size(self, disable_auto_size):
"""Sets the disable_auto_size of this Email.
When set to **true**, disables the auto sizing of single line text boxes in the signing screen when the signer enters data. If disabled, users will only be able to enter as much data as the text box can hold. By default this is false. This property only affects single line text boxes. # noqa: E501
:param disable_auto_size: The disable_auto_size of this Email. # noqa: E501
:type: str
"""
self._disable_auto_size = disable_auto_size
@property
def disable_auto_size_metadata(self):
"""Gets the disable_auto_size_metadata of this Email. # noqa: E501
:return: The disable_auto_size_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._disable_auto_size_metadata
@disable_auto_size_metadata.setter
def disable_auto_size_metadata(self, disable_auto_size_metadata):
"""Sets the disable_auto_size_metadata of this Email.
:param disable_auto_size_metadata: The disable_auto_size_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._disable_auto_size_metadata = disable_auto_size_metadata
@property
def document_id(self):
"""Gets the document_id of this Email. # noqa: E501
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute. # noqa: E501
:return: The document_id of this Email. # noqa: E501
:rtype: str
"""
return self._document_id
@document_id.setter
def document_id(self, document_id):
"""Sets the document_id of this Email.
Specifies the document ID number that the tab is placed on. This must refer to an existing Document's ID attribute. # noqa: E501
:param document_id: The document_id of this Email. # noqa: E501
:type: str
"""
self._document_id = document_id
@property
def document_id_metadata(self):
"""Gets the document_id_metadata of this Email. # noqa: E501
:return: The document_id_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._document_id_metadata
@document_id_metadata.setter
def document_id_metadata(self, document_id_metadata):
"""Sets the document_id_metadata of this Email.
:param document_id_metadata: The document_id_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._document_id_metadata = document_id_metadata
@property
def error_details(self):
"""Gets the error_details of this Email. # noqa: E501
:return: The error_details of this Email. # noqa: E501
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this Email.
:param error_details: The error_details of this Email. # noqa: E501
:type: ErrorDetails
"""
self._error_details = error_details
@property
def font(self):
"""Gets the font of this Email. # noqa: E501
The font to be used for the tab value. Supported Fonts: Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default. # noqa: E501
:return: The font of this Email. # noqa: E501
:rtype: str
"""
return self._font
@font.setter
def font(self, font):
"""Sets the font of this Email.
The font to be used for the tab value. Supported Fonts: Arial, ArialNarrow, Calibri, CourierNew, Garamond, Georgia, Helvetica, LucidaConsole, Tahoma, TimesNewRoman, Trebuchet, Verdana, MSGothic, MSMincho, Default. # noqa: E501
:param font: The font of this Email. # noqa: E501
:type: str
"""
self._font = font
@property
def font_color(self):
"""Gets the font_color of this Email. # noqa: E501
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White. # noqa: E501
:return: The font_color of this Email. # noqa: E501
:rtype: str
"""
return self._font_color
@font_color.setter
def font_color(self, font_color):
"""Sets the font_color of this Email.
The font color used for the information in the tab. Possible values are: Black, BrightBlue, BrightRed, DarkGreen, DarkRed, Gold, Green, NavyBlue, Purple, or White. # noqa: E501
:param font_color: The font_color of this Email. # noqa: E501
:type: str
"""
self._font_color = font_color
@property
def font_color_metadata(self):
"""Gets the font_color_metadata of this Email. # noqa: E501
:return: The font_color_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._font_color_metadata
@font_color_metadata.setter
def font_color_metadata(self, font_color_metadata):
"""Sets the font_color_metadata of this Email.
:param font_color_metadata: The font_color_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._font_color_metadata = font_color_metadata
@property
def font_metadata(self):
"""Gets the font_metadata of this Email. # noqa: E501
:return: The font_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._font_metadata
@font_metadata.setter
def font_metadata(self, font_metadata):
"""Sets the font_metadata of this Email.
:param font_metadata: The font_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._font_metadata = font_metadata
@property
def font_size(self):
"""Gets the font_size of this Email. # noqa: E501
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72. # noqa: E501
:return: The font_size of this Email. # noqa: E501
:rtype: str
"""
return self._font_size
@font_size.setter
def font_size(self, font_size):
"""Sets the font_size of this Email.
The font size used for the information in the tab. Possible values are: Size7, Size8, Size9, Size10, Size11, Size12, Size14, Size16, Size18, Size20, Size22, Size24, Size26, Size28, Size36, Size48, or Size72. # noqa: E501
:param font_size: The font_size of this Email. # noqa: E501
:type: str
"""
self._font_size = font_size
@property
def font_size_metadata(self):
"""Gets the font_size_metadata of this Email. # noqa: E501
:return: The font_size_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._font_size_metadata
@font_size_metadata.setter
def font_size_metadata(self, font_size_metadata):
"""Sets the font_size_metadata of this Email.
:param font_size_metadata: The font_size_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._font_size_metadata = font_size_metadata
@property
def form_order(self):
"""Gets the form_order of this Email. # noqa: E501
# noqa: E501
:return: The form_order of this Email. # noqa: E501
:rtype: str
"""
return self._form_order
@form_order.setter
def form_order(self, form_order):
"""Sets the form_order of this Email.
# noqa: E501
:param form_order: The form_order of this Email. # noqa: E501
:type: str
"""
self._form_order = form_order
@property
def form_order_metadata(self):
"""Gets the form_order_metadata of this Email. # noqa: E501
:return: The form_order_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._form_order_metadata
@form_order_metadata.setter
def form_order_metadata(self, form_order_metadata):
"""Sets the form_order_metadata of this Email.
:param form_order_metadata: The form_order_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._form_order_metadata = form_order_metadata
@property
def form_page_label(self):
"""Gets the form_page_label of this Email. # noqa: E501
# noqa: E501
:return: The form_page_label of this Email. # noqa: E501
:rtype: str
"""
return self._form_page_label
@form_page_label.setter
def form_page_label(self, form_page_label):
"""Sets the form_page_label of this Email.
# noqa: E501
:param form_page_label: The form_page_label of this Email. # noqa: E501
:type: str
"""
self._form_page_label = form_page_label
@property
def form_page_label_metadata(self):
"""Gets the form_page_label_metadata of this Email. # noqa: E501
:return: The form_page_label_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._form_page_label_metadata
@form_page_label_metadata.setter
def form_page_label_metadata(self, form_page_label_metadata):
"""Sets the form_page_label_metadata of this Email.
:param form_page_label_metadata: The form_page_label_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._form_page_label_metadata = form_page_label_metadata
@property
def form_page_number(self):
"""Gets the form_page_number of this Email. # noqa: E501
# noqa: E501
:return: The form_page_number of this Email. # noqa: E501
:rtype: str
"""
return self._form_page_number
@form_page_number.setter
def form_page_number(self, form_page_number):
"""Sets the form_page_number of this Email.
# noqa: E501
:param form_page_number: The form_page_number of this Email. # noqa: E501
:type: str
"""
self._form_page_number = form_page_number
@property
def form_page_number_metadata(self):
"""Gets the form_page_number_metadata of this Email. # noqa: E501
:return: The form_page_number_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._form_page_number_metadata
@form_page_number_metadata.setter
def form_page_number_metadata(self, form_page_number_metadata):
"""Sets the form_page_number_metadata of this Email.
:param form_page_number_metadata: The form_page_number_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._form_page_number_metadata = form_page_number_metadata
@property
def height(self):
"""Gets the height of this Email. # noqa: E501
Height of the tab in pixels. # noqa: E501
:return: The height of this Email. # noqa: E501
:rtype: str
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this Email.
Height of the tab in pixels. # noqa: E501
:param height: The height of this Email. # noqa: E501
:type: str
"""
self._height = height
@property
def height_metadata(self):
"""Gets the height_metadata of this Email. # noqa: E501
:return: The height_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._height_metadata
@height_metadata.setter
def height_metadata(self, height_metadata):
"""Sets the height_metadata of this Email.
:param height_metadata: The height_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._height_metadata = height_metadata
@property
def italic(self):
"""Gets the italic of this Email. # noqa: E501
When set to **true**, the information in the tab is italic. # noqa: E501
:return: The italic of this Email. # noqa: E501
:rtype: str
"""
return self._italic
@italic.setter
def italic(self, italic):
"""Sets the italic of this Email.
When set to **true**, the information in the tab is italic. # noqa: E501
:param italic: The italic of this Email. # noqa: E501
:type: str
"""
self._italic = italic
@property
def italic_metadata(self):
"""Gets the italic_metadata of this Email. # noqa: E501
:return: The italic_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._italic_metadata
@italic_metadata.setter
def italic_metadata(self, italic_metadata):
"""Sets the italic_metadata of this Email.
:param italic_metadata: The italic_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._italic_metadata = italic_metadata
@property
def locale_policy(self):
"""Gets the locale_policy of this Email. # noqa: E501
:return: The locale_policy of this Email. # noqa: E501
:rtype: LocalePolicyTab
"""
return self._locale_policy
@locale_policy.setter
def locale_policy(self, locale_policy):
"""Sets the locale_policy of this Email.
:param locale_policy: The locale_policy of this Email. # noqa: E501
:type: LocalePolicyTab
"""
self._locale_policy = locale_policy
@property
def locked(self):
"""Gets the locked of this Email. # noqa: E501
When set to **true**, the signer cannot change the data of the custom tab. # noqa: E501
:return: The locked of this Email. # noqa: E501
:rtype: str
"""
return self._locked
@locked.setter
def locked(self, locked):
"""Sets the locked of this Email.
When set to **true**, the signer cannot change the data of the custom tab. # noqa: E501
:param locked: The locked of this Email. # noqa: E501
:type: str
"""
self._locked = locked
@property
def locked_metadata(self):
"""Gets the locked_metadata of this Email. # noqa: E501
:return: The locked_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._locked_metadata
@locked_metadata.setter
def locked_metadata(self, locked_metadata):
"""Sets the locked_metadata of this Email.
:param locked_metadata: The locked_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._locked_metadata = locked_metadata
@property
def max_length(self):
"""Gets the max_length of this Email. # noqa: E501
An optional value that describes the maximum length of the property when the property is a string. # noqa: E501
:return: The max_length of this Email. # noqa: E501
:rtype: str
"""
return self._max_length
@max_length.setter
def max_length(self, max_length):
"""Sets the max_length of this Email.
An optional value that describes the maximum length of the property when the property is a string. # noqa: E501
:param max_length: The max_length of this Email. # noqa: E501
:type: str
"""
self._max_length = max_length
@property
def max_length_metadata(self):
"""Gets the max_length_metadata of this Email. # noqa: E501
:return: The max_length_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._max_length_metadata
@max_length_metadata.setter
def max_length_metadata(self, max_length_metadata):
"""Sets the max_length_metadata of this Email.
:param max_length_metadata: The max_length_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._max_length_metadata = max_length_metadata
@property
def merge_field(self):
"""Gets the merge_field of this Email. # noqa: E501
:return: The merge_field of this Email. # noqa: E501
:rtype: MergeField
"""
return self._merge_field
@merge_field.setter
def merge_field(self, merge_field):
"""Sets the merge_field of this Email.
:param merge_field: The merge_field of this Email. # noqa: E501
:type: MergeField
"""
self._merge_field = merge_field
@property
def merge_field_xml(self):
"""Gets the merge_field_xml of this Email. # noqa: E501
# noqa: E501
:return: The merge_field_xml of this Email. # noqa: E501
:rtype: str
"""
return self._merge_field_xml
@merge_field_xml.setter
def merge_field_xml(self, merge_field_xml):
"""Sets the merge_field_xml of this Email.
# noqa: E501
:param merge_field_xml: The merge_field_xml of this Email. # noqa: E501
:type: str
"""
self._merge_field_xml = merge_field_xml
@property
def name(self):
"""Gets the name of this Email. # noqa: E501
# noqa: E501
:return: The name of this Email. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Email.
# noqa: E501
:param name: The name of this Email. # noqa: E501
:type: str
"""
self._name = name
@property
def name_metadata(self):
"""Gets the name_metadata of this Email. # noqa: E501
:return: The name_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._name_metadata
@name_metadata.setter
def name_metadata(self, name_metadata):
"""Sets the name_metadata of this Email.
:param name_metadata: The name_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._name_metadata = name_metadata
@property
def original_value(self):
"""Gets the original_value of this Email. # noqa: E501
The initial value of the tab when it was sent to the recipient. # noqa: E501
:return: The original_value of this Email. # noqa: E501
:rtype: str
"""
return self._original_value
@original_value.setter
def original_value(self, original_value):
"""Sets the original_value of this Email.
The initial value of the tab when it was sent to the recipient. # noqa: E501
:param original_value: The original_value of this Email. # noqa: E501
:type: str
"""
self._original_value = original_value
@property
def original_value_metadata(self):
"""Gets the original_value_metadata of this Email. # noqa: E501
:return: The original_value_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._original_value_metadata
@original_value_metadata.setter
def original_value_metadata(self, original_value_metadata):
"""Sets the original_value_metadata of this Email.
:param original_value_metadata: The original_value_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._original_value_metadata = original_value_metadata
@property
def page_number(self):
"""Gets the page_number of this Email. # noqa: E501
Specifies the page number on which the tab is located. # noqa: E501
:return: The page_number of this Email. # noqa: E501
:rtype: str
"""
return self._page_number
@page_number.setter
def page_number(self, page_number):
"""Sets the page_number of this Email.
Specifies the page number on which the tab is located. # noqa: E501
:param page_number: The page_number of this Email. # noqa: E501
:type: str
"""
self._page_number = page_number
@property
def page_number_metadata(self):
"""Gets the page_number_metadata of this Email. # noqa: E501
:return: The page_number_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._page_number_metadata
@page_number_metadata.setter
def page_number_metadata(self, page_number_metadata):
"""Sets the page_number_metadata of this Email.
:param page_number_metadata: The page_number_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._page_number_metadata = page_number_metadata
@property
def recipient_id(self):
"""Gets the recipient_id of this Email. # noqa: E501
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:return: The recipient_id of this Email. # noqa: E501
:rtype: str
"""
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
"""Sets the recipient_id of this Email.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:param recipient_id: The recipient_id of this Email. # noqa: E501
:type: str
"""
self._recipient_id = recipient_id
@property
def recipient_id_guid(self):
"""Gets the recipient_id_guid of this Email. # noqa: E501
# noqa: E501
:return: The recipient_id_guid of this Email. # noqa: E501
:rtype: str
"""
return self._recipient_id_guid
@recipient_id_guid.setter
def recipient_id_guid(self, recipient_id_guid):
"""Sets the recipient_id_guid of this Email.
# noqa: E501
:param recipient_id_guid: The recipient_id_guid of this Email. # noqa: E501
:type: str
"""
self._recipient_id_guid = recipient_id_guid
@property
def recipient_id_guid_metadata(self):
"""Gets the recipient_id_guid_metadata of this Email. # noqa: E501
:return: The recipient_id_guid_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._recipient_id_guid_metadata
@recipient_id_guid_metadata.setter
def recipient_id_guid_metadata(self, recipient_id_guid_metadata):
"""Sets the recipient_id_guid_metadata of this Email.
:param recipient_id_guid_metadata: The recipient_id_guid_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._recipient_id_guid_metadata = recipient_id_guid_metadata
@property
def recipient_id_metadata(self):
"""Gets the recipient_id_metadata of this Email. # noqa: E501
:return: The recipient_id_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._recipient_id_metadata
@recipient_id_metadata.setter
def recipient_id_metadata(self, recipient_id_metadata):
"""Sets the recipient_id_metadata of this Email.
:param recipient_id_metadata: The recipient_id_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._recipient_id_metadata = recipient_id_metadata
@property
def require_all(self):
"""Gets the require_all of this Email. # noqa: E501
When set to **true** and shared is true, information must be entered in this field to complete the envelope. # noqa: E501
:return: The require_all of this Email. # noqa: E501
:rtype: str
"""
return self._require_all
@require_all.setter
def require_all(self, require_all):
"""Sets the require_all of this Email.
When set to **true** and shared is true, information must be entered in this field to complete the envelope. # noqa: E501
:param require_all: The require_all of this Email. # noqa: E501
:type: str
"""
self._require_all = require_all
@property
def require_all_metadata(self):
"""Gets the require_all_metadata of this Email. # noqa: E501
:return: The require_all_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._require_all_metadata
@require_all_metadata.setter
def require_all_metadata(self, require_all_metadata):
"""Sets the require_all_metadata of this Email.
:param require_all_metadata: The require_all_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._require_all_metadata = require_all_metadata
@property
def required(self):
"""Gets the required of this Email. # noqa: E501
When set to **true**, the signer is required to fill out this tab # noqa: E501
:return: The required of this Email. # noqa: E501
:rtype: str
"""
return self._required
@required.setter
def required(self, required):
"""Sets the required of this Email.
When set to **true**, the signer is required to fill out this tab # noqa: E501
:param required: The required of this Email. # noqa: E501
:type: str
"""
self._required = required
@property
def required_metadata(self):
"""Gets the required_metadata of this Email. # noqa: E501
:return: The required_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._required_metadata
@required_metadata.setter
def required_metadata(self, required_metadata):
"""Sets the required_metadata of this Email.
:param required_metadata: The required_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._required_metadata = required_metadata
@property
def require_initial_on_shared_change(self):
"""Gets the require_initial_on_shared_change of this Email. # noqa: E501
Optional element for field markup. When set to **true**, the signer is required to initial when they modify a shared field. # noqa: E501
:return: The require_initial_on_shared_change of this Email. # noqa: E501
:rtype: str
"""
return self._require_initial_on_shared_change
@require_initial_on_shared_change.setter
def require_initial_on_shared_change(self, require_initial_on_shared_change):
"""Sets the require_initial_on_shared_change of this Email.
Optional element for field markup. When set to **true**, the signer is required to initial when they modify a shared field. # noqa: E501
:param require_initial_on_shared_change: The require_initial_on_shared_change of this Email. # noqa: E501
:type: str
"""
self._require_initial_on_shared_change = require_initial_on_shared_change
@property
def require_initial_on_shared_change_metadata(self):
"""Gets the require_initial_on_shared_change_metadata of this Email. # noqa: E501
:return: The require_initial_on_shared_change_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._require_initial_on_shared_change_metadata
@require_initial_on_shared_change_metadata.setter
def require_initial_on_shared_change_metadata(self, require_initial_on_shared_change_metadata):
"""Sets the require_initial_on_shared_change_metadata of this Email.
:param require_initial_on_shared_change_metadata: The require_initial_on_shared_change_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._require_initial_on_shared_change_metadata = require_initial_on_shared_change_metadata
@property
def sender_required(self):
"""Gets the sender_required of this Email. # noqa: E501
When set to **true**, the sender must populate the tab before an envelope can be sent using the template. This value can only be changed by modifying (PUT) the template. Tabs with a `senderRequired` value of true cannot be deleted from an envelope. # noqa: E501
:return: The sender_required of this Email. # noqa: E501
:rtype: str
"""
return self._sender_required
@sender_required.setter
def sender_required(self, sender_required):
"""Sets the sender_required of this Email.
When set to **true**, the sender must populate the tab before an envelope can be sent using the template. This value can only be changed by modifying (PUT) the template. Tabs with a `senderRequired` value of true cannot be deleted from an envelope. # noqa: E501
:param sender_required: The sender_required of this Email. # noqa: E501
:type: str
"""
self._sender_required = sender_required
@property
def sender_required_metadata(self):
"""Gets the sender_required_metadata of this Email. # noqa: E501
:return: The sender_required_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._sender_required_metadata
@sender_required_metadata.setter
def sender_required_metadata(self, sender_required_metadata):
"""Sets the sender_required_metadata of this Email.
:param sender_required_metadata: The sender_required_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._sender_required_metadata = sender_required_metadata
@property
def shared(self):
"""Gets the shared of this Email. # noqa: E501
When set to **true**, this custom tab is shared. # noqa: E501
:return: The shared of this Email. # noqa: E501
:rtype: str
"""
return self._shared
@shared.setter
def shared(self, shared):
"""Sets the shared of this Email.
When set to **true**, this custom tab is shared. # noqa: E501
:param shared: The shared of this Email. # noqa: E501
:type: str
"""
self._shared = shared
@property
def shared_metadata(self):
"""Gets the shared_metadata of this Email. # noqa: E501
:return: The shared_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._shared_metadata
@shared_metadata.setter
def shared_metadata(self, shared_metadata):
"""Sets the shared_metadata of this Email.
:param shared_metadata: The shared_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._shared_metadata = shared_metadata
@property
def share_to_recipients(self):
"""Gets the share_to_recipients of this Email. # noqa: E501
# noqa: E501
:return: The share_to_recipients of this Email. # noqa: E501
:rtype: str
"""
return self._share_to_recipients
@share_to_recipients.setter
def share_to_recipients(self, share_to_recipients):
"""Sets the share_to_recipients of this Email.
# noqa: E501
:param share_to_recipients: The share_to_recipients of this Email. # noqa: E501
:type: str
"""
self._share_to_recipients = share_to_recipients
@property
def share_to_recipients_metadata(self):
"""Gets the share_to_recipients_metadata of this Email. # noqa: E501
:return: The share_to_recipients_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._share_to_recipients_metadata
@share_to_recipients_metadata.setter
def share_to_recipients_metadata(self, share_to_recipients_metadata):
"""Sets the share_to_recipients_metadata of this Email.
:param share_to_recipients_metadata: The share_to_recipients_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._share_to_recipients_metadata = share_to_recipients_metadata
@property
def smart_contract_information(self):
"""Gets the smart_contract_information of this Email. # noqa: E501
:return: The smart_contract_information of this Email. # noqa: E501
:rtype: SmartContractInformation
"""
return self._smart_contract_information
@smart_contract_information.setter
def smart_contract_information(self, smart_contract_information):
"""Sets the smart_contract_information of this Email.
:param smart_contract_information: The smart_contract_information of this Email. # noqa: E501
:type: SmartContractInformation
"""
self._smart_contract_information = smart_contract_information
@property
def source(self):
"""Gets the source of this Email. # noqa: E501
# noqa: E501
:return: The source of this Email. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this Email.
# noqa: E501
:param source: The source of this Email. # noqa: E501
:type: str
"""
self._source = source
@property
def status(self):
"""Gets the status of this Email. # noqa: E501
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:return: The status of this Email. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this Email.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later. # noqa: E501
:param status: The status of this Email. # noqa: E501
:type: str
"""
self._status = status
@property
def status_metadata(self):
"""Gets the status_metadata of this Email. # noqa: E501
:return: The status_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._status_metadata
@status_metadata.setter
def status_metadata(self, status_metadata):
"""Sets the status_metadata of this Email.
:param status_metadata: The status_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._status_metadata = status_metadata
@property
def tab_group_labels(self):
"""Gets the tab_group_labels of this Email. # noqa: E501
# noqa: E501
:return: The tab_group_labels of this Email. # noqa: E501
:rtype: list[str]
"""
return self._tab_group_labels
@tab_group_labels.setter
def tab_group_labels(self, tab_group_labels):
"""Sets the tab_group_labels of this Email.
# noqa: E501
:param tab_group_labels: The tab_group_labels of this Email. # noqa: E501
:type: list[str]
"""
self._tab_group_labels = tab_group_labels
@property
def tab_group_labels_metadata(self):
"""Gets the tab_group_labels_metadata of this Email. # noqa: E501
:return: The tab_group_labels_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_group_labels_metadata
@tab_group_labels_metadata.setter
def tab_group_labels_metadata(self, tab_group_labels_metadata):
"""Sets the tab_group_labels_metadata of this Email.
:param tab_group_labels_metadata: The tab_group_labels_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_group_labels_metadata = tab_group_labels_metadata
@property
def tab_id(self):
"""Gets the tab_id of this Email. # noqa: E501
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call]. # noqa: E501
:return: The tab_id of this Email. # noqa: E501
:rtype: str
"""
return self._tab_id
@tab_id.setter
def tab_id(self, tab_id):
"""Sets the tab_id of this Email.
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call]. # noqa: E501
:param tab_id: The tab_id of this Email. # noqa: E501
:type: str
"""
self._tab_id = tab_id
@property
def tab_id_metadata(self):
"""Gets the tab_id_metadata of this Email. # noqa: E501
:return: The tab_id_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_id_metadata
@tab_id_metadata.setter
def tab_id_metadata(self, tab_id_metadata):
"""Sets the tab_id_metadata of this Email.
:param tab_id_metadata: The tab_id_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_id_metadata = tab_id_metadata
@property
def tab_label(self):
"""Gets the tab_label of this Email. # noqa: E501
The label string associated with the tab. # noqa: E501
:return: The tab_label of this Email. # noqa: E501
:rtype: str
"""
return self._tab_label
@tab_label.setter
def tab_label(self, tab_label):
"""Sets the tab_label of this Email.
The label string associated with the tab. # noqa: E501
:param tab_label: The tab_label of this Email. # noqa: E501
:type: str
"""
self._tab_label = tab_label
@property
def tab_label_metadata(self):
"""Gets the tab_label_metadata of this Email. # noqa: E501
:return: The tab_label_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_label_metadata
@tab_label_metadata.setter
def tab_label_metadata(self, tab_label_metadata):
"""Sets the tab_label_metadata of this Email.
:param tab_label_metadata: The tab_label_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_label_metadata = tab_label_metadata
@property
def tab_order(self):
"""Gets the tab_order of this Email. # noqa: E501
# noqa: E501
:return: The tab_order of this Email. # noqa: E501
:rtype: str
"""
return self._tab_order
@tab_order.setter
def tab_order(self, tab_order):
"""Sets the tab_order of this Email.
# noqa: E501
:param tab_order: The tab_order of this Email. # noqa: E501
:type: str
"""
self._tab_order = tab_order
@property
def tab_order_metadata(self):
"""Gets the tab_order_metadata of this Email. # noqa: E501
:return: The tab_order_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_order_metadata
@tab_order_metadata.setter
def tab_order_metadata(self, tab_order_metadata):
"""Sets the tab_order_metadata of this Email.
:param tab_order_metadata: The tab_order_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_order_metadata = tab_order_metadata
@property
def tab_type(self):
"""Gets the tab_type of this Email. # noqa: E501
# noqa: E501
:return: The tab_type of this Email. # noqa: E501
:rtype: str
"""
return self._tab_type
@tab_type.setter
def tab_type(self, tab_type):
"""Sets the tab_type of this Email.
# noqa: E501
:param tab_type: The tab_type of this Email. # noqa: E501
:type: str
"""
self._tab_type = tab_type
@property
def tab_type_metadata(self):
"""Gets the tab_type_metadata of this Email. # noqa: E501
:return: The tab_type_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tab_type_metadata
@tab_type_metadata.setter
def tab_type_metadata(self, tab_type_metadata):
"""Sets the tab_type_metadata of this Email.
:param tab_type_metadata: The tab_type_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tab_type_metadata = tab_type_metadata
@property
def template_locked(self):
"""Gets the template_locked of this Email. # noqa: E501
When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients. # noqa: E501
:return: The template_locked of this Email. # noqa: E501
:rtype: str
"""
return self._template_locked
@template_locked.setter
def template_locked(self, template_locked):
"""Sets the template_locked of this Email.
When set to **true**, the sender cannot change any attributes of the recipient. Used only when working with template recipients. # noqa: E501
:param template_locked: The template_locked of this Email. # noqa: E501
:type: str
"""
self._template_locked = template_locked
@property
def template_locked_metadata(self):
"""Gets the template_locked_metadata of this Email. # noqa: E501
:return: The template_locked_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._template_locked_metadata
@template_locked_metadata.setter
def template_locked_metadata(self, template_locked_metadata):
"""Sets the template_locked_metadata of this Email.
:param template_locked_metadata: The template_locked_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._template_locked_metadata = template_locked_metadata
@property
def template_required(self):
"""Gets the template_required of this Email. # noqa: E501
When set to **true**, the sender may not remove the recipient. Used only when working with template recipients. # noqa: E501
:return: The template_required of this Email. # noqa: E501
:rtype: str
"""
return self._template_required
@template_required.setter
def template_required(self, template_required):
"""Sets the template_required of this Email.
When set to **true**, the sender may not remove the recipient. Used only when working with template recipients. # noqa: E501
:param template_required: The template_required of this Email. # noqa: E501
:type: str
"""
self._template_required = template_required
@property
def template_required_metadata(self):
"""Gets the template_required_metadata of this Email. # noqa: E501
:return: The template_required_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._template_required_metadata
@template_required_metadata.setter
def template_required_metadata(self, template_required_metadata):
"""Sets the template_required_metadata of this Email.
:param template_required_metadata: The template_required_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._template_required_metadata = template_required_metadata
@property
def tooltip(self):
"""Gets the tooltip of this Email. # noqa: E501
# noqa: E501
:return: The tooltip of this Email. # noqa: E501
:rtype: str
"""
return self._tooltip
@tooltip.setter
def tooltip(self, tooltip):
"""Sets the tooltip of this Email.
# noqa: E501
:param tooltip: The tooltip of this Email. # noqa: E501
:type: str
"""
self._tooltip = tooltip
@property
def tool_tip_metadata(self):
"""Gets the tool_tip_metadata of this Email. # noqa: E501
:return: The tool_tip_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._tool_tip_metadata
@tool_tip_metadata.setter
def tool_tip_metadata(self, tool_tip_metadata):
"""Sets the tool_tip_metadata of this Email.
:param tool_tip_metadata: The tool_tip_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._tool_tip_metadata = tool_tip_metadata
@property
def underline(self):
"""Gets the underline of this Email. # noqa: E501
When set to **true**, the information in the tab is underlined. # noqa: E501
:return: The underline of this Email. # noqa: E501
:rtype: str
"""
return self._underline
@underline.setter
def underline(self, underline):
"""Sets the underline of this Email.
When set to **true**, the information in the tab is underlined. # noqa: E501
:param underline: The underline of this Email. # noqa: E501
:type: str
"""
self._underline = underline
@property
def underline_metadata(self):
"""Gets the underline_metadata of this Email. # noqa: E501
:return: The underline_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._underline_metadata
@underline_metadata.setter
def underline_metadata(self, underline_metadata):
"""Sets the underline_metadata of this Email.
:param underline_metadata: The underline_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._underline_metadata = underline_metadata
@property
def validation_message(self):
"""Gets the validation_message of this Email. # noqa: E501
The message displayed if the custom tab fails input validation (either custom or embedded). # noqa: E501
:return: The validation_message of this Email. # noqa: E501
:rtype: str
"""
return self._validation_message
@validation_message.setter
def validation_message(self, validation_message):
"""Sets the validation_message of this Email.
The message displayed if the custom tab fails input validation (either custom or embedded). # noqa: E501
:param validation_message: The validation_message of this Email. # noqa: E501
:type: str
"""
self._validation_message = validation_message
@property
def validation_message_metadata(self):
"""Gets the validation_message_metadata of this Email. # noqa: E501
:return: The validation_message_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._validation_message_metadata
@validation_message_metadata.setter
def validation_message_metadata(self, validation_message_metadata):
"""Sets the validation_message_metadata of this Email.
:param validation_message_metadata: The validation_message_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._validation_message_metadata = validation_message_metadata
@property
def validation_pattern(self):
"""Gets the validation_pattern of this Email. # noqa: E501
A regular expression used to validate input for the tab. # noqa: E501
:return: The validation_pattern of this Email. # noqa: E501
:rtype: str
"""
return self._validation_pattern
@validation_pattern.setter
def validation_pattern(self, validation_pattern):
"""Sets the validation_pattern of this Email.
A regular expression used to validate input for the tab. # noqa: E501
:param validation_pattern: The validation_pattern of this Email. # noqa: E501
:type: str
"""
self._validation_pattern = validation_pattern
@property
def validation_pattern_metadata(self):
"""Gets the validation_pattern_metadata of this Email. # noqa: E501
:return: The validation_pattern_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._validation_pattern_metadata
@validation_pattern_metadata.setter
def validation_pattern_metadata(self, validation_pattern_metadata):
"""Sets the validation_pattern_metadata of this Email.
:param validation_pattern_metadata: The validation_pattern_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._validation_pattern_metadata = validation_pattern_metadata
@property
def value(self):
"""Gets the value of this Email. # noqa: E501
Specifies the value of the tab. # noqa: E501
:return: The value of this Email. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this Email.
Specifies the value of the tab. # noqa: E501
:param value: The value of this Email. # noqa: E501
:type: str
"""
self._value = value
@property
def value_metadata(self):
"""Gets the value_metadata of this Email. # noqa: E501
:return: The value_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._value_metadata
@value_metadata.setter
def value_metadata(self, value_metadata):
"""Sets the value_metadata of this Email.
:param value_metadata: The value_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._value_metadata = value_metadata
@property
def width(self):
"""Gets the width of this Email. # noqa: E501
Width of the tab in pixels. # noqa: E501
:return: The width of this Email. # noqa: E501
:rtype: str
"""
return self._width
@width.setter
def width(self, width):
"""Sets the width of this Email.
Width of the tab in pixels. # noqa: E501
:param width: The width of this Email. # noqa: E501
:type: str
"""
self._width = width
@property
def width_metadata(self):
"""Gets the width_metadata of this Email. # noqa: E501
:return: The width_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._width_metadata
@width_metadata.setter
def width_metadata(self, width_metadata):
"""Sets the width_metadata of this Email.
:param width_metadata: The width_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._width_metadata = width_metadata
@property
def x_position(self):
"""Gets the x_position of this Email. # noqa: E501
This indicates the horizontal offset of the object on the page. DocuSign uses 72 DPI when determining position. # noqa: E501
:return: The x_position of this Email. # noqa: E501
:rtype: str
"""
return self._x_position
@x_position.setter
def x_position(self, x_position):
"""Sets the x_position of this Email.
This indicates the horizontal offset of the object on the page. DocuSign uses 72 DPI when determining position. # noqa: E501
:param x_position: The x_position of this Email. # noqa: E501
:type: str
"""
self._x_position = x_position
@property
def x_position_metadata(self):
"""Gets the x_position_metadata of this Email. # noqa: E501
:return: The x_position_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._x_position_metadata
@x_position_metadata.setter
def x_position_metadata(self, x_position_metadata):
"""Sets the x_position_metadata of this Email.
:param x_position_metadata: The x_position_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._x_position_metadata = x_position_metadata
@property
def y_position(self):
"""Gets the y_position of this Email. # noqa: E501
This indicates the vertical offset of the object on the page. DocuSign uses 72 DPI when determining position. # noqa: E501
:return: The y_position of this Email. # noqa: E501
:rtype: str
"""
return self._y_position
@y_position.setter
def y_position(self, y_position):
"""Sets the y_position of this Email.
This indicates the vertical offset of the object on the page. DocuSign uses 72 DPI when determining position. # noqa: E501
:param y_position: The y_position of this Email. # noqa: E501
:type: str
"""
self._y_position = y_position
@property
def y_position_metadata(self):
"""Gets the y_position_metadata of this Email. # noqa: E501
:return: The y_position_metadata of this Email. # noqa: E501
:rtype: PropertyMetadata
"""
return self._y_position_metadata
@y_position_metadata.setter
def y_position_metadata(self, y_position_metadata):
"""Sets the y_position_metadata of this Email.
:param y_position_metadata: The y_position_metadata of this Email. # noqa: E501
:type: PropertyMetadata
"""
self._y_position_metadata = y_position_metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Email, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Email):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Email):
return True
return self.to_dict() != other.to_dict()
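
if __name__ == "__main__":
    # Minimal usage sketch added for illustration; it is not part of the
    # generated SDK file. The property names come from the definitions above,
    # but the no-argument constructor (which swagger-codegen models normally
    # provide, initialising every field to None) and the overall workflow are
    # assumptions, not official DocuSign sample code.
    email_tab = Email()
    email_tab.document_id = "1"        # place the tab on document 1
    email_tab.page_number = "1"
    email_tab.recipient_id = "1"       # recipient who fills in the tab
    email_tab.x_position = "100"       # offsets are interpreted at 72 DPI
    email_tab.y_position = "150"
    email_tab.required = "true"
    email_tab.tab_label = "Contact email"
    # The generated helpers above serialize the model for an API payload.
    print(email_tab.to_str())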
the-stack_0_17431
from fastapi import APIRouter, Body
from fastapi.encoders import jsonable_encoder
from ..crud.student import (
add_student,
delete_student,
retrieve_student,
retrieve_students,
update_student,
)
from ..models.student import (
ErrorResponseModel,
ResponseModel,
StudentSchema,
UpdateStudentModel,
)
router = APIRouter()
@router.post("/", response_description="Student data added into the database")
async def add_student_data(student: StudentSchema = Body(...)):
student = jsonable_encoder(student)
new_student = await add_student(student)
return ResponseModel(new_student, "Student added successfully.")
@router.get("/", response_description="Students retrieved")
async def get_students():
students = await retrieve_students()
if students:
return ResponseModel(students, "Students data retrieved successfully")
return ResponseModel(students, "Empty list returned")
@router.get("/{id}", response_description="Student data retrieved")
async def get_student_data(id):
student = await retrieve_student(id)
if student:
return ResponseModel(student, "Student data retrieved successfully")
return ErrorResponseModel("An error occurred.", 404, "Student doesn't exist.")
@router.put("/{id}")
async def update_student_data(id: str, req: UpdateStudentModel = Body(...)):
req = {k: v for k, v in req.dict().items() if v is not None}
updated_student = await update_student(id, req)
if updated_student:
return ResponseModel(
"Student with ID: {} name update is successful".format(id),
"Student name updated successfully",
)
return ErrorResponseModel(
"An error occurred",
404,
"There was an error updating the student data.",
)
@router.delete("/{id}", response_description="Student data deleted from the database")
async def delete_student_data(id: str):
deleted_student = await delete_student(id)
if deleted_student:
return ResponseModel(
"Student with ID: {} removed".format(id), "Student deleted successfully"
)
return ErrorResponseModel(
"An error occurred", 404, "Student with id {0} doesn't exist".format(id)
)
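# --- Usage sketch (not part of the original module) ---
# The router above is normally mounted on the application elsewhere in the
# project; the import path, prefix and tag below are illustrative assumptions,
# not values taken from this codebase.
#
#   from fastapi import FastAPI
#   from .routes.student import router as StudentRouter
#
#   app = FastAPI()
#   app.include_router(StudentRouter, tags=["Student"], prefix="/student")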
|
the-stack_0_17436 | import torch
import torch.nn as nn
import dgl
import dgl.function as fn
from dgl.nn.pytorch import GraphConv
class GCNZinc(nn.Module):
def __init__(self,
g,
num_feats,
num_layers,
num_hidden,
num_atom_type,
num_bond_type):
super(GCNZinc, self).__init__()
self.g = g
self.num_atom_type = num_atom_type
self.num_bond_type = num_bond_type
self.gcn_layers = nn.ModuleList()
self.BNs = nn.ModuleList()
# atom_type embedding
self.embed = nn.Embedding(num_atom_type, num_feats)
self.gcn_layers.append(GraphConv(num_feats, num_hidden))
self.BNs.append(nn.BatchNorm1d(num_hidden))
for i in range(num_layers):
self.gcn_layers.append(GraphConv(num_hidden, num_hidden))
self.BNs.append(nn.BatchNorm1d(num_hidden))
self.regressor1 = nn.Linear(num_hidden, num_hidden//2)
self.regressor2 = nn.Linear(num_hidden//2, 1)
    def forward(self, x, e, snorm_n, snorm_e):
        # embed integer atom types into dense node features
        h = self.embed(x)
        for layer, bn in zip(self.gcn_layers, self.BNs):
            h = layer(self.g, h)
            h = h * snorm_n  # graph-size normalization of node features
            h = bn(h)
            h = torch.tanh(h)
        # graph-level readout: mean over node representations, then a 2-layer regressor
        self.g.ndata['h'] = h
        h = dgl.mean_nodes(self.g, 'h')
        h = torch.relu(h)
        h = self.regressor1(h)
        h = torch.relu(h)
        logits = self.regressor2(h)
return logits |
the-stack_0_17437 | """Poll related commands."""
from telegram.ext import run_async
from pollbot.i18n import i18n
from pollbot.helper.session import message_wrapper
from pollbot.display.creation import get_init_text
from pollbot.display.misc import get_poll_list
from pollbot.telegram.keyboard import (
get_cancel_creation_keyboard,
get_init_keyboard,
)
from pollbot.models import Poll
@run_async
@message_wrapper(private=True)
def create_poll(bot, update, session, user):
"""Create a new poll."""
# The previous unfinished poll will be removed
user.started = True
if user.current_poll is not None and not user.current_poll.created:
update.message.chat.send_message(
i18n.t("creation.already_creating", locale=user.locale),
reply_markup=get_cancel_creation_keyboard(user.current_poll),
)
return
poll = Poll.create(user, session)
text = get_init_text(poll)
keyboard = get_init_keyboard(poll)
update.message.chat.send_message(
text,
parse_mode="markdown",
reply_markup=keyboard,
disable_web_page_preview=True,
)
@run_async
@message_wrapper(private=True)
def list_polls(bot, update, session, user):
"""Get a list of all active polls."""
text, keyboard = get_poll_list(session, user)
update.message.chat.send_message(text, reply_markup=keyboard)
@run_async
@message_wrapper(private=True)
def list_closed_polls(bot, update, session, user):
"""Get a list of all closed polls."""
text, keyboard = get_poll_list(session, user, closed=True)
update.message.chat.send_message(text, reply_markup=keyboard)
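# --- Wiring sketch (not part of the original module) ---
# These handlers are registered elsewhere in pollbot; a minimal illustration with
# python-telegram-bot's CommandHandler (the command names and the `dispatcher`
# object are assumptions here):
#
#   from telegram.ext import CommandHandler
#
#   dispatcher.add_handler(CommandHandler("create", create_poll))
#   dispatcher.add_handler(CommandHandler("list", list_polls))
#   dispatcher.add_handler(CommandHandler("list_closed", list_closed_polls))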
|
the-stack_0_17440 | # Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for Bio.File module."""
import os.path
import shutil
import tempfile
import unittest
from io import StringIO
from Bio import bgzf
from Bio import File
class RandomAccess(unittest.TestCase):
"""Random access tests."""
def test_plain(self):
"""Test plain text file."""
with File._open_for_random_access("Quality/example.fastq") as handle:
self.assertIn("r", handle.mode)
self.assertIn("b", handle.mode)
def test_bgzf(self):
"""Test BGZF compressed file."""
with File._open_for_random_access("Quality/example.fastq.bgz") as handle:
self.assertIsInstance(handle, bgzf.BgzfReader)
def test_gzip(self):
"""Test gzip compressed file."""
self.assertRaises(
ValueError, File._open_for_random_access, "Quality/example.fastq.gz"
)
class AsHandleTestCase(unittest.TestCase):
"""Tests for as_handle function."""
def setUp(self):
"""Initialise temporary directory."""
# Create a directory to work in
self.temp_dir = tempfile.mkdtemp(prefix="biopython-test")
def tearDown(self):
"""Remove temporary directory."""
shutil.rmtree(self.temp_dir)
def _path(self, *args):
return os.path.join(self.temp_dir, *args)
def test_handle(self):
"""Test as_handle with a file-like object argument."""
p = self._path("test_file.fasta")
with open(p, "wb") as fp:
with File.as_handle(fp) as handle:
self.assertEqual(
fp,
handle,
"as_handle should "
"return argument when given a "
"file-like object",
)
self.assertFalse(handle.closed)
self.assertFalse(
handle.closed,
"Exiting as_handle given a file-like object "
"should not close the file",
)
def test_string_path(self):
"""Test as_handle with a string path argument."""
p = self._path("test_file.fasta")
mode = "wb"
with File.as_handle(p, mode=mode) as handle:
self.assertEqual(p, handle.name)
self.assertEqual(mode, handle.mode)
self.assertFalse(handle.closed)
self.assertTrue(handle.closed)
def test_path_object(self):
"""Test as_handle with a pathlib.Path object."""
from pathlib import Path
p = Path(self._path("test_file.fasta"))
mode = "wb"
with File.as_handle(p, mode=mode) as handle:
self.assertEqual(str(p.absolute()), handle.name)
self.assertEqual(mode, handle.mode)
self.assertFalse(handle.closed)
self.assertTrue(handle.closed)
def test_custom_path_like_object(self):
"""Test as_handle with a custom path-like object."""
class CustomPathLike:
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
p = CustomPathLike(self._path("test_file.fasta"))
mode = "wb"
with File.as_handle(p, mode=mode) as handle:
self.assertEqual(p.path, handle.name)
self.assertEqual(mode, handle.mode)
self.assertFalse(handle.closed)
self.assertTrue(handle.closed)
def test_stringio(self):
"""Testing passing StringIO handles."""
s = StringIO()
with File.as_handle(s) as handle:
self.assertIs(s, handle)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
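# Note (not part of the original test module): these tests assume the working
# directory is Biopython's Tests/ folder so that relative paths such as
# "Quality/example.fastq" resolve, e.g.
#   python test_File.py
# run from that directory (the exact script name here is an assumption).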
|
the-stack_0_17441 | """Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
from uniborg.util import admin_cmd
@borg.on(admin_cmd("emoji (.*)"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 16)
input_str = event.pattern_match.group(1)
if input_str == "shrug":
await event.edit("¯\_(ツ)_/¯")
elif input_str == "apple":
await event.edit("\uF8FF")
elif input_str == ":/":
await event.edit(input_str)
animation_chars = [
":\\",
":/"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 2])
elif input_str == "-_-":
await event.edit(input_str)
animation_chars = [
"-__-",
"-_-"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 2])
|
the-stack_0_17443 | import argparse
import pandas as pd
import numpy as np
import param
import os
def preprocess_sam(r1_sam, r2_sam):
"""
preprocess sam files
"""
#if not os.path.isfile(r1_sam) or not os.path.isfile(r2_sam):
# print("file doesn't exist")
# exit(0)
dir_name = os.path.dirname(r1_sam)
r1_basename = os.path.basename(r1_sam)
r2_basename = os.path.basename(r2_sam)
sorted_r1 = os.path.join(dir_name, r1_basename.replace(".sam", "_sorted.sam"))
sort_r1 = param.SAMTOOLS + "sort -n -o " + sorted_r1 + " " + r1_sam
sorted_r2 = os.path.join(dir_name, r2_basename.replace(".sam","_sorted.sam"))
sort_r2 = param.SAMTOOLS + "sort -n -o " + sorted_r2 + " " + r2_sam
# remove headers
r1 = os.path.join(dir_name, r1_basename.replace(".sam", "_noh.sam"))
r2 = os.path.join(dir_name, r2_basename.replace(".sam", "_noh.sam"))
os.system(sort_r1)
os.system(sort_r2)
#os.system("rm "+r1_sam)
#os.system("rm "+r2_sam)
os.system("grep -v \"^@\" "+sorted_r1+" > "+r1)
os.system("grep -v \"^@\" "+sorted_r2+" > "+r2)
r1_csv = os.path.join(dir_name, r1.replace(".sam", ".csv"))
r2_csv = os.path.join(dir_name, r2.replace(".sam", ".csv"))
os.system("cut -f 1-5 "+r1+" > "+ r1_csv)
os.system("cut -f 1-5 "+r2+" > "+ r2_csv)
os.system("rm "+r1)
os.system("rm "+r2)
return r1_csv, r2_csv
def read_count_hap(r1_csv, r2_csv, DB_genes):
    """
    count read pairs per (dntag, uptag) gene pair and return the matrix diagonal
    """
    empty_matrix = pd.DataFrame(0, index = DB_genes, columns = DB_genes)
    f1 = open(r1_csv, "r")  # text mode: readline() must return str for the "" checks and split("\t") below
    f2 = open(r2_csv, "r")
i = True
lines = 0
pairs = {}
fail = 0
count = 0
while 1:
r1_line = f1.readline() # uptag
r2_line = f2.readline() # dntag
if r1_line == "" or r2_line == "":
i = False
print("End of file")
break
r1_line = r1_line.strip().split("\t")
r2_line = r2_line.strip().split("\t")
if r1_line[0] != r2_line[0]:
i = False
print("# READ ID DOES NOT MATCH #")
break
if int(r1_line[4]) < param.cut_off or int(r2_line[4]) < param.cut_off: # check quality
fail += 1
continue
if r1_line[2] == "*" or r2_line[2] =="*":
fail +=1
continue
r1_name = r1_line[2].split(";")
r2_name = r2_line[2].split(";")
if r1_name[-1] != r2_name[-1]:
count+=1
pairs[(r2_name[1], r1_name[1])] = pairs.get((r2_name[1], r1_name[1]), 0) + 1
matrix = (pd.Series(pairs)
.unstack(fill_value=0)
.T
.reindex(index=empty_matrix.index, columns=empty_matrix.columns, fill_value=0))
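    # The Series/unstack call above pivots the {(gene, gene): count} dict into a
    # square matrix aligned to DB_genes; a tiny illustration with hypothetical genes:
    #   pd.Series({("g1", "g2"): 3, ("g2", "g2"): 5}).unstack(fill_value=0)
    # gives a frame with rows ["g1", "g2"], column ["g2"] and values 3 and 5,
    # which is then transposed and reindexed to the full gene list (missing pairs -> 0).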
f1.close()
f2.close()
diag = pd.Series(np.diag(matrix), index=[matrix.index, matrix.columns])
print(diag)
return diag
def read_DB(hDB):
"""
get a list of db gene from hDB summary
"""
summary = pd.read_table(hDB, sep="\t")
DB_genes = summary.Locus.tolist()
return DB_genes
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='read count from sam files')
parser.add_argument("--r1sam", help="sam file for read one")
parser.add_argument("--r2sam", help="sam file for read two")
parser.add_argument("--mode", help="human or yeast")
parser.add_argument("--r1csv", help="csv file for read one")
parser.add_argument("--r2csv", help="csv file for read two")
parser.add_argument("--prefix", help= "output prefix")
args = parser.parse_args()
prefix = args.prefix
r1_sam = args.r1sam
r2_sam = args.r2sam
if r1_sam:
r1_csv, r2_csv = preprocess_sam(r1_sam, r2_sam)
DB_genes = read_DB(param.hDB_summary)
diag = read_count_hap(r1_csv, r2_csv, DB_genes)
else:
r1_csv = args.r1csv
r2_csv = args.r2csv
DB_genes = read_DB(param.hDB_summary)
diag = read_count_hap(r1_csv, r2_csv, DB_genes)
diag.to_csv(prefix+"_matrix.csv")
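    # Example invocation (script and file names are hypothetical, not from this project):
    #   python read_counts.py --r1sam sample_R1.sam --r2sam sample_R2.sam --prefix sample
    # or, when the alignments were already reduced to CSV:
    #   python read_counts.py --r1csv sample_R1.csv --r2csv sample_R2.csv --prefix sample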
|
the-stack_0_17444 | from collections.abc import (
    Mapping,
)
import json
import os
import warnings
from cytoolz import (
dissoc,
)
from eth_account._utils.keyfile import (
create_keyfile_json,
decode_keyfile_json,
)
from eth_keys import (
KeyAPI,
keys,
)
from eth_keys.exceptions import (
ValidationError,
)
from eth_utils.curried import (
combomethod,
hexstr_if_str,
is_dict,
keccak,
text_if_str,
to_bytes,
to_int,
)
from hexbytes import (
HexBytes,
)
from eth_account._utils.structured_data.signing import (
hash_of_signed_transaction,
sign_message_hash,
sign_transaction_dict,
to_standard_signature_bytes,
to_standard_v,
)
from client.transactions import (
Transaction,
vrs_from,
)
from eth_account.datastructures import (
AttributeDict,
)
from eth_account.messages import (
SignableMessage,
_hash_eip191_message,
)
from eth_account.signers.local import (
LocalAccount,
)
class Account(object):
"""
The primary entry point for working with Ethereum private keys.
It does **not** require a connection to an Ethereum node.
"""
_keys = keys
_default_kdf = os.getenv('ETH_ACCOUNT_KDF', 'scrypt')
@combomethod
def create(self, extra_entropy=''):
r"""
Creates a new private key, and returns it as a :class:`~eth_account.local.LocalAccount`.
:param extra_entropy: Add extra randomness to whatever randomness your OS can provide
:type extra_entropy: str or bytes or int
:returns: an object with private key and convenience methods
.. code-block:: python
>>> from eth_account import Account
>>> acct = Account.create('KEYSMASH FJAFJKLDSKF7JKFDJ 1530')
>>> acct.address
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> acct.key
b"\xb2\}\xb3\x1f\xee\xd9\x12''\xbf\t9\xdcv\x9a\x96VK-\xe4\xc4rm\x03[6\xec\xf1\xe5\xb3d"
# These methods are also available: sign_message(), sign_transaction(), encrypt()
# They correspond to the same-named methods in Account.*
# but without the private key argument
"""
extra_key_bytes = text_if_str(to_bytes, extra_entropy)
key_bytes = keccak(os.urandom(32) + extra_key_bytes)
return self.from_key(key_bytes)
@staticmethod
def decrypt(keyfile_json, password):
"""
Decrypts a private key that was encrypted using an Ethereum client or
:meth:`~Account.encrypt`.
:param keyfile_json: The encrypted key
:type keyfile_json: dict or str
:param str password: The password that was used to encrypt the key
:returns: the raw private key
:rtype: ~hexbytes.main.HexBytes
.. code-block:: python
>>> encrypted = {
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {'cipher': 'aes-128-ctr',
'cipherparams': {'iv': '78f214584844e0b241b433d7c3bb8d5f'},
'ciphertext': 'd6dbb56e4f54ba6db2e8dc14df17cb7352fdce03681dd3f90ce4b6c1d5af2c4f',
'kdf': 'pbkdf2',
'kdfparams': {'c': 1000000,
'dklen': 32,
'prf': 'hmac-sha256',
'salt': '45cf943b4de2c05c2c440ef96af914a2'},
'mac': 'f5e1af09df5ded25c96fcf075ada313fb6f79735a914adc8cb02e8ddee7813c3'},
'id': 'b812f3f9-78cc-462a-9e89-74418aa27cb0',
'version': 3}
>>> import getpass
>>> Account.decrypt(encrypted, getpass.getpass())
HexBytes('0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364')
"""
if isinstance(keyfile_json, str):
keyfile = json.loads(keyfile_json)
elif is_dict(keyfile_json):
keyfile = keyfile_json
else:
raise TypeError("The keyfile should be supplied as a JSON string, or a dictionary.")
password_bytes = text_if_str(to_bytes, password)
return HexBytes(decode_keyfile_json(keyfile, password_bytes))
@classmethod
def encrypt(cls, private_key, password, kdf=None, iterations=None):
"""
Creates a dictionary with an encrypted version of your private key.
To import this keyfile into Ethereum clients like geth and parity:
encode this dictionary with :func:`json.dumps` and save it to disk where your
client keeps key files.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:param str password: The password which you will need to unlock the account in your client
:param str kdf: The key derivation function to use when encrypting your private key
:param int iterations: The work factor for the key derivation function
:returns: The data to use in your encrypted file
:rtype: dict
If kdf is not set, the default key derivation function falls back to the
environment variable :envvar:`ETH_ACCOUNT_KDF`. If that is not set, then
'scrypt' will be used as the default.
.. code-block:: python
>>> import getpass
>>> encrypted = Account.encrypt(
0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364,
getpass.getpass()
)
{
'address': '5ce9454909639d2d17a3f753ce7d93fa0b9ab12e',
'crypto': {
'cipher': 'aes-128-ctr',
'cipherparams': {
'iv': '0b7845a5c3597d3d378bde9b7c7319b7'
},
'ciphertext': 'a494f1feb3c854e99c1ff01e6aaa17d43c0752009073503b908457dc8de5d2a5', # noqa: E501
'kdf': 'scrypt',
'kdfparams': {
'dklen': 32,
'n': 262144,
'p': 8,
'r': 1,
'salt': '13c4a48123affaa29189e9097726c698'
},
'mac': 'f4cfb027eb0af9bd7a320b4374a3fa7bef02cfbafe0ec5d1fd7ad129401de0b1'
},
'id': 'a60e0578-0e5b-4a75-b991-d55ec6451a6f',
'version': 3
}
>>> with open('my-keyfile', 'w') as f:
f.write(json.dumps(encrypted))
"""
# print(private_key)
if isinstance(private_key, keys.PrivateKey):
key_bytes = private_key.to_bytes()
# print("private_key:to_bytes",len(private_key))
else:
key_bytes = HexBytes(private_key)
# print("private_key:HexBytes", len(private_key))
if kdf is None:
kdf = cls._default_kdf
password_bytes = text_if_str(to_bytes, password)
assert len(key_bytes) == 32
return create_keyfile_json(key_bytes, password_bytes, kdf=kdf, iterations=iterations)
@combomethod
def privateKeyToAccount(self, private_key):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.from_key`.
This method will be removed in v0.5
"""
warnings.warn(
"privateKeyToAccount is deprecated in favor of from_key",
category=DeprecationWarning,
)
return self.from_key(private_key)
@combomethod
def from_key(self, private_key):
r"""
Returns a convenient object for working with the given private key.
:param private_key: The raw private key
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:return: object with methods for signing and encrypting
:rtype: LocalAccount
.. code-block:: python
>>> acct = Account.from_key(
0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364)
>>> acct.address
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
>>> acct.key
b"\xb2\}\xb3\x1f\xee\xd9\x12''xbf\t9\xdcv\x9a\x96VK-\xe4\xc4rm\x03[6\xec\xf1\xe5\xb3d"
# These methods are also available: sign_message(), sign_transaction(), encrypt()
# They correspond to the same-named methods in Account.*
# but without the private key argument
"""
key = self._parsePrivateKey(private_key)
return LocalAccount(key, self)
@combomethod
def recover_message(self, signable_message: SignableMessage, vrs=None, signature=None):
r"""
Get the address of the account that signed the given message.
You must specify exactly one of: vrs or signature
:param signable_message: the message that was signed
:param vrs: the three pieces generated by an elliptic curve signature
:type vrs: tuple(v, r, s), each element is hex str, bytes or int
:param signature: signature bytes concatenated as r+s+v
:type signature: hex str or bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
.. code-block:: python
>>> from eth_account.messages import encode_defunct
>>> message = encode_defunct(text="I♥SF")
>>> vrs = (
28,
'0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3',
'0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce')
>>> Account.recover_message(message, vrs=vrs)
'0x5ce9454909639D2D17A3F753ce7d93fa0b9aB12E'
# All of these recover calls are equivalent:
# variations on vrs
>>> vrs = (
'0x1c',
'0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3',
'0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce')
>>> Account.recover_message(message, vrs=vrs)
>>> vrs = (
b'\x1c',
b'\\xe6\\xca\\x9b\\xbaX\\xc8\\x86\\x11\\xfa\\xd6jl\\xe8\\xf9\\x96\\x90\\x81\\x95Y8\\x07\\xc4\\xb3\\x8b\\xd5(\\xd2\\xcf\\xf0\\x9dN\\xb3', # noqa: E501
b'>[\\xfb\\xbfM>9\\xb1\\xa2\\xfd\\x81jv\\x80\\xc1\\x9e\\xbe\\xba\\xf3\\xa1A\\xb29\\x93J\\xd4<\\xb3?\\xce\\xc8\\xce') # noqa: E501
>>> Account.recover_message(message, vrs=vrs)
>>> # Caution about this approach: likely problems if there are leading 0s
>>> vrs = (
0x1c,
0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb3,
0x3e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce)
>>> Account.recover_message(message, vrs=vrs)
# variations on signature
>>> signature = '0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c' # noqa: E501
>>> Account.recover_message(message, signature=signature)
>>> signature = b'\\xe6\\xca\\x9b\\xbaX\\xc8\\x86\\x11\\xfa\\xd6jl\\xe8\\xf9\\x96\\x90\\x81\\x95Y8\\x07\\xc4\\xb3\\x8b\\xd5(\\xd2\\xcf\\xf0\\x9dN\\xb3>[\\xfb\\xbfM>9\\xb1\\xa2\\xfd\\x81jv\\x80\\xc1\\x9e\\xbe\\xba\\xf3\\xa1A\\xb29\\x93J\\xd4<\\xb3?\\xce\\xc8\\xce\\x1c' # noqa: E501
>>> Account.recover_message(message, signature=signature)
>>> # Caution about this approach: likely problems if there are leading 0s
>>> signature = 0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c # noqa: E501
>>> Account.recover_message(message, signature=signature)
"""
message_hash = _hash_eip191_message(signable_message)
return self._recover_hash(message_hash, vrs, signature)
@combomethod
def recoverHash(self, message_hash, vrs=None, signature=None):
"""
Get the address of the account that signed the message with the given hash.
You must specify exactly one of: vrs or signature
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.recover_message`.
This method might be removed as early as v0.5
:param message_hash: the hash of the message that you want to verify
:type message_hash: hex str or bytes or int
:param vrs: the three pieces generated by an elliptic curve signature
:type vrs: tuple(v, r, s), each element is hex str, bytes or int
:param signature: signature bytes concatenated as r+s+v
:type signature: hex str or bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
"""
warnings.warn(
"recoverHash is deprecated in favor of recover_message",
category=DeprecationWarning,
)
return self._recover_hash(message_hash, vrs, signature)
@combomethod
def _recover_hash(self, message_hash, vrs=None, signature=None):
hash_bytes = HexBytes(message_hash)
if len(hash_bytes) != 32:
raise ValueError("The message hash must be exactly 32-bytes")
if vrs is not None:
v, r, s = map(hexstr_if_str(to_int), vrs)
v_standard = to_standard_v(v)
signature_obj = self._keys.Signature(vrs=(v_standard, r, s))
elif signature is not None:
signature_bytes = HexBytes(signature)
signature_bytes_standard = to_standard_signature_bytes(signature_bytes)
signature_obj = self._keys.Signature(signature_bytes=signature_bytes_standard)
else:
raise TypeError("You must supply the vrs tuple or the signature bytes")
pubkey = signature_obj.recover_public_key_from_msg_hash(hash_bytes)
return pubkey.to_checksum_address()
@combomethod
def recoverTransaction(self, serialized_transaction):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.recover_transaction`.
This method will be removed in v0.5
"""
warnings.warn(
"recoverTransaction is deprecated in favor of recover_transaction",
category=DeprecationWarning,
)
return self.recover_transaction(serialized_transaction)
@combomethod
def recover_transaction(self, serialized_transaction):
"""
Get the address of the account that signed this transaction.
:param serialized_transaction: the complete signed transaction
:type serialized_transaction: hex str, bytes or int
:returns: address of signer, hex-encoded & checksummed
:rtype: str
.. code-block:: python
>>> raw_transaction = '0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428', # noqa: E501
>>> Account.recover_transaction(raw_transaction)
'0x2c7536E3605D9C16a7a3D7b1898e529396a65c23'
"""
txn_bytes = HexBytes(serialized_transaction)
txn = Transaction.from_bytes(txn_bytes)
msg_hash = hash_of_signed_transaction(txn)
return self._recover_hash(msg_hash, vrs=vrs_from(txn))
def setKeyBackend(self, backend):
"""
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.set_key_backend`.
This method will be removed in v0.5
"""
warnings.warn(
"setKeyBackend is deprecated in favor of set_key_backend",
category=DeprecationWarning,
)
self.set_key_backend(backend)
def set_key_backend(self, backend):
"""
Change the backend used by the underlying eth-keys library.
*(The default is fine for most users)*
:param backend: any backend that works in
`eth_keys.KeyApi(backend) <https://github.com/ethereum/eth-keys/#keyapibackendnone>`_
"""
self._keys = KeyAPI(backend)
@combomethod
def sign_message(self, signable_message: SignableMessage, private_key):
r"""
Sign the provided message.
This API supports any messaging format that will encode to EIP-191_ messages.
If you would like historical compatibility with
:meth:`w3.eth.sign() <web3.eth.Eth.sign>`
you can use :meth:`~eth_account.messages.encode_defunct`.
Other options are the "validator", or "structured data" standards. (Both of these
are in *DRAFT* status currently, so be aware that the implementation is not
guaranteed to be stable). You can import all supported message encoders in
``eth_account.messages``.
:param signable_message: the encoded message for signing
:param private_key: the key to sign the message with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most importantly the fields: v, r, and s
:rtype: ~eth_account.datastructures.AttributeDict
.. code-block:: python
>>> msg = "I♥SF"
>>> from eth_account.messages import encode_defunct
>>> msghash = encode_defunct(text=msg)
SignableMessage(version=b'E', header=b'thereum Signed Message:\n6', body=b'I\xe2\x99\xa5SF')
>>> # If you're curious about the internal fields of SignableMessage, take a look at EIP-191, linked above
>>> key = "0xb25c7db31feed9122727bf0939dc769a96564b2de4c4726d035b36ecf1e5b364"
>>> Account.sign_message(msghash, key)
{'messageHash': HexBytes('0x1476abb745d423bf09273f1afd887d951181d25adc66c4834a70491911b7f750'), # noqa: E501
'r': 104389933075820307925104709181714897380569894203213074526835978196648170704563,
's': 28205917190874851400050446352651915501321657673772411533993420917949420456142,
'signature': HexBytes('0xe6ca9bba58c88611fad66a6ce8f996908195593807c4b38bd528d2cff09d4eb33e5bfbbf4d3e39b1a2fd816a7680c19ebebaf3a141b239934ad43cb33fcec8ce1c'), # noqa: E501
'v': 28}
.. _EIP-191: https://eips.ethereum.org/EIPS/eip-191
"""
message_hash = _hash_eip191_message(signable_message)
return self._sign_hash(message_hash, private_key)
@combomethod
def signHash(self, message_hash, private_key):
"""
.. WARNING:: *Never* sign a hash that you didn't generate,
it can be an arbitrary transaction. For example, it might
send all of your account's ether to an attacker.
Instead, prefer :meth:`~eth_account.account.Account.sign_message`,
which cannot accidentally sign a transaction.
Sign the provided hash.
.. CAUTION:: Deprecated for :meth:`~eth_account.account.Account.sign_message`.
This method will be removed in v0.5
:param message_hash: the 32-byte message hash to be signed
:type message_hash: hex str, bytes or int
:param private_key: the key to sign the message with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most
importantly the fields: v, r, and s
:rtype: ~eth_account.datastructures.AttributeDict
"""
warnings.warn(
"signHash is deprecated in favor of sign_message",
category=DeprecationWarning,
)
return self._sign_hash(message_hash, private_key)
@combomethod
def _sign_hash(self, message_hash, private_key):
msg_hash_bytes = HexBytes(message_hash)
if len(msg_hash_bytes) != 32:
raise ValueError("The message hash must be exactly 32-bytes")
key = self._parsePrivateKey(private_key)
(v, r, s, eth_signature_bytes) = sign_message_hash(key, msg_hash_bytes)
return AttributeDict({
'messageHash': msg_hash_bytes,
'r': r,
's': s,
'v': v,
'signature': HexBytes(eth_signature_bytes),
})
@combomethod
def sign_transaction(self, transaction_dict, private_key):
"""
Sign a transaction using a local private key. Produces signature details
and the hex-encoded transaction suitable for broadcast using
:meth:`w3.eth.sendRawTransaction() <web3.eth.Eth.sendRawTransaction>`.
Create the transaction dict for a contract method with
`my_contract.functions.my_function().buildTransaction()
<http://web3py.readthedocs.io/en/latest/contracts.html#methods>`_
:param dict transaction_dict: the transaction with keys:
nonce, chainId, to, data, value, gas, and gasPrice.
:param private_key: the private key to sign the data with
:type private_key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: Various details about the signature - most
importantly the fields: v, r, and s
:rtype: AttributeDict
.. code-block:: python
>>> transaction = {
# Note that the address must be in checksum format or native bytes:
'to': '0xF0109fC8DF283027b6285cc889F5aA624EaC1F55',
'value': 1000000000,
'gas': 2000000,
'gasPrice': 234567897654321,
'nonce': 0,
'chainId': 1
}
>>> key = '0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318'
>>> signed = Account.sign_transaction(transaction, key)
{'hash': HexBytes('0x6893a6ee8df79b0f5d64a180cd1ef35d030f3e296a5361cf04d02ce720d32ec5'),
'r': 4487286261793418179817841024889747115779324305375823110249149479905075174044,
'rawTransaction': HexBytes('0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428'), # noqa: E501
's': 30785525769477805655994251009256770582792548537338581640010273753578382951464,
'v': 37}
>>> w3.eth.sendRawTransaction(signed.rawTransaction)
"""
if not isinstance(transaction_dict, Mapping):
raise TypeError("transaction_dict must be dict-like, got %r" % transaction_dict)
account = self.from_key(private_key)
# allow from field, *only* if it matches the private key
if 'from' in transaction_dict:
if transaction_dict['from'] == account.address:
sanitized_transaction = dissoc(transaction_dict, 'from')
else:
raise TypeError("from field must match key's %s, but it was %s" % (
account.address,
transaction_dict['from'],
))
else:
sanitized_transaction = transaction_dict
# sign transaction
(
v,
r,
s,
rlp_encoded,
) = sign_transaction_dict(account._key_obj, sanitized_transaction)
transaction_hash = keccak(rlp_encoded)
return AttributeDict({
'rawTransaction': HexBytes(rlp_encoded),
'hash': HexBytes(transaction_hash),
'r': r,
's': s,
'v': v,
})
@combomethod
def _parsePrivateKey(self, key):
"""
Generate a :class:`eth_keys.datatypes.PrivateKey` from the provided key. If the
key is already of type :class:`eth_keys.datatypes.PrivateKey`, return the key.
:param key: the private key from which a :class:`eth_keys.datatypes.PrivateKey`
will be generated
:type key: hex str, bytes, int or :class:`eth_keys.datatypes.PrivateKey`
:returns: the provided key represented as a :class:`eth_keys.datatypes.PrivateKey`
"""
if isinstance(key, self._keys.PrivateKey):
return key
try:
return self._keys.PrivateKey(HexBytes(key))
except ValidationError as original_exception:
raise ValueError(
"The private key must be exactly 32 bytes long, instead of "
"%d bytes." % len(key)
) from original_exception
|
the-stack_0_17445 | import flee.flee as flee
import datamanager.handle_refugee_data as handle_refugee_data
import numpy as np
import outputanalysis.analysis as a
"""
Generation 1 code. Incorporates only distance, travel always takes one day.
"""
if __name__ == "__main__":
print("Testing basic data handling and simulation kernel.")
flee.SimulationSettings.MinMoveSpeed=10.0
flee.SimulationSettings.MaxMoveSpeed=10.0
end_time = 100
e = flee.Ecosystem()
l1 = e.addLocation("A", movechance=1.0)
l2 = e.addLocation("B", movechance=0.0)
e.linkUp("A","B","5.0")
for t in range(0,end_time):
# Insert refugee agents
e.addAgent(location=l1)
# Propagate the model by one time step.
e.evolve()
if t==2:
assert e.close_location("B")
print(t, l1.numAgents, l2.numAgents)
e.printComplete()
assert t==99
assert l2.numAgents==3 # Location is closed after 3 steps, refugees underway will still arrive but others are blocked.
assert l1.numAgents==97
print("Test successful!")
|
the-stack_0_17446 | #!/usr/bin/env python3
import requests
import socket
import random
import string
import concurrent.futures
from paramiko.client import SSHClient
import paramiko
# VARIABLES
ports = [80,443,445,8080,3389,22,21]
#ports = list(range(1,65536))
domain = "letmeoutofyour.net"
verbose = False
printOpen = True
printClosed = True
threadcount = 100
random.shuffle(ports)
# Verbosity - set to False above if you don't want output
def vprint(status):
if verbose == True:
print(status)
# Print open ports
def print_open(status):
if printOpen == True:
print("[+] " + status)
# Print closed ports
def print_closed(status):
if printClosed == True:
print("[-] " + status)
def check_web(base, domain, port):
vprint("Testing: " + base + domain + ":" + str(port))
try:
r = requests.get(base + domain + ":" + str(port), timeout=1)
result = r.text.strip()
if result == "w00tw00t":
print_open("Success! " + base + domain + ":" + str(port))
except requests.exceptions.ConnectionError:
print_closed("Failed! " + base + domain + ":" + str(port))
def check_ssh(domain, port):
client = SSHClient()
vprint("Trying SSH to " + domain + " Port: " + str(port))
try:
client.connect(domain, port, timeout=1)
except paramiko.ssh_exception.SSHException:
pass
except socket.timeout:
print_closed("Failed! SSH to " + domain + " Port: " + str(port))
return
key = client.get_transport().get_remote_server_key()
if key.get_base64() == "AAAAC3NzaC1lZDI1NTE5AAAAIIrfkWLMzwGKRliVsJOjm5OJRJo6AZt7NsqAH8bk9tYc":
print_open("Success! SSH to " + domain + " Port: " + str(port))
with concurrent.futures.ThreadPoolExecutor(threadcount) as executor:
for port in ports:
# Test HTTP
base = "http://"
executor.submit(check_web, base, domain, port)
# Test HTTPS
base = "https://"
executor.submit(check_web, base, domain, port)
# Test SSH
executor.submit(check_ssh, domain, port)
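# Expected console output looks like the lines below (illustrative only, built from
# the print_open/print_closed formats above; which ports succeed depends entirely on
# the egress filtering of the network this is run from):
#   [+] Success! http://letmeoutofyour.net:80
#   [-] Failed! SSH to letmeoutofyour.net Port: 21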
|
the-stack_0_17450 | # Copyright (c) 2019 Guo Yejun
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================
import tensorflow as tf
import numpy as np
import sys, struct
import convert_header as header
__all__ = ['convert_from_tensorflow']
class Operand(object):
IOTYPE_INPUT = 1
IOTYPE_OUTPUT = 2
IOTYPE_INTERMEDIATE = IOTYPE_INPUT | IOTYPE_OUTPUT
DTYPE_FLOAT = 1
DTYPE_UINT8 = 4
index = 0
def __init__(self, name, dtype, dims):
self.name = name
self.dtype = dtype
self.dims = dims
self.iotype = 0
self.used_count = 0
self.index = Operand.index
Operand.index = Operand.index + 1
self.iotype2str = {Operand.IOTYPE_INPUT: 'in', Operand.IOTYPE_OUTPUT: 'out', Operand.IOTYPE_INTERMEDIATE: 'inout'}
self.dtype2str = {Operand.DTYPE_FLOAT: 'DT_FLOAT', Operand.DTYPE_UINT8: 'DT_UINT8'}
def add_iotype(self, iotype):
self.iotype = self.iotype | iotype
if iotype == Operand.IOTYPE_INPUT:
self.used_count = self.used_count + 1
def __str__(self):
return "{}: (name: {}, iotype: {}, dtype: {}, dims: {}, used_count: {})".format(self.index,
self.name, self.iotype2str[self.iotype], self.dtype2str[self.dtype],
self.dims, self.used_count)
def __lt__(self, other):
return self.index < other.index
class TFConverter:
def __init__(self, graph_def, nodes, outfile, dump4tb):
self.graph_def = graph_def
self.nodes = nodes
self.outfile = outfile
self.dump4tb = dump4tb
self.layer_number = 0
self.output_names = []
self.name_node_dict = {}
self.edges = {}
self.conv_activations = {'Relu':0, 'Tanh':1, 'Sigmoid':2, 'None':3, 'LeakyRelu':4}
self.conv_paddings = {'VALID':0, 'SAME':1}
self.pool_paddings = {'VALID':0, 'SAME':1}
self.converted_nodes = set()
self.conv2d_scope_names = set()
self.conv2d_scopename_inputname_dict = {}
self.dense_scope_names = set()
self.dense_scopename_inputname_dict = {}
self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4,
'MathBinary':5, 'MathUnary':6, 'AvgPool':7, 'MatMul':8}
self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4, 'FloorMod':5}
self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4,
'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10,
'Acosh':11, 'Atanh':12, 'Ceil':13, 'Floor':14, 'Round':15}
self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
self.name_operand_dict = {}
def add_operand(self, name, type):
node = self.name_node_dict[name]
if name not in self.name_operand_dict:
dtype = node.attr['dtype'].type
if dtype == 0:
dtype = node.attr['T'].type
dims = [-1,-1,-1,-1]
if 'shape' in node.attr:
dims[0] = node.attr['shape'].shape.dim[0].size
dims[1] = node.attr['shape'].shape.dim[1].size
dims[2] = node.attr['shape'].shape.dim[2].size
dims[3] = node.attr['shape'].shape.dim[3].size
operand = Operand(name, dtype, dims)
self.name_operand_dict[name] = operand;
self.name_operand_dict[name].add_iotype(type)
return self.name_operand_dict[name].index
def dump_for_tensorboard(self):
graph = tf.get_default_graph()
tf.import_graph_def(self.graph_def, name="")
tf.summary.FileWriter('/tmp/graph', graph)
print('graph saved, run "tensorboard --logdir=/tmp/graph" to see it')
def get_conv2d_params(self, conv2d_scope_name):
knode = self.name_node_dict[conv2d_scope_name + '/kernel']
bnode = self.name_node_dict[conv2d_scope_name + '/bias']
if conv2d_scope_name + '/dilation_rate' in self.name_node_dict:
dnode = self.name_node_dict[conv2d_scope_name + '/dilation_rate']
else:
dnode = None
# the BiasAdd name is possible be changed into the output name,
# if activation is None, and BiasAdd.next is the last op which is Identity
if conv2d_scope_name + '/BiasAdd' in self.edges:
anode = self.edges[conv2d_scope_name + '/BiasAdd'][0]
if anode.op not in self.conv_activations:
anode = None
else:
anode = None
return knode, bnode, dnode, anode
def get_dense_params(self, dense_scope_name):
knode = self.name_node_dict[dense_scope_name + '/kernel']
bnode = self.name_node_dict.get(dense_scope_name + '/bias')
# the BiasAdd name is possible be changed into the output name,
# if activation is None, and BiasAdd.next is the last op which is Identity
anode = None
if bnode:
if dense_scope_name + '/BiasAdd' in self.edges:
anode = self.edges[dense_scope_name + '/BiasAdd'][0]
if anode.op not in self.conv_activations:
anode = None
else:
anode = None
return knode, bnode, anode
def dump_complex_conv2d_to_file(self, node, f):
assert(node.op == 'Conv2D')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
scope_name = TFConverter.get_scope_name(node.name)
#knode for kernel, bnode for bias, dnode for dilation, anode for activation
knode, bnode, dnode, anode = self.get_conv2d_params(scope_name)
if dnode is not None:
dilation = struct.unpack('i', dnode.attr['value'].tensor.tensor_content[0:4])[0]
else:
dilation = 1
if anode is not None:
activation = anode.op
else:
activation = 'None'
padding = node.attr['padding'].s.decode("utf-8")
# conv2d with dilation > 1 generates tens of nodes, not easy to parse them, so use this tricky method.
if dilation > 1 and scope_name + '/stack' in self.name_node_dict:
if self.name_node_dict[scope_name + '/stack'].op == "Const":
padding = 'SAME'
padding = self.conv_paddings[padding]
ktensor = knode.attr['value'].tensor
filter_height = ktensor.tensor_shape.dim[0].size
filter_width = ktensor.tensor_shape.dim[1].size
in_channels = ktensor.tensor_shape.dim[2].size
out_channels = ktensor.tensor_shape.dim[3].size
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
kernel = np.transpose(kernel, [3, 0, 1, 2])
has_bias = 1
np.array([self.op2code[node.op], dilation, padding, self.conv_activations[activation], in_channels, out_channels, filter_height, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
btensor = bnode.attr['value'].tensor
if btensor.tensor_shape.dim[0].size == 1:
bias = struct.pack("f", btensor.float_val[0])
else:
bias = btensor.tensor_content
f.write(bias)
input_name = self.conv2d_scopename_inputname_dict[scope_name]
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
if anode is not None:
output_operand_index = self.add_operand(anode.name, Operand.IOTYPE_OUTPUT)
else:
output_operand_index = self.add_operand(self.edges[bnode.name][0].name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_dense_to_file(self, node, f):
assert(node.op == 'MatMul')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
scope_name = TFConverter.get_scope_name(node.name)
#knode for kernel, bnode for bias, anode for activation
knode, bnode, anode = self.get_dense_params(scope_name.split('/')[0])
if bnode is not None:
has_bias = 1
btensor = bnode.attr['value'].tensor
if btensor.tensor_shape.dim[0].size == 1:
bias = struct.pack("f", btensor.float_val[0])
else:
bias = btensor.tensor_content
else:
has_bias = 0
if anode is not None:
activation = anode.op
else:
activation = 'None'
ktensor = knode.attr['value'].tensor
in_channels = ktensor.tensor_shape.dim[0].size
out_channels = ktensor.tensor_shape.dim[1].size
if in_channels * out_channels == 1:
kernel = np.float32(ktensor.float_val[0])
else:
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(in_channels, out_channels)
kernel = np.transpose(kernel, [1, 0])
np.array([self.op2code[node.op], self.conv_activations[activation], in_channels, out_channels, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
if has_bias:
f.write(bias)
input_name = self.dense_scopename_inputname_dict[scope_name.split('/')[0]]
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
if anode is not None:
output_operand_index = self.add_operand(anode.name, Operand.IOTYPE_OUTPUT)
else:
if bnode is not None:
output_operand_index = self.add_operand(self.edges[bnode.name][0].name, Operand.IOTYPE_OUTPUT)
else:
output_operand_index = self.add_operand(self.edges[scope_name+'/concat_1'][0].name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_simple_conv2d_to_file(self, node, f):
assert(node.op == 'Conv2D')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
node0 = self.name_node_dict[node.input[0]]
node1 = self.name_node_dict[node.input[1]]
if node0.op == 'Const':
knode = node0
input_name = node.input[1]
else:
knode = node1
input_name = node.input[0]
ktensor = knode.attr['value'].tensor
filter_height = ktensor.tensor_shape.dim[0].size
filter_width = ktensor.tensor_shape.dim[1].size
in_channels = ktensor.tensor_shape.dim[2].size
out_channels = ktensor.tensor_shape.dim[3].size
if filter_height * filter_width * in_channels * out_channels == 1:
kernel = np.float32(ktensor.float_val[0])
else:
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
kernel = np.transpose(kernel, [3, 0, 1, 2])
has_bias = 0
dilation = 1
padding = node.attr['padding'].s.decode("utf-8")
np.array([self.op2code[node.op], dilation, self.conv_paddings[padding], self.conv_activations['None'],
in_channels, out_channels, filter_height, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_depth2space_to_file(self, node, f):
assert(node.op == 'DepthToSpace')
self.layer_number = self.layer_number + 1
block_size = node.attr['block_size'].i
np.array([self.op2code[node.op], block_size], dtype=np.uint32).tofile(f)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_mirrorpad_to_file(self, node, f):
assert(node.op == 'MirrorPad')
self.layer_number = self.layer_number + 1
mode = node.attr['mode'].s
mode = self.mirrorpad_mode[mode.decode("utf-8")]
np.array([self.op2code[node.op], mode], dtype=np.uint32).tofile(f)
pnode = self.name_node_dict[node.input[1]]
self.converted_nodes.add(pnode.name)
paddings = pnode.attr['value'].tensor.tensor_content
f.write(paddings)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_maximum_to_file(self, node, f):
assert(node.op == 'Maximum')
self.layer_number = self.layer_number + 1
ynode = self.name_node_dict[node.input[1]]
y = ynode.attr['value'].tensor.float_val[0]
np.array([self.op2code[node.op]], dtype=np.uint32).tofile(f)
np.array([y], dtype=np.float32).tofile(f)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_mathbinary_to_file(self, node, f):
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
i0_node = self.name_node_dict[node.input[0]]
i1_node = self.name_node_dict[node.input[1]]
np.array([self.op2code['MathBinary'], self.mathbin2code[node.op]], dtype=np.uint32).tofile(f)
if i0_node.op == 'Const':
scalar = i0_node.attr['value'].tensor.float_val[0]
np.array([1], dtype=np.uint32).tofile(f) # broadcast: 1
np.array([scalar], dtype=np.float32).tofile(f)
np.array([0], dtype=np.uint32).tofile(f) # broadcast: 0
input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
elif i1_node.op == 'Const':
scalar = i1_node.attr['value'].tensor.float_val[0]
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
np.array([1], dtype=np.uint32).tofile(f)
np.array([scalar], dtype=np.float32).tofile(f)
else:
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([output_operand_index], dtype=np.uint32).tofile(f)
def dump_mathunary_to_file(self, node, f):
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
i0_node = self.name_node_dict[node.input[0]]
np.array([self.op2code['MathUnary'], self.mathun2code[node.op]], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([output_operand_index],dtype=np.uint32).tofile(f)
def dump_avg_pool_to_file(self, node, f):
assert(node.op == 'AvgPool')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
node0 = self.name_node_dict[node.input[0]]
strides = node.attr['strides']
# Tensorflow do not support pooling strides in batch dimension and
# current native NN do not support pooling strides in channel dimension, added assert() here.
assert(strides.list.i[1]==strides.list.i[2])
assert(strides.list.i[0]==1)
assert(strides.list.i[3]==1)
strides = strides.list.i[1]
filter_node = node.attr['ksize']
input_name = node.input[0]
# Tensorflow do not support pooling ksize in batch dimension and channel dimension.
assert(filter_node.list.i[0]==1)
assert(filter_node.list.i[3]==1)
filter_height = filter_node.list.i[1]
filter_width = filter_node.list.i[2]
padding = node.attr['padding'].s.decode("utf-8")
np.array([self.op2code[node.op], strides, self.pool_paddings[padding], filter_height],
dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index],dtype=np.uint32).tofile(f)
def dump_layers_to_file(self, f):
for node in self.nodes:
if node.name in self.converted_nodes:
continue
# conv2d with dilation generates very complex nodes, so handle it in special
if self.in_conv2d_scope(node.name):
if node.op == 'Conv2D':
self.dump_complex_conv2d_to_file(node, f)
continue
if self.in_dense_scope(node.name):
if node.op == 'MatMul':
self.dump_dense_to_file(node, f)
continue
if node.op == 'Conv2D':
self.dump_simple_conv2d_to_file(node, f)
continue
if node.name in self.output_names:
input_name = self.id_different_scope_dict[node.name]
if TFConverter.get_scope_name(input_name)!=TFConverter.get_scope_name(node.name):
continue
if node.op == 'AvgPool':
self.dump_avg_pool_to_file(node, f)
elif node.op == 'DepthToSpace':
self.dump_depth2space_to_file(node, f)
elif node.op == 'MirrorPad':
self.dump_mirrorpad_to_file(node, f)
elif node.op == 'Maximum':
self.dump_maximum_to_file(node, f)
elif node.op in self.mathbin2code:
self.dump_mathbinary_to_file(node, f)
elif node.op in self.mathun2code:
self.dump_mathunary_to_file(node, f)
def dump_operands_to_file(self, f):
operands = sorted(self.name_operand_dict.values())
for operand in operands:
#print('{}'.format(operand))
np.array([operand.index, len(operand.name)], dtype=np.uint32).tofile(f)
f.write(operand.name.encode('utf-8'))
np.array([operand.iotype, operand.dtype], dtype=np.uint32).tofile(f)
np.array(operand.dims, dtype=np.uint32).tofile(f)
def dump_to_file(self):
with open(self.outfile, 'wb') as f:
f.write(header.str.encode('utf-8'))
np.array([header.major, header.minor], dtype=np.uint32).tofile(f)
self.dump_layers_to_file(f)
self.dump_operands_to_file(f)
np.array([self.layer_number, len(self.name_operand_dict)], dtype=np.uint32).tofile(f)
def generate_name_node_dict(self):
for node in self.nodes:
self.name_node_dict[node.name] = node
def generate_output_names(self):
used_names = []
for node in self.nodes:
for input in node.input:
used_names.append(input)
for node in self.nodes:
if node.name not in used_names:
self.output_names.append(node.name)
def remove_identity(self):
self.id_different_scope_dict = {}
id_nodes = []
id_dict = {}
for node in self.nodes:
if node.op == 'Identity':
name = node.name
input = node.input[0]
id_nodes.append(node)
# do not change the output name
if name in self.output_names:
self.name_node_dict[input].name = name
self.name_node_dict[name] = self.name_node_dict[input]
del self.name_node_dict[input]
self.id_different_scope_dict[name] = input
else:
id_dict[name] = input
for idnode in id_nodes:
self.nodes.remove(idnode)
for node in self.nodes:
for i in range(len(node.input)):
input = node.input[i]
if input in id_dict:
node.input[i] = id_dict[input]
def generate_edges(self):
for node in self.nodes:
for input in node.input:
if input in self.edges:
self.edges[input].append(node)
else:
self.edges[input] = [node]
@staticmethod
def get_scope_name(name):
index = name.rfind('/')
if index == -1:
return ""
return name[0:index]
def in_conv2d_scope(self, name):
inner_scope = TFConverter.get_scope_name(name)
if inner_scope == "":
return False;
for scope in self.conv2d_scope_names:
index = inner_scope.find(scope)
if index == 0:
return True
return False
def in_dense_scope(self, name):
inner_scope = TFConverter.get_scope_name(name)
if inner_scope == "":
return False;
for scope in self.dense_scope_names:
index = inner_scope.find(scope)
if index == 0:
return True
return False
def generate_sub_block_op_scope_info(self):
# mostly, conv2d/dense is a sub block in graph, get the scope name
for node in self.nodes:
if node.op == 'Conv2D':
scope = TFConverter.get_scope_name(node.name)
# for the case tf.nn.conv2d is called directly
if scope == '':
continue
# for the case tf.nn.conv2d is called within a scope
if scope + '/kernel' not in self.name_node_dict:
continue
self.conv2d_scope_names.add(scope)
elif node.op == 'MatMul':
scope = TFConverter.get_scope_name(node.name)
# for the case tf.nn.dense is called directly
if scope == '':
continue
# for the case tf.nn.dense is called within a scope
if scope + '/kernel' not in self.name_node_dict and scope.split('/Tensordot')[0] + '/kernel' not in self.name_node_dict:
continue
self.dense_scope_names.add(scope.split('/Tensordot')[0])
# get the input name to the conv2d/dense sub block
for node in self.nodes:
scope = TFConverter.get_scope_name(node.name)
if scope in self.conv2d_scope_names:
if node.op == 'Conv2D' or node.op == 'Shape':
for inp in node.input:
if TFConverter.get_scope_name(inp) != scope:
self.conv2d_scopename_inputname_dict[scope] = inp
elif scope in self.dense_scope_names:
if node.op == 'MatMul' or node.op == 'Shape':
for inp in node.input:
if TFConverter.get_scope_name(inp) != scope:
self.dense_scopename_inputname_dict[scope] = inp
elif scope.split('/Tensordot')[0] in self.dense_scope_names:
if node.op == 'Transpose':
for inp in node.input:
if TFConverter.get_scope_name(inp).find(scope)<0 and TFConverter.get_scope_name(inp).find(scope.split('/')[0])<0:
self.dense_scopename_inputname_dict[scope.split('/Tensordot')[0]] = inp
def run(self):
self.generate_name_node_dict()
self.generate_output_names()
self.remove_identity()
self.generate_edges()
self.generate_sub_block_op_scope_info()
if self.dump4tb:
self.dump_for_tensorboard()
self.dump_to_file()
def convert_from_tensorflow(infile, outfile, dump4tb):
with open(infile, 'rb') as f:
# read the file in .proto format
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
nodes = graph_def.node
converter = TFConverter(graph_def, nodes, outfile, dump4tb)
converter.run()
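# Usage sketch (not part of this module; the file names are hypothetical):
#
#   convert_from_tensorflow('model.pb', 'model.model', dump4tb=False)
#
# reads a frozen TensorFlow graph in .pb form and writes the native binary format
# consumed by FFmpeg's DNN backend (header/version, then layers, then operands).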
|
the-stack_0_17451 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pyrimidine import *
from pyrimidine.local_search import *
from pyrimidine.utils import randint2
from pyrimidine.benchmarks.optimization import *
_evaluate = ShortestPath.random(30)
class _Chromosome(PermutationChromosome):
default_size = 30
def decode(self):
return np.hstack((self, [self[0]]))
def to_points(self):
x = self.decode()
return points[x, 0], points[x, 1]
_Individual = MonoIndividual[_Chromosome].set_fitness(lambda obj: 1 / _evaluate(obj.decode()))
class SAIndividual(SimulatedAnnealing, _Individual):
def get_neighbour(self):
cpy = self.clone(fitness=None)
cpy.chromosome.mutate()
return cpy
sa = SAIndividual.random(size=30)
from matplotlib import pyplot as plt
from celluloid import Camera
fig = plt.figure()
ax = fig.add_subplot(111)
points = _evaluate.points
def animate(i):
    # advance the annealer a few iterations, then draw the best-so-far tour (solid)
    # and the current candidate held in the phantom individual (dashed)
    sa.evolve(n_iter=5, verbose=False)
    ax.plot(*sa.chromosome.to_points(), 'k-o')
    ax.plot(*sa.phantom.chromosome.to_points(), 'b--o')
    ax.legend((f'Best Solution({sa.fitness:.4})', f'Generation {i*5}'))
camera = Camera(fig)
ax.plot(*sa.chromosome.to_points(), 'k-o')
ax.legend(('Generation 0',))
for i in range(1, 300):
animate(i)
camera.snap()
animation = camera.animate()
animation.save('animation-sa.mp4')
|
the-stack_0_17454 | from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = ({{ cookiecutter.minimum_supported_python_version [0] }}, {{ cookiecutter.minimum_supported_python_version [2] }})
if sys.version_info < min_version:
error = """
{{ cookiecutter.package_dist_name }} does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='{{ cookiecutter.package_dist_name }}',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="{{ cookiecutter.project_short_description }}",
long_description=readme,
author="{{ cookiecutter.full_name }}",
author_email='{{ cookiecutter.email }}',
url='https://{{ cookiecutter.vcs_domain }}/{{ cookiecutter.vcs_username }}/{{ cookiecutter.repo_name }}',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'{{ cookiecutter.package_dir_name }}': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
the-stack_0_17455 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Jan Felix Wiebe, Mohit Jindal
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import calendar
import hashlib
import math
from collections import defaultdict
from datetime import date, datetime, time, timedelta
from functools import reduce
from urllib.parse import quote, urlencode
import dateutil
import isoweek
import pytz
from django.conf import settings
from django.core.cache import caches
from django.db.models import Exists, Max, Min, OuterRef, Prefetch, Q
from django.db.models.functions import Coalesce, Greatest
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.formats import date_format, get_format
from django.utils.timezone import get_current_timezone, now
from django.views import View
from django.views.decorators.cache import cache_page
from django.views.generic import ListView, TemplateView
from pytz import UTC
from pretix.base.i18n import language
from pretix.base.models import (
Event, EventMetaValue, Organizer, Quota, SubEvent, SubEventMetaValue,
)
from pretix.base.services.quotas import QuotaAvailability
from pretix.helpers.compat import date_fromisocalendar
from pretix.helpers.daterange import daterange
from pretix.helpers.formats.en.formats import (
SHORT_MONTH_DAY_FORMAT, WEEK_FORMAT,
)
from pretix.multidomain.urlreverse import eventreverse
from pretix.presale.ical import get_public_ical
from pretix.presale.views import OrganizerViewMixin
def filter_qs_by_attr(qs, request):
"""
    We allow filtering the event list using attributes defined in the event meta data
    models, in the format ?attr[meta_name]=meta_value
"""
attrs = {}
for i, item in enumerate(request.GET.items()):
k, v = item
if k.startswith("attr[") and k.endswith("]"):
attrs[k[5:-1]] = v
skey = 'filter_qs_by_attr_{}_{}'.format(request.organizer.pk, request.event.pk if hasattr(request, 'event') else '')
if request.GET.get('attr_persist'):
request.session[skey] = attrs
elif skey in request.session:
attrs = request.session[skey]
props = {
p.name: p for p in request.organizer.meta_properties.filter(
name__in=attrs.keys()
)
}
for i, item in enumerate(attrs.items()):
attr, v = item
emv_with_value = EventMetaValue.objects.filter(
event=OuterRef('event' if qs.model == SubEvent else 'pk'),
property__name=attr,
value=v
)
emv_with_any_value = EventMetaValue.objects.filter(
event=OuterRef('event' if qs.model == SubEvent else 'pk'),
property__name=attr,
)
if qs.model == SubEvent:
semv_with_value = SubEventMetaValue.objects.filter(
subevent=OuterRef('pk'),
property__name=attr,
value=v
)
semv_with_any_value = SubEventMetaValue.objects.filter(
subevent=OuterRef('pk'),
property__name=attr,
)
prop = props.get(attr)
if not prop:
continue
annotations = {'attr_{}'.format(i): Exists(emv_with_value)}
if qs.model == SubEvent:
annotations['attr_{}_sub'.format(i)] = Exists(semv_with_value)
annotations['attr_{}_sub_any'.format(i)] = Exists(semv_with_any_value)
filters = Q(**{'attr_{}_sub'.format(i): True})
filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}'.format(i): True}))
if prop.default == v:
annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)
filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}_any'.format(i): False}))
else:
filters = Q(**{'attr_{}'.format(i): True})
if prop.default == v:
annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)
filters |= Q(**{'attr_{}_any'.format(i): False})
qs = qs.annotate(**annotations).filter(filters)
return qs
class EventListMixin:
def _get_event_queryset(self):
query = Q(is_public=True) & Q(live=True)
qs = self.request.organizer.events.using(settings.DATABASE_REPLICA).filter(query)
qs = qs.filter(sales_channels__contains=self.request.sales_channel.identifier)
qs = qs.annotate(
min_from=Min('subevents__date_from'),
min_to=Min('subevents__date_to'),
max_from=Max('subevents__date_from'),
max_to=Max('subevents__date_to'),
max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from')),
)
if "old" in self.request.GET:
qs = qs.filter(
Q(Q(has_subevents=False) & Q(
Q(date_to__lt=now()) | Q(Q(date_to__isnull=True) & Q(date_from__lt=now()))
)) | Q(Q(has_subevents=True) & Q(
Q(min_to__lt=now()) | Q(min_from__lt=now()))
)
).annotate(
order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),
).order_by('-order_to')
else:
qs = qs.filter(
Q(Q(has_subevents=False) & Q(
Q(date_to__gte=now()) | Q(Q(date_to__isnull=True) & Q(date_from__gte=now()))
)) | Q(Q(has_subevents=True) & Q(
Q(max_to__gte=now()) | Q(max_from__gte=now()))
)
).annotate(
order_from=Coalesce('min_from', 'date_from'),
).order_by('order_from')
qs = Event.annotated(filter_qs_by_attr(qs, self.request))
return qs
def _set_month_to_next_subevent(self):
tz = pytz.timezone(self.request.event.settings.timezone)
next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
active=True,
is_public=True,
).select_related('event').order_by('date_from').first()
if next_sev:
datetime_from = next_sev.date_from
self.year = datetime_from.astimezone(tz).year
self.month = datetime_from.astimezone(tz).month
else:
self.year = now().year
self.month = now().month
def _set_month_to_next_event(self):
next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
organizer=self.request.organizer,
live=True,
is_public=True,
has_subevents=False
), self.request).order_by('date_from').first()
next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
active=True,
is_public=True,
), self.request).select_related('event').order_by('date_from').first()
datetime_from = None
if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
datetime_from = next_sev.date_from
next_ev = next_sev.event
elif next_ev:
datetime_from = next_ev.date_from
if datetime_from:
tz = pytz.timezone(next_ev.settings.timezone)
self.year = datetime_from.astimezone(tz).year
self.month = datetime_from.astimezone(tz).month
else:
self.year = now().year
self.month = now().month
def _set_month_year(self):
if 'date' in self.request.GET:
try:
date = dateutil.parser.isoparse(self.request.GET.get('date')).date()
except ValueError:
date = now().date()
self.year = date.year
self.month = date.month
else:
if hasattr(self.request, 'event'):
self._set_month_to_next_subevent()
else:
self._set_month_to_next_event()
def _set_week_to_next_subevent(self):
tz = pytz.timezone(self.request.event.settings.timezone)
next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
active=True,
is_public=True,
).select_related('event').order_by('date_from').first()
if next_sev:
datetime_from = next_sev.date_from
self.year = datetime_from.astimezone(tz).isocalendar()[0]
self.week = datetime_from.astimezone(tz).isocalendar()[1]
else:
self.year = now().isocalendar()[0]
self.week = now().isocalendar()[1]
def _set_week_to_next_event(self):
next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
organizer=self.request.organizer,
live=True,
is_public=True,
has_subevents=False
), self.request).order_by('date_from').first()
next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
active=True,
is_public=True,
), self.request).select_related('event').order_by('date_from').first()
datetime_from = None
if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
datetime_from = next_sev.date_from
next_ev = next_sev.event
elif next_ev:
datetime_from = next_ev.date_from
if datetime_from:
tz = pytz.timezone(next_ev.settings.timezone)
self.year = datetime_from.astimezone(tz).isocalendar()[0]
self.week = datetime_from.astimezone(tz).isocalendar()[1]
else:
self.year = now().isocalendar()[0]
self.week = now().isocalendar()[1]
def _set_week_year(self):
if 'date' in self.request.GET:
try:
iso = dateutil.parser.isoparse(self.request.GET.get('date')).isocalendar()
except ValueError:
iso = now().isocalendar()
self.year = iso[0]
self.week = iso[1]
else:
if hasattr(self.request, 'event'):
self._set_week_to_next_subevent()
else:
self._set_week_to_next_event()
class OrganizerIndex(OrganizerViewMixin, EventListMixin, ListView):
model = Event
context_object_name = 'events'
template_name = 'pretixpresale/organizers/index.html'
paginate_by = 30
def dispatch(self, request, *args, **kwargs):
# In stock pretix, nothing on this page is session-dependent except for the language and the customer login part,
# so we can cache pretty aggressively if the user is anonymous. Note that we deliberately implement the caching
        # on the view layer, *after* all middlewares have been run, so we have access to the computed locale, as well
# as the login status etc.
cache_allowed = (
settings.CACHE_LARGE_VALUES_ALLOWED and
not getattr(request, 'customer', None) and
not request.user.is_authenticated
)
if not cache_allowed:
return super().dispatch(request, *args, **kwargs)
cache_key_parts = [
request.method,
request.host,
str(request.organizer.pk),
request.get_full_path(),
request.LANGUAGE_CODE,
self.request.sales_channel.identifier,
]
for c, v in request.COOKIES.items():
# If the cookie is not one we know, it might be set by a plugin and we need to include it in the
# cache key to be safe. A known example includes plugins that e.g. store cookie banner state.
if c not in (settings.SESSION_COOKIE_NAME, settings.LANGUAGE_COOKIE_NAME, settings.CSRF_COOKIE_NAME) and not c.startswith('__'):
cache_key_parts.append(f'{c}={v}')
for c, v in request.session.items():
# If the session key is not one we know, it might be set by a plugin and we need to include it in the
# cache key to be safe. A known example would be the pretix-campaigns plugin setting the campaign ID.
if (
not c.startswith('_auth') and
not c.startswith('pretix_auth_') and
not c.startswith('customer_auth_') and
not c.startswith('current_cart_') and
not c.startswith('cart_') and
not c.startswith('payment_') and
c not in ('carts', 'payment', 'pinned_user_agent')
):
cache_key_parts.append(f'{c}={repr(v)}')
cache_key = f'pretix.presale.views.organizer.OrganizerIndex:{hashlib.md5(":".join(cache_key_parts).encode()).hexdigest()}'
cache_timeout = 15
cache = caches[settings.CACHE_LARGE_VALUES_ALIAS]
response = cache.get(cache_key)
if response is not None:
return response
        response = super().dispatch(request, *args, **kwargs)
if response.status_code >= 400:
return response
if hasattr(response, 'render') and callable(response.render):
def _store_to_cache(r):
cache.set(cache_key, r, cache_timeout)
response.add_post_render_callback(_store_to_cache)
else:
cache.set(cache_key, response, cache_timeout)
return response
def get(self, request, *args, **kwargs):
style = request.GET.get("style", request.organizer.settings.event_list_type)
if style == "calendar":
cv = CalendarView()
cv.request = request
return cv.get(request, *args, **kwargs)
elif style == "day":
cv = DayCalendarView()
cv.request = request
return cv.get(request, *args, **kwargs)
elif style == "week":
cv = WeekCalendarView()
cv.request = request
return cv.get(request, *args, **kwargs)
else:
return super().get(request, *args, **kwargs)
def get_queryset(self):
return self._get_event_queryset()
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
for event in ctx['events']:
event.tzname = pytz.timezone(event.cache.get_or_set('timezone', lambda: event.settings.timezone))
if event.has_subevents:
event.daterange = daterange(
event.min_from.astimezone(event.tzname),
(event.max_fromto or event.max_to or event.max_from).astimezone(event.tzname)
)
return ctx
def has_before_after(eventqs, subeventqs, before, after):
eqs = eventqs.filter(is_public=True, live=True, has_subevents=False)
sqs = subeventqs.filter(active=True, is_public=True)
return (
eqs.filter(Q(date_from__lte=before)).exists() or sqs.filter(Q(date_from__lte=before)).exists(),
eqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists() or sqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists()
)
def add_events_for_days(request, baseqs, before, after, ebd, timezones):
qs = baseqs.filter(is_public=True, live=True, has_subevents=False).filter(
Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |
Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |
Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))
).order_by(
'date_from'
).prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
if hasattr(request, 'organizer'):
qs = filter_qs_by_attr(qs, request)
for event in qs:
timezones.add(event.settings.timezones)
tz = pytz.timezone(event.settings.timezone)
datetime_from = event.date_from.astimezone(tz)
date_from = datetime_from.date()
if event.settings.show_date_to and event.date_to:
datetime_to = event.date_to.astimezone(tz)
date_to = event.date_to.astimezone(tz).date()
d = max(date_from, before.date())
while d <= date_to and d <= after.date():
first = d == date_from
ebd[d].append({
'event': event,
'continued': not first,
'time': datetime_from.time().replace(tzinfo=None) if first and event.settings.show_times else None,
'time_end': (
datetime_to.time().replace(tzinfo=None)
if (date_to == date_from or (
date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()
)) and event.settings.show_times
else None
),
'time_end_today': (
datetime_to.time().replace(tzinfo=None)
if date_to == d and event.settings.show_times
else None
),
'url': eventreverse(event, 'presale:event.index'),
'timezone': event.settings.timezone,
})
d += timedelta(days=1)
else:
ebd[date_from].append({
'event': event,
'continued': False,
'time': datetime_from.time().replace(tzinfo=None) if event.settings.show_times else None,
'url': eventreverse(event, 'presale:event.index'),
'timezone': event.settings.timezone,
})
def add_subevents_for_days(qs, before, after, ebd, timezones, event=None, cart_namespace=None, voucher=None):
qs = qs.filter(active=True, is_public=True).filter(
Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |
Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |
Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))
).order_by(
'date_from'
)
quotas_to_compute = []
for se in qs:
if se.presale_is_running:
quotas_to_compute += se.active_quotas
qcache = {}
if quotas_to_compute:
qa = QuotaAvailability()
qa.queue(*quotas_to_compute)
qa.compute(allow_cache=True)
qcache.update(qa.results)
for se in qs:
if qcache:
se._quota_cache = qcache
kwargs = {'subevent': se.pk}
if cart_namespace:
kwargs['cart_namespace'] = cart_namespace
s = event.settings if event else se.event.settings
if s.event_list_available_only:
hide = se.presale_has_ended or (
(not voucher or not voucher.allow_ignore_quota) and
se.best_availability_state is not None and
se.best_availability_state < Quota.AVAILABILITY_RESERVED
)
if hide:
continue
timezones.add(s.timezones)
tz = pytz.timezone(s.timezone)
datetime_from = se.date_from.astimezone(tz)
date_from = datetime_from.date()
if s.show_date_to and se.date_to:
datetime_to = se.date_to.astimezone(tz)
date_to = se.date_to.astimezone(tz).date()
d = max(date_from, before.date())
while d <= date_to and d <= after.date():
first = d == date_from
ebd[d].append({
'continued': not first,
'timezone': s.timezone,
'time': datetime_from.time().replace(tzinfo=None) if first and s.show_times else None,
'time_end': (
datetime_to.time().replace(tzinfo=None)
if (date_to == date_from or (
date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()
)) and s.show_times
else None
),
'time_end_today': (
datetime_to.time().replace(tzinfo=None)
if date_to == d and s.show_times
else None
),
'event': se,
'url': (
eventreverse(se.event, 'presale:event.redeem',
kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'
if voucher
else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)
)
})
d += timedelta(days=1)
else:
ebd[date_from].append({
'event': se,
'continued': False,
'time': datetime_from.time().replace(tzinfo=None) if s.show_times else None,
'url': (
eventreverse(se.event, 'presale:event.redeem',
kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'
if voucher
else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)
),
'timezone': s.timezone,
})
def sort_ev(e):
return e['time'] or time(0, 0, 0), str(e['event'].name)
def days_for_template(ebd, week):
day_format = get_format('WEEK_DAY_FORMAT')
if day_format == 'WEEK_DAY_FORMAT':
day_format = 'SHORT_DATE_FORMAT'
return [
{
'day_formatted': date_format(day, day_format),
'date': day,
'today': day == now().astimezone(get_current_timezone()).date(),
'events': sorted(ebd.get(day), key=sort_ev) if day in ebd else []
}
for day in week.days()
]
def weeks_for_template(ebd, year, month):
calendar.setfirstweekday(0) # TODO: Configurable
return [
[
{
'day': day,
'date': date(year, month, day),
'events': (
sorted(ebd.get(date(year, month, day)), key=sort_ev)
if date(year, month, day) in ebd else None
)
}
if day > 0
else None
for day in week
]
for week in calendar.monthcalendar(year, month)
]
class CalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
template_name = 'pretixpresale/organizers/calendar.html'
def get(self, request, *args, **kwargs):
# redirect old month-year-URLs to new date-URLs
keys = ("month", "year")
if all(k in request.GET for k in keys):
get_params = {k: v for k, v in request.GET.items() if k not in keys}
get_params["date"] = "%s-%s" % (request.GET.get("year"), request.GET.get("month"))
return redirect(self.request.path + "?" + urlencode(get_params))
self._set_month_year()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
try:
_, ndays = calendar.monthrange(self.year, self.month)
except calendar.IllegalMonthError:
raise Http404()
before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=UTC) - timedelta(days=1)
after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=UTC) + timedelta(days=1)
ctx['date'] = date(self.year, self.month, 1)
ctx['before'] = before
ctx['after'] = after
ebd = self._events_by_day(before, after)
ctx['has_before'], ctx['has_after'] = has_before_after(
self.request.organizer.events.filter(
sales_channels__contains=self.request.sales_channel.identifier
),
SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
),
before,
after,
)
ctx['multiple_timezones'] = self._multiple_timezones
ctx['weeks'] = weeks_for_template(ebd, self.year, self.month)
ctx['months'] = [date(self.year, i + 1, 1) for i in range(12)]
ctx['years'] = range(now().year - 2, now().year + 3)
return ctx
def _events_by_day(self, before, after):
ebd = defaultdict(list)
timezones = set()
add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
settings.DATABASE_REPLICA
).filter(
sales_channels__contains=self.request.sales_channel.identifier
), before, after, ebd, timezones)
add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
).prefetch_related(
Prefetch(
'event',
queryset=Event.objects.prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
)), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
self._multiple_timezones = len(timezones) > 1
return ebd
class WeekCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
template_name = 'pretixpresale/organizers/calendar_week.html'
def get(self, request, *args, **kwargs):
# redirect old week-year-URLs to new date-URLs
keys = ("week", "year")
if all(k in request.GET for k in keys):
get_params = {k: v for k, v in request.GET.items() if k not in keys}
get_params["date"] = "%s-W%s" % (request.GET.get("year"), request.GET.get("week"))
return redirect(self.request.path + "?" + urlencode(get_params))
self._set_week_year()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
week = isoweek.Week(self.year, self.week)
before = datetime(
week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=UTC
) - timedelta(days=1)
after = datetime(
week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=UTC
) + timedelta(days=1)
ctx['date'] = week.monday()
ctx['before'] = before
ctx['after'] = after
ebd = self._events_by_day(before, after)
ctx['has_before'], ctx['has_after'] = has_before_after(
self.request.organizer.events.filter(
sales_channels__contains=self.request.sales_channel.identifier
),
SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
),
before,
after,
)
ctx['days'] = days_for_template(ebd, week)
years = (self.year - 1, self.year, self.year + 1)
weeks = []
for year in years:
weeks += [
(date_fromisocalendar(year, i + 1, 1), date_fromisocalendar(year, i + 1, 7))
for i in range(53 if date(year, 12, 31).isocalendar()[1] == 53 else 52)
]
ctx['weeks'] = [[w for w in weeks if w[0].year == year] for year in years]
ctx['week_format'] = get_format('WEEK_FORMAT')
if ctx['week_format'] == 'WEEK_FORMAT':
ctx['week_format'] = WEEK_FORMAT
ctx['short_month_day_format'] = get_format('SHORT_MONTH_DAY_FORMAT')
if ctx['short_month_day_format'] == 'SHORT_MONTH_DAY_FORMAT':
ctx['short_month_day_format'] = SHORT_MONTH_DAY_FORMAT
ctx['multiple_timezones'] = self._multiple_timezones
return ctx
def _events_by_day(self, before, after):
ebd = defaultdict(list)
timezones = set()
add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
settings.DATABASE_REPLICA
).filter(
sales_channels__contains=self.request.sales_channel.identifier
), before, after, ebd, timezones)
add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
).prefetch_related(
Prefetch(
'event',
queryset=Event.objects.prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
)), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
self._multiple_timezones = len(timezones) > 1
return ebd
class DayCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):
template_name = 'pretixpresale/organizers/calendar_day.html'
def _set_date_to_next_event(self):
next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
organizer=self.request.organizer,
live=True,
is_public=True,
date_from__gte=now(),
), self.request).order_by('date_from').first()
next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(
Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
active=True,
is_public=True,
), self.request).select_related('event').order_by('date_from').first()
datetime_from = None
if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):
datetime_from = next_sev.date_from
next_ev = next_sev.event
elif next_ev:
datetime_from = next_ev.date_from
if datetime_from:
self.tz = pytz.timezone(next_ev.settings.timezone)
self.date = datetime_from.astimezone(self.tz).date()
else:
self.tz = self.request.organizer.timezone
self.date = now().astimezone(self.tz).date()
def _set_date(self):
if 'date' in self.request.GET:
self.tz = self.request.organizer.timezone
try:
self.date = dateutil.parser.parse(self.request.GET.get('date')).date()
except ValueError:
self.date = now().astimezone(self.tz).date()
else:
self._set_date_to_next_event()
def get(self, request, *args, **kwargs):
self._set_date()
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
before = datetime(
self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC
) - timedelta(days=1)
after = datetime(
self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC
) + timedelta(days=1)
ctx['date'] = self.date
ctx['cal_tz'] = self.tz
ctx['before'] = before
ctx['after'] = after
ctx['has_before'], ctx['has_after'] = has_before_after(
self.request.organizer.events.filter(
sales_channels__contains=self.request.sales_channel.identifier
),
SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
),
before,
after,
)
ebd = self._events_by_day(before, after)
if not ebd[self.date]:
return ctx
events = ebd[self.date]
shortest_duration = self._get_shortest_duration(events).total_seconds() // 60
# pick the next biggest tick_duration based on shortest_duration, max. 180 minutes
tick_duration = next((d for d in [5, 10, 15, 30, 60, 120, 180] if d >= shortest_duration), 180)
raster_size = min(self._get_raster_size(events), tick_duration)
events, start, end = self._rasterize_events(events, tick_duration=tick_duration, raster_size=raster_size)
calendar_duration = self._get_time_duration(start, end)
ctx["calendar_duration"] = self._format_duration(calendar_duration)
ctx['time_ticks'] = self._get_time_ticks(start, end, tick_duration)
ctx['start'] = datetime.combine(self.date, start)
ctx['raster_size'] = raster_size
# ctx['end'] = end
# size of each grid-column is based on shortest event duration and raster_size
# raster_size is based on start/end times, so it could happen we have a small raster but long running events
# raster_size will always be smaller or equals tick_duration
ctx['raster_to_shortest_ratio'] = round((8 * raster_size) / shortest_duration)
ctx['events'] = events
events_by_series = self._grid_for_template(events)
ctx['collections'] = events_by_series
ctx['no_headlines'] = not any([series for series, events in events_by_series])
ctx['multiple_timezones'] = self._multiple_timezones
return ctx
def _get_raster_size(self, events):
# get best raster-size for min. # of columns in grid
# due to grid-col-calculations in CSS raster_size cannot be bigger than 60 (minutes)
# all start- and end-times (minute-part) except full hour
times = [
e["time"].minute for e in events if e["time"] and e["time"].minute
] + [
e["time_end_today"].minute for e in events if "time_end_today" in e and e["time_end_today"] and e["time_end_today"].minute
]
if not times:
# no time other than full hour, so raster can be 1 hour/60 minutes
return 60
gcd = reduce(math.gcd, set(times))
return next((d for d in [5, 10, 15, 30, 60] if d >= gcd), 60)
def _get_time_duration(self, start, end):
midnight = time(0, 0)
return datetime.combine(
self.date if end != midnight else self.date + timedelta(days=1),
end
) - datetime.combine(
self.date,
start
)
def _format_duration(self, duration):
return ":".join([
"%02d" % i for i in (
(duration.days * 24) + (duration.seconds // 3600),
(duration.seconds // 60) % 60
)
])
def _floor_time(self, t, raster_size=5):
# raster_size based on minutes, might be factored into a helper class with a timedelta as raster
minutes = t.hour * 60 + t.minute
if minutes % raster_size:
minutes = (minutes // raster_size) * raster_size
return t.replace(hour=minutes // 60, minute=minutes % 60)
return t
def _ceil_time(self, t, raster_size=5):
# raster_size based on minutes, might be factored into a helper class with a timedelta as raster
minutes = t.hour * 60 + t.minute
if not minutes % raster_size:
return t
minutes = math.ceil(minutes / raster_size) * raster_size
minute = minutes % 60
hour = minutes // 60
if hour > 23:
hour = hour % 24
return t.replace(minute=minute, hour=hour)
def _rasterize_events(self, events, tick_duration, raster_size=5):
rastered_events = []
start, end = self._get_time_range(events)
start = self._floor_time(start, raster_size=tick_duration)
end = self._ceil_time(end, raster_size=tick_duration)
midnight = time(0, 0)
for e in events:
t = e["time"] or time(0, 0)
e["offset_shift_start"] = 0
if e["continued"]:
e["time_rastered"] = midnight
elif t.minute % raster_size:
e["time_rastered"] = t.replace(minute=(t.minute // raster_size) * raster_size)
e["offset_shift_start"] = t.minute % raster_size
else:
e["time_rastered"] = t
e["offset_shift_end"] = 0
if "time_end_today" in e and e["time_end_today"]:
if e["time_end_today"].minute % raster_size:
minute = math.ceil(e["time_end_today"].minute / raster_size) * raster_size
hour = e["time_end_today"].hour
if minute > 59:
minute = minute % 60
hour = (hour + 1) % 24
e["time_end_today_rastered"] = e["time_end_today"].replace(minute=minute, hour=hour)
e["offset_shift_end"] = raster_size - e["time_end_today"].minute % raster_size
else:
e["time_end_today_rastered"] = e["time_end_today"]
else:
e["time_end_today"] = e["time_end_today_rastered"] = time(0, 0)
e["duration_rastered"] = self._format_duration(datetime.combine(
self.date if e["time_end_today_rastered"] != midnight else self.date + timedelta(days=1),
e["time_end_today_rastered"]
) - datetime.combine(
self.date,
e['time_rastered']
))
e["offset_rastered"] = datetime.combine(self.date, time(0, 0)) + self._get_time_duration(start, e["time_rastered"])
rastered_events.append(e)
return rastered_events, start, end
def _get_shortest_duration(self, events):
midnight = time(0, 0)
durations = [
datetime.combine(
self.date if e.get('time_end_today') and e['time_end_today'] != midnight else self.date + timedelta(days=1),
e['time_end_today'] if e.get('time_end_today') else time(0, 0)
)
-
datetime.combine(
self.date,
time(0, 0) if e['continued'] else (e['time'] or time(0, 0))
)
for e in events
]
return min([d for d in durations])
def _get_time_range(self, events):
if any(e['continued'] for e in events) or any(e['time'] is None for e in events):
starting_at = time(0, 0)
else:
starting_at = min(e['time'] for e in events)
if any(e.get('time_end_today') is None for e in events):
ending_at = time(0, 0)
else:
ending_at = max(e['time_end_today'] for e in events)
return starting_at, ending_at
def _get_time_ticks(self, start, end, tick_duration):
ticks = []
tick_duration = timedelta(minutes=tick_duration)
# convert time to datetime for timedelta calc
start = datetime.combine(self.date, start)
end = datetime.combine(self.date, end)
if end <= start:
end = end + timedelta(days=1)
tick_start = start
offset = datetime.utcfromtimestamp(0)
duration = datetime.utcfromtimestamp(tick_duration.total_seconds())
while tick_start < end:
tick = {
"start": tick_start,
"duration": duration,
"offset": offset,
}
ticks.append(tick)
tick_start += tick_duration
offset += tick_duration
return ticks
def _grid_for_template(self, events):
midnight = time(0, 0)
rows_by_collection = defaultdict(list)
# We sort the events into "collections": all subevents from the same
# event series together and all non-series events into a "None"
# collection. Then, we look if there's already an event in the
# collection that overlaps, in which case we need to split the
# collection into multiple rows.
for counter, e in enumerate(events):
collection = e['event'].event if isinstance(e['event'], SubEvent) else None
placed_in_row = False
for row in rows_by_collection[collection]:
if any(
(e['time_rastered'] < o['time_end_today_rastered'] or o['time_end_today_rastered'] == midnight) and
(o['time_rastered'] < e['time_end_today_rastered'] or e['time_end_today_rastered'] == midnight)
for o in row
):
continue
row.append(e)
placed_in_row = True
break
if not placed_in_row:
rows_by_collection[collection].append([e])
# flatten rows to one stream of events with attribute row
# for better keyboard-tab-order in html
for collection in rows_by_collection:
for i, row in enumerate(rows_by_collection[collection]):
concurrency = i + 1
for e in row:
e["concurrency"] = concurrency
rows_by_collection[collection] = {
"concurrency": len(rows_by_collection[collection]),
"events": sorted([e for row in rows_by_collection[collection] for e in row], key=lambda d: d['time'] or time(0, 0)),
}
def sort_key(c):
collection, row = c
if collection is None:
return ''
else:
return str(collection.name)
return sorted(rows_by_collection.items(), key=sort_key)
def _events_by_day(self, before, after):
ebd = defaultdict(list)
timezones = set()
add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(
settings.DATABASE_REPLICA
).filter(
sales_channels__contains=self.request.sales_channel.identifier
), before, after, ebd, timezones)
add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
event__sales_channels__contains=self.request.sales_channel.identifier
).prefetch_related(
Prefetch(
'event',
queryset=Event.objects.prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
)), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)
self._multiple_timezones = len(timezones) > 1
return ebd
@method_decorator(cache_page(300), name='dispatch')
class OrganizerIcalDownload(OrganizerViewMixin, View):
def get(self, request, *args, **kwargs):
cutoff = now() - timedelta(days=31)
events = list(
filter_qs_by_attr(
self.request.organizer.events.filter(
Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),
is_public=True,
live=True,
has_subevents=False,
sales_channels__contains=self.request.sales_channel.identifier,
),
request
).order_by(
'date_from'
).prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
events += list(
filter_qs_by_attr(
SubEvent.objects.filter(
Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),
event__organizer=self.request.organizer,
event__is_public=True,
event__live=True,
is_public=True,
active=True,
event__sales_channels__contains=self.request.sales_channel.identifier
),
request
).prefetch_related(
Prefetch(
'event',
queryset=Event.objects.prefetch_related(
'_settings_objects',
Prefetch(
'organizer',
queryset=Organizer.objects.prefetch_related('_settings_objects')
)
)
)
).order_by(
'date_from'
)
)
if 'locale' in request.GET and request.GET.get('locale') in dict(settings.LANGUAGES):
with language(request.GET.get('locale'), self.request.organizer.settings.region):
cal = get_public_ical(events)
else:
cal = get_public_ical(events)
resp = HttpResponse(cal.serialize(), content_type='text/calendar')
resp['Content-Disposition'] = 'attachment; filename="{}.ics"'.format(
request.organizer.slug
)
if request.organizer.settings.meta_noindex:
resp['X-Robots-Tag'] = 'noindex'
return resp
|
the-stack_0_17456 | import numpy as np
from vector import Vec4
def u_2(r):
"""
Integration variable u_2 --- solution for r = 2u^1 - 1u^2
"""
return 1. - np.sqrt(1.-r)
def u_3(r):
"""
Integration variable u_3 --- solution for r = 3u^2 - 2u^3
"""
x = pow(1.-2.*r+2.*np.sqrt(r*(r-1.)+0.j),1./3.)
y = (2.-(1.-1.j*np.sqrt(3.))/x-(1.+1.j*np.sqrt(3.))*x)/4.
return y.real
def u_4(r):
"""
Integration variable u_4 --- solution for r = 4u^3 - 3u^4
"""
y = pow(r+np.sqrt(r*r*(1-r)+0.j),1./3.)
x = 3./2.*(r/y+y)
y = np.sqrt(1.+x)
z = (1.+y-np.sqrt(2.-x+2./y))/3.
return z.real
def f(x, a, r):
"""
The equation ax^(a-1) - (a-1)x^a - r = 0
To be used as argument in solver
"""
return a*x**(a-1) - (a-1)*x**a - r
def fp(x, a):
"""
First derivative of f
"""
return a*(a-1)*(x**(a-2) - x**(a-1))
def fpp(x, a):
"""
    Second derivative of f
"""
return a*(a-1)*((a-2)*x**(a-3) - (a-1)*x**(a-2))
def get_u(a, r):
"""
Solve f for u
    a = n + 1 - i in Simon's notation
The lowest order case is n=3 and i = 2, i.e. a = 2
"""
if a < 2 : raise Exception("a = {} not implemented".format(a))
from scipy import optimize
if a == 2: return u_2(r)
elif a == 3: return u_3(r)
elif a == 4: return u_4(r)
else:
return optimize.newton(lambda x : f(x, a, r), r, fprime=lambda x: fp(x,a), fprime2=lambda x: fpp(x,a))
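# Sanity-check sketch: each closed-form solution u_a(r) should invert f, i.e. satisfy
# a*u**(a-1) - (a-1)*u**a == r. Only the analytic branches a = 2, 3, 4 are exercised
# here; larger a would fall through to the Newton solver inside get_u.
def _check_u_closed_forms(tol=1e-6):
    """Return True if u_2, u_3 and u_4 reproduce their defining equation within tol."""
    for a in (2, 3, 4):
        for r in (0.1, 0.5, 0.9):
            u = get_u(a, r)
            if abs(f(u, a, r)) > tol:
                return False
    return True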
def rho(Min, Mout, mext=0.0):
"""
Helper function for mass term eq (5)
"""
M2 = Min*Min
return 0.125 * np.sqrt( (M2 - (Mout+mext)*(Mout+mext)) * (M2 - (Mout-mext)*(Mout-mext))) / M2
def rho_massless(Min, Mout):
"""
Helper function for mass term eq (5)
"""
M2 = Min*Min
M22 = Mout*Mout
return 0.125 * np.sqrt( M2*M2 - 2*M2*M22 + M22*M22) / M2
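# Note: for mext == 0 the two helpers coincide, since
# (M2 - Mout**2)**2 == M2**2 - 2*M2*Mout**2 + Mout**4;
# rho_massless merely drops the external-mass argument.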
def generate_point(pa,pb,rans):
# The final momenta
MOM = [ -rans[-1]*pa, -rans[-2]*pb ]
_Q = -MOM[0]-MOM[1]
# Storage of intermediate Masses, Qs
M = [_Q.M()]
ALLQ =[_Q]
U, R = [], [] # Store the u and random numbers r
for i in range(2, NP+1):
# print("now i = {}".format(i))
if i < NP:
# print("Solving for u_{}, M_{}".format(i, i))
r = rans[3*(i-2)+2]
u = get_u(NP+1-i, r)
U.append(u)
R.append(r)
# Simon's paper must be wrong here, check
_M = np.sqrt(u*_Q.M2()) # M_i^2
else:
_M = 0
# print("Got M_{}={}".format(i, _M))
M.append(_M)
q = 4*M[-2] * rho_massless(M[-2], M[-1])
# Random numbers for costheta and phi
costheta = 2*rans[3*(i-2)] - 1
phi = 2.*np.pi*rans[3*(i-2)+1]
# Generated 4 Vectors
# p_(i-1)
sintheta = np.sqrt(1. - costheta*costheta)
p = q*Vec4(1, np.cos(phi)*sintheta, np.sin(phi)*sintheta, costheta)
# print("p_{} = {} {}".format(i+1, p, np.sqrt(abs(p.M2()))))
# now Q_i
_Q = Vec4(np.sqrt(q*q + M[-1]*M[-1]), -p.px, -p.py, -p.pz)
# print("Q_{} = {} {}".format(i, _Q, np.sqrt(abs(_Q.M2()))))
p = ALLQ[i-2].BoostBack(p)
_Q = ALLQ[i-2].BoostBack(_Q)
# print ALLQ[i-2]-_Q-p
# print "p boosted ",p,p.M2()
# print "Q boosted ",_Q,np.sqrt(abs(_Q.M2()))
# print "sum p+Q ",(p+_Q),(p+_Q).M()
MOM.append(p)
ALLQ.append(_Q)
MOM.append(_Q)
return MOM
def generate_weight(pa,pb,mom):
Q = -mom[0]-mom[1]
rans = []
for i in range(2, NP+1):
# print("now i = {}".format(i))
p = Q.Boost(mom[i])
# print 'p = ',p
costh = p[3]/p.P()
phi = p.Phi()
if phi < 0: phi += 2.*np.pi
# print "phi = ",phi
rans.append((1+costh)/2.)
rans.append(phi/(2.*np.pi))
if i < NP:
m = (Q-mom[i]).M2() / Q.M2()
u = f(m, NP+1-i, 0)
# print Q.M2(),(Q-mom[i]).M2(),(mom[3]+mom[4]).M2(),m,u
# print Q
Q -= mom[i]
# print Q
rans.append(u)
else:
_M = 0
rans.append(-(mom[1]*pa)/(pa*pb))
rans.append(-(mom[0]*pb)/(pa*pb))
return rans
def ME_ESW(P):
"""
Calculate the matrix element for g(p1) g(p2) --> g(p3) g(p4) g(p5)
Using eq (7.51) in QCD for collider physics.
P ... list of 4 momentum vectors
"""
from itertools import permutations
permutations=list(permutations([0,1,2,3,4])) # All 120 permutations
# M = const * A * B / C
# A = sum_permutations {1 2} ^ 4
A = 0
B = 0
for i in permutations:
A+= (P[i[0]] * P[i[1]])**4
B+= (P[i[0]] * P[i[1]]) * (P[i[1]] * P[i[2]]) * (P[i[2]] * P[i[3]]) * (P[i[3]] * P[i[4]]) * (P[i[4]] * P[i[0]])
C = 1
for i in range(5):
for j in range(5):
if i <j:
# print("i={}, j={}: {} * {} = {}".format(i, j, P[i], P[j], P[i]*P[j]))
C *= P[i]*P[j]
return A*B/C
def ME_PLB(P):
"""
Calculate the matrix element for g(p1) g(p2) --> g(p3) g(p4) g(p5)
Using eq (18) in Berends et al, Phys Let B 103 (1981) p 124 ff.
P ... list of 4 momentum vectors
"""
from itertools import permutations, combinations
permutations= [
(0,1,2,3,4),
(0,1,2,4,3),
(0,1,3,2,4),
(0,1,3,4,2),
(0,1,4,2,3),
(0,1,4,3,2),
(0,2,1,3,4),
(0,2,1,4,3),
(0,2,3,1,4),
(0,2,4,1,3),
(0,3,1,2,4),
(0,3,2,1,4),
]
kpermutations = list(combinations([0,1,2,3,4], 2))
# M = const * A * B / C
# A = sum_permutations {1 2} ^ 4
A = 0
for i in kpermutations:
A+= (P[i[0]] * P[i[1]])**4
B = 0
for i in permutations:
# print("(k{} * k{})^4".format(i[0]+1, i[1]+1))
B+= (P[i[0]] * P[i[1]]) * (P[i[1]] * P[i[2]]) * (P[i[2]] * P[i[3]]) * (P[i[3]] * P[i[4]]) * (P[i[4]] * P[i[0]])
C = 1
for i in range(5):
for j in range(5):
if i <j:
# print("i={}, j={}: {} * {} = {}".format(i, j, P[i], P[j], P[i]*P[j]))
C *= P[i]*P[j]
return A*B/C
if __name__ == "__main__":
import sys
np.random.seed(1)
pa = Vec4(7000,0,0,7000)
pb = Vec4(7000,0,0,-7000)
if len(sys.argv) <2:
print("Please specify the number of external particles, exiting")
sys.exit(1)
NP = int(sys.argv[1]) # Number of external particles
if NP<3:
print("NP should be >=3 for the whole thing to make sense, exiting")
sys.exit(1)
rans = [ np.random.rand() for i in range(0,3*NP-4+2) ]
moms = generate_point(pa,pb,rans)
msum = Vec4()
for num, mom in enumerate(moms):
msum += mom
print("p_{} = {} {}".format(num+1, mom, mom.M2()))
print("Mom sum {}".format(msum))
ranc = generate_weight(pa,pb,moms)
for r in range(0,len(rans)):
print("r_{} = {} -> dev. {}".format(r, ranc[r], ranc[r]/rans[r]-1))
print("120*Berends: {:.20f}".format(120*ME_PLB(moms)))
print("Ellis: {:.20f}".format(ME_ESW(moms)))
|
the-stack_0_17457 | #!/usr/bin/env python3
"""Parser for U.S. Energy Information Administration, https://www.eia.gov/ .
Aggregates and standardizes data from most of the US ISOs,
and exposes them via a unified API.
Requires an API key, set in the EIA_KEY environment variable. Get one here:
https://www.eia.gov/opendata/register.php
"""
import datetime
import requests
from dateutil import parser, tz
from .ENTSOE import merge_production_outputs
from .lib.utils import get_token
from .lib.validation import validate
#Reverse exchanges need to be multiplied by -1, since they are reported in the opposite direction
REVERSE_EXCHANGES = [
'US-CA->MX-BC',
'MX-BC->US-CAL-CISO',
'CA-SK->US-CENT-SWPP',
'CA-MB->US-MIDW-MISO',
'CA-ON->US-MIDW-MISO',
'CA-QC->US-NE-ISNE',
'CA-NB->US-NE-ISNE',
'CA-BC->US-NW-BPAT',
'CA-AB->US-NW-NWMT',
'CA-QC->US-NY-NYIS',
'CA-ON->US-NY-NYIS',
'MX-NE->US-TEX-ERCO',
'MX-NO->US-TEX-ERCO',
'US-SW-PNM->US-SW-SRP' # For some reason EBA.SRP-PNM.ID.H exists in EIA, but PNM-SRP does not. Probably because it is unidirectional
]
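# Illustration: an EIA exchange series is named from the perspective of its first BA,
# e.g. 'EBA.CISO-CFE.ID.H' reports the CISO->CFE direction, whereas the corresponding
# electricitymap key 'MX-BC->US-CAL-CISO' points the other way; hence the -1 factor
# mentioned above for every key in this list.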
NEGATIVE_PRODUCTION_THRESHOLDS = {
'default': -10,
'zoneOverrides': {
'US-SW-SRP': {
'coal': -50,
'unknown': -50
},
'US-CAL-CISO': {
'unknown': -50,
'solar': -100
},
'US-SE-AEC': {
'coal': -50,
'gas': -20
},
'US-CAR-CPLE': {
'coal': -20
},
'US-NW-AVRN': {
'wind': -20
}
}
}
EXCHANGES = {
#Old exchanges with old zones, to be updated/removed once clients have had time to switch
'US-CA->MX-BC': 'EBA.CISO-CFE.ID.H',
'US-BPA->US-IPC': 'EBA.BPAT-IPCO.ID.H',
'US-SPP->US-TX': 'EBA.SWPP-ERCO.ID.H',
'US-MISO->US-PJM': 'EBA.MISO-PJM.ID.H',
'US-MISO->US-SPP': 'EBA.MISO-SWPP.ID.H',
'US-NEISO->US-NY': 'EBA.ISNE-NYIS.ID.H',
'US-NY->US-PJM': 'EBA.NYIS-PJM.ID.H',
#Exchanges to non-US BAs
'MX-BC->US-CAL-CISO': 'EBA.CISO-CFE.ID.H', #Unable to verify if MX-BC is correct
'CA-SK->US-CENT-SWPP': 'EBA.SWPP-SPC.ID.H',
'CA-MB->US-MIDW-MISO': 'EBA.MISO-MHEB.ID.H',
'CA-ON->US-MIDW-MISO': 'EBA.MISO-IESO.ID.H',
'CA-QC->US-NE-ISNE': 'EBA.ISNE-HQT.ID.H',
'CA-NB->US-NE-ISNE': 'EBA.ISNE-NBSO.ID.H',
'CA-BC->US-NW-BPAT': 'EBA.BPAT-BCHA.ID.H',
'CA-AB->US-NW-NWMT': 'EBA.NWMT-AESO.ID.H',
'CA-QC->US-NY-NYIS': 'EBA.NYIS-HQT.ID.H',
'CA-ON->US-NY-NYIS': 'EBA.NYIS-IESO.ID.H',
'MX-NE->US-TEX-ERCO': 'EBA.ERCO-CEN.ID.H', #Unable to verify if MX-NE is correct
'MX-NO->US-TEX-ERCO': 'EBA.ERCO-CFE.ID.H', #Unable to verify if MX-NO is correct
#Exchanges to other US balancing authorities
'US-CAL-BANC->US-NW-BPAT': 'EBA.BANC-BPAT.ID.H',
'US-CAL-BANC->US-CAL-CISO': 'EBA.BANC-CISO.ID.H',
'US-CAL-BANC->US-CAL-TIDC': 'EBA.BANC-TIDC.ID.H',
'US-CAL-CISO->US-SW-AZPS': 'EBA.CISO-AZPS.ID.H',
'US-CAL-CISO->US-NW-BPAT': 'EBA.CISO-BPAT.ID.H',
'US-CAL-CISO->US-CAL-IID': 'EBA.CISO-IID.ID.H',
'US-CAL-CISO->US-CAL-LDWP': 'EBA.CISO-LDWP.ID.H',
'US-CAL-CISO->US-NW-NEVP': 'EBA.CISO-NEVP.ID.H',
'US-CAL-CISO->US-NW-PACW': 'EBA.CISO-PACW.ID.H',
'US-CAL-CISO->US-SW-SRP': 'EBA.CISO-SRP.ID.H',
'US-CAL-CISO->US-CAL-TIDC': 'EBA.CISO-TIDC.ID.H',
'US-CAL-CISO->US-SW-WALC': 'EBA.CISO-WALC.ID.H',
'US-CAL-IID->US-SW-AZPS': 'EBA.IID-AZPS.ID.H',
'US-CAL-IID->US-SW-WALC': 'EBA.IID-WALC.ID.H',
'US-CAL-LDWP->US-SW-AZPS': 'EBA.LDWP-AZPS.ID.H',
'US-CAL-LDWP->US-NW-BPAT': 'EBA.LDWP-BPAT.ID.H',
'US-CAL-LDWP->US-NW-NEVP': 'EBA.LDWP-NEVP.ID.H',
'US-CAL-LDWP->US-NW-PACE': 'EBA.LDWP-PACE.ID.H',
'US-CAL-LDWP->US-SW-WALC': 'EBA.LDWP-WALC.ID.H',
'US-CAR-CPLE->US-CAR-YAD': 'EBA.CPLE-YAD.ID.H',
'US-CAR-CPLE->US-CAR-DUK': 'EBA.CPLE-DUK.ID.H',
'US-CAR-CPLE->US-MIDA-PJM': 'EBA.CPLE-PJM.ID.H',
'US-CAR-CPLE->US-CAR-SCEG': 'EBA.CPLE-SCEG.ID.H',
'US-CAR-CPLE->US-CAR-SC': 'EBA.CPLE-SC.ID.H',
'US-CAR-CPLW->US-CAR-DUK': 'EBA.CPLW-DUK.ID.H',
'US-CAR-CPLW->US-MIDA-PJM': 'EBA.CPLW-PJM.ID.H',
'US-CAR-CPLW->US-TEN-TVA': 'EBA.CPLW-TVA.ID.H',
'US-CAR-DUK->US-CAR-YAD': 'EBA.DUK-YAD.ID.H',
'US-CAR-DUK->US-MIDA-PJM': 'EBA.DUK-PJM.ID.H',
'US-CAR-DUK->US-CAR-SCEG': 'EBA.DUK-SCEG.ID.H',
'US-CAR-DUK->US-CAR-SC': 'EBA.DUK-SC.ID.H',
'US-CAR-DUK->US-SE-SEPA': 'EBA.DUK-SEPA.ID.H',
'US-CAR-DUK->US-SE-SOCO': 'EBA.DUK-SOCO.ID.H',
'US-CAR-DUK->US-TEN-TVA': 'EBA.DUK-TVA.ID.H',
'US-CAR-SC->US-CAR-SCEG': 'EBA.SC-SCEG.ID.H',
'US-CAR-SC->US-SE-SEPA': 'EBA.SC-SEPA.ID.H',
'US-CAR-SC->US-SE-SOCO': 'EBA.SC-SOCO.ID.H',
'US-CAR-SCEG->US-SE-SEPA': 'EBA.SCEG-SEPA.ID.H',
'US-CAR-SCEG->US-SE-SOCO': 'EBA.SCEG-SOCO.ID.H',
'US-CENT-SPA->US-MIDW-AECI': 'EBA.SPA-AECI.ID.H',
'US-CENT-SPA->US-MIDW-MISO': 'EBA.SPA-MISO.ID.H',
'US-CENT-SPA->US-CENT-SWPP': 'EBA.SPA-SWPP.ID.H',
'US-CENT-SWPP->US-MIDW-AECI': 'EBA.SWPP-AECI.ID.H',
'US-CENT-SWPP->US-SW-EPE': 'EBA.SWPP-EPE.ID.H',
'US-CENT-SWPP->US-TEX-ERCO': 'EBA.SWPP-ERCO.ID.H',
'US-CENT-SWPP->US-MIDW-MISO': 'EBA.SWPP-MISO.ID.H',
'US-CENT-SWPP->US-NW-PSCO': 'EBA.SWPP-PSCO.ID.H',
'US-CENT-SWPP->US-SW-PNM': 'EBA.SWPP-PNM.ID.H',
'US-CENT-SWPP->US-NW-WACM': 'EBA.SWPP-WACM.ID.H',
'US-CENT-SWPP->US-NW-WAUW': 'EBA.SWPP-WAUW.ID.H',
'US-FLA-FMPP->US-FLA-FPC': 'EBA.FMPP-FPC.ID.H',
'US-FLA-FMPP->US-FLA-FPL': 'EBA.FMPP-FPL.ID.H',
'US-FLA-FMPP->US-FLA-JEA': 'EBA.FMPP-JEA.ID.H',
'US-FLA-FMPP->US-FLA-TEC': 'EBA.FMPP-TEC.ID.H',
'US-FLA-FPC->US-FLA-TAL': 'EBA.FPC-TAL.ID.H',
'US-FLA-FPC->US-FLA-FPL': 'EBA.FPC-FPL.ID.H',
'US-FLA-FPC->US-FLA-GVL': 'EBA.FPC-GVL.ID.H',
'US-FLA-FPC->US-FLA-SEC': 'EBA.FPC-SEC.ID.H',
'US-FLA-FPC->US-SE-SOCO': 'EBA.FPC-SOCO.ID.H',
'US-FLA-FPC->US-FLA-TEC': 'EBA.FPC-TEC.ID.H',
'US-FLA-FPC->US-FLA-NSB': 'EBA.FPC-NSB.ID.H',
'US-FLA-FPL->US-FLA-HST': 'EBA.FPL-HST.ID.H',
'US-FLA-FPL->US-FLA-GVL': 'EBA.FPL-GVL.ID.H',
'US-FLA-FPL->US-FLA-JEA': 'EBA.FPL-JEA.ID.H',
'US-FLA-FPL->US-FLA-SEC': 'EBA.FPL-SEC.ID.H',
'US-FLA-FPL->US-SE-SOCO': 'EBA.FPL-SOCO.ID.H',
'US-FLA-FPL->US-FLA-TEC': 'EBA.FPL-TEC.ID.H',
'US-FLA-FPL->US-FLA-NSB': 'EBA.FPL-NSB.ID.H',
'US-FLA-JEA->US-FLA-SEC': 'EBA.JEA-SEC.ID.H',
'US-FLA-SEC->US-FLA-TEC': 'EBA.SEC-TEC.ID.H',
'US-FLA-TAL->US-SE-SOCO': 'EBA.TAL-SOCO.ID.H',
'US-MIDA-OVEC->US-MIDW-LGEE': 'EBA.OVEC-LGEE.ID.H',
'US-MIDA-OVEC->US-MIDA-PJM': 'EBA.OVEC-PJM.ID.H',
'US-MIDA-PJM->US-MIDW-LGEE': 'EBA.PJM-LGEE.ID.H',
'US-MIDA-PJM->US-MIDW-MISO': 'EBA.PJM-MISO.ID.H',
'US-MIDA-PJM->US-NY-NYIS': 'EBA.PJM-NYIS.ID.H',
'US-MIDA-PJM->US-TEN-TVA': 'EBA.PJM-TVA.ID.H',
'US-MIDW-AECI->US-MIDW-MISO': 'EBA.AECI-MISO.ID.H',
'US-MIDW-AECI->US-TEN-TVA': 'EBA.AECI-TVA.ID.H',
'US-MIDW-EEI->US-MIDW-LGEE': 'EBA.EEI-LGEE.ID.H',
'US-MIDW-EEI->US-MIDW-MISO': 'EBA.EEI-MISO.ID.H',
'US-MIDW-EEI->US-TEN-TVA': 'EBA.EEI-TVA.ID.H',
'US-MIDW-GLHB->US-MIDW-LGEE': 'EBA.GLHB-LGEE.ID.H',
'US-MIDW-GLHB->US-MIDW-MISO': 'EBA.GLHB-MISO.ID.H',
'US-MIDW-LGEE->US-MIDW-MISO': 'EBA.LGEE-MISO.ID.H',
'US-MIDW-LGEE->US-TEN-TVA': 'EBA.LGEE-TVA.ID.H',
'US-MIDW-MISO->US-SE-AEC': 'EBA.MISO-AEC.ID.H',
'US-MIDW-MISO->US-SE-SOCO': 'EBA.MISO-SOCO.ID.H',
'US-MIDW-MISO->US-TEN-TVA': 'EBA.MISO-TVA.ID.H',
'US-NE-ISNE->US-NY-NYIS': 'EBA.ISNE-NYIS.ID.H',
'US-NW-AVA->US-NW-BPAT': 'EBA.AVA-BPAT.ID.H',
'US-NW-AVA->US-NW-IPCO': 'EBA.AVA-IPCO.ID.H',
'US-NW-AVA->US-NW-NWMT': 'EBA.AVA-NWMT.ID.H',
'US-NW-AVA->US-NW-PACW': 'EBA.AVA-PACW.ID.H',
'US-NW-AVA->US-NW-CHPD': 'EBA.AVA-CHPD.ID.H',
'US-NW-AVA->US-NW-GCPD': 'EBA.AVA-GCPD.ID.H',
'US-NW-AVRN->US-NW-BPAT': 'EBA.AVRN-BPAT.ID.H',
'US-NW-AVRN->US-NW-PACW': 'EBA.AVRN-PACW.ID.H',
'US-NW-BPAT->US-NW-TPWR': 'EBA.BPAT-TPWR.ID.H',
'US-NW-BPAT->US-NW-GRID': 'EBA.BPAT-GRID.ID.H',
'US-NW-BPAT->US-NW-IPCO': 'EBA.BPAT-IPCO.ID.H',
'US-NW-BPAT->US-NW-NEVP': 'EBA.BPAT-NEVP.ID.H',
'US-NW-BPAT->US-NW-NWMT': 'EBA.BPAT-NWMT.ID.H',
'US-NW-BPAT->US-NW-DOPD': 'EBA.BPAT-DOPD.ID.H',
'US-NW-BPAT->US-NW-PACW': 'EBA.BPAT-PACW.ID.H',
'US-NW-BPAT->US-NW-PGE': 'EBA.BPAT-PGE.ID.H',
'US-NW-BPAT->US-NW-CHPD': 'EBA.BPAT-CHPD.ID.H',
'US-NW-BPAT->US-NW-GCPD': 'EBA.BPAT-GCPD.ID.H',
'US-NW-BPAT->US-NW-PSEI': 'EBA.BPAT-PSEI.ID.H',
'US-NW-BPAT->US-NW-SCL': 'EBA.BPAT-SCL.ID.H',
'US-NW-CHPD->US-NW-DOPD': 'EBA.CHPD-DOPD.ID.H',
'US-NW-CHPD->US-NW-PSEI': 'EBA.CHPD-PSEI.ID.H',
'US-NW-GCPD->US-NW-PACW': 'EBA.GCPD-PACW.ID.H',
'US-NW-GCPD->US-NW-PSEI': 'EBA.GCPD-PSEI.ID.H',
'US-NW-GWA->US-NW-NWMT': 'EBA.GWA-NWMT.ID.H',
'US-NW-IPCO->US-NW-NEVP': 'EBA.IPCO-NEVP.ID.H',
'US-NW-IPCO->US-NW-NWMT': 'EBA.IPCO-NWMT.ID.H',
'US-NW-IPCO->US-NW-PACE': 'EBA.IPCO-PACE.ID.H',
'US-NW-IPCO->US-NW-PACW': 'EBA.IPCO-PACW.ID.H',
'US-NW-NEVP->US-NW-PACE': 'EBA.NEVP-PACE.ID.H',
'US-NW-NEVP->US-SW-WALC': 'EBA.NEVP-WALC.ID.H',
'US-NW-NWMT->US-NW-WWA': 'EBA.NWMT-WWA.ID.H',
'US-NW-NWMT->US-NW-PACE': 'EBA.NWMT-PACE.ID.H',
'US-NW-NWMT->US-NW-WAUW': 'EBA.NWMT-WAUW.ID.H',
'US-NW-PACE->US-SW-AZPS': 'EBA.PACE-AZPS.ID.H',
'US-NW-PACE->US-NW-PACW': 'EBA.PACE-PACW.ID.H',
'US-NW-PACE->US-NW-WACM': 'EBA.PACE-WACM.ID.H',
'US-NW-PACW->US-NW-PGE': 'EBA.PACW-PGE.ID.H',
'US-NW-PSCO->US-SW-PNM': 'EBA.PSCO-PNM.ID.H',
'US-NW-PSCO->US-NW-WACM': 'EBA.PSCO-WACM.ID.H',
'US-NW-PSEI->US-NW-TPWR': 'EBA.PSEI-TPWR.ID.H',
'US-NW-PSEI->US-NW-SCL': 'EBA.PSEI-SCL.ID.H',
'US-NW-WACM->US-SW-AZPS': 'EBA.WACM-AZPS.ID.H',
'US-NW-WACM->US-SW-PNM': 'EBA.WACM-PNM.ID.H',
'US-NW-WACM->US-SW-WALC': 'EBA.WACM-WALC.ID.H',
'US-NW-WACM->US-NW-WAUW': 'EBA.WACM-WAUW.ID.H',
'US-SE-AEC->US-SE-SOCO': 'EBA.AEC-SOCO.ID.H',
'US-SE-SEPA->US-SE-SOCO': 'EBA.SEPA-SOCO.ID.H',
'US-SE-SOCO->US-TEN-TVA': 'EBA.SOCO-TVA.ID.H',
'US-SW-AZPS->US-SW-GRMA': 'EBA.AZPS-GRMA.ID.H',
'US-SW-AZPS->US-SW-PNM': 'EBA.AZPS-PNM.ID.H',
'US-SW-AZPS->US-SW-SRP': 'EBA.AZPS-SRP.ID.H',
'US-SW-AZPS->US-SW-TEPC': 'EBA.AZPS-TEPC.ID.H',
'US-SW-AZPS->US-SW-WALC': 'EBA.AZPS-WALC.ID.H',
'US-SW-DEAA->US-SW-SRP': 'EBA.DEAA-SRP.ID.H',
'US-SW-EPE->US-SW-PNM': 'EBA.EPE-PNM.ID.H',
'US-SW-EPE->US-SW-TEPC': 'EBA.EPE-TEPC.ID.H',
'US-SW-GRIF->US-SW-WALC': 'EBA.GRIF-WALC.ID.H',
'US-SW-HGMA->US-SW-SRP': 'EBA.HGMA-SRP.ID.H',
'US-SW-PNM->US-SW-TEPC': 'EBA.PNM-TEPC.ID.H',
'US-SW-PNM->US-SW-SRP': 'EBA.SRP-PNM.ID.H',
'US-SW-SRP->US-SW-TEPC': 'EBA.SRP-TEPC.ID.H',
'US-SW-SRP->US-SW-WALC': 'EBA.SRP-WALC.ID.H',
'US-SW-TEPC->US-SW-WALC': 'EBA.TEPC-WALC.ID.H'
}
# based on https://www.eia.gov/beta/electricity/gridmonitor/dashboard/electric_overview/US48/US48
# or https://www.eia.gov/opendata/qb.php?category=3390101
# List includes regions and Balancing Authorities.
REGIONS = {
#Old regions, to be updated/removed once clients have had time to switch
'US-BPA': 'BPAT',
'US-CA': 'CAL',
'US-CAR': 'CAR',
'US-DUK': 'DUK', #Duke Energy Carolinas
'US-SPP': 'CENT',
'US-FL': 'FLA',
'US-PJM': 'MIDA',
'US-MISO': 'MIDW',
'US-NEISO': 'NE',
'US-NEVP': 'NEVP', #Nevada Power Company
'US-NY': 'NY',
'US-NW': 'NW',
'US-SC': 'SC', #South Carolina Public Service Authority
'US-SE': 'SE',
'US-SEC': 'SEC',
'US-SOCO': 'SOCO', #Southern Company Services Inc - Trans
'US-SWPP': 'SWPP', #Southwest Power Pool
'US-SVERI': 'SW',
'US-TN': 'TEN',
'US-TX': 'TEX',
#New regions - EIA
'US-CAL-BANC': 'BANC', #Balancing Authority Of Northern California
'US-CAL-CISO': 'CISO', #California Independent System Operator
'US-CAL-IID': 'IID', #Imperial Irrigation District
'US-CAL-LDWP': 'LDWP', #Los Angeles Department Of Water And Power
'US-CAL-TIDC': 'TIDC', #Turlock Irrigation District
'US-CAR-CPLE': 'CPLE', #Duke Energy Progress East
'US-CAR-CPLW': 'CPLW', #Duke Energy Progress West
'US-CAR-DUK': 'DUK', #Duke Energy Carolinas
'US-CAR-SC': 'SC', #South Carolina Public Service Authority
'US-CAR-SCEG': 'SCEG', #South Carolina Electric & Gas Company
'US-CAR-YAD': 'YAD', #Alcoa Power Generating, Inc. - Yadkin Division
'US-CENT-SPA': 'SPA', #Southwestern Power Administration
'US-CENT-SWPP': 'SWPP', #Southwest Power Pool
'US-FLA-FMPP': 'FMPP', #Florida Municipal Power Pool
'US-FLA-FPC': 'FPC', #Duke Energy Florida Inc
'US-FLA-FPL': 'FPL', #Florida Power & Light Company
'US-FLA-GVL': 'GVL', #Gainesville Regional Utilities
'US-FLA-HST': 'HST', #City Of Homestead
'US-FLA-JEA': 'JEA', #Jea
'US-FLA-NSB': 'NSB', #New Smyrna Beach, Utilities Commission Of
'US-FLA-SEC': 'SEC', #Seminole Electric Cooperative
'US-FLA-TAL': 'TAL', #City Of Tallahassee
'US-FLA-TEC': 'TEC', #Tampa Electric Company
'US-MIDA-OVEC': 'OVEC', #Ohio Valley Electric Corporation
'US-MIDA-PJM': 'PJM', #Pjm Interconnection, Llc
'US-MIDW-AECI': 'AECI', #Associated Electric Cooperative, Inc.
'US-MIDW-EEI': 'EEI', #Electric Energy, Inc.
'US-MIDW-GLHB': 'GLHB', #GridLiance
'US-MIDW-LGEE': 'LGEE', #Louisville Gas And Electric Company And Kentucky Utilities
'US-MIDW-MISO': 'MISO', #Midcontinent Independent Transmission System Operator, Inc..
'US-NE-ISNE': 'ISNE', #Iso New England Inc.
'US-NW-AVA': 'AVA', #Avista Corporation
'US-NW-AVRN': 'AVRN', #Avangrid Renewables Cooperative
'US-NW-BPAT': 'BPAT', #Bonneville Power Administration
'US-NW-CHPD': 'CHPD', #Public Utility District No. 1 Of Chelan County
'US-NW-DOPD': 'DOPD', #Pud No. 1 Of Douglas County
'US-NW-GCPD': 'GCPD', #Public Utility District No. 2 Of Grant County, Washington
'US-NW-GRID': 'GRID', #Gridforce Energy Management, Llc
'US-NW-GWA': 'GWA', #Naturener Power Watch, Llc (Gwa)
'US-NW-IPCO': 'IPCO', #Idaho Power Company
'US-NW-NEVP': 'NEVP', #Nevada Power Company
'US-NW-NWMT': 'NWMT', #Northwestern Energy (Nwmt)
'US-NW-PACE': 'PACE', #Pacificorp - East
'US-NW-PACW': 'PACW', #Pacificorp - West
'US-NW-PGE': 'PGE', #Portland General Electric Company
'US-NW-PSCO': 'PSCO', #Public Service Company Of Colorado
'US-NW-PSEI': 'PSEI', #Puget Sound Energy
'US-NW-SCL': 'SCL', #Seattle City Light
'US-NW-TPWR': 'TPWR', #City Of Tacoma, Department Of Public Utilities, Light Division
'US-NW-WACM': 'WACM', #Western Area Power Administration - Rocky Mountain Region
'US-NW-WAUW': 'WAUW', #Western Area Power Administration Ugp West
'US-NW-WWA': 'WWA', #Naturener Wind Watch, Llc
'US-NY-NYIS': 'NYIS', #New York Independent System Operator
'US-SE-AEC': 'AEC', #Powersouth Energy Cooperative
'US-SE-SEPA': 'SEPA', #Southeastern Power Administration
'US-SE-SOCO': 'SOCO', #Southern Company Services, Inc. - Trans
'US-SW-AZPS': 'AZPS', #Arizona Public Service Company
'US-SW-DEAA': 'DEAA', #Arlington Valley, Llc - Avba
'US-SW-EPE': 'EPE', #El Paso Electric Company
'US-SW-GRIF': 'GRIF', #Griffith Energy, Llc
'US-SW-GRMA': 'GRMA', #Gila River Power, Llc
'US-SW-HGMA': 'HGMA', #New Harquahala Generating Company, Llc - Hgba
'US-SW-PNM': 'PNM', #Public Service Company Of New Mexico
'US-SW-SRP': 'SRP', #Salt River Project
'US-SW-TEPC': 'TEPC', #Tucson Electric Power Company
'US-SW-WALC': 'WALC', #Western Area Power Administration - Desert Southwest Region
'US-TEN-TVA': 'TVA', #Tennessee Valley Authority
'US-TEX-ERCO': 'ERCO' #Electric Reliability Council Of Texas, Inc.
}
TYPES = {
# 'biomass': 'BM', # not currently supported
'coal': 'COL',
'gas': 'NG',
'hydro': 'WAT',
'nuclear': 'NUC',
'oil': 'OIL',
'unknown': 'OTH',
'solar': 'SUN',
'wind': 'WND',
}
PRODUCTION_SERIES = 'EBA.%s-ALL.NG.H'
PRODUCTION_MIX_SERIES = 'EBA.%s-ALL.NG.%s.H'
DEMAND_SERIES = 'EBA.%s-ALL.D.H'
FORECAST_SERIES = 'EBA.%s-ALL.DF.H'
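# Illustrative sketch, not part of the original parser: EIA series IDs are built
# from the templates above. For example, hourly solar generation for the CAISO
# balancing authority:
_EXAMPLE_SERIES_ID = PRODUCTION_MIX_SERIES % (REGIONS['US-CAL-CISO'], TYPES['solar'])
# -> 'EBA.CISO-ALL.NG.SUN.H'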
def fetch_consumption_forecast(zone_key, session=None, target_datetime=None, logger=None):
return _fetch_series(zone_key, FORECAST_SERIES % REGIONS[zone_key],
session=session, target_datetime=target_datetime,
logger=logger)
def fetch_production(zone_key, session=None, target_datetime=None, logger=None):
return _fetch_series(zone_key, PRODUCTION_SERIES % REGIONS[zone_key],
session=session, target_datetime=target_datetime,
logger=logger)
def fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):
consumption = _fetch_series(zone_key, DEMAND_SERIES % REGIONS[zone_key],
session=session, target_datetime=target_datetime,
logger=logger)
for point in consumption:
point['consumption'] = point.pop('value')
return consumption
def fetch_production_mix(zone_key, session=None, target_datetime=None, logger=None):
mixes = []
for type, code in TYPES.items():
series = PRODUCTION_MIX_SERIES % (REGIONS[zone_key], code)
mix = _fetch_series(zone_key, series, session=session,
target_datetime=target_datetime, logger=logger)
        # EIA does not currently split production from the Virgil C. Summer
        # plant across the two owning/utilizing BAs, US-CAR-SCEG and US-CAR-SC,
        # but attributes it all to US-CAR-SCEG.
        # Here we apply a temporary fix for that until EIA properly splits the
        # production. This split can be found in the eGRID data:
        # https://www.epa.gov/energy/emissions-generation-resource-integrated-database-egrid
SC_VIRGIL_OWNERSHIP = 0.3333333
if zone_key == 'US-CAR-SC' and type == 'nuclear':
series = PRODUCTION_MIX_SERIES % (REGIONS['US-CAR-SCEG'], code)
mix = _fetch_series('US-CAR-SCEG', series, session=session,
target_datetime=target_datetime, logger=logger)
for point in mix:
point.update({
'value': point['value']*SC_VIRGIL_OWNERSHIP
})
if zone_key == 'US-CAR-SCEG' and type == 'nuclear':
for point in mix:
point.update({
'value': point['value']*(1-SC_VIRGIL_OWNERSHIP)
})
if not mix:
continue
for point in mix:
negative_threshold = NEGATIVE_PRODUCTION_THRESHOLDS['zoneOverrides']\
.get(zone_key, {})\
.get(type, NEGATIVE_PRODUCTION_THRESHOLDS['default'])
if type != 'hydro' and \
point['value'] and \
0 > point['value'] >= negative_threshold:
point['value'] = 0
if type == 'hydro' and point['value'] and point['value'] < 0:
point.update({
'production': {},# required by merge_production_outputs()
'storage': {type: point.pop('value')},
})
else:
point.update({
'production': {type: point.pop('value')},
'storage': {}, # required by merge_production_outputs()
})
            # Replace small negative values (> -5) with 0; this is necessary for solar.
point = validate(point, logger=logger, remove_negative=True)
mixes.append(mix)
    # Some of the returned mixes could be for older timeframes.
    # E.g. the latest oil data could be 6 months old.
    # In this case we want to discard the old data as we won't be able to merge it.
timeframes = [
sorted(map(lambda x: x['datetime'], mix))
for mix in mixes
]
latest_timeframe = max(timeframes, key=lambda x: x[-1])
correct_mixes = []
for mix in mixes:
correct_mix = []
for production_in_mix in mix:
if production_in_mix['datetime'] in latest_timeframe:
correct_mix.append(production_in_mix)
if len(correct_mix) > 0:
correct_mixes.append(correct_mix)
return merge_production_outputs(correct_mixes, zone_key, merge_source='eia.gov')
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
sortedcodes = '->'.join(sorted([zone_key1, zone_key2]))
exchange = _fetch_series(sortedcodes, EXCHANGES[sortedcodes], session=session,
target_datetime=target_datetime, logger=logger)
for point in exchange:
point.update({
'sortedZoneKeys': point.pop('zoneKey'),
'netFlow': point.pop('value'),
})
if sortedcodes in REVERSE_EXCHANGES:
point['netFlow'] = -point['netFlow']
return exchange
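# Illustrative sketch (comment only, not part of the original parser): a point
# returned by fetch_exchange has the shape
#     {'sortedZoneKeys': 'US-SW-PNM->US-SW-SRP', 'netFlow': <MW>,
#      'datetime': <datetime>, 'source': 'eia.gov'}
# with the sign of netFlow flipped when the sorted pair appears in
# REVERSE_EXCHANGES.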
def _fetch_series(zone_key, series_id, session=None, target_datetime=None,
logger=None):
"""Fetches and converts a data series."""
s = session or requests.Session()
# local import to avoid the exception that happens if EIAPY token is not set
# even if this module is unused
from eiapy import Series
series = Series(series_id=series_id, session=s)
if target_datetime:
utc = tz.gettz('UTC')
        # EIA currently only accepts UTC timestamps in the form YYYYMMDDTHHZ.
end = target_datetime.astimezone(utc).strftime('%Y%m%dT%HZ')
start = (target_datetime.astimezone(utc) - datetime.timedelta(days=1)).strftime('%Y%m%dT%HZ')
raw_data = series.get_data(start=start, end=end)
else:
# Get the last 24 hours available.
raw_data = series.last(24)
# UTC timestamp with no offset returned.
if not raw_data.get('series'):
# Series doesn't exist. Probably requesting a fuel from a region that
# doesn't have any capacity for that fuel type.
return []
return [{
'zoneKey': zone_key,
'datetime': parser.parse(datapoint[0]),
'value': datapoint[1],
'source': 'eia.gov',
} for datapoint in raw_data['series'][0]['data']]
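# Illustrative sketch (comment only, not part of the original parser): with a
# target_datetime of 2020-01-02 13:00 UTC, _fetch_series requests the window
#     start='20200101T13Z', end='20200102T13Z'
# i.e. the 24 hours leading up to the requested timestamp.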
def main():
"Main method, never used by the Electricity Map backend, but handy for testing."
from pprint import pprint
pprint(fetch_consumption_forecast('US-NY'))
pprint(fetch_production('US-SEC'))
pprint(fetch_production_mix('US-TN'))
pprint(fetch_consumption('US-CAR'))
pprint(fetch_exchange('MX-BC', 'US-CA'))
if __name__ == '__main__':
main()
|
the-stack_0_17458 | from typing import Union
from uuid import UUID
from getnet.services.plans.plan_response import PlanResponse
from getnet.services.subscriptions.credit import Credit
from getnet.services.subscriptions.customer import Customer
from getnet.services.utils import Device
class Subscription:
seller_id: str
customer_id: str
plan_id: str
order_id: str
credit: Credit
device: Device
def __init__(
self,
order_id: str,
customer_id: Union[Customer, str],
plan_id: Union[PlanResponse, UUID, str],
credit: Union[Credit, dict],
device: Union[Device, dict] = None,
seller_id: str = None,
):
self.customer_id = (
customer_id.customer_id
if isinstance(customer_id, Customer)
else customer_id
)
self.plan_id = (
plan_id.plan_id if isinstance(plan_id, PlanResponse) else str(plan_id)
)
self.order_id = order_id
self.credit = (
credit if isinstance(credit, Credit) or credit is None else Credit(**credit)
)
self.device = (
device if isinstance(device, Device) or device is None else Device(**device)
)
self.seller_id = seller_id
def as_dict(self):
data = {
"seller_id": str(self.seller_id),
"customer_id": str(self.customer_id),
"plan_id": str(self.plan_id),
"order_id": self.order_id,
"subscription": {"payment_type": {"credit": self.credit.as_dict()}},
}
if self.device is not None:
data["devise"] = self.device.as_dict()
return data
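# Illustrative sketch (comment only, not part of the original module): given a
# Subscription built from an existing Customer, PlanResponse/UUID and Credit,
# as_dict() produces a payload shaped like
#     {
#         "seller_id": "<seller id>",
#         "customer_id": "<customer id>",
#         "plan_id": "<plan uuid>",
#         "order_id": "<order id>",
#         "subscription": {"payment_type": {"credit": {...}}},
#         "devise": {...},  # only included when a Device was supplied
#     }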
|
the-stack_0_17459 | #!/usr/bin/env python
from aoc import get_input
def run(cmds):
acc = ptr = 0
seen = set()
while ptr < len(cmds):
if ptr in seen:
return acc, False
seen.add(ptr)
cmd, val = cmds[ptr]
if cmd == 'j':
ptr += val
continue
if cmd == 'a':
acc += val
ptr += 1
return acc, True
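# Illustrative sketch (comment only, not part of the original solution): a tiny
# program that loops forever, e.g.
#     run([['n', 0], ['a', 1], ['j', -2]])
# returns (1, False): the accumulator value at the moment the loop is detected.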
def main():
with get_input(__file__) as ifile:
cmds = [[line[0], int(line[4:])] for line in ifile]
print(run(cmds)[0]) #1
for i, (cmd, _) in enumerate(cmds): # BRUTE FORCE BAYBEE
if cmd == 'a': continue
old = cmds[i][0]
cmds[i][0] = {'j': 'n', 'n': 'j'}[old]
acc, res = run(cmds)
if res: break
cmds[i][0] = old
print(acc) # 2
if __name__ == '__main__':
main()
|
the-stack_0_17460 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 13:05:34 2019
@author: tadahaya
"""
import unittest
import pandas as pd
import os
import sys
import math
from enan.fet import FET
class SampleTest(unittest.TestCase):
CLS_VAL = 'none'
    # called when the test class is initialized
@classmethod
def setUpClass(cls):
if sys.flags.debug:
print('> setUpClass method is called.')
cls.CLS_VAL = '> setUpClass : initialized!'
if sys.flags.debug:
print(cls.CLS_VAL)
    # called when the test class ends
@classmethod
def tearDownClass(cls):
if sys.flags.debug:
print('> tearDownClass method is called.')
cls.CLS_VAL = '> tearDownClass : released!'
if sys.flags.debug:
print(cls.CLS_VAL)
# called when a test method runs
def setUp(self):
if sys.flags.debug:
print(os.linesep + '> setUp method is called.')
self.smpl = FET()
# called when a test method ends
def tearDown(self):
if sys.flags.debug:
print(os.linesep + '> tearDown method is called.')
def _df_checker(self,df):
if type(df)!=pd.core.frame.DataFrame:
return False
elif df.shape[0]==0:
return False
else:
head = df.head(1)
judge = math.isnan(head.iat[0,0])
return not judge
def _sr_checker(self,sr):
if type(sr)!=pd.core.series.Series:
return False
if sr.shape[0]==0:
return False
else:
head = sr.head(1)
judge = math.isnan(head.iat[0])
return not judge
def test_calc(self):
# prepare test patterns
test_patterns = [
("fdr_bh","greater",None), # (arg1, arg2, ..., expected result)
("fdr_bh","two-sided",None), # (arg1, arg2, ..., expected result)
("fdr_bh","less",None), # (arg1, arg2, ..., expected result)
("fdr_bh","greater",3), # (arg1, arg2, ..., expected result)
]
self.smpl.gene(ref="enrichr",species="human")
self.smpl.load_ref(library="KEGG_2019_Human")
self.smpl.set_obj({"abca1","abcg1","abcb11","abcc2","abcc3","abcc4"})
### loop for sweeping all conditions
for tcorr,tmode,tfocus in test_patterns:
with self.subTest(correction=tcorr,mode=tmode,focus=tfocus):
self.assertTrue(self._df_checker(self.smpl.calc(correction=tcorr,mode=tmode,focus=tfocus)))
|
the-stack_0_17461 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 14:24:59 2019
@author: jdummer
"""
import yaml
yml_lines = []
with open('2019-01-08-oleanna.md', encoding='utf8') as file:
for line in file:
if line.strip() == "---":
break
for line in file:
if line.strip() == "---":
break
else:
yml_lines.append(line)
yml_string = "".join(yml_lines)
print(yaml.safe_load(yml_string))
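# Illustrative sketch (comment only, hypothetical front matter): for a post file
# beginning with
#     ---
#     title: Oleanna
#     date: 2019-01-08
#     ---
# the two loops above collect the inner lines and yaml.safe_load returns
#     {'title': 'Oleanna', 'date': datetime.date(2019, 1, 8)}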
|
the-stack_0_17462 | def extractTeafragranceWordpressCom(item):
'''
Parser for 'teafragrance.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Rock Sugar And Pear Stew', 'Rock Sugar And Pear Stew', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
the-stack_0_17466 | """
Swap adjacent bits.
1010
0101
The mask 0xAAAAAAAA has all the even bits set.
The mask 0x55555555 has all the odd bits set.
ANDing zeroes out the bits that are not set in the original number,
then we shift left/right
and combine the halves with OR.
"""
def zamien_sasiadow(liczba):
parzyste = liczba & 0xAAAAAAAA
nieparzyste = liczba & 0x55555555
parzyste >>= 1
nieparzyste <<= 1
return parzyste | nieparzyste
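# Illustrative worked example (comment only, not part of the original file):
#     9131 = 0b10001110101011
# bits selected by 0xAAAAAAAA are shifted right, bits selected by 0x55555555
# are shifted left, and ORing the two halves gives
#     4951 = 0b01001101010111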
if __name__ == "__main__":
liczba = 9131
wynik = 4951
assert zamien_sasiadow(liczba) == wynik
|
the-stack_0_17467 | #!/usr/bin/env python
"""Script that makes determining PATTERN for a new [2to3] fix much easier.
Figuring out exactly what PATTERN I want for a given fixer class is
getting tedious. This script will step through each possible subtree
for a given string, allowing you to select which one you want. It will
then try to figure out an appropriate pattern to match that tree. This
pattern will require some editing (it will be overly restrictive) but
should provide a solid base to work with and handle the tricky parts.
Usage:
python find_pattern.py "g.throw(E, V, T)"
This will step through each subtree in the parse. To reject a
candidate subtree, hit enter; to accept a candidate, hit "y" and
enter. The pattern will be spit out to stdout.
For example, the above will yield a succession of possible snippets,
skipping all leaf-only trees. I accept
'g.throw(E, V, T)'
This causes find_pattern to spit out
power< 'g' trailer< '.' 'throw' >
trailer< '(' arglist< 'E' ',' 'V' ',' 'T' > ')' > >
Some minor tweaks later, I'm left with
power< any trailer< '.' 'throw' >
trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > >
which is exactly what I was after.
Larger snippets can be placed in a file (as opposed to a command-line
arg) and processed with the -f option.
"""
from __future__ import print_function
__author__ = "Collin Winter <[email protected]>"
# Python imports
import optparse
import sys
from StringIO import StringIO
# Local imports
from lib2to3 import pytree
from lib2to3.pgen2 import driver
from lib2to3.pygram import python_symbols, python_grammar
driver = driver.Driver(python_grammar, convert=pytree.convert)
def main(args):
parser = optparse.OptionParser(usage="find_pattern.py [options] [string]")
parser.add_option("-f", "--file", action="store",
help="Read a code snippet from the specified file")
# Parse command line arguments
options, args = parser.parse_args(args)
if options.file:
tree = driver.parse_file(options.file)
elif len(args) > 1:
tree = driver.parse_stream(StringIO(args[1] + "\n"))
else:
print("You must specify an input file or an input string", file=sys.stderr)
return 1
examine_tree(tree)
return 0
def examine_tree(tree):
for node in tree.post_order():
if isinstance(node, pytree.Leaf):
continue
print(repr(str(node)))
verdict = raw_input()
if verdict.strip():
print(find_pattern(node))
return
def find_pattern(node):
if isinstance(node, pytree.Leaf):
return repr(node.value)
return find_symbol(node.type) + \
"< " + " ".join(find_pattern(n) for n in node.children) + " >"
def find_symbol(sym):
for n, v in python_symbols.__dict__.items():
if v == sym:
return n
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
the-stack_0_17468 | # :coding: utf-8
# :copyright: Copyright (c) 2014 ftrack
from __future__ import absolute_import
import json
import logging
import collections
import datetime
import os
import getpass
import functools
import itertools
import distutils.version
import hashlib
import tempfile
import threading
import atexit
import warnings
import requests
import requests.auth
import arrow
import clique
import ftrack_api
import ftrack_api.exception
import ftrack_api.entity.factory
import ftrack_api.entity.base
import ftrack_api.entity.location
import ftrack_api.cache
import ftrack_api.symbol
import ftrack_api.query
import ftrack_api.attribute
import ftrack_api.collection
import ftrack_api.event.hub
import ftrack_api.event.base
import ftrack_api.plugin
import ftrack_api.inspection
import ftrack_api.operation
import ftrack_api.accessor.disk
import ftrack_api.structure.origin
import ftrack_api.structure.entity_id
import ftrack_api.accessor.server
import ftrack_api._centralized_storage_scenario
import ftrack_api.logging
from ftrack_api.logging import LazyLogMessage as L
try:
from weakref import WeakMethod
except ImportError:
from ftrack_api._weakref import WeakMethod
class SessionAuthentication(requests.auth.AuthBase):
'''Attach ftrack session authentication information to requests.'''
def __init__(self, api_key, api_user):
'''Initialise with *api_key* and *api_user*.'''
self.api_key = api_key
self.api_user = api_user
super(SessionAuthentication, self).__init__()
def __call__(self, request):
'''Modify *request* to have appropriate headers.'''
request.headers.update({
'ftrack-api-key': self.api_key,
'ftrack-user': self.api_user
})
return request
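# Illustrative sketch (comment only, not part of the original module): the auth
# object is attached to a requests session (Session.__init__ below does exactly
# this), after which every request carries the two ftrack headers:
#
#     http_session = requests.Session()
#     http_session.auth = SessionAuthentication('SOME-API-KEY', 'some.user')
#     # any subsequent http_session.get/post picks up the headers via __call__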
class Session(object):
'''An isolated session for interaction with an ftrack server.'''
def __init__(
self, server_url=None, api_key=None, api_user=None, auto_populate=True,
plugin_paths=None, cache=None, cache_key_maker=None,
auto_connect_event_hub=None, schema_cache_path=None,
plugin_arguments=None
):
'''Initialise session.
*server_url* should be the URL of the ftrack server to connect to
including any port number. If not specified attempt to look up from
:envvar:`FTRACK_SERVER`.
*api_key* should be the API key to use for authentication whilst
*api_user* should be the username of the user in ftrack to record
operations against. If not specified, *api_key* should be retrieved
from :envvar:`FTRACK_API_KEY` and *api_user* from
:envvar:`FTRACK_API_USER`.
If *auto_populate* is True (the default), then accessing entity
attributes will cause them to be automatically fetched from the server
if they are not already. This flag can be changed on the session
directly at any time.
*plugin_paths* should be a list of paths to search for plugins. If not
specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
*cache* should be an instance of a cache that fulfils the
:class:`ftrack_api.cache.Cache` interface and will be used as the cache
for the session. It can also be a callable that will be called with the
session instance as sole argument. The callable should return ``None``
if a suitable cache could not be configured, but session instantiation
can continue safely.
.. note::
The session will add the specified cache to a pre-configured layered
cache that specifies the top level cache as a
:class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
to construct a separate memory cache for typical behaviour. Working
around this behaviour or removing the memory cache can lead to
unexpected behaviour.
*cache_key_maker* should be an instance of a key maker that fulfils the
:class:`ftrack_api.cache.KeyMaker` interface and will be used to
generate keys for objects being stored in the *cache*. If not specified,
a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
If *auto_connect_event_hub* is True then embedded event hub will be
automatically connected to the event server and allow for publishing and
subscribing to **non-local** events. If False, then only publishing and
subscribing to **local** events will be possible until the hub is
manually connected using :meth:`EventHub.connect
<ftrack_api.event.hub.EventHub.connect>`.
.. note::
The event hub connection is performed in a background thread to
improve session startup time. If a registered plugin requires a
connected event hub then it should check the event hub connection
status explicitly. Subscribing to events does *not* require a
connected event hub.
Enable schema caching by setting *schema_cache_path* to a folder path.
If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
determine the path to store cache in. If the environment variable is
also not specified then a temporary directory will be used. Set to
`False` to disable schema caching entirely.
*plugin_arguments* should be an optional mapping (dict) of keyword
arguments to pass to plugin register functions upon discovery. If a
discovered plugin has a signature that is incompatible with the passed
arguments, the discovery mechanism will attempt to reduce the passed
arguments to only those that the plugin accepts. Note that a warning
will be logged in this case.
'''
super(Session, self).__init__()
self.logger = logging.getLogger(
__name__ + '.' + self.__class__.__name__
)
self._closed = False
if server_url is None:
server_url = os.environ.get('FTRACK_SERVER')
if not server_url:
raise TypeError(
'Required "server_url" not specified. Pass as argument or set '
'in environment variable FTRACK_SERVER.'
)
self._server_url = server_url
if api_key is None:
api_key = os.environ.get(
'FTRACK_API_KEY',
# Backwards compatibility
os.environ.get('FTRACK_APIKEY')
)
if not api_key:
raise TypeError(
'Required "api_key" not specified. Pass as argument or set in '
'environment variable FTRACK_API_KEY.'
)
self._api_key = api_key
if api_user is None:
api_user = os.environ.get('FTRACK_API_USER')
if not api_user:
try:
api_user = getpass.getuser()
except Exception:
pass
if not api_user:
raise TypeError(
'Required "api_user" not specified. Pass as argument, set in '
'environment variable FTRACK_API_USER or one of the standard '
'environment variables used by Python\'s getpass module.'
)
self._api_user = api_user
# Currently pending operations.
self.recorded_operations = ftrack_api.operation.Operations()
self.record_operations = True
self.cache_key_maker = cache_key_maker
if self.cache_key_maker is None:
self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
# Enforce always having a memory cache at top level so that the same
# in-memory instance is returned from session.
self.cache = ftrack_api.cache.LayeredCache([
ftrack_api.cache.MemoryCache()
])
if cache is not None:
if callable(cache):
cache = cache(self)
if cache is not None:
self.cache.caches.append(cache)
self._managed_request = None
self._request = requests.Session()
self._request.auth = SessionAuthentication(
self._api_key, self._api_user
)
self.auto_populate = auto_populate
# Fetch server information and in doing so also check credentials.
self._server_information = self._fetch_server_information()
# Now check compatibility of server based on retrieved information.
self.check_server_compatibility()
# Construct event hub and load plugins.
self._event_hub = ftrack_api.event.hub.EventHub(
self._server_url,
self._api_user,
self._api_key,
)
self._auto_connect_event_hub_thread = None
if auto_connect_event_hub in (None, True):
# Connect to event hub in background thread so as not to block main
# session usage waiting for event hub connection.
self._auto_connect_event_hub_thread = threading.Thread(
target=self._event_hub.connect
)
self._auto_connect_event_hub_thread.daemon = True
self._auto_connect_event_hub_thread.start()
# To help with migration from auto_connect_event_hub default changing
# from True to False.
self._event_hub._deprecation_warning_auto_connect = (
auto_connect_event_hub is None
)
# Register to auto-close session on exit.
atexit.register(WeakMethod(self.close))
self._plugin_paths = plugin_paths
if self._plugin_paths is None:
self._plugin_paths = os.environ.get(
'FTRACK_EVENT_PLUGIN_PATH', ''
).split(os.pathsep)
self._discover_plugins(plugin_arguments=plugin_arguments)
# TODO: Make schemas read-only and non-mutable (or at least without
# rebuilding types)?
if schema_cache_path is not False:
if schema_cache_path is None:
schema_cache_path = os.environ.get(
'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
)
schema_cache_path = os.path.join(
schema_cache_path, 'ftrack_api_schema_cache.json'
)
self.schemas = self._load_schemas(schema_cache_path)
self.types = self._build_entity_type_classes(self.schemas)
ftrack_api._centralized_storage_scenario.register(self)
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.ready',
data=dict(
session=self
)
),
synchronous=True
)
def __enter__(self):
'''Return session as context manager.'''
return self
def __exit__(self, exception_type, exception_value, traceback):
'''Exit session context, closing session in process.'''
self.close()
@property
def _request(self):
'''Return request session.
Raise :exc:`ftrack_api.exception.ConnectionClosedError` if session has
been closed and connection unavailable.
'''
if self._managed_request is None:
raise ftrack_api.exception.ConnectionClosedError()
return self._managed_request
@_request.setter
def _request(self, value):
'''Set request session to *value*.'''
self._managed_request = value
@property
def closed(self):
'''Return whether session has been closed.'''
return self._closed
@property
def server_information(self):
'''Return server information such as server version.'''
return self._server_information.copy()
@property
def server_url(self):
        '''Return server url used for session.'''
return self._server_url
@property
def api_user(self):
'''Return username used for session.'''
return self._api_user
@property
def api_key(self):
'''Return API key used for session.'''
return self._api_key
@property
def event_hub(self):
'''Return event hub.'''
return self._event_hub
@property
def _local_cache(self):
'''Return top level memory cache.'''
return self.cache.caches[0]
def check_server_compatibility(self):
'''Check compatibility with connected server.'''
server_version = self.server_information.get('version')
if server_version is None:
raise ftrack_api.exception.ServerCompatibilityError(
'Could not determine server version.'
)
# Perform basic version check.
if server_version != 'dev':
min_server_version = '3.3.11'
if (
distutils.version.LooseVersion(min_server_version)
> distutils.version.LooseVersion(server_version)
):
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0} incompatible with this version of the '
'API which requires a server version >= {1}'.format(
server_version,
min_server_version
)
)
def close(self):
'''Close session.
Close connections to server. Clear any pending operations and local
cache.
Use this to ensure that session is cleaned up properly after use.
'''
if self.closed:
self.logger.debug('Session already closed.')
return
self._closed = True
self.logger.debug('Closing session.')
if self.recorded_operations:
self.logger.warning(
'Closing session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
# Close connections.
self._request.close()
self._request = None
try:
self.event_hub.disconnect()
if self._auto_connect_event_hub_thread:
self._auto_connect_event_hub_thread.join()
except ftrack_api.exception.EventHubConnectionError:
pass
self.logger.debug('Session closed.')
def reset(self):
'''Reset session clearing local state.
Clear all pending operations and expunge all entities from session.
Also clear the local cache. If the cache used by the session is a
:class:`~ftrack_api.cache.LayeredCache` then only clear top level cache.
Otherwise, clear the entire cache.
Plugins are not rediscovered or reinitialised, but certain plugin events
        are re-emitted to properly configure session aspects that are dependent
on cache (such as location plugins).
.. warning::
Previously attached entities are not reset in memory and will retain
their state, but should not be used. Doing so will cause errors.
'''
if self.recorded_operations:
self.logger.warning(
'Resetting session with pending operations not persisted.'
)
# Clear pending operations.
self.recorded_operations.clear()
# Clear top level cache (expected to be enforced memory cache).
self._local_cache.clear()
        # Re-configure certain session aspects that may be dependent on cache.
self._configure_locations()
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.reset',
data=dict(
session=self
)
),
synchronous=True
)
def auto_populating(self, auto_populate):
'''Temporarily set auto populate to *auto_populate*.
The current setting will be restored automatically when done.
Example::
with session.auto_populating(False):
print entity['name']
'''
return AutoPopulatingContext(self, auto_populate)
def operation_recording(self, record_operations):
'''Temporarily set operation recording to *record_operations*.
The current setting will be restored automatically when done.
Example::
with session.operation_recording(False):
entity['name'] = 'change_not_recorded'
'''
return OperationRecordingContext(self, record_operations)
@property
def created(self):
'''Return list of newly created entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.CREATED
]
@property
def modified(self):
'''Return list of locally modified entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.MODIFIED
]
@property
def deleted(self):
'''Return list of deleted entities.'''
entities = self._local_cache.values()
states = ftrack_api.inspection.states(entities)
return [
entity for (entity, state) in itertools.izip(entities, states)
if state is ftrack_api.symbol.DELETED
]
def reset_remote(self, reset_type, entity=None):
'''Perform a server side reset.
*reset_type* is a server side supported reset type,
passing the optional *entity* to perform the option upon.
Please refer to ftrack documentation for a complete list of
supported server side reset types.
'''
payload = {
'action': 'reset_remote',
'reset_type': reset_type
}
if entity is not None:
payload.update({
'entity_type': entity.entity_type,
'entity_key': entity.get('id')
})
result = self.call(
[payload]
)
return result[0]['data']
def create(self, entity_type, data=None, reconstructing=False):
'''Create and return an entity of *entity_type* with initial *data*.
If specified, *data* should be a dictionary of key, value pairs that
should be used to populate attributes on the entity.
If *reconstructing* is False then create a new entity setting
appropriate defaults for missing data. If True then reconstruct an
existing entity.
Constructed entity will be automatically :meth:`merged <Session.merge>`
into the session.
'''
entity = self._create(entity_type, data, reconstructing=reconstructing)
entity = self.merge(entity)
return entity
def _create(self, entity_type, data, reconstructing):
'''Create and return an entity of *entity_type* with initial *data*.'''
try:
EntityTypeClass = self.types[entity_type]
except KeyError:
raise ftrack_api.exception.UnrecognisedEntityTypeError(entity_type)
return EntityTypeClass(self, data=data, reconstructing=reconstructing)
def ensure(self, entity_type, data, identifying_keys=None):
'''Retrieve entity of *entity_type* with *data*, creating if necessary.
*data* should be a dictionary of the same form passed to :meth:`create`.
By default, check for an entity that has matching *data*. If
*identifying_keys* is specified as a list of keys then only consider the
values from *data* for those keys when searching for existing entity. If
*data* is missing an identifying key then raise :exc:`KeyError`.
If no *identifying_keys* specified then use all of the keys from the
passed *data*. Raise :exc:`ValueError` if no *identifying_keys* can be
determined.
Each key should be a string.
.. note::
Currently only top level scalars supported. To ensure an entity by
looking at relationships, manually issue the :meth:`query` and
:meth:`create` calls.
If more than one entity matches the determined filter criteria then
raise :exc:`~ftrack_api.exception.MultipleResultsFoundError`.
If no matching entity found then create entity using supplied *data*.
If a matching entity is found, then update it if necessary with *data*.
.. note::
If entity created or updated then a :meth:`commit` will be issued
automatically. If this behaviour is undesired, perform the
:meth:`query` and :meth:`create` calls manually.
Return retrieved or created entity.
Example::
# First time, a new entity with `username=martin` is created.
entity = session.ensure('User', {'username': 'martin'})
# After that, the existing entity is retrieved.
entity = session.ensure('User', {'username': 'martin'})
# When existing entity retrieved, entity may also be updated to
# match supplied data.
entity = session.ensure(
'User', {'username': 'martin', 'email': '[email protected]'}
)
'''
if not identifying_keys:
identifying_keys = data.keys()
self.logger.debug(L(
'Ensuring entity {0!r} with data {1!r} using identifying keys '
'{2!r}', entity_type, data, identifying_keys
))
if not identifying_keys:
raise ValueError(
'Could not determine any identifying data to check against '
'when ensuring {0!r} with data {1!r}. Identifying keys: {2!r}'
.format(entity_type, data, identifying_keys)
)
expression = '{0} where'.format(entity_type)
criteria = []
for identifying_key in identifying_keys:
value = data[identifying_key]
if isinstance(value, basestring):
value = '"{0}"'.format(value)
elif isinstance(
value, (arrow.Arrow, datetime.datetime, datetime.date)
):
# Server does not store microsecond or timezone currently so
# need to strip from query.
# TODO: When datetime handling improved, update this logic.
value = (
arrow.get(value).naive.replace(microsecond=0).isoformat()
)
value = '"{0}"'.format(value)
criteria.append('{0} is {1}'.format(identifying_key, value))
expression = '{0} {1}'.format(
expression, ' and '.join(criteria)
)
try:
entity = self.query(expression).one()
except ftrack_api.exception.NoResultFoundError:
self.logger.debug('Creating entity as did not already exist.')
# Create entity.
entity = self.create(entity_type, data)
self.commit()
else:
self.logger.debug('Retrieved matching existing entity.')
# Update entity if required.
updated = False
for key, target_value in data.items():
if entity[key] != target_value:
entity[key] = target_value
updated = True
if updated:
self.logger.debug('Updating existing entity to match new data.')
self.commit()
return entity
def delete(self, entity):
'''Mark *entity* for deletion.'''
if self.record_operations:
self.recorded_operations.push(
ftrack_api.operation.DeleteEntityOperation(
entity.entity_type,
ftrack_api.inspection.primary_key(entity)
)
)
def get(self, entity_type, entity_key):
'''Return entity of *entity_type* with unique *entity_key*.
First check for an existing entry in the configured cache, otherwise
issue a query to the server.
If no matching entity found, return None.
'''
self.logger.debug(L('Get {0} with key {1}', entity_type, entity_key))
primary_key_definition = self.types[entity_type].primary_key_attributes
if isinstance(entity_key, basestring):
entity_key = [entity_key]
if len(entity_key) != len(primary_key_definition):
raise ValueError(
'Incompatible entity_key {0!r} supplied. Entity type {1} '
'expects a primary key composed of {2} values ({3}).'
.format(
entity_key, entity_type, len(primary_key_definition),
', '.join(primary_key_definition)
)
)
entity = None
try:
entity = self._get(entity_type, entity_key)
except KeyError:
# Query for matching entity.
self.logger.debug(
'Entity not present in cache. Issuing new query.'
)
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
expression = '{0} where ({1})'.format(
entity_type, ' and '.join(condition)
)
results = self.query(expression).all()
if results:
entity = results[0]
return entity
def _get(self, entity_type, entity_key):
'''Return cached entity of *entity_type* with unique *entity_key*.
Raise :exc:`KeyError` if no such entity in the cache.
'''
# Check cache for existing entity emulating
# ftrack_api.inspection.identity result object to pass to key maker.
cache_key = self.cache_key_maker.key(
(str(entity_type), map(str, entity_key))
)
self.logger.debug(L(
'Checking cache for entity with key {0}', cache_key
))
entity = self.cache.get(cache_key)
self.logger.debug(L(
'Retrieved existing entity from cache: {0} at {1}',
entity, id(entity)
))
return entity
def query(self, expression, page_size=500):
'''Query against remote data according to *expression*.
*expression* is not executed directly. Instead return an
:class:`ftrack_api.query.QueryResult` instance that will execute remote
call on access.
*page_size* specifies the maximum page size that the returned query
result object should be configured with.
.. seealso:: :ref:`querying`
'''
self.logger.debug(L('Query {0!r}', expression))
# Add in sensible projections if none specified. Note that this is
# done here rather than on the server to allow local modification of the
# schema setting to include commonly used custom attributes for example.
# TODO: Use a proper parser perhaps?
if not expression.startswith('select'):
entity_type = expression.split(' ', 1)[0]
EntityTypeClass = self.types[entity_type]
projections = EntityTypeClass.default_projections
expression = 'select {0} from {1}'.format(
', '.join(projections),
expression
)
query_result = ftrack_api.query.QueryResult(
self, expression, page_size=page_size
)
return query_result
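    # Illustrative sketch (comment only, not part of the original module): a bare
    # expression such as
    #     session.query('Task where status.name is "Open"')
    # is rewritten above to
    #     'select <default projections> from Task where status.name is "Open"'
    # before being wrapped in a QueryResult that issues the remote call lazily.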
def _query(self, expression):
'''Execute *query* and return (records, metadata).
Records will be a list of entities retrieved via the query and metadata
a dictionary of accompanying information about the result set.
'''
# TODO: Actually support batching several queries together.
# TODO: Should batches have unique ids to match them up later.
batch = [{
'action': 'query',
'expression': expression
}]
# TODO: When should this execute? How to handle background=True?
results = self.call(batch)
# Merge entities into local cache and return merged entities.
data = []
merged = dict()
for entity in results[0]['data']:
data.append(self._merge_recursive(entity, merged))
return data, results[0]['metadata']
def merge(self, value, merged=None):
'''Merge *value* into session and return merged value.
*merged* should be a mapping to record merges during run and should be
used to avoid infinite recursion. If not set will default to a
dictionary.
'''
if merged is None:
merged = {}
with self.operation_recording(False):
return self._merge(value, merged)
def _merge(self, value, merged):
'''Return merged *value*.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if isinstance(value, ftrack_api.entity.base.Entity):
log_debug and self.logger.debug(
'Merging entity into session: {0} at {1}'
.format(value, id(value))
)
return self._merge_entity(value, merged=merged)
elif isinstance(value, ftrack_api.collection.Collection):
log_debug and self.logger.debug(
'Merging collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
elif isinstance(value, ftrack_api.collection.MappedCollectionProxy):
log_debug and self.logger.debug(
'Merging mapped collection into session: {0!r} at {1}'
.format(value, id(value))
)
merged_collection = []
for entry in value.collection:
merged_collection.append(
self._merge(entry, merged=merged)
)
return merged_collection
else:
return value
def _merge_recursive(self, entity, merged=None):
        '''Merge *entity* and all its attributes recursively.'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
attached = self.merge(entity, merged)
for attribute in entity.attributes:
# Remote attributes.
remote_value = attribute.get_remote_value(entity)
if isinstance(
remote_value,
(
ftrack_api.entity.base.Entity,
ftrack_api.collection.Collection,
ftrack_api.collection.MappedCollectionProxy
)
):
log_debug and self.logger.debug(
'Merging remote value for attribute {0}.'.format(attribute)
)
if isinstance(remote_value, ftrack_api.entity.base.Entity):
self._merge_recursive(remote_value, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.Collection
):
for entry in remote_value:
self._merge_recursive(entry, merged=merged)
elif isinstance(
remote_value, ftrack_api.collection.MappedCollectionProxy
):
for entry in remote_value.collection:
self._merge_recursive(entry, merged=merged)
return attached
def _merge_entity(self, entity, merged=None):
'''Merge *entity* into session returning merged entity.
Merge is recursive so any references to other entities will also be
merged.
*entity* will never be modified in place. Ensure that the returned
merged entity instance is used.
'''
log_debug = self.logger.isEnabledFor(logging.DEBUG)
if merged is None:
merged = {}
with self.auto_populating(False):
entity_key = self.cache_key_maker.key(
ftrack_api.inspection.identity(entity)
)
# Check whether this entity has already been processed.
attached_entity = merged.get(entity_key)
if attached_entity is not None:
log_debug and self.logger.debug(
'Entity already processed for key {0} as {1} at {2}'
.format(entity_key, attached_entity, id(attached_entity))
)
return attached_entity
else:
log_debug and self.logger.debug(
'Entity not already processed for key {0}.'
.format(entity_key)
)
# Check for existing instance of entity in cache.
log_debug and self.logger.debug(
'Checking for entity in cache with key {0}'.format(entity_key)
)
try:
attached_entity = self.cache.get(entity_key)
log_debug and self.logger.debug(
'Retrieved existing entity from cache: {0} at {1}'
.format(attached_entity, id(attached_entity))
)
except KeyError:
# Construct new minimal instance to store in cache.
attached_entity = self._create(
entity.entity_type, {}, reconstructing=True
)
log_debug and self.logger.debug(
'Entity not present in cache. Constructed new instance: '
'{0} at {1}'.format(attached_entity, id(attached_entity))
)
# Mark entity as seen to avoid infinite loops.
merged[entity_key] = attached_entity
changes = attached_entity.merge(entity, merged=merged)
if changes:
self.cache.set(entity_key, attached_entity)
self.logger.debug('Cache updated with merged entity.')
else:
self.logger.debug(
'Cache not updated with merged entity as no differences '
'detected.'
)
return attached_entity
def populate(self, entities, projections):
'''Populate *entities* with attributes specified by *projections*.
Any locally set values included in the *projections* will not be
overwritten with the retrieved remote value. If this 'synchronise'
behaviour is required, first clear the relevant values on the entity by
setting them to :attr:`ftrack_api.symbol.NOT_SET`. Deleting the key will
have the same effect::
>>> print(user['username'])
martin
>>> del user['username']
>>> print(user['username'])
Symbol(NOT_SET)
.. note::
Entities that have been created and not yet persisted will be
skipped as they have no remote values to fetch.
'''
self.logger.debug(L(
'Populate {0!r} projections for {1}.', projections, entities
))
if not isinstance(
entities, (list, tuple, ftrack_api.query.QueryResult)
):
entities = [entities]
# TODO: How to handle a mixed collection of different entity types
# Should probably fail, but need to consider handling hierarchies such
# as User and Group both deriving from Resource. Actually, could just
# proceed and ignore projections that are not present in entity type.
entities_to_process = []
for entity in entities:
if ftrack_api.inspection.state(entity) is ftrack_api.symbol.CREATED:
# Created entities that are not yet persisted have no remote
# values. Don't raise an error here as it is reasonable to
# iterate over an entities properties and see that some of them
# are NOT_SET.
self.logger.debug(L(
'Skipping newly created entity {0!r} for population as no '
'data will exist in the remote for this entity yet.', entity
))
continue
entities_to_process.append(entity)
if entities_to_process:
reference_entity = entities_to_process[0]
entity_type = reference_entity.entity_type
query = 'select {0} from {1}'.format(projections, entity_type)
primary_key_definition = reference_entity.primary_key_attributes
entity_keys = [
ftrack_api.inspection.primary_key(entity).values()
for entity in entities_to_process
]
if len(primary_key_definition) > 1:
# Composite keys require full OR syntax unfortunately.
conditions = []
for entity_key in entity_keys:
condition = []
for key, value in zip(primary_key_definition, entity_key):
condition.append('{0} is "{1}"'.format(key, value))
                    conditions.append('({0})'.format(' and '.join(condition)))
query = '{0} where {1}'.format(query, ' or '.join(conditions))
else:
primary_key = primary_key_definition[0]
if len(entity_keys) > 1:
query = '{0} where {1} in ({2})'.format(
query, primary_key,
','.join([
str(entity_key[0]) for entity_key in entity_keys
])
)
else:
query = '{0} where {1} is {2}'.format(
query, primary_key, str(entity_keys[0][0])
)
result = self.query(query)
# Fetch all results now. Doing so will cause them to populate the
# relevant entities in the cache.
result.all()
# TODO: Should we check that all requested attributes were
# actually populated? If some weren't would we mark that to avoid
# repeated calls or perhaps raise an error?
# TODO: Make atomic.
def commit(self):
'''Commit all local changes to the server.'''
batch = []
with self.auto_populating(False):
for operation in self.recorded_operations:
# Convert operation to payload.
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
# At present, data payload requires duplicating entity
# type in data and also ensuring primary key added.
entity_data = {
'__entity_type__': operation.entity_type,
}
entity_data.update(operation.entity_key)
entity_data.update(operation.entity_data)
payload = OperationPayload({
'action': 'create',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.UpdateEntityOperation
):
entity_data = {
# At present, data payload requires duplicating entity
# type.
'__entity_type__': operation.entity_type,
operation.attribute_name: operation.new_value
}
payload = OperationPayload({
'action': 'update',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values(),
'entity_data': entity_data
})
elif isinstance(
operation, ftrack_api.operation.DeleteEntityOperation
):
payload = OperationPayload({
'action': 'delete',
'entity_type': operation.entity_type,
'entity_key': operation.entity_key.values()
})
else:
raise ValueError(
'Cannot commit. Unrecognised operation type {0} '
'detected.'.format(type(operation))
)
batch.append(payload)
# Optimise batch.
# TODO: Might be better to perform these on the operations list instead
# so all operation contextual information available.
# If entity was created and deleted in one batch then remove all
# payloads for that entity.
created = set()
deleted = set()
for payload in batch:
if payload['action'] == 'create':
created.add(
(payload['entity_type'], str(payload['entity_key']))
)
elif payload['action'] == 'delete':
deleted.add(
(payload['entity_type'], str(payload['entity_key']))
)
created_then_deleted = deleted.intersection(created)
if created_then_deleted:
optimised_batch = []
for payload in batch:
entity_type = payload.get('entity_type')
entity_key = str(payload.get('entity_key'))
if (entity_type, entity_key) in created_then_deleted:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Remove early update operations so that only last operation on
# attribute is applied server side.
updates_map = set()
for payload in reversed(batch):
if payload['action'] in ('update', ):
for key, value in payload['entity_data'].items():
if key == '__entity_type__':
continue
identity = (
payload['entity_type'], str(payload['entity_key']), key
)
if identity in updates_map:
del payload['entity_data'][key]
else:
updates_map.add(identity)
# Remove NOT_SET values from entity_data.
for payload in batch:
entity_data = payload.get('entity_data', {})
for key, value in entity_data.items():
if value is ftrack_api.symbol.NOT_SET:
del entity_data[key]
# Remove payloads with redundant entity_data.
optimised_batch = []
for payload in batch:
entity_data = payload.get('entity_data')
if entity_data is not None:
keys = entity_data.keys()
if not keys or keys == ['__entity_type__']:
continue
optimised_batch.append(payload)
batch = optimised_batch
# Collapse updates that are consecutive into one payload. Also, collapse
# updates that occur immediately after creation into the create payload.
optimised_batch = []
previous_payload = None
for payload in batch:
if (
previous_payload is not None
and payload['action'] == 'update'
and previous_payload['action'] in ('create', 'update')
and previous_payload['entity_type'] == payload['entity_type']
and previous_payload['entity_key'] == payload['entity_key']
):
previous_payload['entity_data'].update(payload['entity_data'])
continue
else:
optimised_batch.append(payload)
previous_payload = payload
batch = optimised_batch
# Process batch.
if batch:
result = self.call(batch)
# Clear recorded operations.
self.recorded_operations.clear()
# As optimisation, clear local values which are not primary keys to
# avoid redundant merges when merging references. Note: primary keys
# remain as needed for cache retrieval on new entities.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
for attribute in entity:
if attribute not in entity.primary_key_attributes:
del entity[attribute]
# Process results merging into cache relevant data.
for entry in result:
if entry['action'] in ('create', 'update'):
# Merge returned entities into local cache.
self.merge(entry['data'])
elif entry['action'] == 'delete':
# TODO: Detach entity - need identity returned?
# TODO: Expunge entity from cache.
pass
# Clear remaining local state, including local values for primary
# keys on entities that were merged.
with self.auto_populating(False):
with self.operation_recording(False):
for entity in self._local_cache.values():
entity.clear()
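    # Illustrative sketch (comment only, not part of the original module): the
    # optimisation passes above mean that, for example, an entity created and then
    # deleted inside the same commit produces no server payload at all, while
    # several consecutive updates to one entity collapse into a single 'update'
    # payload carrying only the final values.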
def rollback(self):
'''Clear all recorded operations and local state.
Typically this would be used following a failed :meth:`commit` in order
to revert the session to a known good state.
Newly created entities not yet persisted will be detached from the
session / purged from cache and no longer contribute, but the actual
objects are not deleted from memory. They should no longer be used and
doing so could cause errors.
'''
with self.auto_populating(False):
with self.operation_recording(False):
# Detach all newly created entities and remove from cache. This
# is done because simply clearing the local values of newly
# created entities would result in entities with no identity as
# primary key was local while not persisted. In addition, it
# makes no sense for failed created entities to exist in session
# or cache.
for operation in self.recorded_operations:
if isinstance(
operation, ftrack_api.operation.CreateEntityOperation
):
entity_key = str((
str(operation.entity_type),
operation.entity_key.values()
))
try:
self.cache.remove(entity_key)
except KeyError:
pass
# Clear locally stored modifications on remaining entities.
for entity in self._local_cache.values():
entity.clear()
self.recorded_operations.clear()
def _fetch_server_information(self):
'''Return server information.'''
result = self.call([{'action': 'query_server_information'}])
return result[0]
def _discover_plugins(self, plugin_arguments=None):
'''Find and load plugins in search paths.
Each discovered module should implement a register function that
accepts this session as first argument. Typically the function should
register appropriate event listeners against the session's event hub.
def register(session):
session.event_hub.subscribe(
'topic=ftrack.api.session.construct-entity-type',
construct_entity_type
)
*plugin_arguments* should be an optional mapping of keyword arguments
and values to pass to plugin register functions upon discovery.
'''
plugin_arguments = plugin_arguments or {}
ftrack_api.plugin.discover(
self._plugin_paths, [self], plugin_arguments
)
def _read_schemas_from_cache(self, schema_cache_path):
'''Return schemas and schema hash from *schema_cache_path*.
*schema_cache_path* should be the path to the file containing the
schemas in JSON format.
'''
self.logger.debug(L(
'Reading schemas from cache {0!r}', schema_cache_path
))
if not os.path.exists(schema_cache_path):
self.logger.info(L(
'Cache file not found at {0!r}.', schema_cache_path
))
return [], None
with open(schema_cache_path, 'r') as schema_file:
schemas = json.load(schema_file)
hash_ = hashlib.md5(
json.dumps(schemas, sort_keys=True)
).hexdigest()
return schemas, hash_
def _write_schemas_to_cache(self, schemas, schema_cache_path):
'''Write *schemas* to *schema_cache_path*.
*schema_cache_path* should be a path to a file that the schemas can be
written to in JSON format.
'''
self.logger.debug(L(
'Updating schema cache {0!r} with new schemas.', schema_cache_path
))
with open(schema_cache_path, 'w') as local_cache_file:
json.dump(schemas, local_cache_file, indent=4)
def _load_schemas(self, schema_cache_path):
'''Load schemas.
First try to load schemas from cache at *schema_cache_path*. If the
cache is not available or the cache appears outdated then load schemas
from server and store fresh copy in cache.
If *schema_cache_path* is set to `False`, always load schemas from
server bypassing cache.
'''
local_schema_hash = None
schemas = []
if schema_cache_path:
try:
schemas, local_schema_hash = self._read_schemas_from_cache(
schema_cache_path
)
except (IOError, TypeError, AttributeError, ValueError):
# Catch any known exceptions when trying to read the local
# schema cache to prevent API from being unusable.
self.logger.exception(L(
'Schema cache could not be loaded from {0!r}',
schema_cache_path
))
# Use `dictionary.get` to retrieve hash to support older version of
# ftrack server not returning a schema hash.
server_hash = self._server_information.get(
'schema_hash', False
)
if local_schema_hash != server_hash:
self.logger.debug(L(
            'Loading schemas from server due to hash not matching. '
'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
))
schemas = self.call([{'action': 'query_schemas'}])[0]
if schema_cache_path:
try:
self._write_schemas_to_cache(schemas, schema_cache_path)
except (IOError, TypeError):
self.logger.exception(L(
'Failed to update schema cache {0!r}.',
schema_cache_path
))
else:
self.logger.debug(L(
'Using cached schemas from {0!r}', schema_cache_path
))
return schemas
def _build_entity_type_classes(self, schemas):
'''Build default entity type classes.'''
fallback_factory = ftrack_api.entity.factory.StandardFactory()
classes = {}
for schema in schemas:
results = self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.construct-entity-type',
data=dict(
schema=schema,
schemas=schemas
)
),
synchronous=True
)
results = [result for result in results if result is not None]
if not results:
self.logger.debug(L(
'Using default StandardFactory to construct entity type '
'class for "{0}"', schema['id']
))
entity_type_class = fallback_factory.create(schema)
elif len(results) > 1:
raise ValueError(
'Expected single entity type to represent schema "{0}" but '
'received {1} entity types instead.'
.format(schema['id'], len(results))
)
else:
entity_type_class = results[0]
classes[entity_type_class.entity_type] = entity_type_class
return classes
def _configure_locations(self):
'''Configure locations.'''
# First configure builtin locations, by injecting them into local cache.
# Origin.
location = self.create(
'Location',
data=dict(
name='ftrack.origin',
id=ftrack_api.symbol.ORIGIN_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.OriginLocationMixin,
name='OriginLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 100
# Unmanaged.
location = self.create(
'Location',
data=dict(
name='ftrack.unmanaged',
id=ftrack_api.symbol.UNMANAGED_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
# location.resource_identifier_transformer = (
# ftrack_api.resource_identifier_transformer.internal.InternalResourceIdentifierTransformer(session)
# )
location.priority = 90
# Review.
location = self.create(
'Location',
data=dict(
name='ftrack.review',
id=ftrack_api.symbol.REVIEW_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.UnmanagedLocationMixin,
name='UnmanagedLocation'
)
location.accessor = ftrack_api.accessor.disk.DiskAccessor(prefix='')
location.structure = ftrack_api.structure.origin.OriginStructure()
location.priority = 110
# Server.
location = self.create(
'Location',
data=dict(
name='ftrack.server',
id=ftrack_api.symbol.SERVER_LOCATION_ID
),
reconstructing=True
)
ftrack_api.mixin(
location, ftrack_api.entity.location.ServerLocationMixin,
name='ServerLocation'
)
location.accessor = ftrack_api.accessor.server._ServerAccessor(
session=self
)
location.structure = ftrack_api.structure.entity_id.EntityIdStructure()
location.priority = 150
# Master location based on server scenario.
storage_scenario = self.server_information.get('storage_scenario')
if (
storage_scenario and
storage_scenario.get('scenario')
):
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.storage-scenario.activate',
data=dict(
storage_scenario=storage_scenario
)
),
synchronous=True
)
# Next, allow further configuration of locations via events.
self.event_hub.publish(
ftrack_api.event.base.Event(
topic='ftrack.api.session.configure-location',
data=dict(
session=self
)
),
synchronous=True
)
@ftrack_api.logging.deprecation_warning(
'Session._call is now available as public method Session.call. The '
'private method will be removed in version 2.0.'
)
def _call(self, data):
'''Make request to server with *data* batch describing the actions.
.. note::
This private method is now available as public method
            :meth:`call`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.call(data)
def call(self, data):
'''Make request to server with *data* batch describing the actions.'''
url = self._server_url + '/api'
headers = {
'content-type': 'application/json',
'accept': 'application/json'
}
data = self.encode(data, entity_attribute_strategy='modified_only')
self.logger.debug(L('Calling server {0} with {1!r}', url, data))
response = self._request.post(
url,
headers=headers,
data=data
)
self.logger.debug(L('Call took: {0}', response.elapsed.total_seconds()))
self.logger.debug(L('Response: {0!r}', response.text))
try:
result = self.decode(response.text)
except Exception:
error_message = (
'Server reported error in unexpected format. Raw error was: {0}'
.format(response.text)
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
else:
if 'exception' in result:
# Handle exceptions.
error_message = 'Server reported error: {0}({1})'.format(
result['exception'], result['content']
)
self.logger.exception(error_message)
raise ftrack_api.exception.ServerError(error_message)
return result
def encode(self, data, entity_attribute_strategy='set_only'):
'''Return *data* encoded as JSON formatted string.
*entity_attribute_strategy* specifies how entity attributes should be
handled. The following strategies are available:
* *all* - Encode all attributes, loading any that are currently NOT_SET.
* *set_only* - Encode only attributes that are currently set without
loading any from the remote.
* *modified_only* - Encode only attributes that have been modified
locally.
* *persisted_only* - Encode only remote (persisted) attribute values.
'''
entity_attribute_strategies = (
'all', 'set_only', 'modified_only', 'persisted_only'
)
if entity_attribute_strategy not in entity_attribute_strategies:
raise ValueError(
'Unsupported entity_attribute_strategy "{0}". Must be one of '
'{1}'.format(
entity_attribute_strategy,
', '.join(entity_attribute_strategies)
)
)
return json.dumps(
data,
sort_keys=True,
default=functools.partial(
self._encode,
entity_attribute_strategy=entity_attribute_strategy
)
)
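    # Example (illustrative; ``task`` is assumed to be an entity previously
    # retrieved via ``session.query`` or ``session.get``). The strategy
    # controls which attribute values end up in the encoded payload:
    #
    #     payload = session.encode([task], entity_attribute_strategy='modified_only')
    #     full_payload = session.encode([task], entity_attribute_strategy='all')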
def _encode(self, item, entity_attribute_strategy='set_only'):
'''Return JSON encodable version of *item*.
*entity_attribute_strategy* specifies how entity attributes should be
handled. See :meth:`Session.encode` for available strategies.
'''
if isinstance(item, (arrow.Arrow, datetime.datetime, datetime.date)):
return {
'__type__': 'datetime',
'value': item.isoformat()
}
if isinstance(item, OperationPayload):
data = dict(item.items())
if "entity_data" in data:
for key, value in data["entity_data"].items():
if isinstance(value, ftrack_api.entity.base.Entity):
data["entity_data"][key] = self.entity_reference(value)
return data
if isinstance(item, ftrack_api.entity.base.Entity):
data = self.entity_reference(item)
with self.auto_populating(True):
for attribute in item.attributes:
value = ftrack_api.symbol.NOT_SET
if entity_attribute_strategy == 'all':
value = attribute.get_value(item)
elif entity_attribute_strategy == 'set_only':
if attribute.is_set(item):
value = attribute.get_local_value(item)
if value is ftrack_api.symbol.NOT_SET:
value = attribute.get_remote_value(item)
elif entity_attribute_strategy == 'modified_only':
if attribute.is_modified(item):
value = attribute.get_local_value(item)
elif entity_attribute_strategy == 'persisted_only':
if not attribute.computed:
value = attribute.get_remote_value(item)
if value is not ftrack_api.symbol.NOT_SET:
if isinstance(
attribute, ftrack_api.attribute.ReferenceAttribute
):
if isinstance(value, ftrack_api.entity.base.Entity):
value = self.entity_reference(value)
data[attribute.name] = value
return data
if isinstance(
item, ftrack_api.collection.MappedCollectionProxy
):
# Use proxied collection for serialisation.
item = item.collection
if isinstance(item, ftrack_api.collection.Collection):
data = []
for entity in item:
data.append(self.entity_reference(entity))
return data
raise TypeError('{0!r} is not JSON serializable'.format(item))
def entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along with
        the key, value pairs that make up its primary key.
'''
reference = {
'__entity_type__': entity.entity_type
}
with self.auto_populating(False):
reference.update(ftrack_api.inspection.primary_key(entity))
return reference
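    # For a typical entity the returned reference looks roughly like the
    # following (values are illustrative):
    #
    #     {'__entity_type__': 'Task', 'id': '9b9f1b0c-...'}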
@ftrack_api.logging.deprecation_warning(
'Session._entity_reference is now available as public method '
'Session.entity_reference. The private method will be removed '
'in version 2.0.'
)
def _entity_reference(self, entity):
'''Return entity reference that uniquely identifies *entity*.
Return a mapping containing the __entity_type__ of the entity along
        with the key, value pairs that make up its primary key.
.. note::
This private method is now available as public method
:meth:`entity_reference`. This alias remains for backwards
compatibility, but will be removed in version 2.0.
'''
return self.entity_reference(entity)
def decode(self, string):
'''Return decoded JSON *string* as Python object.'''
with self.operation_recording(False):
return json.loads(string, object_hook=self._decode)
def _decode(self, item):
'''Return *item* transformed into appropriate representation.'''
if isinstance(item, collections.Mapping):
if '__type__' in item:
if item['__type__'] == 'datetime':
item = arrow.get(item['value'])
elif '__entity_type__' in item:
item = self._create(
item['__entity_type__'], item, reconstructing=True
)
return item
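    # Example (illustrative): a datetime payload produced by :meth:`encode`
    # round-trips back into an ``arrow`` instance, while entity references are
    # rebuilt via :meth:`_create`:
    #
    #     session.decode('{"__type__": "datetime", "value": "2016-01-01T00:00:00"}')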
def _get_locations(self, filter_inaccessible=True):
        '''Helper to return locations ordered by priority.
If *filter_inaccessible* is True then only accessible locations will be
included in result.
'''
# Optimise this call.
locations = self.query('Location')
# Filter.
if filter_inaccessible:
locations = filter(
lambda location: location.accessor,
locations
)
# Sort by priority.
locations = sorted(
locations, key=lambda location: location.priority
)
return locations
def pick_location(self, component=None):
'''Return suitable location to use.
If no *component* specified then return highest priority accessible
location. Otherwise, return highest priority accessible location that
*component* is available in.
Return None if no suitable location could be picked.
'''
if component:
return self.pick_locations([component])[0]
else:
locations = self._get_locations()
if locations:
return locations[0]
else:
return None
def pick_locations(self, components):
'''Return suitable locations for *components*.
Return list of locations corresponding to *components* where each
picked location is the highest priority accessible location for that
component. If a component has no location available then its
corresponding entry will be None.
'''
candidate_locations = self._get_locations()
availabilities = self.get_component_availabilities(
components, locations=candidate_locations
)
locations = []
for component, availability in zip(components, availabilities):
location = None
for candidate_location in candidate_locations:
if availability.get(candidate_location['id']) > 0.0:
location = candidate_location
break
locations.append(location)
return locations
def create_component(
self, path, data=None, location='auto'
):
        '''Create a new component from *path* with additional *data*.
.. note::
This is a helper method. To create components manually use the
standard :meth:`Session.create` method.
*path* can be a string representing a filesystem path to the data to
use for the component. The *path* can also be specified as a sequence
string, in which case a sequence component with child components for
each item in the sequence will be created automatically. The accepted
format for a sequence is '{head}{padding}{tail} [{ranges}]'. For
example::
'/path/to/file.%04d.ext [1-5, 7, 8, 10-20]'
.. seealso::
`Clique documentation <http://clique.readthedocs.org>`_
*data* should be a dictionary of any additional data to construct the
component with (as passed to :meth:`Session.create`).
If *location* is specified then automatically add component to that
location. The default of 'auto' will automatically pick a suitable
        location to add the component to if one is available. To not add to
        any location, specify *location* as None.
.. note::
A :meth:`Session.commit<ftrack_api.session.Session.commit>` may be
automatically issued as part of the components registration in the
location.
'''
if data is None:
data = {}
if location == 'auto':
# Check if the component name matches one of the ftrackreview
# specific names. Add the component to the ftrack.review location if
# so. This is used to not break backwards compatibility.
if data.get('name') in (
'ftrackreview-mp4', 'ftrackreview-webm', 'ftrackreview-image'
):
location = self.get(
'Location', ftrack_api.symbol.REVIEW_LOCATION_ID
)
else:
location = self.pick_location()
try:
collection = clique.parse(path)
except ValueError:
# Assume is a single file.
if 'size' not in data:
data['size'] = self._get_filesystem_size(path)
data.setdefault('file_type', os.path.splitext(path)[-1])
return self._create_component(
'FileComponent', path, data, location
)
else:
# Calculate size of container and members.
member_sizes = {}
container_size = data.get('size')
if container_size is not None:
if len(collection.indexes) > 0:
member_size = int(
round(container_size / len(collection.indexes))
)
for item in collection:
member_sizes[item] = member_size
else:
container_size = 0
for item in collection:
member_sizes[item] = self._get_filesystem_size(item)
container_size += member_sizes[item]
# Create sequence component
container_path = collection.format('{head}{padding}{tail}')
data.setdefault('padding', collection.padding)
data.setdefault('file_type', os.path.splitext(container_path)[-1])
data.setdefault('size', container_size)
container = self._create_component(
'SequenceComponent', container_path, data, location=None
)
# Create member components for sequence.
for member_path in collection:
member_data = {
'name': collection.match(member_path).group('index'),
'container': container,
'size': member_sizes[member_path],
'file_type': os.path.splitext(member_path)[-1]
}
component = self._create_component(
'FileComponent', member_path, member_data, location=None
)
container['members'].append(component)
if location:
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
location.add_component(
container, origin_location, recursive=True
)
return container
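    # Example usage (the path and the version entity are placeholders):
    #
    #     component = session.create_component(
    #         '/path/to/render.%04d.exr [1-100]',
    #         data={'version_id': version['id']},
    #         location='auto'
    #     )
    #     session.commit()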
def _create_component(self, entity_type, path, data, location):
'''Create and return component.
See public function :py:func:`createComponent` for argument details.
'''
component = self.create(entity_type, data)
# Add to special origin location so that it is possible to add to other
# locations.
origin_location = self.get(
'Location', ftrack_api.symbol.ORIGIN_LOCATION_ID
)
origin_location.add_component(component, path, recursive=False)
if location:
location.add_component(component, origin_location, recursive=False)
return component
def _get_filesystem_size(self, path):
        '''Return size from *path*.'''
try:
size = os.path.getsize(path)
except OSError:
size = 0
return size
def get_component_availability(self, component, locations=None):
'''Return availability of *component*.
If *locations* is set then limit result to availability of *component*
in those *locations*.
Return a dictionary of {location_id:percentage_availability}
'''
return self.get_component_availabilities(
[component], locations=locations
)[0]
def get_component_availabilities(self, components, locations=None):
'''Return availabilities of *components*.
If *locations* is set then limit result to availabilities of
*components* in those *locations*.
Return a list of dictionaries of {location_id:percentage_availability}.
The list indexes correspond to those of *components*.
'''
availabilities = []
if locations is None:
locations = self.query('Location')
# Separate components into two lists, those that are containers and
# those that are not, so that queries can be optimised.
standard_components = []
container_components = []
for component in components:
if 'members' in component.keys():
container_components.append(component)
else:
standard_components.append(component)
# Perform queries.
if standard_components:
self.populate(
standard_components, 'component_locations.location_id'
)
if container_components:
self.populate(
container_components,
'members, component_locations.location_id'
)
base_availability = {}
for location in locations:
base_availability[location['id']] = 0.0
for component in components:
availability = base_availability.copy()
availabilities.append(availability)
is_container = 'members' in component.keys()
if is_container and len(component['members']):
member_availabilities = self.get_component_availabilities(
component['members'], locations=locations
)
multiplier = 1.0 / len(component['members'])
for member, member_availability in zip(
component['members'], member_availabilities
):
for location_id, ratio in member_availability.items():
availability[location_id] += (
ratio * multiplier
)
else:
for component_location in component['component_locations']:
location_id = component_location['location_id']
if location_id in availability:
availability[location_id] = 100.0
for location_id, percentage in availability.items():
# Avoid quantization error by rounding percentage and clamping
# to range 0-100.
adjusted_percentage = round(percentage, 9)
adjusted_percentage = max(0.0, min(adjusted_percentage, 100.0))
availability[location_id] = adjusted_percentage
return availabilities
@ftrack_api.logging.deprecation_warning(
'Session.delayed_job has been deprecated in favour of session.call. '
'Please refer to the release notes for more information.'
)
def delayed_job(self, job_type):
        '''Execute a delayed job on the server; a `ftrack.entity.job.Job` is returned.
*job_type* should be one of the allowed job types. There is currently
only one remote job type "SYNC_USERS_LDAP".
'''
if job_type not in (ftrack_api.symbol.JOB_SYNC_USERS_LDAP, ):
raise ValueError(
u'Invalid Job type: {0}.'.format(job_type)
)
operation = {
'action': 'delayed_job',
'job_type': job_type.name
}
try:
result = self.call(
[operation]
)[0]
except ftrack_api.exception.ServerError as error:
raise
return result['data']
def get_widget_url(self, name, entity=None, theme=None):
'''Return an authenticated URL for widget with *name* and given options.
The returned URL will be authenticated using a token which will expire
after 6 minutes.
*name* should be the name of the widget to return and should be one of
'info', 'tasks' or 'tasks_browser'.
Certain widgets require an entity to be specified. If so, specify it by
setting *entity* to a valid entity instance.
*theme* sets the theme of the widget and can be either 'light' or 'dark'
        (defaulting to 'dark' if an invalid option is given).
'''
operation = {
'action': 'get_widget_url',
'name': name,
'theme': theme
}
if entity:
operation['entity_type'] = entity.entity_type
operation['entity_key'] = (
ftrack_api.inspection.primary_key(entity).values()
)
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_widget_url\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "get_widget_url", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
else:
return result[0]['widget_url']
def encode_media(self, media, version_id=None, keep_original='auto'):
        '''Return a new Job that encodes *media* to make it playable in browsers.
*media* can be a path to a file or a FileComponent in the ftrack.server
location.
        The job will encode *media* based on the file type. The job data
        contains information about the encoding in the following format::
{
'output': [{
'format': 'video/mp4',
'component_id': 'e2dc0524-b576-11d3-9612-080027331d74'
}, {
'format': 'image/jpeg',
'component_id': '07b82a97-8cf9-11e3-9383-20c9d081909b'
}],
'source_component_id': 'e3791a09-7e11-4792-a398-3d9d4eefc294',
'keep_original': True
}
The output components are associated with the job via the job_components
relation.
        An image component that can be used as a thumbnail will always be
        generated if possible.
If *media* is a file path, a new source component will be created and
added to the ftrack server location and a call to :meth:`commit` will be
        issued. If *media* is a FileComponent, it is assumed to already be
        available in the ftrack.server location.
If *version_id* is specified, the new components will automatically be
associated with the AssetVersion. Otherwise, the components will not
be associated to a version even if the supplied *media* belongs to one.
A server version of 3.3.32 or higher is required for the version_id
argument to function properly.
If *keep_original* is not set, the original media will be kept if it
is a FileComponent, and deleted if it is a file path. You can specify
True or False to change this behavior.
'''
if isinstance(media, basestring):
# Media is a path to a file.
server_location = self.get(
'Location', ftrack_api.symbol.SERVER_LOCATION_ID
)
if keep_original == 'auto':
keep_original = False
component_data = None
if keep_original:
component_data = dict(version_id=version_id)
component = self.create_component(
path=media,
data=component_data,
location=server_location
)
# Auto commit to ensure component exists when sent to server.
self.commit()
elif (
hasattr(media, 'entity_type') and
media.entity_type in ('FileComponent',)
):
# Existing file component.
component = media
if keep_original == 'auto':
keep_original = True
else:
raise ValueError(
'Unable to encode media of type: {0}'.format(type(media))
)
operation = {
'action': 'encode_media',
'component_id': component['id'],
'version_id': version_id,
'keep_original': keep_original
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'encode_media\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support "encode_media", '
'please update server and try again.'.format(
self.server_information.get('version')
)
)
else:
raise
return self.get('Job', result[0]['job_id'])
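    # Example usage (path and version are placeholders; the job's ``data``
    # attribute is a JSON string in the format described above):
    #
    #     job = session.encode_media('/path/to/preview.mov', version_id=version['id'])
    #     job_data = json.loads(job['data'])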
def get_upload_metadata(
self, component_id, file_name, file_size, checksum=None
):
'''Return URL and headers used to upload data for *component_id*.
        *file_name* and *file_size* should match the component's details.
The returned URL should be requested using HTTP PUT with the specified
headers.
The *checksum* is used as the Content-MD5 header and should contain
the base64-encoded 128-bit MD5 digest of the message (without the
headers) according to RFC 1864. This can be used as a message integrity
check to verify that the data is the same data that was originally sent.
'''
operation = {
'action': 'get_upload_metadata',
'component_id': component_id,
'file_name': file_name,
'file_size': file_size,
'checksum': checksum
}
try:
result = self.call([operation])
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'get_upload_metadata\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"get_upload_metadata", please update server and try '
'again.'.format(
self.server_information.get('version')
)
)
else:
raise
return result[0]
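    # Example usage (a sketch; assumes the ``requests`` library is available
    # and that the returned mapping exposes ``url`` and ``headers`` keys as
    # implied by the docstring above):
    #
    #     metadata = session.get_upload_metadata(
    #         component_id=component['id'],
    #         file_name='data.mov',
    #         file_size=os.path.getsize('/path/to/data.mov')
    #     )
    #     with open('/path/to/data.mov', 'rb') as stream:
    #         requests.put(metadata['url'], data=stream, headers=metadata['headers'])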
def send_user_invite(self, user):
        '''Send an invitation to the provided *user*.
*user* is a User instance
'''
self.send_user_invites(
[user]
)
def send_user_invites(self, users):
        '''Send an invitation to each of the provided *users*.
*users* is a list of User instances
'''
operations = []
for user in users:
operations.append(
{
'action':'send_user_invite',
'user_id': user['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_user_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_user_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
def send_review_session_invite(self, invitee):
'''Send an invite to a review session to *invitee*.
        *invitee* is an instance of ReviewSessionInvitee.
.. note::
The *invitee* must be committed.
'''
self.send_review_session_invites([invitee])
def send_review_session_invites(self, invitees):
'''Send an invite to a review session to a list of *invitees*.
        *invitees* is a list of ReviewSessionInvitee objects.
.. note::
All *invitees* must be committed.
'''
operations = []
for invitee in invitees:
operations.append(
{
'action': 'send_review_session_invite',
'review_session_invitee_id': invitee['id']
}
)
try:
self.call(operations)
except ftrack_api.exception.ServerError as error:
# Raise informative error if the action is not supported.
if 'Invalid action u\'send_review_session_invite\'' in error.message:
raise ftrack_api.exception.ServerCompatibilityError(
'Server version {0!r} does not support '
'"send_review_session_invite", please update server and '
'try again.'.format(
self.server_information.get('version')
)
)
else:
raise
class AutoPopulatingContext(object):
'''Context manager for temporary change of session auto_populate value.'''
def __init__(self, session, auto_populate):
'''Initialise context.'''
super(AutoPopulatingContext, self).__init__()
self._session = session
self._auto_populate = auto_populate
self._current_auto_populate = None
def __enter__(self):
'''Enter context switching to desired auto populate setting.'''
self._current_auto_populate = self._session.auto_populate
self._session.auto_populate = self._auto_populate
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context resetting auto populate to original setting.'''
self._session.auto_populate = self._current_auto_populate
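# A minimal usage sketch: this context manager is normally obtained through
# ``Session.auto_populating`` rather than instantiated directly, e.g.
#
#     with session.auto_populating(False):
#         value = entity['name']  # read without triggering a fetch from server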
class OperationRecordingContext(object):
'''Context manager for temporary change of session record_operations.'''
def __init__(self, session, record_operations):
'''Initialise context.'''
super(OperationRecordingContext, self).__init__()
self._session = session
self._record_operations = record_operations
self._current_record_operations = None
def __enter__(self):
'''Enter context.'''
self._current_record_operations = self._session.record_operations
self._session.record_operations = self._record_operations
def __exit__(self, exception_type, exception_value, traceback):
'''Exit context.'''
self._session.record_operations = self._current_record_operations
class OperationPayload(collections.MutableMapping):
'''Represent operation payload.'''
def __init__(self, *args, **kwargs):
'''Initialise payload.'''
super(OperationPayload, self).__init__()
self._data = dict()
self.update(dict(*args, **kwargs))
def __str__(self):
'''Return string representation.'''
return '<{0} {1}>'.format(
self.__class__.__name__, str(self._data)
)
def __getitem__(self, key):
'''Return value for *key*.'''
return self._data[key]
def __setitem__(self, key, value):
'''Set *value* for *key*.'''
self._data[key] = value
def __delitem__(self, key):
'''Remove *key*.'''
del self._data[key]
def __iter__(self):
'''Iterate over all keys.'''
return iter(self._data)
def __len__(self):
'''Return count of keys.'''
return len(self._data)
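# Example (illustrative): payloads behave like plain mutable mappings.
#
#     payload = OperationPayload(action='create', entity_type='Task')
#     payload['entity_data'] = {'name': 'Animation'}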
|
the-stack_0_17469 | from pathlib import Path
import pytest
from graeScript.file_explorer.delete_move_dirs import (_validate_args,
_validate_dir,
_validate_dirs)
class TestValidateArgs:
def test_one(self):
user_input = 'replace', 'cancel', 'delete'
num_allowed = 1
with pytest.raises(SystemExit):
assert _validate_args(user_input, num_allowed, 'replace',
'compare', 'delete')
def test_two(self):
user_input = 'make', 'withdraw', 'plan'
num_allowed = 5
assert _validate_args(user_input, num_allowed,
'make', 'withdraw', 'plan', 'deposit',
'draw', 'sample', 'save'
) == ['make', 'withdraw', 'plan']
def test_three(self):
user_input = 'replace', 'cancel'
num_allowed = 1
assert _validate_args(user_input, num_allowed, 'replace',
'compare', 'delete'
) == ['replace']
class TestValidateDir:
home = Path().home()
def test_one(self):
with pytest.raises(SystemExit):
assert _validate_dir('/src/graeScript/data')
def test_two(self):
with pytest.raises(SystemExit):
assert _validate_dir(self.home / 'bananaPaperSmallFakeFolder')
def test_three(self):
assert _validate_dir(str(self.home)) == self.home
class TestValidateDirs:
home = Path().home()
fake_folder = home / 'bananaPaperSmallFakeFolder'
def test_one(self):
assert _validate_dirs('/src/graeScript/data',
str(self.home),
self.fake_folder) == [self.home]
def test_two(self):
with pytest.raises(SystemExit):
assert _validate_dirs('/src/graeScript/data', self.fake_folder)
|
the-stack_0_17470 | #!/usr/bin/python
# *****************************************************************************
#
# Copyright (c) 2016, EPAM SYSTEMS INC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ******************************************************************************
from dlab.fab import *
from dlab.actions_lib import *
import sys
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
print('Generating infrastructure names and tags')
edge_conf = dict()
edge_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
edge_conf['zone'] = os.environ['gcp_zone']
edge_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
edge_conf['instance_name'] = '{0}-{1}-edge'.format(edge_conf['service_base_name'], edge_conf['edge_user_name'])
logging.info('[STOP EDGE]')
print('[STOP EDGE]')
try:
GCPActions().stop_instance(edge_conf['instance_name'], edge_conf['zone'])
except Exception as err:
append_result("Failed to stop edge.", str(err))
sys.exit(1)
try:
with open("/root/result.json", 'w') as result:
res = {"instance_name": edge_conf['instance_name'],
"Action": "Stop edge server"}
print(json.dumps(res))
result.write(json.dumps(res))
except:
print("Failed writing results.")
sys.exit(0)
|
the-stack_0_17471 | import pickle
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import itertools
import functools
from gene_map2 import split_label
def encode_dataset(input_file, gw_map, c_map, f_map, s_map, y_map):
gw_unk = gw_map['<unk>']
c_con = c_map[' ']
c_unk = c_map['<unk>']
dataset = list()
tmpw_gw, tmpc, tmpf, tmps, tmpy = list(), list(), list(), list(), list()
with open(input_file, 'r') as fin:
for line in fin:
if line.isspace() or line.startswith('-DOCSTART-'):
if len(tmpw_gw) > 0:
                    dataset.append([tmpw_gw, tmpc, tmpf, tmps, tmpy])
                    tmpw_gw, tmpc, tmpf, tmps, tmpy = list(), list(), list(), list(), list()
else:
line = line.split()
tmpw_gw.append(gw_map.get(line[0].lower(), gw_unk))
assert line[-1] != '<eof>'
a, b = split_label(line[-1])
tmpf.append(f_map[a])
tmps.append(s_map[b])
tmpc.append([c_map.get(tup, c_unk) for tup in line[0]])
tmpy.append(y_map[line[-1]])
if len(tmpw_gw) > 0:
dataset.append([tmpw_gw, tmpc, tmpf, tmps, tmpy])
return dataset
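# Illustrative note (not part of the original script): each entry of the
# returned dataset is [word_ids, char_ids, first_label_ids, second_label_ids,
# label_ids]; for a three-token sentence this might look like
# [[12, 7, 3], [[4, 5], [9], [2, 2, 8]], [1, 0, 1], [3, 2, 3], [5, 4, 5]].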
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--train_file', default="../data/ner/eng.train.iobes")
parser.add_argument('--test_file', default="../data/ner/eng.testb.iobes")
parser.add_argument('--dev_file', default="../data/ner/eng.testa.iobes")
parser.add_argument('--input_map', default="../data/conll_map.pk")
parser.add_argument('--output_file', default="../data/ner_dataset.pk")
parser.add_argument('--unk', default='<unk>')
args = parser.parse_args()
with open(args.input_map, 'rb') as f:
p_data = pickle.load(f)
name_list = ['gw_map', 'c_map', 'f_map', 's_map', 'y_map', 'emb_array']
gw_map, c_map, f_map, s_map, y_map, emb_array = [p_data[tup] for tup in name_list]
train_dataset = encode_dataset(args.train_file, gw_map, c_map, f_map, s_map, y_map)
test_dataset = encode_dataset(args.test_file, gw_map, c_map, f_map, s_map, y_map)
dev_dataset = encode_dataset(args.dev_file, gw_map, c_map, f_map, s_map, y_map)
with open(args.output_file, 'wb') as f:
pickle.dump({'gw_map': gw_map, 'c_map': c_map, 'f_map': f_map, 's_map': s_map, 'y_map': y_map,
'emb_array': emb_array, 'train_data': train_dataset, 'test_data': test_dataset, 'dev_data': dev_dataset}, f)
|
the-stack_0_17472 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
import itertools
import json
import logging
import os
from typing import Optional
from argparse import Namespace
from omegaconf import II
import numpy as np
from fairseq import metrics, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
data_utils,
encoders,
indexed_dataset,
)
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.tasks import FairseqTask, register_task
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index("[{}]".format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index("[{}]".format(tgt))
)
eos = tgt_dict.index("[{}]".format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
@dataclass
class TranslationConfig(FairseqDataclass):
data: Optional[str] = field(
default=None,
metadata={
"help": "colon separated path to data directories list, will be iterated upon during epochs "
"in round-robin manner; however, valid and test data are always in the first directory "
"to avoid the need for repeating them in all directories"
},
)
source_lang: Optional[str] = field(
default=None,
metadata={
"help": "source language",
"argparse_alias": "-s",
},
)
target_lang: Optional[str] = field(
default=None,
metadata={
"help": "target language",
"argparse_alias": "-t",
},
)
load_alignments: bool = field(
default=False, metadata={"help": "load the binarized alignments"}
)
left_pad_source: bool = field(
default=False, metadata={"help": "pad the source on the left"}
)
left_pad_target: bool = field(
default=False, metadata={"help": "pad the target on the left"}
)
max_source_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the source sequence"}
)
max_target_positions: int = field(
default=1024, metadata={"help": "max number of tokens in the target sequence"}
)
upsample_primary: int = field(
default=-1, metadata={"help": "the amount of upsample primary dataset"}
)
truncate_source: bool = field(
default=False, metadata={"help": "truncate source to max-source-positions"}
)
num_batch_buckets: int = field(
default=0,
metadata={
"help": "if >0, then bucket source and target lengths into "
"N buckets and pad accordingly; this is useful on TPUs to minimize the number of compilations"
},
)
train_subset: str = II("dataset.train_subset")
dataset_impl: Optional[ChoiceEnum(get_available_dataset_impl())] = II(
"dataset.dataset_impl"
)
required_seq_len_multiple: int = II("dataset.required_seq_len_multiple")
# options for reporting BLEU during validation
eval_bleu: bool = field(
default=False, metadata={"help": "evaluation with BLEU scores"}
)
eval_bleu_args: Optional[str] = field(
default="{}",
metadata={
"help": 'generation args for BLUE scoring, e.g., \'{"beam": 4, "lenpen": 0.6}\', as JSON string'
},
)
eval_bleu_detok: str = field(
default="space",
metadata={
"help": "detokenize before computing BLEU (e.g., 'moses'); required if using --eval-bleu; "
"use 'space' to disable detokenization; see fairseq.data.encoders for other options"
},
)
eval_bleu_detok_args: Optional[str] = field(
default="{}",
metadata={"help": "args for building the tokenizer, if needed, as JSON string"},
)
eval_tokenized_bleu: bool = field(
default=False, metadata={"help": "compute tokenized BLEU instead of sacrebleu"}
)
eval_bleu_remove_bpe: Optional[str] = field(
default=None,
metadata={
"help": "remove BPE before computing BLEU",
"argparse_const": "@@ ",
},
)
eval_bleu_print_samples: bool = field(
default=False, metadata={"help": "print sample generations during validation"}
)
@register_task("translation", dataclass=TranslationConfig)
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
"""
cfg: TranslationConfig
def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):
super().__init__(cfg)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, cfg: TranslationConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
# find language pair automatically
if cfg.source_lang is None or cfg.target_lang is None:
cfg.source_lang, cfg.target_lang = data_utils.infer_language_pair(paths[0])
if cfg.source_lang is None or cfg.target_lang is None:
raise Exception(
"Could not infer language pair, please provide it explicitly"
)
# load dictionaries
src_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.source_lang))
)
tgt_dict = cls.load_dictionary(
os.path.join(paths[0], "dict.{}.txt".format(cfg.target_lang))
)
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
logger.info("[{}] dictionary: {} types".format(cfg.source_lang, len(src_dict)))
logger.info("[{}] dictionary: {} types".format(cfg.target_lang, len(tgt_dict)))
return cls(cfg, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
if split != self.cfg.train_subset:
# if not training data set, use the first shard for valid and test
paths = paths[:1]
data_path = paths[(epoch - 1) % len(paths)]
# infer langcode
src, tgt = self.cfg.source_lang, self.cfg.target_lang
self.datasets[split] = load_langpair_dataset(
data_path,
split,
src,
self.src_dict,
tgt,
self.tgt_dict,
combine=combine,
dataset_impl=self.cfg.dataset_impl,
upsample_primary=self.cfg.upsample_primary,
left_pad_source=self.cfg.left_pad_source,
left_pad_target=self.cfg.left_pad_target,
max_source_positions=self.cfg.max_source_positions,
max_target_positions=self.cfg.max_target_positions,
load_alignments=self.cfg.load_alignments,
truncate_source=self.cfg.truncate_source,
num_buckets=self.cfg.num_batch_buckets,
shuffle=(split != "test"),
pad_to_multiple=self.cfg.required_seq_len_multiple,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
return LanguagePairDataset(
src_tokens,
src_lengths,
self.source_dictionary,
tgt_dict=self.target_dictionary,
constraints=constraints,
)
def build_model(self, cfg):
model = super().build_model(cfg)
        # NOTE: BLEU evaluation is force-disabled here, overriding any
        # eval_bleu setting supplied via the task configuration.
        self.cfg.eval_bleu = False
if self.cfg.eval_bleu:
detok_args = json.loads(self.cfg.eval_bleu_detok_args)
self.tokenizer = encoders.build_tokenizer(
Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args)
)
gen_args = json.loads(self.cfg.eval_bleu_args)
self.sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
if self.cfg.eval_bleu:
bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
logging_output["_bleu_sys_len"] = bleu.sys_len
logging_output["_bleu_ref_len"] = bleu.ref_len
# we split counts into separate entries so that they can be
# summed efficiently across workers using fast-stat-sync
assert len(bleu.counts) == EVAL_BLEU_ORDER
for i in range(EVAL_BLEU_ORDER):
logging_output["_bleu_counts_" + str(i)] = bleu.counts[i]
logging_output["_bleu_totals_" + str(i)] = bleu.totals[i]
return loss, sample_size, logging_output
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
if self.cfg.eval_bleu:
def sum_logs(key):
import torch
result = sum(log.get(key, 0) for log in logging_outputs)
if torch.is_tensor(result):
result = result.cpu()
return result
counts, totals = [], []
for i in range(EVAL_BLEU_ORDER):
counts.append(sum_logs("_bleu_counts_" + str(i)))
totals.append(sum_logs("_bleu_totals_" + str(i)))
if max(totals) > 0:
# log counts as numpy arrays -- log_scalar will sum them correctly
metrics.log_scalar("_bleu_counts", np.array(counts))
metrics.log_scalar("_bleu_totals", np.array(totals))
metrics.log_scalar("_bleu_sys_len", sum_logs("_bleu_sys_len"))
metrics.log_scalar("_bleu_ref_len", sum_logs("_bleu_ref_len"))
def compute_bleu(meters):
import inspect
import sacrebleu
fn_sig = inspect.getfullargspec(sacrebleu.compute_bleu)[0]
if "smooth_method" in fn_sig:
smooth = {"smooth_method": "exp"}
else:
smooth = {"smooth": "exp"}
bleu = sacrebleu.compute_bleu(
correct=meters["_bleu_counts"].sum,
total=meters["_bleu_totals"].sum,
sys_len=meters["_bleu_sys_len"].sum,
ref_len=meters["_bleu_ref_len"].sum,
**smooth
)
return round(bleu.score, 2)
metrics.log_derived("bleu", compute_bleu)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.cfg.max_source_positions, self.cfg.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
def _inference_with_bleu(self, generator, sample, model):
import sacrebleu
def decode(toks, escape_unk=False):
s = self.tgt_dict.string(
toks.int().cpu(),
self.cfg.eval_bleu_remove_bpe,
# The default unknown string in fairseq is `<unk>`, but
# this is tokenized by sacrebleu as `< unk >`, inflating
# BLEU scores. Instead, we use a somewhat more verbose
# alternative that is unlikely to appear in the real
# reference, but doesn't get split into multiple tokens.
unk_string=("UNKNOWNTOKENINREF" if escape_unk else "UNKNOWNTOKENINHYP"),
)
if self.tokenizer:
s = self.tokenizer.decode(s)
return s
gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
hyps, refs = [], []
for i in range(len(gen_out)):
hyps.append(decode(gen_out[i][0]["tokens"]))
refs.append(
decode(
utils.strip_pad(sample["target"][i], self.tgt_dict.pad()),
escape_unk=True, # don't count <unk> as matches to the hypo
)
)
if self.cfg.eval_bleu_print_samples:
logger.info("example hypothesis: " + hyps[0])
logger.info("example reference: " + refs[0])
if self.cfg.eval_tokenized_bleu:
return sacrebleu.corpus_bleu(hyps, [refs], tokenize="none")
else:
return sacrebleu.corpus_bleu(hyps, [refs])
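# Example invocation (a sketch; the data path, architecture and remaining
# training flags are placeholders):
#
#     fairseq-train data-bin/wmt17_en_de \
#         --task translation --source-lang en --target-lang de \
#         --arch transformer --max-tokens 4096 --eval-bleu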
|
the-stack_0_17473 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import time
import traceback
from sqlalchemy import event, exc
from airflow.configuration import conf
log = logging.getLogger(__name__)
def setup_event_handlers(engine):
"""Setups event handlers."""
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
connection_record.info['pid'] = os.getpid()
if engine.dialect.name == "sqlite":
@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
# this ensures sanity in mysql when storing datetimes (not required for postgres)
if engine.dialect.name == "mysql":
@event.listens_for(engine, "connect")
def set_mysql_timezone(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("SET time_zone = '+00:00'")
cursor.close()
@event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy):
pid = os.getpid()
if connection_record.info['pid'] != pid:
connection_record.connection = connection_proxy.connection = None
raise exc.DisconnectionError(
"Connection record belongs to pid {}, "
"attempting to check out in pid {}".format(connection_record.info['pid'], pid)
)
if conf.getboolean('debug', 'sqlalchemy_stats', fallback=False):
@event.listens_for(engine, "before_cursor_execute")
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
conn.info.setdefault('query_start_time', []).append(time.perf_counter())
@event.listens_for(engine, "after_cursor_execute")
def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
total = time.perf_counter() - conn.info['query_start_time'].pop()
file_name = [
f"'{f.name}':{f.filename}:{f.lineno}"
for f in traceback.extract_stack()
if 'sqlalchemy' not in f.filename
][-1]
stack = [f for f in traceback.extract_stack() if 'sqlalchemy' not in f.filename]
stack_info = ">".join([f"{f.filename.rpartition('/')[-1]}:{f.name}" for f in stack][-3:])
log.info(
"@SQLALCHEMY %s |$ %s |$ %s |$ %s ",
total,
file_name,
stack_info,
statement.replace("\n", " "),
)
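# Example usage (illustrative; within Airflow the engine normally comes from
# ``airflow.settings`` rather than being created directly):
#
#     from sqlalchemy import create_engine
#     engine = create_engine("postgresql+psycopg2://airflow:airflow@localhost/airflow")
#     setup_event_handlers(engine)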
|
the-stack_0_17475 | # coding: utf-8
"""
AusSeabed product catalogue
The API description for the Ausseabed product catalogue inventory # noqa: E501
The version of the OpenAPI document: 0.2.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from product_catalogue_py_rest_client.api_client import ApiClient
from product_catalogue_py_rest_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class StylesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def styles_controller_find_all(self, **kwargs): # noqa: E501
"""styles_controller_find_all # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.styles_controller_find_all(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: list[Style]
"""
kwargs['_return_http_data_only'] = True
return self.styles_controller_find_all_with_http_info(**kwargs) # noqa: E501
def styles_controller_find_all_with_http_info(self, **kwargs): # noqa: E501
"""styles_controller_find_all # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.styles_controller_find_all_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(list[Style], status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method styles_controller_find_all" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['access-token'] # noqa: E501
return self.api_client.call_api(
'/styles', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Style]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
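# Example usage (a sketch; the host and token are placeholders and the
# ``Configuration`` import assumes the standard OpenAPI-generated module
# layout):
#
#     from product_catalogue_py_rest_client.configuration import Configuration
#     configuration = Configuration(host="https://catalogue.example.org")
#     configuration.access_token = "YOUR_TOKEN"
#     with ApiClient(configuration) as api_client:
#         styles = StylesApi(api_client).styles_controller_find_all()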
|
the-stack_0_17477 | def main():
hist = {}
for k, a_i in enumerate(a):
if a_i not in hist:
hist[a_i] = 0
hist[a_i] += 1
current_height = 0
carryover = None
for h_i, count in reversed(sorted(hist.items())):
cnt = count
if carryover:
cnt -= 1
current_height += h_i * (cnt // 2)
if cnt % 2 != 0:
current_height += h_i
carryover = h_i
else:
carryover = None
if current_height > h:
return k
return n
if __name__ == '__main__':
n, h = map(int, input().split())
a = list(map(int, input().split()))
print(main())
|
the-stack_0_17478 | import numpy as np
import numpy.linalg as la
import numpy.random as npr
import matplotlib.pyplot as plt
import os
import sys
sys.path.append("..")
from pickle_io import pickle_import,pickle_export
folderstr_list = []
folderstr_list.append("1564554983p5677059_1e4")
folderstr_list.append("1564555001p5515425_1e5")
folderstr_list.append("1564555047p6032026_1e6")
folderstr_list.append("1564555255p6612067_1e7")
folderstr_list.append("1564525514p9662921_1e8")
nr_list = [1e4,1e5,1e6,1e7,1e8]
N = len(nr_list)
data_noiseless = []
data_noisy = []
for i,folderstr in enumerate(folderstr_list):
dirname_in = folderstr
filename = 'data_noiseless.pickle'
filename_in = os.path.join(dirname_in,filename)
data_noiseless.append(pickle_import(filename_in))
filename = 'data_noisy.pickle'
filename_in = os.path.join(dirname_in,filename)
data_noisy.append(pickle_import(filename_in))
# Plotting
mean_error_norm_noiseless = np.zeros(N)
mean_error_norm_noisy = np.zeros(N)
mean_error_angle_noiseless = np.zeros(N)
mean_error_angle_noisy = np.zeros(N)
for i in range(N):
mean_error_norm_noiseless[i] = np.mean(data_noiseless[i][4])/la.norm(data_noiseless[0][0])
mean_error_norm_noisy[i] = np.mean(data_noisy[i][4])/la.norm(data_noisy[0][0])
mean_error_angle_noiseless[i] = np.mean(data_noiseless[i][2])
mean_error_angle_noisy[i] = np.mean(data_noisy[i][2])
plt.rc('text', usetex=True)
plt.rc('font', family='sans-serif')
plt.figure(figsize=(4,2.2))
#plt.figure(figsize=(7,5))
plt.semilogx(nr_list,mean_error_norm_noiseless,linewidth=4,marker='^',markersize=8,color='tab:blue')
plt.semilogx(nr_list,mean_error_norm_noisy,linewidth=4,marker='o',markersize=8,color='tab:red')
guide_color = 'tab:grey'
plt.semilogx(nr_list,0.1*np.ones(N),color=guide_color,linestyle='--')
#plt.axvline(5*10**5,ymax=0.15,color=guide_color,linestyle='--')
#plt.axvline(10**8,ymax=0.15,color=guide_color,linestyle='--')
plt.yticks(ticks=[0,0.1,0.25,0.50,0.75])
plt.xlabel('Number of rollouts')
plt.ylabel('Normalized gradient estimate error')
plt.ylabel(r'$\|\nabla C(K)-\widehat{\nabla} C_K \|/\|\nabla C(K)\|$')
plt.legend(["Noiseless","Noisy"])
plt.tight_layout()
plt.savefig("plot_gradient_estimation_error.png",dpi=300)
#plt.savefig("fig1alt.png",dpi=300)
#plt.figure()
#plt.semilogx(nr_list,mean_error_angle_noiseless,linewidth=4)
#plt.semilogx(nr_list,mean_error_angle_noisy,linewidth=4)
#plt.xlabel('Number of rollouts')
#plt.ylabel('Gradient estimate error angle (deg)')
#plt.legend(["Noiseless","Noisy"]) |
the-stack_0_17480 | import logging
import pandas as pd
import numpy as np
def basicPivot(df, key, column, value):
return df.pivot_table(index=[key], columns=[column], values=value, aggfunc=np.median).fillna(0)
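# Illustrative example (column names are hypothetical): a long table with
# columns [protein_id, tissue, tpm] becomes a wide matrix with one row per
# protein_id and one median-aggregated column per tissue:
#
#     wide = basicPivot(df, "protein_id", "tissue", "tpm")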
def gtex(dbad):
df = dbad.loadGTEX()
logging.info("staticData: DBAdapter:{0}; GTEX: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df.info() #DEBUG
df = basicPivot(df, "protein_id", "tissue_type_detail", "median_tpm")
df.reset_index(drop=False, inplace=True)
logging.info("staticData: GTEX proteins: rows: {0}; cols: {1}".format(df.shape[0], df.shape[1]))
return df
def lincs(dbad):
df = dbad.loadLINCS()
logging.info("staticData: DBAdapter:{0}; LINCS: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df = basicPivot(df, "protein_id", "col_id", "zscore")
df.reset_index(drop=False, inplace=True)
logging.info("staticData: LINCS proteins: rows: {0}; cols: {1}".format(df.shape[0], df.shape[1]))
return df
def ccle(dbad):
df = dbad.loadCCLE()
logging.info("staticData: DBAdapter:{0}; CCLE: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df["col_id"] = (df.cell_id+"_"+df.tissue)
df.col_id = df.col_id.str.replace("[ /,]", "_")
df = df[["protein_id", "col_id", "expression"]].drop_duplicates()
df = basicPivot(df, "protein_id", "col_id", "expression")
df.reset_index(drop=False, inplace=True)
logging.info("staticData: CCLE proteins: rows: {0}; cols: {1}".format(df.shape[0], df.shape[1]))
return df
def hpa(dbad):
#(Why did Oleg use mode not median?)
df = dbad.loadHPA()
logging.debug("staticData ({0}): HPA: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df = df.drop_duplicates()
df.col_id = df.col_id.str.replace("[ /,]", "_")
df = df.rename(columns={'level':'level_str'})
for key,val in df["level_str"].value_counts().iteritems():
logging.debug('\t%s: %6d: %s'%("level_str", val, key))
df["level"] = df.level_str.apply(lambda s: 3 if s=="High" else 2 if s=="Medium" else 1 if s=="Low" else 0 if "Not detected" else 0)
for key,val in df["level"].value_counts().iteritems():
logging.debug('\t%s: %6d: %s'%("level", val, key))
logging.debug("staticData ({0}): HPA: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
#df.info() #DEBUG
df = basicPivot(df, "protein_id", "col_id", "level")
logging.debug("staticData ({0}): HPA: rows: {1}; cols: {2}".format(type(dbad).__name__, df.shape[0], df.shape[1]))
df.reset_index(drop=False, inplace=True)
return df
###
|
the-stack_0_17481 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('object_position04.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [59106432, 60702720]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
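        # object_position 2 corresponds to "move but don't size with cells" in
        # XlsxWriter's object positioning options (comment added for clarity).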
worksheet.insert_chart('E9', chart, {'object_position': 2})
workbook.close()
self.assertExcelEqual()
|
the-stack_0_17484 | class Node:
def __init__(self,data):
self.data = data
self.previous = None
self.next = None
class removeDuplicates:
def __init__(self):
self.head = None
self.tail = None
    def remove_duplicates(self):
        # Walk the list with two pointers: for every node `current`, scan the
        # remainder of the list and unlink any later node carrying the same data.
        if self.head is None:
            return
        current = self.head
        while current is not None:
            index = current.next
            while index is not None:
                if current.data == index.data:
                    # Unlink `index`; its predecessor always exists because the
                    # scan starts at current.next.
                    index.previous.next = index.next
                    if index.next is not None:
                        index.next.previous = index.previous
                index = index.next
            current = current.next
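# Illustrative usage (added; the original file defines no append helper, so the
# doubly linked nodes are wired by hand here).
def _remove_duplicates_example():
    dll = removeDuplicates()
    a, b, c = Node(1), Node(2), Node(1)
    a.next, b.previous = b, a
    b.next, c.previous = c, b
    dll.head = a
    dll.remove_duplicates()  # the trailing duplicate 1 is unlinked: 1 <-> 2
    values = []
    node = dll.head
    while node is not None:
        values.append(node.data)
        node = node.next
    return values  # [1, 2]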
|
the-stack_0_17485 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 11:02:00 2021
@author: Annika
"""
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import scipy.io
from datetime import datetime, timedelta
import numpy as np
from read_log_files import *
from read_time_rides import *
def datenum(d):
return 366 + d.toordinal() + (d - datetime.fromordinal(d.toordinal())).total_seconds()/(24*60*60)
#Function to reverse datenum
def datestr(x, tz=None):
dt = datetime.fromordinal(int(x)) + timedelta(days=x%1) - timedelta(days = 366)
return dt
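# Round-trip sketch (added for illustration; not in the original script):
# datenum() converts a datetime to a MATLAB-style serial day number and
# datestr() converts such a number back, so the two should invert each other
# up to floating-point precision.
def _datenum_roundtrip_example():
    t = datetime(2019, 2, 22, 8, 30)
    serial = datenum(t)
    recovered = datestr(serial)
    return t, serial, recovered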
def plot_temp_RH_Klosters_gondola(path_log_file,path_time_rides,path_data_Klosters):
myFmt = mdates.DateFormatter('%H:%M')
start_time = '2019-02-22 08:00:00'
end_time = '2019-02-22 10:00:00'
#Read in data from weather station in Klosters
mat = scipy.io.loadmat(path_data_Klosters)
data_KLA = mat['WS']
T_KLA = data_KLA['T']
T_KLA = T_KLA[0][0][0]
    T_KLA = np.array(T_KLA, dtype=float)  # np.float was removed in newer NumPy; the builtin float is equivalent here
RH_KLA = data_KLA['RH']
RH_KLA = RH_KLA[0][0][0]
    RH_KLA = np.array(RH_KLA, dtype=float)  # np.float was removed in newer NumPy; the builtin float is equivalent here
time_KLA = data_KLA['time']
time_KLA = time_KLA[0][0][0]
time_KLA = np.array([datestr(time_KLA[i]) for i in range(len(time_KLA))])
index_KLA = pd.DatetimeIndex(time_KLA)
T_KLA = pd.Series(T_KLA,index = index_KLA)
RH_KLA = pd.Series(RH_KLA,index=index_KLA)
#Read in log file from HOLIMO
log = read_log_file(start_time,end_time,path_log_file)
day_of_month = log['day_of_month'][0]
month = log['month'][0]
year = log['year'][0]
hour = log['hour'][0]
minute = log['minute'][0]
second = log['second'][0]
time_gondel = [str(day_of_month[i])+'/'+str(month[i])+'/'+str(year[i])+' ' +str(hour[i])+':'+str(minute[i])+':'+str(second[i]) for i in range(0,len(month))]
index_gondel = pd.DatetimeIndex(time_gondel)
T_gondel = pd.Series(log['temp'][0],index = index_gondel)
RH_gondel = pd.Series(log['rh'][0],index = index_gondel)
time_gondel = [datenum(index_gondel[i]) for i in range(0,len(index_gondel))]
#Read in time of gondola rides
[start_time_ride,end_time_ride] = read_time_rides(path_time_rides)
#Derive temperature at Gotschnaboden (Gondola at lowest point considered for measurements)
idx_gb = [np.argmin(np.abs(time_gondel-start_time_ride[i])) for i in range(0,len(start_time_ride))]
T_GB=T_gondel[idx_gb]
RH_GB=RH_gondel[idx_gb]
index_GB = index_gondel[idx_gb]
T_GB = pd.Series(T_GB,index=index_GB)
RH_GB = pd.Series(RH_GB,index=index_GB)
#Derive temperature at Gotschnagrat (Gondola at highest point considered for measurements)
idx_gg = [np.argmin(np.abs(time_gondel-end_time_ride[i])) for i in range(0,len(end_time_ride))]
T_GG=T_gondel[idx_gg]
RH_GG=RH_gondel[idx_gg]
index_GG = index_gondel[idx_gg]
T_GG = pd.Series(T_GG,index=index_GG)
RH_GG = pd.Series(RH_GG,index=index_GG)
time_gb = np.array([datestr(start_time_ride[i]) for i in range(len(start_time_ride))])
time_gg = np.array([datestr(end_time_ride[i]) for i in range(len(end_time_ride))])
x_gr = np.column_stack((time_gb,time_gg))
y_gr = np.column_stack((T_GB,T_GG))
y_gr_RH = np.column_stack((RH_GB,RH_GG))
#Melting layer
melting = [0,0]
time_melting = [start_time,end_time]
time_melting = pd.to_datetime(time_melting)
index_melting = pd.DatetimeIndex(time_melting)
melting = pd.Series(melting, index=index_melting)
#Lines for gondel rides
fs=25
f=1
plt.figure(f)
gr = plt.plot(x_gr.transpose(),y_gr.transpose(),color = [0.7, 0.7, 0.7])
gg, = plt.plot(T_GG[start_time:end_time].index,T_GG[start_time:end_time],label='Gotschnagrat 2300m',color = [0,0.447,0.741])
gb, = plt.plot(T_GB[start_time:end_time].index,T_GB[start_time:end_time],label='Gotschnaboden 1700m',color = [0.9290, 0.6940, 0.1250])
kla, = plt.plot(T_KLA[start_time:end_time].index,T_KLA[start_time:end_time],label='Klosters 1200m',color = [0, 0.5, 0])
m = plt.plot(melting[start_time:end_time].index,melting[start_time:end_time],'k')
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(myFmt)
plt.gca().invert_yaxis()
plt.xlim(start_time,end_time)
plt.ylim(4,-3)
plt.xlabel('Time (UTC)',fontsize=fs)
plt.ylabel('Temperature (°C)',fontsize=fs)
plt.tick_params(right=True)
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
plt.show()
f=2
plt.figure(f)
gr = plt.plot(x_gr.transpose(),y_gr_RH.transpose(),color = [0.7, 0.7, 0.7])
gg, = plt.plot(RH_GG[start_time:end_time].index,RH_GG[start_time:end_time],label='Gotschnagrat 2300m',color = [0,0.447,0.741])
gb, = plt.plot(RH_GB[start_time:end_time].index,RH_GB[start_time:end_time],label='Gotschnaboden 1700m',color = [0.9290, 0.6940, 0.1250])
kla, = plt.plot(RH_KLA[start_time:end_time].index,RH_KLA[start_time:end_time],label='Klosters 1200m',color = [0, 0.5, 0])
plt.gcf().autofmt_xdate()
plt.gca().xaxis.set_major_formatter(myFmt)
plt.xlim(start_time,end_time)
plt.ylim(75,100)
plt.xlabel('Time (UTC)',fontsize=fs)
plt.ylabel('RH (%)',fontsize=fs)
plt.tick_params(right=True)
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
plt.show()
|
the-stack_0_17486 | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from decimal import Decimal
from test_framework.address import (
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
program_to_witness,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.blocktools import witness_script, send_to_witness
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, sha256, ToHex
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE, OP_DROP
from test_framework.test_framework import LearncoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes, hex_str_to_bytes, sync_blocks, try_rpc
from io import BytesIO
NODE_0 = 0
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_spendable_utxo(node, min_value):
for utxo in node.listunspent(query_options={'minimumAmount': min_value}):
if utxo['spendable']:
return utxo
raise AssertionError("Unspent output equal or higher than %s not found" % min_value)
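# Note (added for clarity): listunspent filters node-side via
# query_options={'minimumAmount': min_value}, so find_spendable_utxo returns
# the first spendable UTXO worth at least min_value, or raises if none exists.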
class SegWitTest(LearncoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [
[
"-rpcserialversion=0",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
[
"-blockversion=4",
"-rpcserialversion=1",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
[
"-blockversion=536870915",
"-vbparams=segwit:0:999999999999",
"-addresstype=legacy",
"-deprecatedrpc=addwitnessaddress",
],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes(self.nodes[0], 2)
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
assert_raises_rpc_error(-26, error_msg, send_to_witness, use_p2wsh=1, node=node, utxo=getutxo(txid), pubkey=self.pubkey[0], encode_p2sh=False, amount=Decimal("49.998"), sign=sign, insert_redeem_script=redeem_script)
def run_test(self):
self.nodes[0].generate(161) #block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"])
multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_addr = self.nodes[i].addwitnessaddress(newaddress)
bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False)
p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address']
bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address']
assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1]))
assert_equal(bip173_addr, key_to_p2wpkh(self.pubkey[-1]))
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
self.nodes[2].generate(4) # blocks 428-431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify default node can't accept txs with missing witness")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', wit_ids[NODE_2][WIT_V0][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', wit_ids[NODE_2][WIT_V1][2], sign=False)
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program hash mismatch) (code 64)', p2sh_ids[NODE_2][WIT_V0][2], sign=False, redeem_script=witness_script(False, self.pubkey[2]))
self.fail_accept(self.nodes[2], 'non-mandatory-script-verify-flag (Witness program was passed an empty witness) (code 64)', p2sh_ids[NODE_2][WIT_V1][2], sign=False, redeem_script=witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
        hex_tx = self.nodes[0].gettransaction(txid1)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Check that wtxid is properly reported in mempool entry
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
        assert not self.nodes[0].getaddressinfo(uncompressed_spendable_address[0])['iscompressed']
        assert self.nodes[0].getaddressinfo(compressed_spendable_address[0])['iscompressed']
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]])['address'])
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([p2sh])
# bare multisig can be watched and signed, but is not treated as ours
solvable_after_importaddress.extend([bare])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that no witness address should be returned by unsolvable addresses
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# addwitnessaddress should return a witness addresses even if keys are not in the wallet
self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
        spendable_after_addwitnessaddress = []  # These outputs should be seen after addwitnessaddress
        solvable_after_addwitnessaddress = []  # These outputs should be seen after addwitnessaddress but not spendable
        solvable_anytime = []  # These outputs should be solvable after importpubkey
        unseen_anytime = []  # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are always spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# P2WSH multisig without private key are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2SH_P2PK, P2SH_P2PKH with compressed keys are always solvable
solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
self.mine_and_test_listunspent(spendable_anytime, 2)
self.mine_and_test_listunspent(solvable_anytime, 1)
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
# This will raise an exception
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].getaddressinfo(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
v1_addr = program_to_witness(1, [3,5])
v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])],{v1_addr: 1})
v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
# Test that importing native P2WPKH/P2WSH scripts works
for use_p2wsh in [False, True]:
if use_p2wsh:
scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
else:
scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
self.nodes[1].importaddress(scriptPubKey, "", False)
rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
rawtxfund = self.nodes[1].signrawtransactionwithwallet(rawtxfund)["hex"]
txid = self.nodes[1].sendrawtransaction(rawtxfund)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
# Assert it is properly saved
self.stop_node(1)
self.start_node(1)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
def mine_and_test_listunspent(self, script_list, ismine):
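        # ismine semantics (comment added for clarity): 2 means every script in
        # script_list must be spendable by the wallet, 1 means watch-only /
        # solvable but not spendable, and 0 means the wallet should not see the
        # outputs at all.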
utxo = find_spendable_utxo(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self,v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self,v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success = True):
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i)
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
SegWitTest().main()
|
the-stack_0_17488 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import os,re
from setuptools import setup, find_packages
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
with open(os.path.join(package, "__init__.py")) as f:
return re.search("__version__ = ['\"]([^'\"]+)['\"]", f.read()).group(1)
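# Example (illustrative): with mypkg/__init__.py containing a line such as
#   __version__ = "1.2.3"
# get_version("mypkg") returns "1.2.3".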
def get_long_description():
"""
Return the README.
"""
with open("README.md", encoding="utf8") as f:
return f.read()
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [
dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, "__init__.py"))
]
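# Example (illustrative): for a layout of
#   mypkg/__init__.py
#   mypkg/sub/__init__.py
# get_packages("mypkg") returns ["mypkg", "mypkg/sub"] (directory paths, since
# os.walk reports dirpath rather than dotted module names).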
setup(
name="{{cookiecutter.project_name}}",
version=get_version("{{cookiecutter.project_slug}}"),
python_requires=">=3.6",
license="BSD",
description="{{cookiecutter.project_short_description}}",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="{{ cookiecutter.full_name.replace('\"', '\\\"') }}",
author_email="{{ cookiecutter.email }}",
packages=get_packages("{{cookiecutter.project_slug}}"),
# package_data={"databases": ["py.typed"]},
# data_files=[("", ["LICENSE.md"])],
install_requires=[
"starlette>=0.13.0",
"websockets==8.0.1",
'dalchemy @ git+https://github.com/Tuteria/shared_lib.git@master',
],
extras_require={},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
zip_safe=False,
) |
the-stack_0_17490 | # MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import os
from pathlib import Path
import pytest
from flexmock import flexmock
from ogr import GithubService, GitlabService
from packit.config import JobConfigTriggerType
from packit_service.config import ServiceConfig
from packit_service.models import JobTriggerModelType
from packit_service.service.events import (
PullRequestGithubEvent,
PushGitHubEvent,
ReleaseEvent,
MergeRequestGitlabEvent,
)
from packit_service.worker.parser import Parser
from tests.spellbook import SAVED_HTTPD_REQS, DATA_DIR
@pytest.fixture(scope="session", autouse=True)
def global_service_config():
"""
This config will be used instead of the one loaded from the local config file.
You can still mock/overwrite the service config content in your tests
but this one will be used by default.
"""
service_config = ServiceConfig()
service_config.services = {
GithubService(token="token"),
GitlabService(token="token"),
}
service_config.dry_run = False
service_config.server_name = "localhost"
service_config.github_requests_log_path = "/path"
ServiceConfig.service_config = service_config
@pytest.fixture()
def dump_http_com():
"""
This fixture is able to dump whole http traffic of a single test case
so that no http comm is happening while testing
Usage:
1. add it to your test case and pass the test path
def test_something(dump_http_com):
service_config = dump_http_com(f"{Path(__file__).name}/pr_handle.yaml")
2. Run your test
GITHUB_TOKEN=asdqwe pytest-3 -k test_something
3. Your http communication should now be stored in tests/data/http-requests/{path}
4. Once you rerun the tests WITHOUT the token, the offline communication should be picked up
"""
def f(path: str):
""" path points to a file where the http communication will be saved """
conf = ServiceConfig()
# TODO: add pagure support
# conf._pagure_user_token = os.environ.get("PAGURE_TOKEN", "test")
# conf._pagure_fork_token = os.environ.get("PAGURE_FORK_TOKEN", "test")
conf._github_token = os.getenv("GITHUB_TOKEN", None)
conf.dry_run = True
target_path: Path = SAVED_HTTPD_REQS / path
target_path.parent.mkdir(parents=True, exist_ok=True)
conf.github_requests_log_path = str(target_path)
return conf
return f
def copr_build_model(
repo_name="bar",
repo_namespace="foo",
forge_instance="github.com",
job_config_trigger_type=JobConfigTriggerType.pull_request,
job_trigger_model_type=JobTriggerModelType.pull_request,
**trigger_model_kwargs,
):
project_model = flexmock(
repo_name=repo_name,
namespace=repo_namespace,
project_url=f"https://{forge_instance}/{repo_namespace}/{repo_name}",
)
pr_model = flexmock(
id=1,
pr_id=123,
project=project_model,
job_config_trigger_type=job_config_trigger_type,
**trigger_model_kwargs,
)
trigger_model = flexmock(
id=2,
type=job_trigger_model_type,
trigger_id=1,
get_trigger_object=lambda: pr_model,
)
return flexmock(
id=1,
build_id="1",
commit_sha="0011223344",
project_name="some-project",
owner="some-owner",
web_url="https://some-url",
target="some-target",
status="some-status",
srpm_build=flexmock(logs="asdsdf", url=None),
job_trigger=trigger_model,
)
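# Illustrative usage (added; not part of the original conftest): the factory
# above returns a flexmock graph shaped like a CoprBuildModel row, and callers
# may override the project fields, e.g.
#   build = copr_build_model(repo_name="baz", repo_namespace="qux")
#   build.job_trigger.get_trigger_object().project.repo_name  # -> "baz"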
@pytest.fixture(scope="module")
def copr_build_pr():
return copr_build_model()
@pytest.fixture()
def koji_build_pr():
project_model = flexmock(
repo_name="bar", namespace="foo", project_url="https://github.com/foo/bar"
)
pr_model = flexmock(
id=1,
pr_id=123,
project=project_model,
job_config_trigger_type=JobConfigTriggerType.pull_request,
)
trigger_model = flexmock(
id=2,
type=JobTriggerModelType.pull_request,
trigger_id=1,
get_trigger_object=lambda: pr_model,
)
koji_build_model = flexmock(
id=1,
build_id="1",
commit_sha="0011223344",
project_name="some-project",
owner="some-owner",
web_url="https://some-url",
target="some-target",
status="some-status",
srpm_build=flexmock(logs="asdsdf"),
job_trigger=trigger_model,
)
return koji_build_model
@pytest.fixture(scope="module")
def github_release_webhook() -> dict:
with open(DATA_DIR / "webhooks" / "github" / "release.json") as outfile:
return json.load(outfile)
@pytest.fixture(scope="module")
def release_event(github_release_webhook) -> ReleaseEvent:
return Parser.parse_release_event(github_release_webhook)
@pytest.fixture(scope="module")
def github_pr_webhook():
with open(DATA_DIR / "webhooks" / "github" / "pr.json") as outfile:
return json.load(outfile)
@pytest.fixture(scope="module")
def github_push_webhook():
with open(DATA_DIR / "webhooks" / "github" / "push_branch.json") as outfile:
return json.load(outfile)
@pytest.fixture(scope="module")
def github_pr_event(github_pr_webhook) -> PullRequestGithubEvent:
return Parser.parse_pr_event(github_pr_webhook)
@pytest.fixture(scope="module")
def github_push_event(github_push_webhook) -> PushGitHubEvent:
return Parser.parse_push_event(github_push_webhook)
@pytest.fixture(scope="module")
def gitlab_mr_webhook():
with open(DATA_DIR / "webhooks" / "gitlab" / "mr_event.json") as outfile:
return json.load(outfile)
@pytest.fixture(scope="module")
def gitlab_mr_event(gitlab_mr_webhook) -> MergeRequestGitlabEvent:
return Parser.parse_mr_event(gitlab_mr_webhook)
@pytest.fixture
def cache_clear(request):
"""
Fixture which cleans lru_cache of functions defined in module variable CACHE_CLEAR.
This allows reliable test results.
:return:
"""
if getattr(request.module, "CACHE_CLEAR", None):
[f.cache_clear() for f in getattr(request.module, "CACHE_CLEAR")]
|
the-stack_0_17492 | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='4EIgRS3BINfpj8mFVlolNSa914hNKAgdKSxHDeO5Ym4dU2GLOzq5yTdvaisW6xWL')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
"localhost",
"0.0.0.0",
"127.0.0.1",
]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG # noqa F405
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.console.EmailBackend')
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = 'localhost'
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2']
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions'] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
|
the-stack_0_17493 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.common.net.luci_auth import LuciAuth
class LuciAuthTest(unittest.TestCase):
def test_run_on_linux(self):
host = MockHost(os_name='linux')
host.filesystem.maybe_make_directory(
'/mock-checkout/third_party/depot_tools')
luci_auth = LuciAuth(host)
luci_auth.get_access_token()
self.assertListEqual(
host.executive.calls,
[['/mock-checkout/third_party/depot_tools/luci-auth', 'token']])
def test_run_on_windows(self):
host = MockHost(os_name='win')
host.filesystem.maybe_make_directory(
'/mock-checkout/third_party/depot_tools')
luci_auth = LuciAuth(host)
luci_auth.get_access_token()
self.assertEqual(
host.executive.calls,
[['/mock-checkout/third_party/depot_tools/luci-auth.bat', 'token']
])
|
the-stack_0_17496 | import os
import re
import threading
import unittest
import pytest
from packaging import version
from localstack.services.install import TERRAFORM_BIN, install_terraform
from localstack.utils.aws import aws_stack
from localstack.utils.common import is_command_available, rm_rf, run, start_worker_thread
BUCKET_NAME = "tf-bucket"
QUEUE_NAME = "tf-queue"
QUEUE_ARN = "arn:aws:sqs:us-east-1:000000000000:tf-queue"
# lambda Testing Variables
LAMBDA_NAME = "tf-lambda"
LAMBDA_ARN = f"arn:aws:lambda:us-east-1:000000000000:function:{LAMBDA_NAME}"
LAMBDA_HANDLER = "DotNetCore2::DotNetCore2.Lambda.Function::SimpleFunctionHandler"
LAMBDA_RUNTIME = "dotnetcore2.0"
LAMBDA_ROLE = "arn:aws:iam::000000000000:role/iam_for_lambda"
INIT_LOCK = threading.RLock()
def check_terraform_version():
if not is_command_available(TERRAFORM_BIN):
return False, None
ver_string = run([TERRAFORM_BIN, "-version"])
ver_string = re.search(r"v(\d+\.\d+\.\d+)", ver_string).group(1)
if ver_string is None:
return False, None
return version.parse(ver_string) < version.parse("0.15"), ver_string
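# Example (illustrative): `terraform -version` prints a first line such as
# "Terraform v0.13.7"; the regex above captures "0.13.7" and the function then
# returns (True, "0.13.7") because 0.13.7 < 0.15.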
class TestTerraform(unittest.TestCase):
@classmethod
def setUpClass(cls):
with INIT_LOCK:
available, version = check_terraform_version()
if not available:
msg = "could not find a compatible version of terraform"
if version:
msg += f" (version = {version})"
else:
msg += " (command not found)"
return pytest.skip(msg)
run("cd %s; %s apply -input=false tfplan" % (cls.get_base_dir(), TERRAFORM_BIN))
@classmethod
def tearDownClass(cls):
run("cd %s; %s destroy -auto-approve" % (cls.get_base_dir(), TERRAFORM_BIN))
@classmethod
def init_async(cls):
def _run(*args):
with INIT_LOCK:
install_terraform()
base_dir = cls.get_base_dir()
if not os.path.exists(os.path.join(base_dir, ".terraform", "plugins")):
run("cd %s; %s init -input=false" % (base_dir, TERRAFORM_BIN))
# remove any cache files from previous runs
for tf_file in [
"tfplan",
"terraform.tfstate",
"terraform.tfstate.backup",
]:
rm_rf(os.path.join(base_dir, tf_file))
# create TF plan
run("cd %s; %s plan -out=tfplan -input=false" % (base_dir, TERRAFORM_BIN))
start_worker_thread(_run)
@classmethod
def get_base_dir(cls):
return os.path.join(os.path.dirname(__file__), "terraform")
@pytest.mark.skip_offline
def test_bucket_exists(self):
s3_client = aws_stack.connect_to_service("s3")
response = s3_client.head_bucket(Bucket=BUCKET_NAME)
self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])
cors = {
"AllowedHeaders": ["*"],
"AllowedMethods": ["GET", "PUT", "POST"],
"AllowedOrigins": ["*"],
"ExposeHeaders": ["ETag", "x-amz-version-id"],
"MaxAgeSeconds": 3000,
}
response = s3_client.get_bucket_cors(Bucket=BUCKET_NAME)
self.assertEqual(cors, response["CORSRules"][0])
response = s3_client.get_bucket_versioning(Bucket=BUCKET_NAME)
self.assertEqual("Enabled", response["Status"])
@pytest.mark.skip_offline
def test_sqs(self):
sqs_client = aws_stack.connect_to_service("sqs")
queue_url = sqs_client.get_queue_url(QueueName=QUEUE_NAME)["QueueUrl"]
response = sqs_client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"])
self.assertEqual("90", response["Attributes"]["DelaySeconds"])
self.assertEqual("2048", response["Attributes"]["MaximumMessageSize"])
self.assertEqual("86400", response["Attributes"]["MessageRetentionPeriod"])
self.assertEqual("10", response["Attributes"]["ReceiveMessageWaitTimeSeconds"])
@pytest.mark.skip_offline
def test_lambda(self):
lambda_client = aws_stack.connect_to_service("lambda")
response = lambda_client.get_function(FunctionName=LAMBDA_NAME)
self.assertEqual(LAMBDA_NAME, response["Configuration"]["FunctionName"])
self.assertEqual(LAMBDA_HANDLER, response["Configuration"]["Handler"])
self.assertEqual(LAMBDA_RUNTIME, response["Configuration"]["Runtime"])
self.assertEqual(LAMBDA_ROLE, response["Configuration"]["Role"])
@pytest.mark.skip_offline
def test_event_source_mapping(self):
lambda_client = aws_stack.connect_to_service("lambda")
all_mappings = lambda_client.list_event_source_mappings(
EventSourceArn=QUEUE_ARN, FunctionName=LAMBDA_NAME
)
function_mapping = all_mappings.get("EventSourceMappings")[0]
assert function_mapping["FunctionArn"] == LAMBDA_ARN
assert function_mapping["EventSourceArn"] == QUEUE_ARN
@pytest.mark.skip_offline
def test_apigateway(self):
apigateway_client = aws_stack.connect_to_service("apigateway")
rest_apis = apigateway_client.get_rest_apis()
rest_id = None
for rest_api in rest_apis["items"]:
if rest_api["name"] == "test-tf-apigateway":
rest_id = rest_api["id"]
break
self.assertTrue(rest_id)
resources = apigateway_client.get_resources(restApiId=rest_id)["items"]
# We always have 1 default root resource (with path "/")
self.assertEqual(3, len(resources))
res1 = [r for r in resources if r.get("pathPart") == "mytestresource"]
self.assertTrue(res1)
self.assertEqual("/mytestresource", res1[0]["path"])
self.assertEqual(2, len(res1[0]["resourceMethods"]))
self.assertEqual("MOCK", res1[0]["resourceMethods"]["GET"]["methodIntegration"]["type"])
res2 = [r for r in resources if r.get("pathPart") == "mytestresource1"]
self.assertTrue(res2)
self.assertEqual("/mytestresource1", res2[0]["path"])
self.assertEqual(2, len(res2[0]["resourceMethods"]))
self.assertEqual(
"AWS_PROXY", res2[0]["resourceMethods"]["GET"]["methodIntegration"]["type"]
)
self.assertTrue(res2[0]["resourceMethods"]["GET"]["methodIntegration"]["uri"])
@pytest.mark.skip_offline
def test_route53(self):
route53 = aws_stack.connect_to_service("route53")
response = route53.create_hosted_zone(Name="zone123", CallerReference="ref123")
self.assertEqual(201, response["ResponseMetadata"]["HTTPStatusCode"])
change_id = response.get("ChangeInfo", {}).get("Id", "change123")
response = route53.get_change(Id=change_id)
self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])
@pytest.mark.skip_offline
def test_acm(self):
acm = aws_stack.connect_to_service("acm")
certs = acm.list_certificates()["CertificateSummaryList"]
certs = [c for c in certs if c.get("DomainName") == "example.com"]
self.assertEqual(1, len(certs))
@pytest.mark.skip_offline
def test_apigateway_escaped_policy(self):
apigateway_client = aws_stack.connect_to_service("apigateway")
rest_apis = apigateway_client.get_rest_apis()
service_apis = []
for rest_api in rest_apis["items"]:
if rest_api["name"] == "service_api":
service_apis.append(rest_api)
self.assertEqual(1, len(service_apis))
@pytest.mark.skip_offline
def test_dynamodb(self):
def _table_exists(tablename, dynamotables):
return any(name for name in dynamotables["TableNames"] if name == tablename)
dynamo_client = aws_stack.connect_to_service("dynamodb")
tables = dynamo_client.list_tables()
self.assertTrue(_table_exists("tf_dynamotable1", tables))
self.assertTrue(_table_exists("tf_dynamotable2", tables))
self.assertTrue(_table_exists("tf_dynamotable3", tables))
|
the-stack_0_17497 | from utils import utils
class AskComplexGraph():
filepath = "../outputs/v4/intents/"
name = 'Ask for complex graph'
def __init__(self, database, table):
self.database = database
self.table = table
@property
def intent(self):
intent = {
"id":"7c308982-f0d2-4129-b5d4-5f01d21545b8",
"name":"Ask for complex graph",
"auto":True,
"contexts":[
],
"responses":[
{
"resetContexts":False,
"affectedContexts":[
{
"name":"Twovargraph-followup",
"parameters":{
},
"lifespan":2
}
],
"parameters":[
{
"id":"3bb0df88-f36d-42eb-be60-08c021ef469d",
"required":True,
"dataType":"@columns_select",
"name":"columns_select",
"value":"$columns_select",
"promptMessages":[
],
"noMatchPromptMessages":[
],
"noInputPromptMessages":[
],
"outputDialogContexts":[
],
"isList":True
},
{
"id":"26e56b0a-03b7-4ceb-8322-5b4d9089d231",
"required":False,
"dataType":"",
"name":"tablename",
"value":self.table,
"promptMessages":[
],
"noMatchPromptMessages":[
],
"noInputPromptMessages":[
],
"outputDialogContexts":[
],
"isList":False
},
{
"id":"ba50e724-051b-4c8d-aacf-23b97fea00f0",
"required":False,
"dataType":"",
"name":"databasename",
"value":self.database,
"promptMessages":[
],
"noMatchPromptMessages":[
],
"noInputPromptMessages":[
],
"outputDialogContexts":[
],
"isList":False
}
],
"messages":[
],
"defaultResponsePlatforms":{
},
"speech":[
]
}
],
"priority":500000,
"webhookUsed":True,
"webhookForSlotFilling":False,
"fallbackIntent":False,
"events":[
],
"conditionalResponses":[
],
"condition":"",
"conditionalFollowupEvents":[
]
}
return intent
@property
def usersays(self):
us = [
{
"id":"41f24594-9c05-4797-9c02-bdfef39b8a2e",
"data":[
{
"text":"graph ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":", ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":", ",
"userDefined":False
},
{
"text":"three",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"58ed5d98-560c-4758-88ab-c50e5fd627ac",
"data":[
{
"text":"graph ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"5063647f-b26c-419a-bb2f-af018a191dec",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":False
},
{
"text":", ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" and ",
"userDefined":False
},
{
"text":"three",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"9b9bc170-b480-4e56-8e22-fa6a3976bc06",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" and ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"15f1fa59-af6a-4bdc-b122-ba787d70aae4",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":False
},
{
"text":", ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":False
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"34ec4914-f864-4d2d-9c9d-cb6f042e78c1",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"one",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":False
},
{
"text":", ",
"userDefined":False
},
{
"text":"two",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":", ",
"userDefined":False
},
{
"text":"three",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"9c8e394c-7a00-42d1-8a56-f51833413f53",
"data":[
{
"text":"graph",
"userDefined":False
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"b6e5447b-0460-4280-989e-efa128fd2ef4",
"data":[
{
"text":"localizacion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" vs ",
"userDefined":False
},
{
"text":"localizacion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"07de519b-3f1f-4e92-84a5-5df1647bc275",
"data":[
{
"text":"A graph with ",
"userDefined":False
},
{
"text":"direccion_coordenada_x",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" vs ",
"userDefined":False
},
{
"text":"direccion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"f1b88e4d-420e-47df-ad71-d43ceb637657",
"data":[
{
"text":"I want a graph with ",
"userDefined":False
},
{
"text":"direccion_coordenada_x",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" vs ",
"userDefined":False
},
{
"text":"direccion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
},
{
"id":"01e193cd-906e-4bde-a574-ba08b768d4de",
"data":[
{
"text":"view graph with ",
"userDefined":False
},
{
"text":"direccion_coordenada_x",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
},
{
"text":" vs ",
"userDefined":False
},
{
"text":"direccion_coordenada_y",
"alias":"columns_select",
"meta":"@columns_select",
"userDefined":True
}
],
"isTemplate":False,
"count":0,
"updated":0
}
]
return us
@property
def database(self):
return self.__database
@property
def table(self):
return self.__table
@database.setter
def database(self, database):
self.__database = database
@table.setter
def table(self, table):
self.__table = table
def writeToFile(self):
utils.writeToFile(self.intent, self.filepath + self.name + '.json')
utils.writeToFile(self.usersays, self.filepath + self.name + '_usersays_en.json')
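# Hypothetical usage sketch (assumes the utils.writeToFile helper and the
# ../outputs/v4/intents/ output directory exist): generate the Dialogflow
# intent and user-says JSON files for a given database/table pair.
#
#     AskComplexGraph(database="mydb", table="mytable").writeToFile()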
|
the-stack_0_17498 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0201
from recipe_engine import recipe_api
CONFIG_DEBUG = 'Debug'
CONFIG_RELEASE = 'Release'
class SkiaVarsApi(recipe_api.RecipeApi):
def setup(self):
"""Prepare the variables."""
# Setup
self.builder_name = self.m.properties['buildername']
self.slave_dir = self.m.path['start_dir']
# Special input/output directories.
self.build_dir = self.slave_dir.join('build')
self.default_env = self.m.context.env
self.default_env['CHROME_HEADLESS'] = '1'
self.default_env['PATH'] = self.m.path.pathsep.join([
self.default_env.get('PATH', '%(PATH)s'),
str(self.m.bot_update._module.PACKAGE_REPO_ROOT),
])
self.cache_dir = self.slave_dir.join('cache')
self.swarming_out_dir = self.slave_dir.join(
self.m.properties['swarm_out_dir'])
self.tmp_dir = self.m.path['start_dir'].join('tmp')
self.builder_cfg = self.m.builder_name_schema.DictForBuilderName(
self.builder_name)
self.role = self.builder_cfg['role']
if self.role in [self.m.builder_name_schema.BUILDER_ROLE_HOUSEKEEPER,
self.m.builder_name_schema.BUILDER_ROLE_CALMBENCH]:
self.configuration = CONFIG_RELEASE
else:
self.configuration = self.builder_cfg.get('configuration', CONFIG_DEBUG)
arch = (self.builder_cfg.get('arch') or self.builder_cfg.get('target_arch'))
if ('Win' in self.builder_cfg.get('os', '') and arch == 'x86_64'):
self.configuration += '_x64'
self.extra_tokens = []
if len(self.builder_cfg.get('extra_config', '')) > 0:
if self.builder_cfg['extra_config'].startswith('SK'):
assert self.builder_cfg['extra_config'].isupper()
self.extra_tokens = [self.builder_cfg['extra_config']]
else:
self.extra_tokens = self.builder_cfg['extra_config'].split('_')
self.patch_storage = self.m.properties.get('patch_storage', 'gerrit')
self.issue = None
self.patchset = None
self.is_trybot = False
if (self.m.properties.get('patch_issue', '') and
self.m.properties.get('patch_set', '')):
self.is_trybot = True
self.issue = self.m.properties['patch_issue']
self.patchset = self.m.properties['patch_set']
self._swarming_bot_id = None
self._swarming_task_id = None
# Internal bot support.
self.internal_hardware_label = (
self.m.properties.get('internal_hardware_label'))
self.is_internal_bot = self.internal_hardware_label is not None
@property
def is_linux(self):
return 'Ubuntu' in self.builder_name or 'Debian' in self.builder_name
@property
def swarming_bot_id(self):
if not self._swarming_bot_id:
self._swarming_bot_id = self.m.python.inline(
name='get swarming bot id',
program='''import os
print os.environ.get('SWARMING_BOT_ID', '')
''',
stdout=self.m.raw_io.output()).stdout.rstrip()
return self._swarming_bot_id
@property
def swarming_task_id(self):
if not self._swarming_task_id:
self._swarming_task_id = self.m.python.inline(
name='get swarming task id',
program='''import os
print os.environ.get('SWARMING_TASK_ID', '')
''',
stdout=self.m.raw_io.output()).stdout.rstrip()
return self._swarming_task_id
|
the-stack_0_17500 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name   : Use the gs_guc set method to set the parameter
              partition_lock_upgrade_timeout to 3000 and observe the
              expected results
Description :
    1.Query the default value of partition_lock_upgrade_timeout
    2.Change the parameter value to 3000 and restart the database
    3.Query the modified parameter value
    4.Restore the parameter default value
Expect      :
    1.The default value 1800 is displayed
    2.The setting succeeds
    3.3000 is displayed
    4.The default value is restored successfully
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
LOG = Logger()
commonsh = CommonSH('dbuser')
class ClientConnection(unittest.TestCase):
def setUp(self):
LOG.info(
'-----Opengauss_Function_Guc_ClientConnection_Case0194start----')
self.constant = Constant()
def test_partition_lock_upgrade_timeout(self):
        LOG.info('--Step 1: check the default value--')
sql_cmd = commonsh.execut_db_sql('show '
'partition_lock_upgrade_timeout;')
LOG.info(sql_cmd)
self.assertEqual("1800", sql_cmd.split("\n")[-2].strip())
        LOG.info('--Step 2: set the parameter to 3000 and restart the database--')
msg = commonsh.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
'partition_lock_upgrade_timeout = 3000')
LOG.info(msg)
self.assertTrue(msg)
msg = commonsh.restart_db_cluster()
LOG.info(msg)
status = commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
        LOG.info('--Step 3: query the modified parameter value--')
sql_cmd = commonsh.execut_db_sql('show'
' partition_lock_upgrade_timeout;')
LOG.info(sql_cmd)
self.assertEqual("3000", sql_cmd.split("\n")[-2].strip())
def tearDown(self):
        LOG.info('--Step 4: restore the default value--')
sql_cmd = commonsh.execut_db_sql('show '
'partition_lock_upgrade_timeout;')
LOG.info(sql_cmd)
if "1800" != sql_cmd.splitlines()[-2].strip():
msg = commonsh.execute_gsguc('set',
self.constant.GSGUC_SUCCESS_MSG,
'partition_lock_upgrade_timeout=1800')
LOG.info(msg)
msg = commonsh.restart_db_cluster()
LOG.info(msg)
status = commonsh.get_db_cluster_status()
self.assertTrue("Degraded" in status or "Normal" in status)
LOG.info(
            '-Opengauss_Function_Guc_ClientConnection_Case0194 finished-----')
|
the-stack_0_17503 | # Ensures proper division mapping on / Needed for Adafruit lib
from __future__ import division
import time
import sys
import RPi.GPIO as GPIO
import Adafruit_PCA9685
# References:
# https://github.com/adafruit/Adafruit_Python_PCA9685/blob/master/examples/simpletest.py
# https://learn.adafruit.com/16-channel-pwm-servo-driver/library-reference
# https://howtomechatronics.com/how-it-works/how-servo-motors-work-how-to-control-servos-using-arduino/
# https://www.adeept.com/learn/tutorial-249.html
# https://www.adeept.com/learn/tutorial-252.html
# Understanding of the code in this file works can be gained by surfing the above links
class DriveTrain:
def __init__(self):
# Right now, we only use this to control the turn servo
# on the drivetrain. Eventually, it will have to be moved to
# a global/shared context to control the claw servos.
self.turn_pwm = Adafruit_PCA9685.PCA9685()
# 50Hz PWM frequency => servo expects updates every 1/50Hz = 20ms
self.turn_pwm.set_pwm_freq(50)
# Pin numbers for back wheels (forward/backward)
self.motor_A_pin1 = 26
self.motor_A_pin2 = 21
self.motor_A_en = 4
self.motor_B_pin1 = 27
self.motor_B_pin2 = 18
self.motor_B_en = 17
# Just declarations
self.motor_pwm_A = 0
self.motor_pwm_B = 0
# Constants for turning servo
self.initPos = 300
self.maxPos = 560
self.minPos = 100
self.angleRange = 180
self.driveSetup()
self.turnSetup()
def driveSetup(self):
GPIO.setwarnings(False)
        # Broadcom chip-specific pin numbers
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.motor_A_pin1, GPIO.OUT)
GPIO.setup(self.motor_A_pin2, GPIO.OUT)
GPIO.setup(self.motor_A_en, GPIO.OUT)
GPIO.setup(self.motor_B_pin1, GPIO.OUT)
GPIO.setup(self.motor_B_pin2, GPIO.OUT)
GPIO.setup(self.motor_B_en, GPIO.OUT)
self.driveHalt()
        # Enclose in try/except/pass if this doesn't work
self.motor_pwm_A = GPIO.PWM(self.motor_A_en, 1000)
self.motor_pwm_B = GPIO.PWM(self.motor_B_en, 1000)
def driveHalt(self):
GPIO.output(self.motor_A_pin1, GPIO.LOW)
GPIO.output(self.motor_A_pin2, GPIO.LOW)
GPIO.output(self.motor_A_en, GPIO.LOW)
GPIO.output(self.motor_B_pin1, GPIO.LOW)
GPIO.output(self.motor_B_pin2, GPIO.LOW)
GPIO.output(self.motor_B_en, GPIO.LOW)
self.turn_pwm.set_pwm(0, 0, self.initPos)
def turnSetup(self, initPos = 300, moveTo = 1):
if initPos > self.minPos and initPos < self.maxPos:
if moveTo:
# First arg is ID/channel of the motor - in this case 0
self.turn_pwm.set_pwm(0, 0, initPos)
else:
strErrorMsg = "Drivetrain: Invalid input position" + str(initPos) + ", minPos = " + str(self.minPos) + ", maxPos = " + str(self.maxPos)
print(strErrorMsg)
def moveSpeed(self, speed, direction):
# Correct combination of LOW/HIGH pin settings were found by lifting the bot
# and trying until it worked as intended
if direction == "backward":
GPIO.output(self.motor_A_pin1, GPIO.LOW)
GPIO.output(self.motor_A_pin2, GPIO.HIGH)
self.motor_pwm_A.start(0)
self.motor_pwm_A.ChangeDutyCycle(speed)
GPIO.output(self.motor_B_pin1, GPIO.LOW)
GPIO.output(self.motor_B_pin2, GPIO.HIGH)
self.motor_pwm_B.start(0)
self.motor_pwm_B.ChangeDutyCycle(speed)
elif direction == "forward":
GPIO.output(self.motor_A_pin1, GPIO.HIGH)
GPIO.output(self.motor_A_pin2, GPIO.LOW)
self.motor_pwm_A.start(100)
self.motor_pwm_A.ChangeDutyCycle(speed)
GPIO.output(self.motor_B_pin1, GPIO.HIGH)
GPIO.output(self.motor_B_pin2, GPIO.LOW)
self.motor_pwm_B.start(100)
self.motor_pwm_B.ChangeDutyCycle(speed)
def turnAngle(self, angle):
# Positive input is left, negative input is right
pwmOut = int((self.maxPos - self.minPos)/self.angleRange*angle)
setPos = int(self.initPos + pwmOut)
if setPos > self.maxPos: setPos = self.maxPos
elif setPos < self.minPos: setPos = self.minPos
self.turn_pwm.set_pwm(0, 0, setPos)
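    # Worked example with the constants from __init__: turnAngle(60) gives
    # pwmOut = int((560 - 100) / 180 * 60) = 153, so setPos = 300 + 153 = 453,
    # which lies inside [minPos, maxPos] and is written to channel 0 of the
    # PCA9685.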
def moveAbsoluteDelay(self, speed, angle, timestep):
# Clamp these values on this side to ensure no hardware damage of any sort.
if speed < -100: speed = -100
elif speed > 100: speed = 100
if angle < -60: angle = -60
elif angle > 60: angle = 60
if speed == 0:
self.curAngle = 0
self.driveHalt()
time.sleep(timestep)
return
self.turnAngle(angle)
if speed < 0:
self.moveSpeed(-speed, "backward")
else:
self.moveSpeed(speed, "forward")
time.sleep(timestep)
return
def destroy(self):
# Add logic for to uninstanitiate turn servo
self.driveHalt()
GPIO.cleanup()
|
the-stack_0_17505 | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="EmfLoadOptions.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import ImageLoadOptions
class EmfLoadOptions(ImageLoadOptions):
"""
Emf load options
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self, **kwargs): # noqa: E501
"""Initializes new instance of EmfLoadOptions""" # noqa: E501
base = super(EmfLoadOptions, self)
base.__init__(**kwargs)
self.swagger_types.update(base.swagger_types)
self.attribute_map.update(base.attribute_map)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EmfLoadOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
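# Minimal usage sketch (illustrative only): the class adds no fields of its
# own beyond ImageLoadOptions, so it mainly tags a request with the
# EMF-specific load options type.
#
#     options = EmfLoadOptions()
#     print(options.to_dict())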
|
the-stack_0_17509 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from GridCal.Engine.Devices.transformer import TransformerType
# from math import sqrt
def test_transformer_type():
Vhv = 21 # primary voltage in kV
Vlv = 0.42 # secondary voltage kV
Sn = 0.25 # nominal power in MVA
Pcu = 2.35 # short circuit power (copper losses) kW
Pfe = 0.27 # no load power (iron losses) kW
I0 = 1.0 # no load voltage in %
Vsc = 4.6 # short-circuit voltage in %
obj = TransformerType(hv_nominal_voltage=Vhv,
lv_nominal_voltage=Vlv,
nominal_power=Sn,
copper_losses=Pcu,
short_circuit_voltage=Vsc,
iron_losses=Pfe,
no_load_current=I0,
gr_hv1=0.5, gx_hv1=0.5)
Sbase = 100
z_series, y_shunt = obj.get_impedances(VH=Vhv, VL=Vlv, Sbase=Sbase)
assert np.allclose(z_series, 3.76+18.01j, rtol=0.01)
assert np.allclose(y_shunt, 2.6532597915358445e-06-2.456722029199863e-05j, rtol=0.01)
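    # Sanity check by hand (not computed by GridCal itself): Vsc = 4.6% gives
    # |z| = 0.046 p.u. on the transformer's own 0.25 MVA base; rescaling to
    # Sbase = 100 MVA multiplies by 100 / 0.25 = 400, so |z_series| should be
    # about 0.046 * 400 = 18.4 p.u., which matches |3.76 + 18.01j| ~= 18.4.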
if __name__ == '__main__':
# template_from_impedances()
test_transformer_type()
|
the-stack_0_17511 | from .base import DiagnosticBase
from .diagnostic_8gig import diagnostic8Gig
from .diagnostic_20gig import diagnostic20Gig
from .diagnostic_maxsize import diagnosticMaxSize
from .diagnostic_network import diagnosticNetwork
class allDiagnostics(DiagnosticBase):
def getName(self):
"""
Returns the human-readable name of the diagnostic
"""
return "Run all available diagnostics"
def getDescription(self):
"""
Returns a description of what the diagnostic does
"""
return "This diagnostic runs all available diagnostics in sequence."
def run(self, logger, args=[]):
"""
Runs the diagnostic
"""
# Run all available diagnostics in turn, storing the results
results = []
diagnostics = [
diagnostic8Gig(),
diagnostic20Gig(),
diagnosticMaxSize(),
diagnosticNetwork(),
]
for index, diagnostic in enumerate(diagnostics):
# Run the diagnostic and report its result
logger.info(
'[all] Running individual diagnostic: "{}"'.format(
diagnostic.getName()
),
True,
)
results.append(diagnostic.run(logger))
logger.info(
"[all] Individual diagnostic result: {}".format(
"passed" if results[-1] == True else "failed"
),
False,
)
# Print a newline after the last diagnostic has run
if index == len(diagnostics) - 1:
print()
# Only report success if all diagnostics succeeded
return False not in results
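# Illustrative usage (hypothetical logger object; any object exposing the
# info() method used above should work):
#
#     passed = allDiagnostics().run(logger)
#     print("all diagnostics passed" if passed else "at least one diagnostic failed")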
|
the-stack_0_17514 | """
This simple example shows how you could use MLflow REST API to create new
runs inside an experiment to log parameters/metrics. Using MLflow REST API
instead of MLflow library might be useful to embed in an application where
you don't want to depend on the whole MLflow library, or to make
your own HTTP requests in another programming language (not Python).
For more details on MLflow REST API endpoints check the following page:
https://www.mlflow.org/docs/latest/rest-api.html
"""
import argparse
import os
import time
import requests
_DEFAULT_USER_ID = "unknown"
class MLflowTrackingRestApi:
def __init__(self, hostname, port, experiment_id):
self.base_url = f'http://{hostname}:{str(port)}/api/2.0/preview/mlflow'
self.experiment_id = experiment_id
self.run_id = self.create_run()
def create_run(self):
"""Create a new run for tracking."""
url = f'{self.base_url}/runs/create'
# user_id is deprecated and will be removed from the API in a future release
payload = {
"experiment_id": self.experiment_id,
"start_time": int(time.time() * 1000),
"user_id": _get_user_id(),
}
r = requests.post(url, json=payload)
run_id = None
if r.status_code == 200:
run_id = r.json()["run"]["info"]["run_uuid"]
else:
print("Creating run failed!")
return run_id
def list_experiments(self):
"""Get all experiments."""
url = f'{self.base_url}/experiments/list'
r = requests.get(url)
return r.json()["experiments"] if r.status_code == 200 else None
def log_param(self, param):
"""Log a parameter dict for the given run."""
url = f'{self.base_url}/runs/log-parameter'
payload = {"run_uuid": self.run_id, "key": param["key"], "value": param["value"]}
r = requests.post(url, json=payload)
return r.status_code
def log_metric(self, metric):
"""Log a metric dict for the given run."""
url = f'{self.base_url}/runs/log-metric'
payload = {"run_uuid": self.run_id, "key": metric["key"], "value": metric["value"]}
r = requests.post(url, json=payload)
return r.status_code
def _get_user_id():
"""Get the ID of the user for the current run."""
try:
import pwd
return pwd.getpwuid(os.getuid())[0]
except ImportError:
return _DEFAULT_USER_ID
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser(description="MLflow REST API Example")
parser.add_argument(
"--hostname",
type=str,
default="localhost",
dest="hostname",
help="MLflow server hostname/ip (default: localhost)",
)
parser.add_argument(
"--port",
type=int,
default=5000,
dest="port",
help="MLflow server port number (default: 5000)",
)
parser.add_argument(
"--experiment-id",
type=int,
default=0,
dest="experiment_id",
help="Experiment ID (default: 0)",
)
print("Running mlflow_tracking_rest_api.py")
args = parser.parse_args()
mlflow_rest = MLflowTrackingRestApi(args.hostname, args.port, args.experiment_id)
# Parameter is a key/val pair (str types)
param = {"key": "alpha", "value": "0.1980"}
status_code = mlflow_rest.log_param(param)
if status_code == 200:
print(
"Successfully logged parameter: {} with value: {}".format(param["key"], param["value"])
)
else:
print("Logging parameter failed!")
# Metric is a key/val pair (key/val have str/float types)
metric = {"key": "precision", "value": 0.769}
status_code = mlflow_rest.log_metric(metric)
if status_code == 200:
print(
"Successfully logged parameter: {} with value: {}".format(
metric["key"], metric["value"]
)
)
else:
print("Logging metric failed!")
|
the-stack_0_17515 | from fuse.server.immunespace.dispatcher import GetObject
import json
import os
import pytest
import numpy as np
# this takes about 20s to return
# go get a session id and group objectIdfrom immunespace for user for this to work:
# https://www.immunespace.org/security/externalToolsView.view?returnUrl=%2Fproject%2FStudies%2Fbegin.view%3F
#g_debug = True
g_debug = False
def test_GetObject():
if os.getenv('TEST_LIBRARY') == "0":
pytest.skip("Only testing docker container")
objectId = os.getenv('GROUP')
username = os.getenv('USERNAME')
sess = os.getenv('APIKEY')
obj = {
"id": objectId,
"resourceType": "eset",
"resource": GetObject(objectId,sess,username)
}
with open('tests/expected/test_1.json', 'r', encoding='utf-8') as f:
expected = json.load(f)
#make smaller chunks for easier debugging
if(g_debug):
max_subjs=3
max_pheno=4
max_genes=5
obj["resource"]["exprs"] = np.array(obj["resource"]["exprs"])[0:max_genes,0:max_subjs].tolist() # 3 genes, 2 subjects
obj["resource"]["featureNames"] = np.array(obj["resource"]["featureNames"])[0:max_genes].tolist()
obj["resource"]["pData"] = np.array(obj["resource"]["pData"])[0:max_pheno,0:max_subjs].tolist() # 4 phenoetypes, 2 subjects
expected["resource"]["exprs"] = np.array(expected["resource"]["exprs"])[0:max_genes,0:max_subjs].tolist() # 3 genes, 2 subjects
expected["resource"]["featureNames"] = np.array(expected["resource"]["featureNames"])[0:max_genes].tolist()
expected["resource"]["pData"] = np.array(expected["resource"]["pData"])[0:max_pheno,0:max_subjs].tolist() # 4 phenoetypes, 2 subjects
# Uncomment this to capture output:
#with open('tests/test_1.out.json', 'w', encoding='utf-8') as f:
# json.dump(obj, f, ensure_ascii=False, indent=4, sort_keys=True)
objs = json.dumps(obj, ensure_ascii=False, indent=4, sort_keys=True)
expecteds = json.dumps(expected, ensure_ascii=False, indent=4, sort_keys=True)
if(g_debug):
print("obj:")
print(obj["resource"]["exprs"])
#print("expected:")
#print(expected["resource"]["exprs"])
    # xxx sort the keys, then copy this to test_func.py
assert objs == expecteds
|
the-stack_0_17517 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import List, Optional, Union
import numpy as np
from habitat import get_config as get_task_config
from habitat.config import Config as CN
DEFAULT_CONFIG_DIR = "configs/"
CONFIG_FILE_SEPARATOR = ","
# -----------------------------------------------------------------------------
# EXPERIMENT CONFIG
# -----------------------------------------------------------------------------
_C = CN()
# task config can be a list of conifgs like "A.yaml,B.yaml"
_C.BASE_TASK_CONFIG_PATH = "configs/tasks/pointnav.yaml"
_C.TASK_CONFIG = CN() # task_config will be stored as a config node
_C.CMD_TRAILING_OPTS = [] # store command line options as list of strings
_C.TRAINER_NAME = "ppo"
_C.ENV_NAME = "NavRLEnv"
_C.SIMULATOR_GPU_ID = 0
_C.TORCH_GPU_ID = 0
_C.VIDEO_OPTION = ["disk", "tensorboard"]
_C.TENSORBOARD_DIR = "tb"
_C.WRITER_TYPE = "tb"
_C.VIDEO_DIR = "video_dir"
_C.VIDEO_FPS = 10
_C.VIDEO_RENDER_TOP_DOWN = True
_C.VIDEO_RENDER_ALL_INFO = False
_C.TEST_EPISODE_COUNT = -1
_C.EVAL_CKPT_PATH_DIR = "data/checkpoints" # path to ckpt or path to ckpts dir
_C.NUM_ENVIRONMENTS = 16
_C.NUM_PROCESSES = -1  # deprecated
_C.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
_C.CHECKPOINT_FOLDER = "data/checkpoints"
_C.NUM_UPDATES = 10000
_C.NUM_CHECKPOINTS = 10
# Number of model updates between checkpoints
_C.CHECKPOINT_INTERVAL = -1
_C.TOTAL_NUM_STEPS = -1.0
_C.LOG_INTERVAL = 10
_C.LOG_FILE = "train.log"
_C.FORCE_BLIND_POLICY = False
_C.VERBOSE = True
_C.EVAL_KEYS_TO_INCLUDE_IN_NAME = []
# For our use case, the CPU-side work is mainly memory copies
# and nothing of substantive compute. PyTorch has been making
# more and more memory copies parallel, but that just ends up
# slowing them down dramatically and reducing our perf.
# This forces it to be single-threaded. The default
# value is left as False because that matches how
# PyTorch normally behaves, but all configs we provide
# set it to True and yours likely should too.
# -----------------------------------------------------------------------------
# Weights and Biases config
# -----------------------------------------------------------------------------
_C.WB = CN()
# The name of the project on W&B.
_C.WB.PROJECT_NAME = ""
# Logging entity (like your username or team name)
_C.WB.ENTITY = ""
# The group ID to assign to the run. Optional to specify.
_C.WB.GROUP = ""
# The run name to assign to the run. If not specified, W&B will randomly assign a name.
_C.WB.RUN_NAME = ""
# -----------------------------------------------------------------------------
# EVAL CONFIG
# -----------------------------------------------------------------------------
_C.EVAL = CN()
# The split to evaluate on
_C.EVAL.SPLIT = "val"
_C.EVAL.USE_CKPT_CONFIG = True
_C.EVAL.SHOULD_LOAD_CKPT = True
# -----------------------------------------------------------------------------
# REINFORCEMENT LEARNING (RL) ENVIRONMENT CONFIG
# -----------------------------------------------------------------------------
_C.RL = CN()
# -----------------------------------------------------------------------------
# preemption CONFIG
# -----------------------------------------------------------------------------
_C.RL.preemption = CN()
# Append the slurm job ID to the resume state filename if running a slurm job
# This is useful when you want to have things from a different job but same
# same checkpoint dir not resume.
_C.RL.preemption.append_slurm_job_id = False
# Number of gradient updates between saving the resume state
_C.RL.preemption.save_resume_state_interval = 100
# Save resume states only when running with slurm
# This is nice if you don't want debug jobs to resume
_C.RL.preemption.save_state_batch_only = False
# -----------------------------------------------------------------------------
# POLICY CONFIG
# -----------------------------------------------------------------------------
_C.RL.POLICY = CN()
_C.RL.POLICY.name = "PointNavResNetPolicy"
_C.RL.POLICY.action_distribution_type = "categorical" # or 'gaussian'
# If the list is empty, all keys will be included.
_C.RL.POLICY.include_visual_keys = []
_C.RL.GYM_OBS_KEYS = []
# For gaussian action distribution:
_C.RL.POLICY.ACTION_DIST = CN()
_C.RL.POLICY.ACTION_DIST.use_log_std = True
_C.RL.POLICY.ACTION_DIST.use_softplus = False
# If True, the std will be a parameter not conditioned on state
_C.RL.POLICY.ACTION_DIST.use_std_param = False
# If True, the std will be clamped to the specified min and max std values
_C.RL.POLICY.ACTION_DIST.clamp_std = True
_C.RL.POLICY.ACTION_DIST.min_std = 1e-6
_C.RL.POLICY.ACTION_DIST.max_std = 1
_C.RL.POLICY.ACTION_DIST.min_log_std = -5
_C.RL.POLICY.ACTION_DIST.max_log_std = 2
# For continuous action distributions (including gaussian):
_C.RL.POLICY.ACTION_DIST.action_activation = "tanh" # ['tanh', '']
# -----------------------------------------------------------------------------
# OBS_TRANSFORMS CONFIG
# -----------------------------------------------------------------------------
_C.RL.POLICY.OBS_TRANSFORMS = CN()
_C.RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS = tuple()
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER = CN()
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER.HEIGHT = 256
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER.WIDTH = 256
_C.RL.POLICY.OBS_TRANSFORMS.RESIZE_SHORTEST_EDGE = CN()
_C.RL.POLICY.OBS_TRANSFORMS.RESIZE_SHORTEST_EDGE.SIZE = 256
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ = CN()
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ.HEIGHT = 256
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ.WIDTH = 512
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2EQ.SENSOR_UUIDS = list()
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH = CN()
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.HEIGHT = 256
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.WIDTH = 256
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.FOV = 180
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.PARAMS = (0.2, 0.2, 0.2)
_C.RL.POLICY.OBS_TRANSFORMS.CUBE2FISH.SENSOR_UUIDS = list()
_C.RL.POLICY.OBS_TRANSFORMS.EQ2CUBE = CN()
_C.RL.POLICY.OBS_TRANSFORMS.EQ2CUBE.HEIGHT = 256
_C.RL.POLICY.OBS_TRANSFORMS.EQ2CUBE.WIDTH = 256
_C.RL.POLICY.OBS_TRANSFORMS.EQ2CUBE.SENSOR_UUIDS = list()
# -----------------------------------------------------------------------------
# PROXIMAL POLICY OPTIMIZATION (PPO)
# -----------------------------------------------------------------------------
_C.RL.PPO = CN()
_C.RL.PPO.clip_param = 0.2
_C.RL.PPO.ppo_epoch = 4
_C.RL.PPO.num_mini_batch = 2
_C.RL.PPO.value_loss_coef = 0.5
_C.RL.PPO.entropy_coef = 0.01
_C.RL.PPO.lr = 2.5e-4
_C.RL.PPO.eps = 1e-5
_C.RL.PPO.max_grad_norm = 0.5
_C.RL.PPO.num_steps = 5
_C.RL.PPO.use_gae = True
_C.RL.PPO.use_linear_lr_decay = False
_C.RL.PPO.use_linear_clip_decay = False
_C.RL.PPO.gamma = 0.99
_C.RL.PPO.tau = 0.95
_C.RL.PPO.reward_window_size = 50
_C.RL.PPO.use_normalized_advantage = False
_C.RL.PPO.hidden_size = 512
# Use double-buffered sampling; this typically helps
# when environment step time is similar to or larger than
# policy inference time during rollout generation.
# Note that this does not change the memory requirements.
_C.RL.PPO.use_double_buffered_sampler = False
# -----------------------------------------------------------------------------
# DECENTRALIZED DISTRIBUTED PROXIMAL POLICY OPTIMIZATION (DD-PPO)
# -----------------------------------------------------------------------------
_C.RL.DDPPO = CN()
_C.RL.DDPPO.sync_frac = 0.6
_C.RL.DDPPO.distrib_backend = "GLOO"
_C.RL.DDPPO.rnn_type = "GRU"
_C.RL.DDPPO.num_recurrent_layers = 1
_C.RL.DDPPO.backbone = "resnet18"
_C.RL.DDPPO.pretrained_weights = "data/ddppo-models/gibson-2plus-resnet50.pth"
# Loads pretrained weights
_C.RL.DDPPO.pretrained = False
# Loads just the visual encoder backbone weights
_C.RL.DDPPO.pretrained_encoder = False
# Whether or not the visual encoder backbone will be trained
_C.RL.DDPPO.train_encoder = True
# Whether or not to reset the critic linear layer
_C.RL.DDPPO.reset_critic = True
# Forces distributed mode for testing
_C.RL.DDPPO.force_distributed = False
# -----------------------------------------------------------------------------
# ORBSLAM2 BASELINE
# -----------------------------------------------------------------------------
_C.ORBSLAM2 = CN()
_C.ORBSLAM2.SLAM_VOCAB_PATH = "habitat_baselines/slambased/data/ORBvoc.txt"
_C.ORBSLAM2.SLAM_SETTINGS_PATH = (
"habitat_baselines/slambased/data/mp3d3_small1k.yaml"
)
_C.ORBSLAM2.MAP_CELL_SIZE = 0.1
_C.ORBSLAM2.MAP_SIZE = 40
_C.ORBSLAM2.CAMERA_HEIGHT = get_task_config().SIMULATOR.DEPTH_SENSOR.POSITION[
1
]
_C.ORBSLAM2.BETA = 100
_C.ORBSLAM2.H_OBSTACLE_MIN = 0.3 * _C.ORBSLAM2.CAMERA_HEIGHT
_C.ORBSLAM2.H_OBSTACLE_MAX = 1.0 * _C.ORBSLAM2.CAMERA_HEIGHT
_C.ORBSLAM2.D_OBSTACLE_MIN = 0.1
_C.ORBSLAM2.D_OBSTACLE_MAX = 4.0
_C.ORBSLAM2.PREPROCESS_MAP = True
_C.ORBSLAM2.MIN_PTS_IN_OBSTACLE = (
get_task_config().SIMULATOR.DEPTH_SENSOR.WIDTH / 2.0
)
_C.ORBSLAM2.ANGLE_TH = float(np.deg2rad(15))
_C.ORBSLAM2.DIST_REACHED_TH = 0.15
_C.ORBSLAM2.NEXT_WAYPOINT_TH = 0.5
_C.ORBSLAM2.NUM_ACTIONS = 3
_C.ORBSLAM2.DIST_TO_STOP = 0.05
_C.ORBSLAM2.PLANNER_MAX_STEPS = 500
_C.ORBSLAM2.DEPTH_DENORM = get_task_config().SIMULATOR.DEPTH_SENSOR.MAX_DEPTH
# -----------------------------------------------------------------------------
# PROFILING
# -----------------------------------------------------------------------------
_C.PROFILING = CN()
_C.PROFILING.CAPTURE_START_STEP = -1
_C.PROFILING.NUM_STEPS_TO_CAPTURE = -1
_C.register_renamed_key
def get_config(
config_paths: Optional[Union[List[str], str]] = None,
opts: Optional[list] = None,
) -> CN:
r"""Create a unified config with default values overwritten by values from
:ref:`config_paths` and overwritten by options from :ref:`opts`.
Args:
config_paths: List of config paths or string that contains comma
separated list of config paths.
opts: Config options (keys, values) in a list (e.g., passed from
command line into the config. For example, ``opts = ['FOO.BAR',
0.5]``. Argument can be used for parameter sweeping or quick tests.
"""
config = _C.clone()
if config_paths:
if isinstance(config_paths, str):
if CONFIG_FILE_SEPARATOR in config_paths:
config_paths = config_paths.split(CONFIG_FILE_SEPARATOR)
else:
config_paths = [config_paths]
for config_path in config_paths:
config.merge_from_file(config_path)
if opts:
for k, v in zip(opts[0::2], opts[1::2]):
if k == "BASE_TASK_CONFIG_PATH":
config.BASE_TASK_CONFIG_PATH = v
config.TASK_CONFIG = get_task_config(config.BASE_TASK_CONFIG_PATH)
# In case the config specifies overrides for the TASK_CONFIG, we
# remerge the files here
if config_paths:
for config_path in config_paths:
config.merge_from_file(config_path)
if opts:
config.CMD_TRAILING_OPTS = config.CMD_TRAILING_OPTS + opts
config.merge_from_list(config.CMD_TRAILING_OPTS)
if config.NUM_PROCESSES != -1:
warnings.warn(
"NUM_PROCESSES is depricated and will be removed in a future version."
" Use NUM_ENVIRONMENTS instead."
" Overwriting NUM_ENVIRONMENTS with NUM_PROCESSES for backwards compatibility."
)
config.NUM_ENVIRONMENTS = config.NUM_PROCESSES
config.freeze()
return config
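# Illustrative usage sketch (the YAML path below is a placeholder, not a file
# guaranteed to ship with this repository):
#
#     config = get_config(
#         "habitat_baselines/config/pointnav/ppo_pointnav.yaml",
#         opts=["NUM_ENVIRONMENTS", 4, "RL.PPO.lr", 1e-4],
#     )
#     print(config.NUM_ENVIRONMENTS, config.RL.PPO.lr)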
|
the-stack_0_17521 | # ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Surface Heat Diffuse Skinning",
"author": "mesh online",
"version": (3, 4, 0),
"blender": (2, 80, 0),
"location": "View3D > UI > Mesh Online",
"description": "Surface Heat Diffuse Skinning",
"warning": "",
"wiki_url": "http://www.mesh-online.net/vhd.html",
"category": "Object"
}
import bpy
import sys
import os
import time
import platform
from subprocess import PIPE, Popen
from threading import Thread
from bpy.props import *
from queue import Queue, Empty
class SFC_OT_ModalTimerOperator(bpy.types.Operator):
"""Operator which runs its self from a timer"""
bl_idname = "wm.surface_heat_diffuse"
bl_label = "Surface Heat Diffuse Skinning"
bl_options = {'REGISTER', 'UNDO'}
_timer = None
_pid = None
_queue = None
_objs = []
_permulation = []
_selected_indices = []
_selected_group_index_weights = []
_start_time = None
def write_bone_data(self, obj, filepath):
f = open(filepath, 'w', encoding='utf-8')
f.write("# surface heat diffuse bone export.\n")
amt = obj.data
bpy.ops.object.mode_set(mode='EDIT')
for bone in amt.edit_bones:
if bone.use_deform:
world_bone_head = obj.matrix_world @ bone.head
world_bone_tail = obj.matrix_world @ bone.tail
f.write("b,{},{:.6f},{:.6f},{:.6f},{:.6f},{:.6f},{:.6f}\n".format(
bone.name.replace(",", "\\;"), world_bone_head[0], world_bone_head[1], world_bone_head[2],
world_bone_tail[0], world_bone_tail[1], world_bone_tail[2]))
bpy.ops.object.mode_set(mode='OBJECT')
f.close()
def write_mesh_data(self, objs, filepath):
f = open(filepath, 'w', encoding='utf-8')
f.write("# surface heat diffuse mesh export.\n")
vertex_offset = 0
for obj in objs:
for v in obj.data.vertices:
world_v_co = obj.matrix_world @ v.co
f.write("v,{:.6f},{:.6f},{:.6f}\n".format(world_v_co[0], world_v_co[1], world_v_co[2]))
for poly in obj.data.polygons:
f.write("f");
for loop_ind in poly.loop_indices:
vert_ind = obj.data.loops[loop_ind].vertex_index
f.write(",{}".format(vertex_offset + vert_ind))
f.write("\n")
vertex_offset += len(obj.data.vertices)
f.close()
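    # Illustrative layout of the exchange files written above (coordinates are
    # made up): untitled-bone.txt contains one line per deform bone,
    #     b,spine,0.000000,0.000000,1.000000,0.000000,0.000000,1.500000
    # and untitled-mesh.txt contains vertex lines followed by polygon lines,
    #     v,0.500000,-0.500000,0.000000
    #     f,0,1,2,3
    # The external "shd" binary consumes these and produces
    # untitled-weight.txt, which read_weight_data() below parses ("b" lines
    # re-declare the bone/group names, "w" lines carry per-vertex weights).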
def read_weight_data(self, objs, filepath):
# make permulation for all vertices
vertex_offset = 0;
for obj in objs:
for index in range(len(obj.data.vertices)):
self._permulation.append((vertex_offset + index, index, obj))
vertex_offset += len(obj.data.vertices)
if bpy.context.scene.surface_protect:
for index in range(len(objs)):
obj = objs[index]
# get selected vertex indices
self._selected_indices.append([i.index for i in obj.data.vertices if i.select])
self._selected_group_index_weights.append([])
# push protected vertices weight
for vert_ind in self._selected_indices[index]:
for g in obj.data.vertices[vert_ind].groups:
self._selected_group_index_weights[index].append((obj.vertex_groups[g.group].name, vert_ind, g.weight))
f = open(filepath, 'r', encoding='utf-8')
bones = []
for line in f:
if len(line) == 0:
continue
tokens = line.strip("\r\n").split(",")
if tokens[0] == "b":
group_name = tokens[1].replace("\\;", ",")
bones.append(group_name)
for obj in objs:
#check for existing group with the same name
if None != obj.vertex_groups.get(group_name):
group = obj.vertex_groups[group_name]
obj.vertex_groups.remove(group)
obj.vertex_groups.new(name = group_name)
if tokens[0] == "w":
group_name = bones[int(tokens[2])]
index = int(tokens[1])
vert_ind = self._permulation[index][1]
weight = float(tokens[3])
obj = self._permulation[index][2]
# protect vertices weight
if bpy.context.scene.surface_protect and vert_ind in self._selected_indices[objs.index(obj)]:
continue
obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
f.close()
if bpy.context.scene.surface_protect:
for index in range(len(objs)):
obj = objs[index]
# pop protected vertices weight
for (group_name, vert_ind, weight) in self._selected_group_index_weights[index]:
obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
def modal(self, context, event):
if event.type == 'ESC':
self._pid.terminate()
return self.cancel(context)
if event.type == 'TIMER':
# background task is still running
if None == self._pid.poll():
# read line without blocking
try: rawline = self._queue.get_nowait()
except Empty:
pass
else:
line = rawline.decode().strip("\r\n")
self.report({'INFO'}, line)
else:
# background task finished running
self.read_weight_data(self._objs, os.path.join(os.path.dirname(__file__), "data", "untitled-weight.txt"))
running_time = time.time() - self._start_time
self.report({'INFO'}, "".join(("Complete, ", "running time: ", \
str(int(running_time / 60))," minutes ", str(int(running_time % 60)), " seconds")))
# bind meshes to the armature
bpy.ops.object.parent_set(type='ARMATURE')
return self.cancel(context)
return {'RUNNING_MODAL'}
def execute(self, context):
arm_count = 0
obj_count = 0
for ob in bpy.context.selected_objects:
if 'ARMATURE' == ob.type:
arm_count += 1
if 'MESH' == ob.type:
obj_count += 1
if not (context.mode == 'OBJECT' and arm_count == 1 and obj_count >= 1):
self.report({'ERROR'}, "Please select one armature and at least one mesh in 'OBJECT' mode, then try again.")
return {'CANCELLED'}
self._objs = []
self._permulation = []
self._selected_indices = []
self._selected_group_index_weights = []
arm = None
objs = []
# get armature and mesh
for ob in bpy.context.selected_objects:
if 'ARMATURE' == ob.type:
arm = ob
if 'MESH' == ob.type:
objs.append(ob)
# sort meshes by name
objs.sort(key=lambda obj:obj.name);
# save the reference for later use
self._objs = objs
for obj in objs:
# focus on the mesh
bpy.context.view_layer.objects.active = obj
# synchronize data
bpy.ops.object.mode_set(mode='OBJECT')
# write mesh data
self.write_mesh_data(objs, os.path.join(os.path.dirname(__file__), "data", "untitled-mesh.txt"))
# we must focus on the armature before we can write bone data
bpy.context.view_layer.objects.active = arm
# synchronize data
bpy.ops.object.mode_set(mode='OBJECT')
# write bone data
self.write_bone_data(arm, os.path.join(os.path.dirname(__file__), "data", "untitled-bone.txt"))
# do voxel skinning in background
ON_POSIX = 'posix' in sys.builtin_module_names
# chmod
if ON_POSIX:
os.chmod(os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd"), 0o755)
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
executable_path = None
if platform.system() == 'Windows':
if platform.machine().endswith('64'):
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x64", "shd")
else:
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x86", "shd")
else:
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd")
self._pid = Popen([executable_path,
"untitled-mesh.txt",
"untitled-bone.txt",
"untitled-weight.txt",
str(context.scene.surface_resolution),
str(context.scene.surface_loops),
str(context.scene.surface_samples),
str(context.scene.surface_influence),
str(context.scene.surface_falloff),
context.scene.surface_sharpness,
"y" if context.scene.detect_surface_solidify else "n"],
cwd = os.path.join(os.path.dirname(__file__), "data"),
stdout = PIPE,
bufsize = 1,
close_fds = ON_POSIX)
self._queue = Queue()
t = Thread(target=enqueue_output, args=(self._pid.stdout, self._queue))
t.daemon = True
t.start()
self._start_time = time.time()
# start timer to poll data
self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def cancel(self, context):
# remove timer
context.window_manager.event_timer_remove(self._timer)
self._objs = []
self._permulation = []
self._selected_indices = []
self._selected_group_index_weights = []
return {'CANCELLED'}
def init_properties():
bpy.types.Scene.surface_resolution = IntProperty(
name = "Voxel Resolution",
description = "Maximum voxel grid size",
default = 128,
min = 32,
max = 1024)
bpy.types.Scene.surface_loops = IntProperty(
name = "Diffuse Loops",
description = "Heat diffuse pass = Voxel Resolution * Diffuse Loops",
default = 5,
min = 1,
max = 9)
bpy.types.Scene.surface_samples = IntProperty(
name = "Sample Rays",
description = "Ray samples count",
default = 64,
min = 32,
max = 128)
bpy.types.Scene.surface_influence = IntProperty(
name = "Influence Bones",
description = "Max influence bones per vertex, please decrease the value (such as 4) for mobile devices",
default = 8,
min = 1,
max = 128)
bpy.types.Scene.surface_falloff = FloatProperty(
name = "Diffuse Falloff",
description = "Heat diffuse falloff",
default = 0.2,
min = 0.01,
max = 0.99)
bpy.types.Scene.surface_protect = BoolProperty(
name = "Protect Selected Vertex Weight",
description = "Protect selected vertex weight",
default = False)
bpy.types.Scene.surface_sharpness = EnumProperty(
name = "Edges",
description = "Edges",
items = [
('1','Soft','Soft Curvature'),
('2','Normal','Normal Curvature'),
('3','Sharp','Sharp Curvature'),
('4','Sharpest','Sharpest Curvature')],
default = '3')
bpy.types.Scene.detect_surface_solidify = BoolProperty(
name = "Detect Solidify",
description = "Detect solidified clothes, if you enable this option, make sure that all bones are in the charecter's volume, otherwise, the result may be wrong",
default = False)
def clear_properties():
props = ["surface_resolution",
"surface_samples",
"surface_falloff",
"surface_loops",
"surface_influence",
"surface_protect"]
for p in props:
if p in bpy.types.Scene.bl_rna.properties:
exec("del bpy.types.Scene." + p)
class SFC_PT_SurfaceHeatDiffuseSkinningPanel(bpy.types.Panel):
"""Creates a Panel in the Object properties window"""
bl_label = "Surface Heat Diffuse Skinning"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = 'Mesh Online'
@classmethod
def poll(self, context):
return True
def draw(self, context):
layout = self.layout
layout.prop(context.scene, 'surface_resolution', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_loops', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_samples', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_influence', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_falloff', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_sharpness')
layout.prop(context.scene, 'surface_protect')
layout.prop(context.scene, 'detect_surface_solidify')
row = layout.row()
row.operator("wm.surface_heat_diffuse")
def register():
bpy.utils.register_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
bpy.utils.register_class(SFC_OT_ModalTimerOperator)
init_properties()
def unregister():
bpy.utils.unregister_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
bpy.utils.unregister_class(SFC_OT_ModalTimerOperator)
clear_properties()
if __name__ == "__main__":
register()
|
the-stack_0_17522 |
from mock import Mock
from twisted.internet.defer import maybeDeferred, succeed
from synapse.events import FrozenEvent
from synapse.types import Requester, UserID
from synapse.util import Clock
from synapse.util.logcontext import LoggingContext
from tests import unittest
from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver
class MessageAcceptTests(unittest.TestCase):
def setUp(self):
self.http_client = Mock()
self.reactor = ThreadedMemoryReactorClock()
self.hs_clock = Clock(self.reactor)
self.homeserver = setup_test_homeserver(
self.addCleanup,
http_client=self.http_client,
clock=self.hs_clock,
reactor=self.reactor,
)
user_id = UserID("us", "test")
our_user = Requester(user_id, None, False, None, None)
room_creator = self.homeserver.get_room_creation_handler()
room = room_creator.create_room(
our_user, room_creator.PRESETS_DICT["public_chat"], ratelimit=False
)
self.reactor.advance(0.1)
self.room_id = self.successResultOf(room)["room_id"]
# Figure out what the most recent event is
most_recent = self.successResultOf(
maybeDeferred(
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
)[0]
join_event = FrozenEvent(
{
"room_id": self.room_id,
"sender": "@baduser:test.serv",
"state_key": "@baduser:test.serv",
"event_id": "$join:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.member",
"origin": "test.servx",
"content": {"membership": "join"},
"auth_events": [],
"prev_state": [(most_recent, {})],
"prev_events": [(most_recent, {})],
}
)
self.handler = self.homeserver.get_handlers().federation_handler
self.handler.do_auth = lambda *a, **b: succeed(True)
self.client = self.homeserver.get_federation_client()
self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
pdus
)
# Send the join, it should return None (which is not an error)
d = self.handler.on_receive_pdu(
"test.serv", join_event, sent_to_us_directly=True
)
self.reactor.advance(1)
self.assertEqual(self.successResultOf(d), None)
# Make sure we actually joined the room
self.assertEqual(
self.successResultOf(
maybeDeferred(
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
)[0],
"$join:test.serv",
)
def test_cant_hide_direct_ancestors(self):
"""
If you send a message, you must be able to provide the direct
prev_events that said event references.
"""
def post_json(destination, path, data, headers=None, timeout=0):
# If it asks us for new missing events, give them NOTHING
if path.startswith("/_matrix/federation/v1/get_missing_events/"):
return {"events": []}
self.http_client.post_json = post_json
# Figure out what the most recent event is
most_recent = self.successResultOf(
maybeDeferred(
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
)[0]
# Now lie about an event
lying_event = FrozenEvent(
{
"room_id": self.room_id,
"sender": "@baduser:test.serv",
"event_id": "one:test.serv",
"depth": 1000,
"origin_server_ts": 1,
"type": "m.room.message",
"origin": "test.serv",
"content": {"body": "hewwo?"},
"auth_events": [],
"prev_events": [("two:test.serv", {}), (most_recent, {})],
}
)
with LoggingContext(request="lying_event"):
d = self.handler.on_receive_pdu(
"test.serv", lying_event, sent_to_us_directly=True
)
# Step the reactor, so the database fetches come back
self.reactor.advance(1)
# on_receive_pdu should throw an error
failure = self.failureResultOf(d)
self.assertEqual(
failure.value.args[0],
(
"ERROR 403: Your server isn't divulging details about prev_events "
"referenced in this event."
),
)
# Make sure the invalid event isn't there
extrem = maybeDeferred(
self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
)
self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
|
the-stack_0_17524 | #!/usr/bin/env python
import sys
from os import path
from setuptools import find_namespace_packages, setup
with open("arcade/version.py") as file:
exec(file.read())
def get_long_description() -> str:
fname = path.join(path.dirname(path.abspath(__file__)), "README.rst")
with open(fname, "r") as f:
return f.read()
setup(
name="arcade",
description="Arcade Game Development Library",
long_description=get_long_description(),
author="Paul Vincent Craven",
author_email="[email protected]",
license="MIT",
url="https://api.arcade.academy",
download_url="https://api.arcade.academy",
install_requires=[
"pyglet==2.0.dev13",
"pillow~=9.0.0",
"pymunk~=6.2.1",
"pytiled-parser==2.0.1",
],
extras_require={
"dev": [
"pytest",
"flake8",
"mypy",
"coverage",
"coveralls",
"pytest-mock",
"pytest-cov",
"sphinx",
"sphinx-sitemap",
"sphinx_rtd_theme",
"sphinx_copybutton",
"dirsync",
"wheel",
],
},
packages=find_namespace_packages(
include=["arcade", "arcade.*"],
exclude=[],
),
python_requires=">=3.7",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development :: Libraries :: Python Modules",
],
include_package_data=True,
project_urls={
"Documentation": "https://api.arcade.academy/",
"Example Code": "https://api.arcade.academy/en/latest/examples/index.html",
"Issue Tracker": "https://github.com/pythonarcade/arcade/issues",
"Source": "https://github.com/pythonarcade/arcade",
"On-line Book": "https://learn.arcade.academy",
},
version=VERSION,
)
|
the-stack_0_17526 | # modify the globals
import config
import os, sys
from pathlib import Path
data_file_name = 'owid-covid-data.json'
config.CURRENT_DIR_STR = os.path.dirname(__file__)
config.DATA_FILE_STR = os.path.join(config.CURRENT_DIR_STR, 'data', data_file_name)
config.ARGO_PACKAGE_STR = os.path.join(config.CURRENT_DIR_STR, 'colchis')
config.DATA_FILE_PATH = Path(config.DATA_FILE_STR)
sys.path.append(config.DATA_FILE_PATH)
config.ARGO_PACKAGE_PATH = Path(config.ARGO_PACKAGE_STR)
sys.path.append(config.ARGO_PACKAGE_PATH)
|
the-stack_0_17527 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageStorageProfile(Model):
"""Describes a storage profile.
:param os_disk: The OS disk.
:type os_disk: :class:`ImageOSDisk
<azure.mgmt.compute.compute.v2017_03_30.models.ImageOSDisk>`
:param data_disks: The data disks.
:type data_disks: list of :class:`ImageDataDisk
<azure.mgmt.compute.compute.v2017_03_30.models.ImageDataDisk>`
"""
_validation = {
'os_disk': {'required': True},
}
_attribute_map = {
'os_disk': {'key': 'osDisk', 'type': 'ImageOSDisk'},
'data_disks': {'key': 'dataDisks', 'type': '[ImageDataDisk]'},
}
def __init__(self, os_disk, data_disks=None):
self.os_disk = os_disk
self.data_disks = data_disks
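# Minimal usage sketch (illustrative only, not part of the generated SDK): `my_os_disk`
# is assumed to be an ImageOSDisk instance built elsewhere; data_disks stays optional.
#
#   profile = ImageStorageProfile(os_disk=my_os_disk)
#   profile_with_data = ImageStorageProfile(os_disk=my_os_disk, data_disks=[my_data_disk])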
|
the-stack_0_17530 | import collections
from scipy.special import comb
import numpy as np
def _iter_key_sorted_dct(dct):
for k in sorted(dct.keys()):
yield k, dct[k]
def make_sum(dct_values, base=None):
"""base is some previous result"""
sum_cnt = collections.defaultdict(int)
if base is not None:
sum_cnt.update(base)
for v, n in _iter_key_sorted_dct(dct_values):
# to include from 1 to n elements of value v
dct = dict(sum_cnt)
for i in range(1, n + 1):
n_ways = comb(n, i)
increment = i * v # increment for sum by including n times v
sum_cnt[increment] += n_ways
for k, v_orig in _iter_key_sorted_dct(dct):
sum_cnt[k + increment] += n_ways * v_orig
return sum_cnt
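# Illustrative sketch of what make_sum returns (counts are floats because scipy's comb
# returns floats): make_sum({2: 2}) -> {2: 2.0, 4: 1.0}, i.e. a sum of 2 can be formed
# two ways (either copy of the value 2) and a sum of 4 one way (both copies together).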
class Jewelry(object):
def __init__(self):
self.values_ = None
self.ways_below_ = collections.defaultdict(int)
self.ways_below_[0] = 1
def __repr__(self):
return repr(self.values_)
def set_values(self, v):
self.values_ = collections.Counter(v)
def how_many(self, values):
self.set_values(values)
count = 0
values_for_above = dict(self.values_)
for v, cnt in _iter_key_sorted_dct(self.values_):
# Remove value v iteratively to get all the possible sums from
# the values above v
values_for_above.pop(v)
ways_above_exclude_v = make_sum(values_for_above)
ways_below_exclude_v = dict(self.ways_below_)
for i in range(1, cnt + 1):
n_ways = comb(cnt, i)
ways_below = collections.defaultdict(int)
for k, cnt_orig in _iter_key_sorted_dct(ways_below_exclude_v):
sum_with_iv = k + v * i
cnt_increment = n_ways * cnt_orig
ways_below[sum_with_iv] += cnt_increment
self.ways_below_[sum_with_iv] += cnt_increment
# The ways above can include cnt - i elements in maximum
ways_above = make_sum({v: cnt - i}, ways_above_exclude_v)
intersection = set(ways_below).intersection(ways_above)
count += np.sum([ways_below[k] * ways_above[k]
for k in intersection])
return count
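# Hypothetical usage sketch (the input list is illustrative only; the result is printed,
# not asserted):
if __name__ == '__main__':
    solver = Jewelry()
    print(solver.how_many([1, 2, 5, 3, 4, 5]))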
|
the-stack_0_17532 | import csv
import json
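# Merges the before/after image links from bridgeData3.csv into the rows of final.csv,
# keyed by project code, and writes the combined records to results.json.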
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
# Skips the header of the csv
finCsv = fin[1:]
finalCsv = df[1:]
obj = {}
# loop through the csv with images
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
# create a final object
finalObj = {}
# check full csv
for i in finCsv:
x = i.split(',')
id = x[6]
# create an object with the key of the id regardless
finalObj[id]= {}
row = fin[0].split(',')
# if the id has an image add it to the final object
if id in obj:
finalObj[id]['before_img'] = obj[id]['before_img']
finalObj[id]['after_img'] = obj[id]['after_img'][0:-1]
for i in range(len(row)):
key = row[i].replace(' ',"_")
key = key.strip()
val = x[i].strip()
# 8 is the position of the latitude
if i == 8:
key = 'latitude'
# val = float(val)
if i == 9:
key = 'longitude'
if i == 11:
continue
try:
val = int(val)
except ValueError:
val = val
finalObj[id][key.lower()] = val
print(finalObj['1013351'])
with open('results.json','w') as fp:
json.dump(finalObj,fp,indent=4) |
the-stack_0_17533 | import socketserver
import threading
import mimetypes
import os
import os.path
import collections
import json
import logging
import io
import h2.connection
import h2.events
log = logging.getLogger(__name__)
AUTHORITY = u'localhost:6001'
# header-body pair for each stream
request_data = collections.namedtuple('request_data', ['headers', 'data'])
# info needed to send message to a client
stream_conn_sock = collections.namedtuple('stream_conn_sock',
['stream_id', 'connection', 'socket'])
clients = { }
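# `clients` maps a client id (taken from the login request body) to the stream_conn_sock
# recorded for that client, so later messages can be pushed back over its open connection.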
class ThreadingTCPServer(socketserver.ThreadingMixIn,
socketserver.TCPServer):
allow_reuse_address = True
class MyH2Handler(socketserver.StreamRequestHandler):
connection = None
# store headers-body pair of each stream
stream_data = { }
# every socket represents a client, which has a special id
client_id = None
# store functions that handle the body
body_handlers = { }
def initiate_client(self, stream_id):
# get the current client's id from request body
body = self.stream_data[stream_id].data.getvalue().decode('utf-8')
log.debug(body)
bodyjson = json.loads(body)
self.client_id = bodyjson['client']
log.debug('client id %s', self.client_id)
# save the information needed to send message to this client
socket = self.request
s_c_s = stream_conn_sock(stream_id, self.connection, socket)
log.info('reg client %s %s', self.client_id, s_c_s)
clients.update({self.client_id: s_c_s})
# inform client that it's okay to start the chat now
ok = b'ready to continue'
headers = collections.OrderedDict([(':status', '200'),
('server','http2'),
('content-length', len(ok))])
self.connection.send_headers(stream_id, headers)
self.connection.send_data(stream_id, ok)
self.request.sendall(self.connection.data_to_send())
def send_message(self, stream_id):
# get message and receiver
body = self.stream_data[stream_id].data.getvalue().decode('utf-8')
bodyjson = json.loads(body)
receiver = bodyjson['to']
message = bodyjson['message'].encode('utf-8')
# get receiver "address"
r_stream, r_conn, r_socket = clients[receiver]
# initiate push request to receiver
request_headers = collections.OrderedDict([(':status', '200'),
('server', 'http2')])
new_stream_id = r_conn.get_next_available_stream_id()
log.info('push req %s %s %s %s', request_headers, r_stream, r_conn, r_socket)
r_conn.push_stream(r_stream, new_stream_id, request_headers)
r_socket.sendall(r_conn.data_to_send())
# push message to receiver
r_response_headers = collections.OrderedDict([(':status', '200'),
(':authority', AUTHORITY),
('server', 'http2'),
('content-length', len(message))])
r_conn.send_headers(new_stream_id, r_response_headers)
log.info('push resp %s %s %s', message, r_stream, r_conn)
r_conn.send_data(new_stream_id, message, end_stream = True)
r_socket.sendall(r_conn.data_to_send())
# inform sender that message is sent
'''
sent = b'sent'
response_headers = collections.OrderedDict([(':status', '200'),
('server', 'http2'),
('content_length', len(sent))])
self.connection.send_headers(stream_id, response_headers)
self.connection.send_data(stream_id, sent)
self.request.sendall(self.connection.data_to_send())
'''
    def end_chat(self, stream_id):
# close receiving channel
r_stream_id, r_conn, socket = clients[self.client_id]
        r_response_headers = collections.OrderedDict([(':status', '200'),
('server', 'http2')])
r_conn.send_headers(r_stream_id, r_response_headers, end_stream = True)
socket.sendall(r_conn.data_to_send())
# inform client and close connection
ended = b'chat ended'
        response_headers = collections.OrderedDict([(':status', '200'),
('server', 'http2'),
('content-length', len(ended))])
self.connection.send_headers(stream_id, response_headers)
self.connection.send_data(stream_id, ended, end_stream = True)
self.request.sendall(self.connection.data_to_send())
self.connection.close_connection()
self.request.close()
def request_received(self, headers, stream_id):
headers = collections.OrderedDict(headers)
# store headers (to match with request body)
r_d = request_data(headers, io.BytesIO())
self.stream_data[stream_id] = r_d
# find out what the client intends to do
path = headers[':path']
route = os.path.basename(os.path.normpath(path))
log.info('request path %s at %s', path, stream_id)
if route == 'login':
self.body_handlers[stream_id] = self.initiate_client
elif route == 'send':
self.body_handlers[stream_id] = self.send_message
elif route == 'end':
self.end_chat(stream_id)
else:
return
def data_received(self, data, stream_id):
s_d = self.stream_data[stream_id]
s_d.data.write(data)
fn = self.body_handlers[stream_id]
if fn :
log.info('dispatch %s with %s', stream_id, fn)
fn(stream_id)
def handle(self):
self.connection = h2.connection.H2Connection(client_side = False)
self.connection.initiate_connection()
self.request.sendall(self.connection.data_to_send())
log.debug('init pass')
while True:
data = self.request.recv(65535)
events = self.connection.receive_data(data)
for event in events:
if isinstance(event, h2.events.RequestReceived):
self.request_received(event.headers, event.stream_id)
if isinstance(event, h2.events.DataReceived):
self.data_received(event.data, event.stream_id)
if isinstance(event, h2.events.StreamEnded):
self.server.shutdown()
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
host, port = '', 6001
httpd = ThreadingTCPServer((host, port), MyH2Handler)
httpd.serve_forever()
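# Protocol summary (as implemented by the handlers above): a client first POSTs to a path
# ending in /login with a JSON body like {"client": "<id>"}, then POSTs to /send with
# {"to": "<other id>", "message": "<text>"}; the message is delivered to the receiver as
# an HTTP/2 server push on the connection it registered at login.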
|
the-stack_0_17534 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from unittest import mock
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.test.utils import override_settings
from django.urls import reverse
from horizon import tables as horizon_tables
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.images.images import forms
from openstack_dashboard.dashboards.project.images.images import tables
IMAGES_INDEX_URL = reverse('horizon:project:images:index')
class CreateImageFormTests(test.ResetImageAPIVersionMixin, test.TestCase):
@mock.patch.object(api.glance, 'get_image_formats')
@mock.patch.object(api.glance, 'image_list_detailed')
def test_no_location_or_file(self, mock_image_list, mock_schemas_list):
mock_image_list.side_effect = [
[self.images.list(), False, False],
[self.images.list(), False, False]
]
image_calls = [
mock.call(test.IsA(dict), filters={'disk_format': 'aki'}),
mock.call(test.IsA(dict), filters={'disk_format': 'ari'})
]
post = {
'name': 'Ubuntu 11.10',
'source_type': 'file',
'description': 'Login with admin/admin',
'disk_format': 'qcow2',
'architecture': 'x86-64',
'min_disk': 15,
'min_ram': 512,
'is_public': 1}
files = {}
form = forms.CreateImageForm(post, files)
self.assertFalse(form.is_valid())
mock_image_list.assert_has_calls(image_calls)
class UpdateImageFormTests(test.ResetImageAPIVersionMixin, test.TestCase):
def test_is_format_field_editable(self):
form = forms.UpdateImageForm({})
disk_format = form.fields['disk_format']
self.assertFalse(disk_format.widget.attrs.get('readonly', False))
@mock.patch.object(api.glance, 'image_get')
def test_image_update(self, mock_image_get):
image = self.images.first()
mock_image_get.return_value = image
url = reverse('horizon:project:images:images:update',
args=[image.id])
res = self.client.get(url)
self.assertNoFormErrors(res)
self.assertEqual(res.context['image'].disk_format,
image.disk_format)
mock_image_get.assert_called_once_with(test.IsHttpRequest(),
image.id)
@mock.patch.object(api.glance, 'image_get')
@mock.patch.object(api.glance, 'image_update')
def test_image_update_post_v2(self, mock_image_update, mock_image_get):
image = self.images.first()
data = {
'name': 'Ubuntu 11.10',
'image_id': str(image.id),
'description': 'Login with admin/admin',
'source_type': 'url',
'image_url': 'http://cloud-images.ubuntu.com/releases/'
'oneiric/release/ubuntu-11.10-server-cloudimg'
'-amd64-disk1.img',
'disk_format': 'qcow2',
'architecture': 'x86-64',
'min_disk': 15,
'min_ram': 512,
'is_public': False,
'protected': False,
'method': 'UpdateImageForm'}
mock_image_get.return_value = image
mock_image_update.return_value = image
url = reverse('horizon:project:images:images:update',
args=[image.id])
res = self.client.post(url, data)
self.assertNoFormErrors(res)
self.assertEqual(res.status_code, 302)
mock_image_get.assert_called_once_with(test.IsHttpRequest(),
str(image.id))
mock_image_update.assert_called_once_with(
test.IsHttpRequest(),
image.id,
visibility='private',
protected=data['protected'],
disk_format=data['disk_format'],
container_format="bare",
name=data['name'],
min_ram=data['min_ram'],
min_disk=data['min_disk'],
description=data['description'],
architecture=data['architecture'])
class ImageViewTests(test.ResetImageAPIVersionMixin, test.TestCase):
@mock.patch.object(api.glance, 'get_image_schemas')
@mock.patch.object(api.glance, 'image_list_detailed')
def test_image_create_get(self, mock_image_list, mock_schemas_list):
mock_image_list.side_effect = [
[self.images.list(), False, False],
[self.images.list(), False, False]
]
image_calls = [
mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'})
]
url = reverse('horizon:project:images:images:create')
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/images/images/create.html')
mock_image_list.assert_has_calls(image_calls)
@override_settings(IMAGES_ALLOW_LOCATION=True)
@mock.patch.object(api.glance, 'get_image_schemas')
def test_image_create_post_location_v2(self, mock_schemas_list):
mock_schemas_list.return_value = self.image_schemas.first()
data = {
'source_type': 'url',
'image_url': 'http://cloud-images.ubuntu.com/releases/'
'oneiric/release/ubuntu-11.10-server-cloudimg'
'-amd64-disk1.img'}
api_data = {'location': data['image_url']}
self._test_image_create(data, api_data)
@mock.patch.object(api.glance, 'get_image_schemas')
def test_image_create_post_upload_v2(self, mock_schemas_list):
mock_schemas_list.return_value = self.image_schemas.first()
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(b'123')
temp_file.flush()
temp_file.seek(0)
data = {'source_type': 'file',
'image_file': temp_file}
api_data = {'data': test.IsA(InMemoryUploadedFile)}
self._test_image_create(data, api_data)
@mock.patch.object(api.glance, 'get_image_schemas')
def test_image_create_post_with_kernel_ramdisk_v2(self, mock_schemas_list):
mock_schemas_list.return_value = self.image_schemas.first()
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(b'123')
temp_file.flush()
temp_file.seek(0)
data = {
'source_type': 'file',
'image_file': temp_file,
'kernel_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482e',
'ramdisk_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482a'
}
api_data = {'data': test.IsA(InMemoryUploadedFile)}
self._test_image_create(data, api_data)
@mock.patch.object(api.glance, 'image_create')
@mock.patch.object(api.glance, 'image_list_detailed')
def _test_image_create(self, extra_form_data, extra_api_data,
mock_image_list, mock_image_create):
data = {
'name': 'Ubuntu 11.10',
'description': 'Login with admin/admin',
'disk_format': 'qcow2',
'architecture': 'x86-64',
'min_disk': 15,
'min_ram': 512,
'is_public': True,
'protected': False,
'method': 'CreateImageForm'}
data.update(extra_form_data)
api_data = {'container_format': 'bare',
'disk_format': data['disk_format'],
'protected': False,
'min_disk': data['min_disk'],
'min_ram': data['min_ram'],
'name': data['name']}
if api.glance.VERSIONS.active < 2:
api_data.update({'is_public': True,
'properties': {
'description': data['description'],
'architecture': data['architecture']}
})
else:
api_data.update({'visibility': 'public',
'description': data['description'],
'architecture': data['architecture']
})
api_data.update(extra_api_data)
mock_image_list.side_effect = [
[self.images.list(), False, False],
[self.images.list(), False, False]
]
image_list_calls = [
mock.call(test.IsHttpRequest(), filters={'disk_format': 'aki'}),
mock.call(test.IsHttpRequest(), filters={'disk_format': 'ari'})
]
mock_image_create.return_value = self.images.first()
url = reverse('horizon:project:images:images:create')
res = self.client.post(url, data)
self.assertNoFormErrors(res)
self.assertEqual(res.status_code, 302)
mock_image_list.assert_has_calls(image_list_calls)
mock_image_create.assert_called_once_with(test.IsHttpRequest(),
**api_data)
@mock.patch.object(api.glance, 'image_get')
def _test_image_detail_get(self, image, mock_image_get):
mock_image_get.return_value = image
res = self.client.get(reverse('horizon:project:images:images:detail',
args=[image.id]))
self.assertTemplateUsed(res,
'horizon/common/_detail.html')
self.assertEqual(res.context['image'].name, image.name)
self.assertEqual(res.context['image'].protected, image.protected)
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
def test_image_detail_get_v2(self):
image = self.imagesV2.first()
self._test_image_detail_get(image)
@mock.patch.object(api.glance, 'image_get')
def _test_image_detail_custom_props_get(self, image, mock_image_get):
mock_image_get.return_value = image
res = self.client.get(reverse('horizon:project:images:images:detail',
args=[image.id]))
image_props = res.context['image_props']
# Test description property not displayed
image_keys = [prop[0] for prop in image_props]
self.assertNotIn(('description'), image_keys)
# Test custom properties are sorted
self.assertLess(image_props.index(('bar', 'bar', 'bar val')),
image_props.index(('foo', 'foo', 'foo val')))
# Test all custom properties appear in template
self.assertContains(res, '<dt title="bar">bar</dt>')
self.assertContains(res, '<dd>bar val</dd>')
self.assertContains(res, '<dt title="foo">foo</dt>')
self.assertContains(res, '<dd>foo val</dd>')
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
def test_image_detail_custom_props_get_v2(self):
image = self.imagesV2.list()[2]
self._test_image_detail_custom_props_get(image)
@mock.patch.object(api.glance, 'image_get')
def _test_protected_image_detail_get(self, image, mock_image_get):
mock_image_get.return_value = image
res = self.client.get(
reverse('horizon:project:images:images:detail',
args=[image.id]))
self.assertTemplateUsed(res,
'horizon/common/_detail.html')
self.assertEqual(res.context['image'].protected, image.protected)
mock_image_get.assert_called_once_with(test.IsHttpRequest(),
image.id)
def test_protected_image_detail_get_v2(self):
image = self.imagesV2.list()[1]
self._test_protected_image_detail_get(image)
@mock.patch.object(api.glance, 'image_get')
def test_image_detail_get_with_exception(self, mock_image_get):
image = self.images.first()
mock_image_get.side_effect = self.exceptions.glance
url = reverse('horizon:project:images:images:detail',
args=[image.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
@mock.patch.object(api.glance, 'image_get')
def test_image_update_get(self, mock_image_get):
image = self.images.filter(is_public=True)[0]
mock_image_get.return_value = image
res = self.client.get(
reverse('horizon:project:images:images:update',
args=[image.id]))
self.assertTemplateUsed(res,
'project/images/images/_update.html')
self.assertEqual(res.context['image'].name, image.name)
# Bug 1076216 - is_public checkbox not being set correctly
self.assertContains(res, "<input type='checkbox' id='id_is_public'"
" name='is_public' checked='checked'>",
html=True,
msg_prefix="The is_public checkbox is not checked")
mock_image_get.assert_called_once_with(test.IsHttpRequest(), image.id)
class OwnerFilterTests(test.TestCase):
def setUp(self):
super().setUp()
self.table = mock.Mock(spec=horizon_tables.DataTable)
self.table.request = self.request
@override_settings(IMAGES_LIST_FILTER_TENANTS=[{'name': 'Official',
'tenant': 'officialtenant',
'icon': 'fa-check'}])
def test_filter(self):
all_images = self.images.list()
table = self.table
self.filter_tenants = settings.IMAGES_LIST_FILTER_TENANTS
filter_ = tables.OwnerFilter()
images = filter_.filter(table, all_images, 'project')
self.assertEqual(images, self._expected('project'))
images = filter_.filter(table, all_images, 'public')
self.assertEqual(images, self._expected('public'))
images = filter_.filter(table, all_images, 'shared')
self.assertEqual(images, self._expected('shared'))
images = filter_.filter(table, all_images, 'officialtenant')
self.assertEqual(images, self._expected('officialtenant'))
def _expected(self, filter_string):
my_tenant_id = self.request.user.tenant_id
images = self.images.list()
        special = [t['tenant'] for t in self.filter_tenants]  # list, so membership checks can repeat
if filter_string == 'public':
return [im for im in images if im.is_public]
if filter_string == 'shared':
return [im for im in images
if (not im.is_public and
im.owner != my_tenant_id and
im.owner not in special)]
if filter_string == 'project':
filter_string = my_tenant_id
return [im for im in images if im.owner == filter_string]
|
the-stack_0_17536 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import uuid
# import the RestClient class
from epages_client.client import RestClient
# import base class for unit testing
from .base_unit_test import BaseUnitTest
class TestNewsletterMethods(BaseUnitTest):
'''A class for testing newsletter related methods on RestClient class'''
def setUp(self):
self.client = RestClient(
os.environ["EPAGES_API_URL"], os.environ["EPAGES_API_TOKEN"])
self.params = {
"query": {},
"param1": "",
"param2": ""
}
def test_001_get_newsletter_campaigns(self):
newsletter_campaigns = self.client.get_newsletter_campaigns(
self.params)
self.assertEqual(isinstance(newsletter_campaigns, dict), True)
def test_002_get_newsletter_campaign_subscribers_no_id(self):
with self.assertRaises(ValueError) as e:
newsletter_subscribers = self.client.get_newsletter_campaign_subscribers(
self.params)
def test_003_get_newsletter_campaign_subscribers_false_id(self):
self.params["param1"] = str(uuid.uuid4())
with self.assertRaises(RuntimeError) as e:
newsletter_subscribers = self.client.get_newsletter_campaign_subscribers(
self.params)
def test_004_get_newsletter_campaign_subscribers(self):
newsletter_campaigns = self.client.get_newsletter_campaigns(
self.params)
# If there are some newsletters, check if the first one has subscribers
if newsletter_campaigns["results"] > 0:
campaign_id = newsletter_campaigns["items"][0]["campaignId"]
self.params["param1"] = campaign_id
newsletter_subscribers = self.client.get_newsletter_campaign_subscribers(
self.params)
self.assertEqual(isinstance(newsletter_subscribers, dict), True)
|
the-stack_0_17537 | import os
from flask import Flask, request, make_response, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
from convert import convert_file
from cleanup import cleanup
with open('template.html', 'r') as inp:
template = inp.read()
app = Flask(__name__)
app.config['upload_folder'] = '../uploads'
@app.route('/upload', methods=['POST'])
def upload_file():
# check if the post request has the file part
if 'file' not in request.files:
resp = make_response('No file provided', 400)
return resp
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
resp = make_response('No file provided', 400)
return resp
if file and file.filename.lower().endswith('.docx'):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['upload_folder'], filename))
# Try to convert the file; redirect to success/fail page
try:
filename = convert_file(filename)
filename = cleanup(filename)
return redirect(url_for('converted_file', filename=filename))
except Exception as e:
return redirect(url_for('conversion_failure', error=e))
else:
resp = make_response(
f'Неправильный тип файла (требуется .docx): {file.filename}', 400)
return resp
@app.route('/result/<filename>', methods=['GET'])
def converted_file(filename):
download_url = url_for('download_file', filename=filename)
home_url = url_for('landing')
return template.format(
body=f'''<p>Файл был успешно конвертирован: <a href="{download_url}">{filename}</a></p>
<p><a href="{home_url}">Конвертировать другой файл</a>.</p>''')
@app.route('/download/<filename>', methods=['GET'])
def download_file(filename):
path = os.path.join('..', 'converted')
if not os.path.exists(os.path.join(path, filename)):
return make_response('File not found', 404)
return send_from_directory(path, filename)
@app.route('/failure/<error>', methods=['GET'])
def conversion_failure(error):
return template.format(body=f'Ошибка конвертации ({error})')
@app.route('/', methods=['GET'])
def landing():
return template.format(body="""<h1>Загрузите файл в формате .docx</h1>
<form method="post" enctype="multipart/form-data" action="/upload">
<input type="file" name="file">
<input type="submit" value="Загрузить">
</form>""")
|
the-stack_0_17538 | #!/usr/bin/env python
"""
Usage example employing Lasagne for digit recognition using the MNIST dataset.
This example is deliberately structured as a long flat file, focusing on how
to use Lasagne, instead of focusing on writing maximally modular and reusable
code. It is used as the foundation for the introductory Lasagne tutorial:
http://lasagne.readthedocs.org/en/latest/user/tutorial.html
More in-depth examples and reproductions of paper results are maintained in
a separate repository: https://github.com/Lasagne/Recipes
"""
from __future__ import print_function
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
# ################## Download and prepare the MNIST dataset ##################
# This is just some way of getting the MNIST dataset from an online location
# and loading it into numpy arrays. It doesn't involve Lasagne at all.
def load_dataset():
# We first define a download function, supporting both Python 2 and 3.
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
print("Downloading %s" % filename)
urlretrieve(source + filename, filename)
# We then define functions for loading MNIST images and labels.
# For convenience, they also download the requested files if needed.
import gzip
def load_mnist_images(filename):
if not os.path.exists(filename):
download(filename)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(-1, 1, 28, 28)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(filename):
if not os.path.exists(filename):
download(filename)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# We can now download and read the training and test set images and labels.
X_train = load_mnist_images('train-images-idx3-ubyte.gz')
y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
return X_train, y_train, X_val, y_val, X_test, y_test
# ##################### Build the neural network model #######################
# This script supports three types of models. For each one, we define a
# function that takes a Theano variable representing the input and returns
# the output layer of a neural network model built in Lasagne.
def build_cnn(input_var=None):
# As a third model, we'll create a CNN of two convolution + pooling stages
# and a fully-connected hidden layer in front of the output layer.
# Input layer, as usual:
network = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
input_var=input_var)
# This time we do not apply input dropout, as it tends to work less well
# for convolutional layers.
# Convolutional layer with 32 kernels of size 5x5. Strided and padded
# convolutions are supported as well; see the docstring.
network = lasagne.layers.Conv2DLayer(
network, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
# Expert note: Lasagne provides alternative convolutional layers that
# override Theano's choice of which implementation to use; for details
# please see http://lasagne.readthedocs.org/en/latest/user/tutorial.html.
# Max-pooling layer of factor 2 in both dimensions:
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
# Another convolution with 32 5x5 kernels, and another 2x2 pooling:
network = lasagne.layers.Conv2DLayer(
network, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify)
network = lasagne.layers.MaxPool2DLayer(network, pool_size=(2, 2))
# A fully-connected layer of 256 units with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=256,
nonlinearity=lasagne.nonlinearities.rectify)
# And, finally, the 10-unit output layer with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=.5),
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
return network
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
# ############################ Freeze Layers #################################
# so they aren't updated during training
def freeze (layer):
for param in layer.params:
layer.params[param].discard('trainable')
return layer # optional, if you want to use it in-line
# usage:
# for layer in lasagne.layers.get_all_layers(output_layer):
# if layer is not output_layer:
# freeze(layer)
def unfreeze (layer):
for param in layer.params:
        layer.params[param].add('trainable')  # re-add the tag so the parameter is trainable again
return layer # optional, if you want to use it in-line
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model='cnn', num_epochs=100):
# Load the dataset
print("Loading data...")
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Create neural network model (depending on first command line parameter)
print("Building model and compiling functions...")
if model == 'cnn':
network = build_cnn(input_var)
else:
print("Unrecognized model type %r." % model)
return
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
# Create update expressions for training, i.e., how to modify the
# parameters at each training step. Here, we'll use Stochastic Gradient
# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    # Only train the final dense (output) layer: freeze every layer below it first, so
    # the frozen parameters lose their 'trainable' tag before we collect the params to update.
    for layer in lasagne.layers.get_all_layers(network):  # gathers all layers below the given layer, including it
        if layer is not network:
            freeze(layer)
    params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=0.01, momentum=0.9)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
if __name__ == '__main__':
if ('--help' in sys.argv) or ('-h' in sys.argv):
print("Trains a neural network on MNIST using Lasagne.")
print("Usage: %s [MODEL [EPOCHS]]" % sys.argv[0])
print()
print("MODEL: 'mlp' for a simple Multi-Layer Perceptron (MLP),")
print(" 'custom_mlp:DEPTH,WIDTH,DROP_IN,DROP_HID' for an MLP")
print(" with DEPTH hidden layers of WIDTH units, DROP_IN")
print(" input dropout and DROP_HID hidden dropout,")
print(" 'cnn' for a simple Convolutional Neural Network (CNN).")
print("EPOCHS: number of training epochs to perform (default: 500)")
else:
kwargs = {}
if len(sys.argv) > 1:
kwargs['model'] = sys.argv[1]
if len(sys.argv) > 2:
kwargs['num_epochs'] = int(sys.argv[2])
main(**kwargs)
|
the-stack_0_17539 | # Relationships!!
# Up until this point, you've only looked at one variable at a time. In this chapter, you'll explore relationships between variables two at a time, using scatter plots and other visualizations to extract insights from a new dataset obtained from the Behavioral Risk Factor Surveillance System (BRFSS). You'll also learn how to quantify those relationships using correlation and simple regression.
# Imports assumed by the exercise snippets below; the DataCamp environment also preloads
# a `brfss` DataFrame with the survey data, and Pmf is assumed to come from empiricaldist.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from empiricaldist import Pmf
# PMF of age
# Do people tend to gain weight as they get older? We can answer this question by visualizing the relationship between weight and age. But before we make a scatter plot, it is a good idea to visualize distributions one variable at a time. Here, you'll visualize age using a bar chart first. Recall that all PMF objects have a .bar() method to make a bar chart.
# The BRFSS dataset includes a variable, 'AGE' (note the capitalization!), which represents each respondent's age. To protect respondents' privacy, ages are rounded off into 5-year bins. 'AGE' contains the midpoint of the bins
# Extract age
age = brfss['AGE']
# Plot the PMF
Pmf(age).bar()
# Label the axes
plt.xlabel('Age in years')
plt.ylabel('PMF')
plt.show()
# Scatter plot
# Now let's make a scatterplot of weight versus age. To make the code run faster, I've selected only the first 1000 rows from the brfss DataFrame.
# weight and age have already been extracted for you. Your job is to use plt.plot() to make a scatter plot.
# Select the first 1000 respondents
brfss = brfss[:1000]
# Extract age and weight
age = brfss['AGE']
weight = brfss['WTKG3']
# Make a scatter plot
plt.plot(age,weight, 'o', alpha = 0.1 )
plt.xlabel('Age in years')
plt.ylabel('Weight in kg')
plt.show()
# Jittering
# In the previous exercise, the ages fall in columns because they've been rounded into 5-year bins. If we jitter them, the scatter plot will show the relationship more clearly. Recall how Allen jittered height and weight in the video:
# height_jitter = height + np.random.normal(0, 2, size=len(brfss))
# weight_jitter = weight + np.random.normal(0, 2, size=len(brfss))
# Select the first 1000 respondents
brfss = brfss[:1000]
# Add jittering to age
age = brfss['AGE'] + np.random.normal(0,2.5, size = len(brfss))
# Extract weight
weight = brfss['WTKG3']
# Make a scatter plot
plt.plot(age, weight, 'o',markersize = 5, alpha =0.2 )
plt.xlabel('Age in years')
plt.ylabel('Weight in kg')
plt.show()
# Height and weight
# Previously we looked at a scatter plot of height and weight, and saw that taller people tend to be heavier. Now let's take a closer look using a box plot. The brfss DataFrame contains a variable '_HTMG10' that represents height in centimeters, binned into 10 cm groups.
# Recall how Allen created the box plot of 'AGE' and 'WTKG3' in the video, with the y-axis on a logarithmic scale:
# sns.boxplot(x='AGE', y='WTKG3', data=data, whis=10)
# plt.yscale('log')
# Drop rows with missing data
data = brfss.dropna(subset=['_HTMG10', 'WTKG3'])
# Make a box plot
sns.boxplot(x = '_HTMG10', y = 'WTKG3', data = data, whis = 10)
# Plot the y-axis on a log scale
plt.yscale('log')
# Remove unneeded lines and label axes
sns.despine(left=True, bottom=True)
plt.xlabel('Height in cm')
plt.ylabel('Weight in kg')
plt.show()
# Distribution of income
# In the next two exercises we'll look at relationships between income and other variables. In the BRFSS, income is represented as a categorical variable; that is, respondents are assigned to one of 8 income categories. The variable name is 'INCOME2'. Before we connect income with anything else, let's look at the distribution by computing the PMF. Recall that all Pmf objects have a .bar() method.
# Extract income
income = brfss['INCOME2']
# Plot the PMF
Pmf(income).bar()
# Label the axes
plt.xlabel('Income level')
plt.ylabel('PMF')
plt.show()
# Income and height
# Let's now use a violin plot to visualize the relationship between income and height.
# Drop rows with missing data
data = brfss.dropna(subset=['INCOME2', 'HTM4'])
# Make a violin plot
sns.violinplot(x = 'INCOME2', y ='HTM4', data=data, inner = None)
# Remove unneeded lines and label axes
sns.despine(left=True, bottom=True)
plt.xlabel('Income level')
plt.ylabel('Height in cm')
plt.show()
# Computing correlations
# The purpose of the BRFSS is to explore health risk factors, so it includes questions about diet. The variable '_VEGESU1' represents the number of servings of vegetables respondents reported eating per day.
# Let's see how this variable relates to age and income.
# Select columns
columns = ['AGE', 'INCOME2', '_VEGESU1']
subset = brfss[columns]
# Compute the correlation matrix
print(subset.corr())
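# A minimal extra sketch (same assumed brfss DataFrame): a single pairwise correlation
# can also be computed directly between two Series, e.g.
# print(brfss['AGE'].corr(brfss['_VEGESU1']))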
# Income and vegetables
# As we saw in a previous exercise, the variable '_VEGESU1' represents the number of vegetable servings respondents reported eating per day.
# Let's estimate the slope of the relationship between vegetable consumption and income.
from scipy.stats import linregress
# Extract the variables
subset = brfss.dropna(subset=['INCOME2', '_VEGESU1'])
xs = subset['INCOME2']
ys = subset['_VEGESU1']
# Compute the linear regression
res = linregress(xs,ys)
print(res)
# Fit a line
# Continuing from the previous exercise:
# Assume that xs and ys contain income codes and daily vegetable consumption, respectively, and
# res contains the results of a simple linear regression of ys onto xs.
# Now, you're going to compute the line of best fit. NumPy has been imported for you as np.
# Plot the scatter plot
plt.clf()
x_jitter = xs + np.random.normal(0, 0.15, len(xs))
plt.plot(x_jitter, ys, 'o', alpha=0.2)
# Plot the line of best fit
fx = np.array([xs.min(),xs.max()])
fy = res.intercept+res.slope*fx
plt.plot(fx, fy, '-', alpha=0.7)
plt.xlabel('Income code')
plt.ylabel('Vegetable servings per day')
plt.ylim([0, 6])
plt.show()
|