ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | b40e71e1ceca0caf1501a36a6850c9ee66e524f9 | from mangopaysdk.tools.apibase import ApiBase
from mangopaysdk.entities.cardregistration import CardRegistration
from mangopaysdk.entities.temporarypaymentcard import TemporaryPaymentCard
class ApiCardRegistrations (ApiBase):
"""Class to management MangoPay API for card registrations."""
def Create(self, cardRegistration):
"""Create new card registration
param CardRegistration object to create
return CardRegistration Object returned from API
"""
return self._createObject('cardregistration_create', cardRegistration, 'CardRegistration')
def Get(self, cardRegistrationId):
"""Get card registration
param string Card Registration identifier
return CardRegistration Object returned from API
"""
return self._getObject('cardregistration_get', cardRegistrationId, 'CardRegistration')
def Update(self, cardRegistration):
"""Update card registration
param CardRegistration object to save
return CardRegistration Object returned from API
"""
return self._saveObject('cardregistration_save', cardRegistration, 'CardRegistration')
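# A minimal usage sketch (illustrative only; the root API object, the field names
# and the card-tokenization step are assumptions about the wider SDK, not part of
# this file):
#
#     api = ApiCardRegistrations(root_api)
#     reg = CardRegistration()
#     reg.UserId, reg.Currency = some_user_id, 'EUR'
#     reg = api.Create(reg)                    # returns AccessKey / PreregistrationData
#     reg.RegistrationData = tokenizer_result  # data returned by the card tokenizer
#     reg = api.Update(reg)                    # finalizes the registration, yielding CardId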
def CreateTemporaryPaymentCard(self, paymentCard):
"""WARNING!
This is a temporary function and will be removed in the future.
Contact support before using these features or if you have any queries.
Creates new temporary payment card.
param TemporaryPaymentCard Temporary payment card to be created
return TemporaryPaymentCard Object returned from API
"""
return self._createObject('temp_paymentcards_create', paymentCard, 'TemporaryPaymentCard')
def GetTemporaryPaymentCard(self, paymentCardId):
"""WARNING!
This is a temporary function and will be removed in the future.
Contact support before using these features or if you have any queries.
Gets temporary payment card.
param string Temporary payment card identifier
return TemporaryPaymentCard Object returned from API
"""
return self._getObject('temp_paymentcards_get', paymentCardId, 'TemporaryPaymentCard') |
py | b40e7278a6950f04344ea155626291020271ac2b | import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from evalml.pipelines.components import (
DropColumns,
SelectByType,
SelectColumns,
)
@pytest.mark.parametrize("class_to_test", [DropColumns, SelectColumns])
def test_column_transformer_init(class_to_test):
transformer = class_to_test(columns=None)
assert transformer.parameters["columns"] is None
transformer = class_to_test(columns=[])
assert transformer.parameters["columns"] == []
transformer = class_to_test(columns=["a", "b"])
assert transformer.parameters["columns"] == ["a", "b"]
with pytest.raises(ValueError, match="Parameter columns must be a list."):
_ = class_to_test(columns="Column1")
def test_select_by_type_init():
transformer = SelectByType(column_types=None)
assert transformer.parameters["column_types"] is None
transformer = SelectByType(column_types=[])
assert transformer.parameters["column_types"] == []
transformer = SelectByType(column_types=["a", "b"])
assert transformer.parameters["column_types"] == ["a", "b"]
def test_select_by_type_empty_X():
X = pd.DataFrame()
transformer = SelectByType(columns=[])
assert_frame_equal(X, transformer.transform(X))
transformer = SelectByType(columns=[])
assert_frame_equal(X, transformer.fit_transform(X))
transformer = SelectByType(columns=list(X.columns))
assert transformer.transform(X).empty
@pytest.mark.parametrize(
"class_to_test,checking_functions",
[
(
DropColumns,
[
lambda X, X_t: X_t.equals(X.astype("int64")),
lambda X, X_t: X_t.equals(X.astype("int64")),
lambda X, X_t: X_t.equals(X.drop(columns=["one"]).astype("int64")),
lambda X, X_t: X_t.empty,
],
),
(
SelectColumns,
[
lambda X, X_t: X_t.empty,
lambda X, X_t: X_t.empty,
lambda X, X_t: X_t.equals(X[["one"]].astype("int64")),
lambda X, X_t: X_t.equals(X.astype("int64")),
],
),
(
SelectByType,
[
lambda X, X_t: X_t.empty,
lambda X, X_t: X_t.empty,
lambda X, X_t: X_t.equals(X[["three"]].astype("int64")),
lambda X, X_t: X_t.astype(str).equals(X.astype(str)),
],
),
],
)
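# Each checking_functions entry corresponds, in order, to the four transformer
# configurations exercised in the test body: columns=None, columns=[], selecting a
# single column/type, and selecting every column/type.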
def test_column_transformer_transform(class_to_test, checking_functions):
if class_to_test is SelectByType:
X = pd.DataFrame(
{
"one": ["1", "2", "3", "4"],
"two": [False, True, True, False],
"three": [1, 2, 3, 4],
}
)
else:
X = pd.DataFrame(
{"one": [1, 2, 3, 4], "two": [2, 3, 4, 5], "three": [1, 2, 3, 4]}
)
check1, check2, check3, check4 = checking_functions
transformer = class_to_test(columns=None)
assert check1(X, transformer.transform(X))
transformer = class_to_test(columns=[])
assert check2(X, transformer.transform(X))
if class_to_test is SelectByType:
transformer = class_to_test(column_types=["integer"])
else:
transformer = class_to_test(columns=["one"])
assert check3(X, transformer.transform(X))
if class_to_test is SelectByType:
transformer = class_to_test(column_types=["categorical", "Boolean", "Integer"])
X.ww.init(logical_types={"one": "categorical"})
else:
transformer = class_to_test(columns=list(X.columns))
assert check4(X, transformer.transform(X))
@pytest.mark.parametrize(
"class_to_test,checking_functions",
[
(
DropColumns,
[
lambda X, X_t: X_t.equals(X.astype("int64")),
lambda X, X_t: X_t.equals(X.drop(columns=["one"]).astype("int64")),
lambda X, X_t: X_t.empty,
],
),
(
SelectColumns,
[
lambda X, X_t: X_t.empty,
lambda X, X_t: X_t.equals(X[["one"]].astype("int64")),
lambda X, X_t: X_t.equals(X.astype("int64")),
],
),
(
SelectByType,
[
lambda X, X_t: X_t.empty,
lambda X, X_t: X_t.equals(X[["three"]].astype("int64")),
lambda X, X_t: X_t.astype(str).equals(X.astype(str)),
],
),
],
)
def test_column_transformer_fit_transform(class_to_test, checking_functions):
if class_to_test is SelectByType:
X = pd.DataFrame(
{
"one": ["1", "2", "3", "4"],
"two": [False, True, True, False],
"three": [1, 2, 3, 4],
}
)
else:
X = pd.DataFrame(
{"one": [1, 2, 3, 4], "two": [2, 3, 4, 5], "three": [1, 2, 3, 4]}
)
check1, check2, check3 = checking_functions
assert check1(X, class_to_test(columns=[]).fit_transform(X))
if class_to_test is SelectByType:
assert check2(X, class_to_test(column_types=["integer"]).fit_transform(X))
else:
assert check2(X, class_to_test(columns=["one"]).fit_transform(X))
if class_to_test is SelectByType:
X.ww.init(logical_types={"one": "categorical"})
assert check3(
X,
class_to_test(
column_types=["categorical", "boolean", "integer"]
).fit_transform(X),
)
else:
assert check3(X, class_to_test(columns=list(X.columns)).fit_transform(X))
@pytest.mark.parametrize(
"class_to_test,answers",
[
(
DropColumns,
[
pd.DataFrame(
[[0, 2, 3], [4, 6, 7], [8, 10, 11]],
columns=[0, 2, 3],
dtype="int64",
),
pd.DataFrame([[], [], []], dtype="Int64"),
pd.DataFrame(np.arange(12).reshape(3, 4), dtype="int64"),
],
),
(
SelectColumns,
[
pd.DataFrame([[1], [5], [9]], columns=[1], dtype="int64"),
pd.DataFrame(np.arange(12).reshape(3, 4), dtype="int64"),
pd.DataFrame([[], [], []], dtype="Int64"),
],
),
],
)
def test_column_transformer_int_col_names_np_array(class_to_test, answers):
X = np.arange(12).reshape(3, 4)
answer1, answer2, answer3 = answers
transformer = class_to_test(columns=[1])
assert_frame_equal(answer1, transformer.transform(X))
transformer = class_to_test(columns=[0, 1, 2, 3])
assert_frame_equal(answer2, transformer.transform(X))
transformer = class_to_test(columns=[])
assert_frame_equal(answer3, transformer.transform(X))
def test_typeortag_column_transformer_ww_logical_and_semantic_types():
X = pd.DataFrame(
{
"one": ["1", "2", "3", "4"],
"two": [False, True, True, False],
"three": [1, 2, 3, 4],
"four": [4.0, 2.3, 6.5, 2.6],
}
)
X.ww.init(logical_types={"one": "categorical"})
X_t = SelectByType(column_types=[ww.logical_types.Integer]).fit_transform(X)
assert X_t.equals(X[["three"]].astype("int64"))
X_t = SelectByType(column_types=["Double"]).fit_transform(X)
assert X_t.equals(X[["four"]].astype("float64"))
X_t = SelectByType(
column_types=[
ww.logical_types.Categorical,
ww.logical_types.Boolean,
ww.logical_types.Integer,
ww.logical_types.Double,
]
).fit_transform(X)
assert X_t.astype(str).equals(X.astype(str))
X_t = SelectByType(column_types=["numeric"]).fit_transform(X)
assert X_t.astype(str).equals(X[["three", "four"]].astype(str))
def test_column_selector_missing_columns():
selector = SelectColumns(columns=["A", "B", "C", "D"])
X = pd.DataFrame(columns=["A", "C", "F", "G"])
X_t = selector.fit_transform(X)
assert (X_t.columns == ["A", "C"]).all()
def test_select_by_type_exclude():
selector = SelectByType(column_types="category", exclude=True)
X = pd.DataFrame(
{
"one": ["1", "2", "3", "4"],
"two": [1, 2, 3, 4],
"three": [4.0, 2.3, 6.5, 2.6],
}
)
X.ww.init(logical_types={"one": "categorical"})
X_t = selector.fit_transform(X)
assert list(X_t.columns) == ["two", "three"]
|
py | b40e72bca7c7ebbae4c8ee7ac04fa39b82121471 | from typing import Optional
from django.conf import settings
from allauth.account.models import EmailAddress
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
settings = getattr(settings, "SOCIALACCOUNT_PROVIDERS", {}).get("mediawiki", {})
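# Note: this rebinding intentionally shadows django.conf.settings with only the
# provider-specific dict from SOCIALACCOUNT_PROVIDERS["mediawiki"], so the lookups
# below (e.g. USERPAGE_TEMPLATE) read from that sub-dictionary.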
class MediaWikiAccount(ProviderAccount):
def get_profile_url(self):
userpage = settings.get(
"USERPAGE_TEMPLATE", "https://meta.wikimedia.org/wiki/User:{username}"
)
username = self.account.extra_data.get("username")
if not username:
return None
return userpage.format(username=username.replace(" ", "_"))
def to_str(self):
dflt = super(MediaWikiAccount, self).to_str()
return self.account.extra_data.get("username", dflt)
class MediaWikiProvider(OAuth2Provider):
id = "mediawiki"
name = "MediaWiki"
account_class = MediaWikiAccount
@staticmethod
def _get_email(data: dict) -> Optional[str]:
if data.get("confirmed_email"):
return data.get("email")
return None
def extract_uid(self, data):
return str(data["sub"])
def extract_extra_data(self, data):
return dict(
username=data.get("username"),
)
def extract_common_fields(self, data):
return dict(
email=self._get_email(data),
username=data.get("username"),
name=data.get("realname"),
)
def extract_email_addresses(self, data):
return [EmailAddress(email=self._get_email(data), verified=True, primary=True)]
provider_classes = [MediaWikiProvider]
|
py | b40e733da982f429fbffb3b1a55466b2c3a563a6 | """Intents and performers for basic user interaction.
Use :obj:`effect.io.stdio_dispatcher` as a dispatcher for :obj:`Display` and
:obj:`Prompt` that uses built-in Python standard io facilities.
"""
from __future__ import print_function
import attr
from six.moves import input
from . import sync_performer, TypeDispatcher
@attr.s
class Display(object):
"""Display some text to the user."""
output = attr.ib()
@attr.s
class Prompt(object):
"""Get some input from the user, with a prompt."""
prompt = attr.ib()
@sync_performer
def perform_display_print(dispatcher, intent):
"""Perform a :obj:`Display` intent by printing the output."""
print(intent.output)
@sync_performer
def perform_get_input_raw_input(dispatcher, intent):
"""
Perform a :obj:`Prompt` intent by using ``raw_input`` (or ``input`` on
Python 3).
"""
return input(intent.prompt)
stdio_dispatcher = TypeDispatcher({
Display: perform_display_print,
Prompt: perform_get_input_raw_input,
})
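# A minimal usage sketch (assuming the top-level ``effect`` package exposes
# ``Effect`` and ``sync_perform``, which are not shown in this module):
#
#     from effect import Effect, sync_perform
#     sync_perform(stdio_dispatcher, Effect(Display("Hello!")))
#     name = sync_perform(stdio_dispatcher, Effect(Prompt("Your name: ")))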
|
py | b40e7445ec5b21693a9c41cd4cc123c10623658b | """
Functions for changing global ufunc configuration
This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
"""
import collections.abc
import contextlib
from .overrides import set_module
from .umath import (
UFUNC_BUFSIZE_DEFAULT,
ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT,
SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID,
)
from . import umath
__all__ = [
"seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
"errstate",
]
_errdict = {"ignore": ERR_IGNORE,
"warn": ERR_WARN,
"raise": ERR_RAISE,
"call": ERR_CALL,
"print": ERR_PRINT,
"log": ERR_LOG}
_errdict_rev = {value: key for key, value in _errdict.items()}
@set_module('numpy')
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall, errstate
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]_:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] https://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> old_settings = np.seterr(all='ignore') #seterr to known value
>>> np.seterr(over='raise')
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
>>> np.seterr(**old_settings) # reset to default
{'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}
>>> np.int16(32000) * np.int16(3)
30464
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in short_scalars
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
>>> np.int16(32000) * np.int16(3)
30464
"""
pyvals = umath.geterrobj()
old = geterr()
if divide is None:
divide = all or old['divide']
if over is None:
over = all or old['over']
if under is None:
under = all or old['under']
if invalid is None:
invalid = all or old['invalid']
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
(_errdict[over] << SHIFT_OVERFLOW) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
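# Each error-handling mode is an integer in the range 0-7, so the four categories are
# packed into independent 3-bit fields of a single mask word at the SHIFT_* offsets;
# geterr() below unpacks them again with a 3-bit (0b111) mask.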
pyvals[1] = maskvalue
umath.seterrobj(pyvals)
return old
@set_module('numpy')
def geterr():
"""
Get the current way of handling floating-point errors.
Returns
-------
res : dict
A dictionary with keys "divide", "over", "under", and "invalid",
whose values are from the strings "ignore", "print", "log", "warn",
"raise", and "call". The keys represent possible floating-point
exceptions, and the values define how these exceptions are handled.
See Also
--------
geterrcall, seterr, seterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterr()
{'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
>>> np.arange(3.) / np.arange(3.)
array([nan, 1., 1.])
>>> oldsettings = np.seterr(all='warn', over='raise')
>>> np.geterr()
{'divide': 'warn', 'over': 'raise', 'under': 'warn', 'invalid': 'warn'}
>>> np.arange(3.) / np.arange(3.)
array([nan, 1., 1.])
"""
maskvalue = umath.geterrobj()[1]
mask = 7
res = {}
val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
res['divide'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_OVERFLOW) & mask
res['over'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_UNDERFLOW) & mask
res['under'] = _errdict_rev[val]
val = (maskvalue >> SHIFT_INVALID) & mask
res['invalid'] = _errdict_rev[val]
return res
@set_module('numpy')
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
Parameters
----------
size : int
Size of buffer.
"""
if size > 10e6:
raise ValueError("Buffer size, %s, is too big." % size)
if size < 5:
raise ValueError("Buffer size, %s, is too small." % size)
if size % 16 != 0:
raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
pyvals = umath.geterrobj()
old = getbufsize()
pyvals[0] = size
umath.seterrobj(pyvals)
return old
@set_module('numpy')
def getbufsize():
"""
Return the size of the buffer used in ufuncs.
Returns
-------
getbufsize : int
Size of ufunc buffer in bytes.
"""
return umath.geterrobj()[0]
@set_module('numpy')
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
There are two ways to capture floating-point error messages. The first
is to set the error-handler to 'call', using `seterr`. Then, set
the function to call using this function.
The second is to set the error-handler to 'log', using `seterr`.
Floating-point errors then trigger a call to the 'write' method of
the provided object.
Parameters
----------
func : callable f(err, flag) or object with write method
Function to call upon floating-point errors ('call'-mode) or
object whose 'write' method is used to log such message ('log'-mode).
The call function takes two arguments. The first is a string describing
the type of error (such as "divide by zero", "overflow", "underflow",
or "invalid value"), and the second is the status flag. The flag is a
byte, whose four least-significant bits indicate the type of error, one
of "divide", "over", "under", "invalid"::
[0 0 0 0 divide over under invalid]
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
If an object is provided, its write method should take one argument,
a string.
Returns
-------
h : callable, log instance or None
The old error handler.
See Also
--------
seterr, geterr, geterrcall
Examples
--------
Callback upon error:
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> saved_handler = np.seterrcall(err_handler)
>>> save_err = np.seterr(all='call')
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([inf, inf, inf])
>>> np.seterrcall(saved_handler)
<function err_handler at 0x...>
>>> np.seterr(**save_err)
{'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}
Log error message:
>>> class Log:
... def write(self, msg):
... print("LOG: %s" % msg)
...
>>> log = Log()
>>> saved_handler = np.seterrcall(log)
>>> save_err = np.seterr(all='log')
>>> np.array([1, 2, 3]) / 0.0
LOG: Warning: divide by zero encountered in true_divide
array([inf, inf, inf])
>>> np.seterrcall(saved_handler)
<numpy.core.numeric.Log object at 0x...>
>>> np.seterr(**save_err)
{'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}
"""
if func is not None and not isinstance(func, collections.abc.Callable):
if (not hasattr(func, 'write') or
not isinstance(func.write, collections.abc.Callable)):
raise ValueError("Only callable can be used as callback")
pyvals = umath.geterrobj()
old = geterrcall()
pyvals[2] = func
umath.seterrobj(pyvals)
return old
@set_module('numpy')
def geterrcall():
"""
Return the current callback function used on floating-point errors.
When the error handling for a floating-point error (one of "divide",
"over", "under", or "invalid") is set to 'call' or 'log', the function
that is called or the log instance that is written to is returned by
`geterrcall`. This function or log instance has been set with
`seterrcall`.
Returns
-------
errobj : callable, log instance or None
The current error handler. If no handler was set through `seterrcall`,
``None`` is returned.
See Also
--------
seterrcall, seterr, geterr
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrcall() # we did not yet set a handler, returns None
>>> oldsettings = np.seterr(all='call')
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
>>> oldhandler = np.seterrcall(err_handler)
>>> np.array([1, 2, 3]) / 0.0
Floating point error (divide by zero), with flag 1
array([inf, inf, inf])
>>> cur_handler = np.geterrcall()
>>> cur_handler is err_handler
True
"""
return umath.geterrobj()[2]
class _unspecified:
pass
_Unspecified = _unspecified()
@set_module('numpy')
class errstate(contextlib.ContextDecorator):
"""
errstate(**kwargs)
Context manager for floating-point error handling.
Using an instance of `errstate` as a context manager allows statements in
that context to execute with a known error handling behavior. Upon entering
the context the error handling is set with `seterr` and `seterrcall`, and
upon exiting it is reset to what it was before.
.. versionchanged:: 1.17.0
`errstate` is also usable as a function decorator, saving
a level of indentation if an entire function is wrapped.
See :py:class:`contextlib.ContextDecorator` for more information.
Parameters
----------
kwargs : {divide, over, under, invalid}
Keyword arguments. The valid keywords are the possible floating-point
exceptions. Each keyword should have a string value that defines the
treatment for the particular error. Possible values are
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
See Also
--------
seterr, geterr, seterrcall, geterrcall
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
array([nan, inf, inf])
>>> with np.errstate(divide='warn'):
... np.arange(3) / 0.
array([nan, inf, inf])
>>> np.sqrt(-1)
nan
>>> with np.errstate(invalid='raise'):
... np.sqrt(-1)
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
FloatingPointError: invalid value encountered in sqrt
Outside the context the error handling behavior has not changed:
>>> np.geterr()
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
"""
def __init__(self, *, call=_Unspecified, **kwargs):
self.call = call
self.kwargs = kwargs
def __enter__(self):
self.oldstate = seterr(**self.kwargs)
if self.call is not _Unspecified:
self.oldcall = seterrcall(self.call)
def __exit__(self, *exc_info):
seterr(**self.oldstate)
if self.call is not _Unspecified:
seterrcall(self.oldcall)
def _setdef():
defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
umath.seterrobj(defval)
# set the default values
_setdef()
|
py | b40e750d1178cfa5fef0b8da99343819eb8155cc | import math
from testutils import assert_raises, skip_if_unsupported
NAN = float('nan')
INF = float('inf')
NINF = float('-inf')
# assert(math.exp(2) == math.exp(2.0))
# assert(math.exp(True) == math.exp(1.0))
#
# class Conversible():
# def __float__(self):
# print("Converting to float now!")
# return 1.1111
#
# assert math.log(1.1111) == math.log(Conversible())
# roundings
assert int.__trunc__
assert int.__floor__
assert int.__ceil__
# assert float.__trunc__
def float_floor_exists():
assert float.__floor__
def float_ceil_exists():
assert float.__ceil__
skip_if_unsupported(3, 9, float_floor_exists)
skip_if_unsupported(3, 9, float_ceil_exists)
assert math.trunc(2) == 2
assert math.ceil(3) == 3
assert math.floor(4) == 4
assert math.trunc(2.2) == 2
assert math.ceil(3.3) == 4
assert math.floor(4.4) == 4
assert isinstance(math.trunc(2.2), int)
assert isinstance(math.ceil(3.3), int)
assert isinstance(math.floor(4.4), int)
class A(object):
def __trunc__(self):
return 2
def __ceil__(self):
return 3
def __floor__(self):
return 4
assert math.trunc(A()) == 2
assert math.ceil(A()) == 3
assert math.floor(A()) == 4
class A(object):
def __trunc__(self):
return 2.2
def __ceil__(self):
return 3.3
def __floor__(self):
return 4.4
assert math.trunc(A()) == 2.2
assert math.ceil(A()) == 3.3
assert math.floor(A()) == 4.4
class A(object):
def __trunc__(self):
return 'trunc'
def __ceil__(self):
return 'ceil'
def __floor__(self):
return 'floor'
assert math.trunc(A()) == 'trunc'
assert math.ceil(A()) == 'ceil'
assert math.floor(A()) == 'floor'
with assert_raises(TypeError):
math.trunc(object())
with assert_raises(TypeError):
math.ceil(object())
with assert_raises(TypeError):
math.floor(object())
isclose = math.isclose
def assertIsClose(a, b, *args, **kwargs):
assert isclose(a, b, *args, **kwargs) == True, "%s and %s should be close!" % (a, b)
def assertIsNotClose(a, b, *args, **kwargs):
assert isclose(a, b, *args, **kwargs) == False, "%s and %s should not be close!" % (a, b)
def assertAllClose(examples, *args, **kwargs):
for a, b in examples:
assertIsClose(a, b, *args, **kwargs)
def assertAllNotClose(examples, *args, **kwargs):
for a, b in examples:
assertIsNotClose(a, b, *args, **kwargs)
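# Reminder of the PEP 485 semantics exercised below:
# isclose(a, b) is abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol),
# with rel_tol defaulting to 1e-09 and abs_tol defaulting to 0.0.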
# test_negative_tolerances: ValueError should be raised if either tolerance is less than zero
assert_raises(ValueError, lambda: isclose(1, 1, rel_tol=-1e-100))
assert_raises(ValueError, lambda: isclose(1, 1, rel_tol=1e-100, abs_tol=-1e10))
# test_identical: identical values must test as close
identical_examples = [(2.0, 2.0),
(0.1e200, 0.1e200),
(1.123e-300, 1.123e-300),
(12345, 12345.0),
(0.0, -0.0),
(345678, 345678)]
assertAllClose(identical_examples, rel_tol=0.0, abs_tol=0.0)
# test_eight_decimal_places: examples that are close to 1e-8, but not 1e-9
eight_decimal_places_examples = [(1e8, 1e8 + 1),
(-1e-8, -1.000000009e-8),
(1.12345678, 1.12345679)]
assertAllClose(eight_decimal_places_examples, rel_tol=1e-08)
assertAllNotClose(eight_decimal_places_examples, rel_tol=1e-09)
# test_near_zero: values close to zero
near_zero_examples = [(1e-9, 0.0),
(-1e-9, 0.0),
(-1e-150, 0.0)]
# these should not be close to any rel_tol
assertAllNotClose(near_zero_examples, rel_tol=0.9)
# these should be close to abs_tol=1e-8
assertAllClose(near_zero_examples, abs_tol=1e-8)
# test_identical_infinite: these are close regardless of tolerance -- i.e. they are equal
assertIsClose(INF, INF)
assertIsClose(INF, INF, abs_tol=0.0)
assertIsClose(NINF, NINF)
assertIsClose(NINF, NINF, abs_tol=0.0)
# test_inf_ninf_nan(self): these should never be close (following IEEE 754 rules for equality)
not_close_examples = [(NAN, NAN),
(NAN, 1e-100),
(1e-100, NAN),
(INF, NAN),
(NAN, INF),
(INF, NINF),
(INF, 1.0),
(1.0, INF),
(INF, 1e308),
(1e308, INF)]
# use largest reasonable tolerance
assertAllNotClose(not_close_examples, abs_tol=0.999999999999999)
# test_zero_tolerance: test with zero tolerance
zero_tolerance_close_examples = [(1.0, 1.0),
(-3.4, -3.4),
(-1e-300, -1e-300)]
assertAllClose(zero_tolerance_close_examples, rel_tol=0.0)
zero_tolerance_not_close_examples = [(1.0, 1.000000000000001),
(0.99999999999999, 1.0),
(1.0e200, .999999999999999e200)]
assertAllNotClose(zero_tolerance_not_close_examples, rel_tol=0.0)
# test_asymmetry: test the asymmetry example from PEP 485
assertAllClose([(9, 10), (10, 9)], rel_tol=0.1)
# test_integers: test with integer values
integer_examples = [(100000001, 100000000),
(123456789, 123456788)]
assertAllClose(integer_examples, rel_tol=1e-8)
assertAllNotClose(integer_examples, rel_tol=1e-9)
# test_decimals: test with Decimal values
# test_fractions: test with Fraction values
assert math.copysign(1, 42) == 1.0
assert math.copysign(0., 42) == 0.0
assert math.copysign(1., -42) == -1.0
assert math.copysign(3, 0.) == 3.0
assert math.copysign(4., -0.) == -4.0
assert_raises(TypeError, math.copysign)
# copysign should let us distinguish signs of zeros
assert math.copysign(1., 0.) == 1.
assert math.copysign(1., -0.) == -1.
assert math.copysign(INF, 0.) == INF
assert math.copysign(INF, -0.) == NINF
assert math.copysign(NINF, 0.) == INF
assert math.copysign(NINF, -0.) == NINF
# and of infinities
assert math.copysign(1., INF) == 1.
assert math.copysign(1., NINF) == -1.
assert math.copysign(INF, INF) == INF
assert math.copysign(INF, NINF) == NINF
assert math.copysign(NINF, INF) == INF
assert math.copysign(NINF, NINF) == NINF
assert math.isnan(math.copysign(NAN, 1.))
assert math.isnan(math.copysign(NAN, INF))
assert math.isnan(math.copysign(NAN, NINF))
assert math.isnan(math.copysign(NAN, NAN))
# copysign(INF, NAN) may be INF or it may be NINF, since
# we don't know whether the sign bit of NAN is set on any
# given platform.
assert math.isinf(math.copysign(INF, NAN))
# similarly, copysign(2., NAN) could be 2. or -2.
assert abs(math.copysign(2., NAN)) == 2.
assert str(math.frexp(0.0)) == str((+0.0, 0))
assert str(math.frexp(-0.0)) == str((-0.0, 0))
assert math.frexp(1) == (0.5, 1)
assert math.frexp(1.5) == (0.75, 1)
assert_raises(TypeError, lambda: math.frexp(None))
assert str(math.ldexp(+0.0, 0)) == str(0.0)
assert str(math.ldexp(-0.0, 0)) == str(-0.0)
assert math.ldexp(0.5, 1) == 1
assert math.ldexp(0.75, 1) == 1.5
assert_raises(TypeError, lambda: math.ldexp(None, None))
assert math.frexp(INF) == (INF, 0)
assert str(math.frexp(NAN)) == str((NAN, 0))
assert_raises(TypeError, lambda: math.frexp(None))
assert math.gcd(0, 0) == 0
assert math.gcd(1, 0) == 1
assert math.gcd(0, 1) == 1
assert math.gcd(1, 1) == 1
assert math.gcd(-1, 1) == 1
assert math.gcd(1, -1) == 1
assert math.gcd(-1, -1) == 1
assert math.gcd(125, -255) == 5
assert_raises(TypeError, lambda: math.gcd(1.1, 2))
assert math.factorial(0) == 1
assert math.factorial(1) == 1
assert math.factorial(2) == 2
assert math.factorial(3) == 6
assert math.factorial(10) == 3628800
assert math.factorial(20) == 2432902008176640000
assert_raises(ValueError, lambda: math.factorial(-1))
if hasattr(math, 'nextafter'):
try:
assert math.nextafter(4503599627370496.0, -INF) == 4503599627370495.5
assert math.nextafter(4503599627370496.0, INF) == 4503599627370497.0
assert math.nextafter(9223372036854775808.0, 0.0) == 9223372036854774784.0
assert math.nextafter(-9223372036854775808.0, 0.0) == -9223372036854774784.0
assert math.nextafter(4503599627370496, -INF) == 4503599627370495.5
assert math.nextafter(2.0, 2.0) == 2.0
assert math.isnan(math.nextafter(NAN, 1.0))
except NotImplementedError:
# WASM
pass
assert math.modf(1.25) == (0.25, 1.0)
assert math.modf(-1.25) == (-0.25, -1.0)
assert math.modf(2.56) == (0.56, 2.0)
assert math.modf(-2.56) == (-0.56, -2.0)
assert math.modf(1) == (0.0, 1.0)
assert math.modf(INF) == (0.0, INF)
assert math.modf(NINF) == (-0.0, NINF)
modf_nan = math.modf(NAN)
assert math.isnan(modf_nan[0])
assert math.isnan(modf_nan[1])
assert math.fmod(10, 1) == 0.0
assert math.fmod(10, 0.5) == 0.0
assert math.fmod(10, 1.5) == 1.0
assert math.fmod(-10, 1) == -0.0
assert math.fmod(-10, 0.5) == -0.0
assert math.fmod(-10, 1.5) == -1.0
assert math.isnan(math.fmod(NAN, 1.)) == True
assert math.isnan(math.fmod(1., NAN)) == True
assert math.isnan(math.fmod(NAN, NAN)) == True
assert_raises(ValueError, lambda: math.fmod(1., 0.))
assert_raises(ValueError, lambda: math.fmod(INF, 1.))
assert_raises(ValueError, lambda: math.fmod(NINF, 1.))
assert_raises(ValueError, lambda: math.fmod(INF, 0.))
assert math.fmod(3.0, INF) == 3.0
assert math.fmod(-3.0, INF) == -3.0
assert math.fmod(3.0, NINF) == 3.0
assert math.fmod(-3.0, NINF) == -3.0
assert math.fmod(0.0, 3.0) == 0.0
assert math.fmod(0.0, NINF) == 0.0
"""
TODO: math.remainder was added to CPython in 3.7 and RustPython CI runs on 3.6.
So put the tests of math.remainder in a comment for now.
https://github.com/RustPython/RustPython/pull/1589#issuecomment-551424940
"""
# testcases = [
# # Remainders modulo 1, showing the ties-to-even behaviour.
# '-4.0 1 -0.0',
# '-3.8 1 0.8',
# '-3.0 1 -0.0',
# '-2.8 1 -0.8',
# '-2.0 1 -0.0',
# '-1.8 1 0.8',
# '-1.0 1 -0.0',
# '-0.8 1 -0.8',
# '-0.0 1 -0.0',
# ' 0.0 1 0.0',
# ' 0.8 1 0.8',
# ' 1.0 1 0.0',
# ' 1.8 1 -0.8',
# ' 2.0 1 0.0',
# ' 2.8 1 0.8',
# ' 3.0 1 0.0',
# ' 3.8 1 -0.8',
# ' 4.0 1 0.0',
# # Reductions modulo 2*pi
# '0x0.0p+0 0x1.921fb54442d18p+2 0x0.0p+0',
# '0x1.921fb54442d18p+0 0x1.921fb54442d18p+2 0x1.921fb54442d18p+0',
# '0x1.921fb54442d17p+1 0x1.921fb54442d18p+2 0x1.921fb54442d17p+1',
# '0x1.921fb54442d18p+1 0x1.921fb54442d18p+2 0x1.921fb54442d18p+1',
# '0x1.921fb54442d19p+1 0x1.921fb54442d18p+2 -0x1.921fb54442d17p+1',
# '0x1.921fb54442d17p+2 0x1.921fb54442d18p+2 -0x0.0000000000001p+2',
# '0x1.921fb54442d18p+2 0x1.921fb54442d18p+2 0x0p0',
# '0x1.921fb54442d19p+2 0x1.921fb54442d18p+2 0x0.0000000000001p+2',
# '0x1.2d97c7f3321d1p+3 0x1.921fb54442d18p+2 0x1.921fb54442d14p+1',
# '0x1.2d97c7f3321d2p+3 0x1.921fb54442d18p+2 -0x1.921fb54442d18p+1',
# '0x1.2d97c7f3321d3p+3 0x1.921fb54442d18p+2 -0x1.921fb54442d14p+1',
# '0x1.921fb54442d17p+3 0x1.921fb54442d18p+2 -0x0.0000000000001p+3',
# '0x1.921fb54442d18p+3 0x1.921fb54442d18p+2 0x0p0',
# '0x1.921fb54442d19p+3 0x1.921fb54442d18p+2 0x0.0000000000001p+3',
# '0x1.f6a7a2955385dp+3 0x1.921fb54442d18p+2 0x1.921fb54442d14p+1',
# '0x1.f6a7a2955385ep+3 0x1.921fb54442d18p+2 0x1.921fb54442d18p+1',
# '0x1.f6a7a2955385fp+3 0x1.921fb54442d18p+2 -0x1.921fb54442d14p+1',
# '0x1.1475cc9eedf00p+5 0x1.921fb54442d18p+2 0x1.921fb54442d10p+1',
# '0x1.1475cc9eedf01p+5 0x1.921fb54442d18p+2 -0x1.921fb54442d10p+1',
# # Symmetry with respect to signs.
# ' 1 0.c 0.4',
# '-1 0.c -0.4',
# ' 1 -0.c 0.4',
# '-1 -0.c -0.4',
# ' 1.4 0.c -0.4',
# '-1.4 0.c 0.4',
# ' 1.4 -0.c -0.4',
# '-1.4 -0.c 0.4',
# # Huge modulus, to check that the underlying algorithm doesn't
# # rely on 2.0 * modulus being representable.
# '0x1.dp+1023 0x1.4p+1023 0x0.9p+1023',
# '0x1.ep+1023 0x1.4p+1023 -0x0.ap+1023',
# '0x1.fp+1023 0x1.4p+1023 -0x0.9p+1023',
# ]
# for case in testcases:
# x_hex, y_hex, expected_hex = case.split()
# # print(x_hex, y_hex, expected_hex)
# x = float.fromhex(x_hex)
# y = float.fromhex(y_hex)
# expected = float.fromhex(expected_hex)
# actual = math.remainder(x, y)
# # Cheap way of checking that the floats are
# # as identical as we need them to be.
# assert actual.hex() == expected.hex()
# # self.assertEqual(actual.hex(), expected.hex())
# # Test tiny subnormal modulus: there's potential for
# # getting the implementation wrong here (for example,
# # by assuming that modulus/2 is exactly representable).
# tiny = float.fromhex('1p-1074') # min +ve subnormal
# for n in range(-25, 25):
# if n == 0:
# continue
# y = n * tiny
# for m in range(100):
# x = m * tiny
# actual = math.remainder(x, y)
# actual = math.remainder(-x, y) |
py | b40e751c9253968d0d230d15e47a5397e58847a8 | #-*- coding: utf-8 -*-
import os
import time
import socket
import urllib, urlparse
import hashlib
import threading
import Queue
from cStringIO import StringIO
import base64
from defs import *
from protocol import parse_frame, make_frame
from utils import r_select
class Client(object):
def __init__(self, url, proxies=None, allow_fragments=True):
self.url = url
self.uri = urlparse.urlparse(url)
self.scheme = self.uri.scheme.lower()
self.proxies = proxies
self.is_ssl = self.scheme == 'wss'
self.sock = None
self.f = None
self.evt_abort = threading.Event()
self.q_in = Queue.Queue()
self.key = ''
self.status = None
self.headers = Headers()
self.allow_fragments = allow_fragments
def _parse_response(self, resp_header_raw):
lines = resp_header_raw.split(crlf)
h = lines[0].split(' ', 2)
status = (int(h[1]), h[2])
rows = [line.split(':')[:2] for line in lines if ':' in line]
headers = Headers()
for _k, _v in rows:
k = _k.strip().lower()
v = _v.strip()
if k in headers:
v0 = headers[k]
if isinstance(v0, list):
v0.append(v)
else:
headers[k] = [v0, v]
else:
headers[k] = v
return status, headers
def _check_handshake(self, resp_headers):
connection = resp_headers.get('connection', None)
if 'upgrade' not in resp_headers.get_lower_list('connection'):
return False
if 'websocket' not in resp_headers.get_lower_list('upgrade'):
return False
if 'sec-websocket-accept' not in resp_headers:
return False
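# RFC 6455 handshake check: the server must echo base64(SHA-1(client key + GUID)) in
# Sec-WebSocket-Accept; ws_uid is expected to hold that fixed GUID (defined in defs).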
key_hash = '%s%s' % (self.key, ws_uid)
key_hash = base64.b64encode(hashlib.sha1(key_hash).digest())
_accept = resp_headers['sec-websocket-accept']
if key_hash != _accept:
return False
return True
def handshake(self, timeout=20.0):
t0 = time.time()
uri = self.uri
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((uri.hostname, (uri.port or 80)))
except socket.error:
raise WsError(u'failed to connect to host.')
self.sock = sock
origin = '%s://%s' % (uri.scheme, uri.netloc)
key = base64.b64encode(os.urandom(8))
self.key = key
headers = (('host', uri.netloc),
('user-agent', 'hd-cluster/libs/ws.client'),
('connection', 'upgrade'),
('upgrade', 'websocket'),
('origin', origin),
('sec-websocket-version', 13),
('sec-websocket-key', key),)
sock.send('GET %s HTTP/1.1%s' % (uri.path, crlf))
headers_str = crlf.join(['%s: %s' % (k,v) for k,v in headers])
sock.send(headers_str)
sock.send(crlf + crlf)
buff = StringIO()
while not self.evt_abort.is_set():
r = r_select([sock], timeout=0.5)
if not r:
if t0 + timeout < time.time():
return False
continue
data = r[0].recv(1024)
if not data:
continue
buff.write(data)
if crlf + crlf not in buff.getvalue():
continue
resp_raw = buff.getvalue()
resp_header_raw = resp_raw.split(crlf+crlf)[0]
status, resp_headers = self._parse_response(resp_header_raw)
self.status = status
self.resp_headers = resp_headers
if self.status[0] != 101:
raise HTTPError(*self.status)
handshake_ok = self._check_handshake(resp_headers)
if not handshake_ok:
return False
data = resp_raw[len(resp_header_raw + crlf + crlf):]
if data:
try:
parse_frame(data)
except EOF:
pass
self.f = sock.makefile()
return True
def recv(self, timeout=5.0, allow_fragments=None):
if allow_fragments is None:
allow_fragments = self.allow_fragments
_op, _buff = None, None
while not self.evt_abort.is_set():
frame = self._recv_next(timeout=timeout)
if frame:
fin, op, payload = frame
if not allow_fragments:
if fin and not _buff:
return frame
if not fin:
if not _buff:
_op = op
_buff = StringIO()
_buff.write(payload)
if fin:
return fin, _op, _buff.getvalue()
else:
continue
return frame
def _recv_next(self, timeout=5.0):
_op, _buff = None, None
t0 = time.time()
while t0 + timeout >= time.time() and not self.evt_abort.is_set():
if not self.f:
time.sleep(0.1)
continue
r = r_select([self.f], timeout=0.1)
if not r:
continue
f = r[0]
try:
frame = parse_frame(f)
return frame
except (IOError, AttributeError, socket.error, WsIOError):
self.close()
# raise WsCommunicationError()
def __iter__(self):
if not self.f:
return
# while not self.evt_abort.is_set():
# item = self._recv_next()
# if item:
# yield item
for frame in self.recv():
yield frame
def send(self, data, fin=True, op=OP_TEXT, mask=True):
if self.evt_abort.is_set() or not self.f:
raise WsError('websocket was closed.')
sub_f_size = MAX_FRAME_SIZE
size = len(data)
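# Payloads larger than MAX_FRAME_SIZE are fragmented: the first frame carries the real
# opcode, continuation frames use opcode 0, and FIN is set only on the final fragment
# (when ``fin`` is requested).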
if size > sub_f_size:
cur = 0
while True:
part = data[cur: cur + sub_f_size]
if not part:
break
_fin = False
if cur + len(part) >= size:
_fin = fin
_op = op
if cur > 0:
_op = 0
frame = make_frame(_fin, _op, part, mask=mask)
self.f.write(frame)
cur += len(part)
self.f.flush()
# if fin and (size > sub_f_size):
# cur = 0
# while True:
# part = data[cur: cur + sub_f_size]
# if not part:
# break
# _fin = 0
# if cur + len(part) >= size:
# _fin = 1
# _op = op
# if cur > 0:
# _op = 0
# frame = make_frame(_fin, _op, part, mask=mask)
# self.f.write(frame)
# cur += len(part)
# self.f.flush()
else:
frame = make_frame(fin, op, data, mask=mask)
self.f.write(frame)
self.f.flush()
def close(self):
if not self.evt_abort.is_set():
self.evt_abort.set()
if self.f:
self.f.close()
self.f = None
if self.sock:
self.sock.close()
self.sock = None
def __enter__(self):
return self
def __exit__(self, *args, **kargs):
self.close()
def ws_connect(*args, **kargs):
return Client(*args, **kargs)
if __name__ == '__main__':
with ws_connect('ws://50.gz2.yj.hp:8082/ws') as c:
if c.handshake():
print 'handshake: ok'
c.send('{"op":"LIST_NOTES","principal":"anonymous","ticket":"anonymous"}')
for msg in c:
print msg
break
# ws_connect('ws://localhost:10080/hub/notebooks/notebook/2BA4MWGBT/_.ws')
|
py | b40e75c99a2e6c31c3b67a15bebfa1f9f701cca5 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import jmespath
from parameterized import parameterized
from tests.helm_template_generator import render_chart
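# render_chart renders the Helm chart with the supplied values and returns the selected
# templates as parsed YAML documents, which the assertions below query via jmespath.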
class WebserverDeploymentTest(unittest.TestCase):
def test_should_add_host_header_to_liveness_and_readiness_probes(self):
docs = render_chart(
values={
"config": {
"webserver": {"base_url": "https://example.com:21222/mypath/path"},
}
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert {"name": "Host", "value": "example.com"} in jmespath.search(
"spec.template.spec.containers[0].livenessProbe.httpGet.httpHeaders", docs[0]
)
assert {"name": "Host", "value": "example.com"} in jmespath.search(
"spec.template.spec.containers[0].readinessProbe.httpGet.httpHeaders", docs[0]
)
def test_should_add_path_to_liveness_and_readiness_probes(self):
docs = render_chart(
values={
"config": {
"webserver": {"base_url": "https://example.com:21222/mypath/path"},
}
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.httpGet.path", docs[0])
== "/mypath/path/health"
)
assert (
jmespath.search("spec.template.spec.containers[0].readinessProbe.httpGet.path", docs[0])
== "/mypath/path/health"
)
def test_should_not_contain_host_header_if_host_empty_string(self):
docs = render_chart(
values={},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.httpGet.httpHeaders", docs[0])
is None
)
assert (
jmespath.search("spec.template.spec.containers[0].readinessProbe.httpGet.httpHeaders", docs[0])
is None
)
def test_should_not_contain_host_header_if_base_url_not_set(self):
docs = render_chart(
values={
"config": {
"webserver": {"base_url": ""},
}
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.httpGet.httpHeaders", docs[0])
is None
)
assert (
jmespath.search("spec.template.spec.containers[0].readinessProbe.httpGet.httpHeaders", docs[0])
is None
)
def test_should_not_contain_host_header_by_default(self):
docs = render_chart(
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert (
jmespath.search("spec.template.spec.containers[0].livenessProbe.httpGet.httpHeaders", docs[0])
is None
)
assert (
jmespath.search("spec.template.spec.containers[0].readinessProbe.httpGet.httpHeaders", docs[0])
is None
)
def test_should_add_volume_and_volume_mount_when_exist_webserver_config(self):
docs = render_chart(
values={"webserver": {"webserverConfig": "CSRF_ENABLED = True"}},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert {
"name": "webserver-config",
"configMap": {"name": "RELEASE-NAME-webserver-config"},
} in jmespath.search("spec.template.spec.volumes", docs[0])
assert {
"name": "webserver-config",
"mountPath": "/opt/airflow/webserver_config.py",
"subPath": "webserver_config.py",
"readOnly": True,
} in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
def test_should_add_extra_containers(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"webserver": {
"extraContainers": [
{
"name": "test-container",
"image": "test-registry/test-repo:test-tag",
"imagePullPolicy": "Always",
}
],
},
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert "test-container" == jmespath.search("spec.template.spec.containers[-1].name", docs[0])
def test_should_create_valid_affinity_tolerations_and_node_selector(self):
docs = render_chart(
values={
"webserver": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{"key": "foo", "operator": "In", "values": ["true"]},
]
}
]
}
}
},
"tolerations": [
{"key": "dynamic-pods", "operator": "Equal", "value": "true", "effect": "NoSchedule"}
],
"nodeSelector": {"diskType": "ssd"},
}
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert "Deployment" == jmespath.search("kind", docs[0])
assert "foo" == jmespath.search(
"spec.template.spec.affinity.nodeAffinity."
"requiredDuringSchedulingIgnoredDuringExecution."
"nodeSelectorTerms[0]."
"matchExpressions[0]."
"key",
docs[0],
)
assert "ssd" == jmespath.search(
"spec.template.spec.nodeSelector.diskType",
docs[0],
)
assert "dynamic-pods" == jmespath.search(
"spec.template.spec.tolerations[0].key",
docs[0],
)
@parameterized.expand(
[
({"enabled": False}, None),
({"enabled": True}, "RELEASE-NAME-logs"),
({"enabled": True, "existingClaim": "test-claim"}, "test-claim"),
]
)
def test_logs_persistence_adds_volume_and_mount(self, log_persistence_values, expected_claim_name):
docs = render_chart(
values={"logs": {"persistence": log_persistence_values}},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
if expected_claim_name:
assert {
"name": "logs",
"persistentVolumeClaim": {"claimName": expected_claim_name},
} == jmespath.search("spec.template.spec.volumes[1]", docs[0])
assert {
"name": "logs",
"mountPath": "/opt/airflow/logs",
} == jmespath.search("spec.template.spec.containers[0].volumeMounts[1]", docs[0])
else:
assert "logs" not in [v["name"] for v in jmespath.search("spec.template.spec.volumes", docs[0])]
assert "logs" not in [
v["name"] for v in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
]
def test_webserver_resources_are_configurable(self):
docs = render_chart(
values={
"webserver": {
"resources": {
"limits": {"cpu": "200m", 'memory': "128Mi"},
"requests": {"cpu": "300m", 'memory': "169Mi"},
}
},
},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert "128Mi" == jmespath.search("spec.template.spec.containers[0].resources.limits.memory", docs[0])
assert "169Mi" == jmespath.search(
"spec.template.spec.containers[0].resources.requests.memory", docs[0]
)
assert "300m" == jmespath.search("spec.template.spec.containers[0].resources.requests.cpu", docs[0])
def test_webserver_resources_are_not_added_by_default(self):
docs = render_chart(
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources", docs[0]) == {}
@parameterized.expand(
[
("2.0.2", {"type": "RollingUpdate", "rollingUpdate": {"maxSurge": 1, "maxUnavailable": 0}}),
("1.10.14", {"type": "Recreate"}),
("1.9.0", {"type": "Recreate"}),
("2.1.0", {"type": "RollingUpdate", "rollingUpdate": {"maxSurge": 1, "maxUnavailable": 0}}),
],
)
def test_default_update_strategy(self, airflow_version, expected_strategy):
docs = render_chart(
values={"airflowVersion": airflow_version},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert jmespath.search("spec.strategy", docs[0]) == expected_strategy
def test_update_strategy(self):
expected_strategy = {"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": 1}}
docs = render_chart(
values={"webserver": {"strategy": expected_strategy}},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert jmespath.search("spec.strategy", docs[0]) == expected_strategy
def test_no_airflow_local_settings_by_default(self):
docs = render_chart(show_only=["templates/webserver/webserver-deployment.yaml"])
volume_mounts = jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
assert "airflow_local_settings.py" not in str(volume_mounts)
def test_airflow_local_settings(self):
docs = render_chart(
values={"airflowLocalSettings": "# Well hello!"},
show_only=["templates/webserver/webserver-deployment.yaml"],
)
assert {
"name": "config",
"mountPath": "/opt/airflow/config/airflow_local_settings.py",
"subPath": "airflow_local_settings.py",
"readOnly": True,
} in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
class WebserverServiceTest(unittest.TestCase):
def test_default_service(self):
docs = render_chart(
show_only=["templates/webserver/webserver-service.yaml"],
)
assert "RELEASE-NAME-webserver" == jmespath.search("metadata.name", docs[0])
assert jmespath.search("metadata.annotations", docs[0]) is None
assert {"tier": "airflow", "component": "webserver", "release": "RELEASE-NAME"} == jmespath.search(
"spec.selector", docs[0]
)
assert "ClusterIP" == jmespath.search("spec.type", docs[0])
assert {"name": "airflow-ui", "protocol": "TCP", "port": 8080} in jmespath.search(
"spec.ports", docs[0]
)
def test_overrides(self):
docs = render_chart(
values={
"ports": {"airflowUI": 9000},
"webserver": {
"service": {
"type": "LoadBalancer",
"loadBalancerIP": "127.0.0.1",
"annotations": {"foo": "bar"},
}
},
},
show_only=["templates/webserver/webserver-service.yaml"],
)
assert {"foo": "bar"} == jmespath.search("metadata.annotations", docs[0])
assert "LoadBalancer" == jmespath.search("spec.type", docs[0])
assert {"name": "airflow-ui", "protocol": "TCP", "port": 9000} in jmespath.search(
"spec.ports", docs[0]
)
assert "127.0.0.1" == jmespath.search("spec.loadBalancerIP", docs[0])
class WebserverConfigmapTest(unittest.TestCase):
def test_no_webserver_config_configmap_by_default(self):
docs = render_chart(show_only=["templates/configmaps/webserver-configmap.yaml"])
assert 0 == len(docs)
def test_webserver_config_configmap(self):
docs = render_chart(
values={"webserver": {"webserverConfig": "CSRF_ENABLED = True # {{ .Release.Name }}"}},
show_only=["templates/configmaps/webserver-configmap.yaml"],
)
assert "ConfigMap" == docs[0]["kind"]
assert "RELEASE-NAME-webserver-config" == jmespath.search("metadata.name", docs[0])
assert (
"CSRF_ENABLED = True # RELEASE-NAME"
== jmespath.search('data."webserver_config.py"', docs[0]).strip()
)
|
py | b40e75ecfd5f24955df3cbf07fc94b258705cafb | import time
import numpy as np
import pandas as pd
from pandas.util.testing import *
from trtools.compat import StringIO, BytesIO
from trtools.core.api import ColumnPanel
class TestStringIO(BytesIO):
def close(self):
pass
def free(self):
BytesIO.close(self)
class Timer:
"""
Usage:
with Timer() as t:
ret = func(df)
print(t.interval)
"""
runs = []
def __init__(self, name='', verbose=True):
self.name = name
self.verbose = verbose
self.start = None
self.wall_start = None
self.end = None
self.wall_end = None
Timer.runs.append(self)
def clear_runs(self):
Timer.runs = []
def __enter__(self):
self.start = time.clock()
self.wall_start = time.time()
return self
def __exit__(self, *args):
self.end = time.clock()
self.wall_end = time.time()
self.interval = self.end - self.start
self.wall_interval = self.wall_end - self.wall_start
if self.verbose:
print((self.msg))
@property
def msg(self):
msg = "Run {name}: CPU time: {interval} Wall time: {wall_interval}"
return msg.format(name=self.name, interval=_format_time(self.interval),
wall_interval=_format_time(self.wall_interval))
def __str__(self):
return self.msg
def __repr__(self):
if self.start is None:
return "Timer(name={name})".format(**self.__dict__)
msg = "Timer(name={name}, interval={interval},wall_interval={wall_interval})"
return msg.format(**self.__dict__)
def fake_ohlc(N=1000, start="2000/01/01", freq="D"):
"""
Meh, need to make this better behaved
"""
ind = pd.date_range(start, freq=freq, periods=N)
returns = (np.random.random(N) - .5) * .05
geom = (1+returns).cumprod()
open = 100 * geom
close = open + (open * (np.random.random(N) - .5)) * .1
high = np.maximum(open, close) + .01
low = np.minimum(open, close) - .01
vol = 10000 + np.random.random(N) * 10000
df = pd.DataFrame(index=ind)
df['open'] = open
df['high'] = high
df['low'] = low
df['close'] = close
df['vol'] = vol.astype(int)
return df
def assert_columnpanel_equal(left, right):
assert(isinstance(left, ColumnPanel))
assert(isinstance(right, ColumnPanel))
assert left.items == right.items
assert left.index.equals(right.index)
assert len(left.frames) == len(right.frames)
for key, l_frame in left.frames.items():
r_frame = right.frames[key]
assert_frame_equal(l_frame, r_frame)
test_col = left.columns[0]
l_colframe = getattr(left, test_col)
r_colframe = getattr(right, test_col)
assert_frame_equal(l_colframe, r_colframe)
# grabbed from IPython/core/magics/execution.py
def _format_time(timespan, precision=3):
"""Formats the timespan in a human readable form"""
import math
import sys
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append('%s%s' % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
units = ["s", "ms",'us',"ns"] # the save value
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
'\xb5'.encode(sys.stdout.encoding)
units = ["s", "ms",'\xb5s',"ns"]
except:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
ret = "%.*g %s" % (precision, timespan * scaling[order], units[order])
return ret
import unittest
import inspect
def setup_battery(targets, battery):
"""
ind = pd.date_range(start="2000", freq="D", periods=10)
targets = {}
targets['int_series'] = lambda : pd.Series(range(10))
targets['bool_series'] = lambda : pd.Series(np.random.randn(10) > 0, index=ind)
targets['float_series'] = lambda : pd.Series(np.random.randn(10))
targets['string_series'] = lambda : pd.Series(list('asdfqwerzx'), index=ind)
class ShiftBattery(object):
def test_check(self):
obj = self._construct()
if obj.is_time_series:
assert False, "Don't support time series"
setup_battery(targets, ShiftBattery)
"""
# global module scope of the calling function
caller_globals = inspect.stack()[1][0].f_globals
battery_name = battery.__name__
# create a unittest.TestCase subclass for each target
for target, maker in list(targets.items()):
cls_name = "Test" + battery_name + '_' + target
cls = makeClass(cls_name, battery, maker)
caller_globals[cls_name] = cls
def makeClass(cls_name, battery, maker):
cls = type(cls_name, (unittest.TestCase, battery), {})
cls._construct = lambda self: maker()
return cls
|
py | b40e7621f3cc65cb3bfd39d9a77ff7ada673a799 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def getAllElements(self, root1: TreeNode, root2: TreeNode) -> List[int]:
def inorder(root):
return inorder(root.left) + [root.val] + inorder(root.right) if root else []
x=inorder(root1)
y=inorder(root2)
return sorted(x+y)
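# Approach: an inorder traversal of each BST yields its values in sorted order; the two
# lists are then concatenated and re-sorted, which is O((m+n) log(m+n)). A two-pointer
# merge of the already-sorted lists would bring this down to O(m+n).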
|
py | b40e762de24e8bec072280535fd827112f23d898 | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image pre-processing utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
#from tensorflow.contrib.data.python.ops import batching
#from tensorflow.contrib.data.python.ops import interleave_ops
#from tensorflow.contrib.image.python.ops import distort_image_ops
from tensorflow.python.data.experimental import parallel_interleave###
from tensorflow.python.data.experimental import map_and_batch###
from tensorflow.python.layers import utils
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import gfile
import cnn_util
from tensorflow.python.ops import control_flow_ops
def parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields:
image/height: 462
image/width: 581
image/colorspace: 'RGB'
image/channels: 3
image/class/label: 615
image/class/synset: 'n03623198'
image/class/text: 'knee pad'
image/object/bbox/xmin: 0.1
image/object/bbox/xmax: 0.9
image/object/bbox/ymin: 0.2
image/object/bbox/ymax: 0.6
image/object/bbox/label: 615
image/format: 'JPEG'
image/filename: 'ILSVRC2012_val_00041207.JPEG'
image/encoded: <JPEG encoded string>
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
text: Tensor tf.string containing the human-readable label.
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.io.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.io.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.io.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.io.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.io.parse_single_example(serialized=example_serialized, features=feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(a=bbox, perm=[0, 2, 1])
return features['image/encoded'], label, bbox, features['image/class/text']
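# Minimal usage sketch (the TFRecord path below is hypothetical; the records are
# assumed to come from build_image_data.py as described in the docstring):
#
#   dataset = tf.data.TFRecordDataset(['/data/train-00000-of-01024'])
#   dataset = dataset.map(parse_example_proto)
#   image_buffer, label, bbox, text = tf.compat.v1.data.make_one_shot_iterator(
#       dataset).get_next()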
def get_image_resize_method(resize_method, batch_position=0):
"""Get tensorflow resize method.
If resize_method is 'round_robin', return different methods based on batch
position in a round-robin fashion. NOTE: If the batch size is not a multiple
of the number of methods, then the distribution of methods will not be
uniform.
Args:
resize_method: (string) nearest, bilinear, bicubic, area, or round_robin.
batch_position: position of the image in a batch. NOTE: this argument can
be an integer or a tensor
Returns:
one of resize type defined in tf.image.ResizeMethod.
"""
resize_methods_map = {
'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR,
'bilinear': tf.image.ResizeMethod.BILINEAR,
'bicubic': tf.image.ResizeMethod.BICUBIC,
'area': tf.image.ResizeMethod.AREA
}
if resize_method != 'round_robin':
return resize_methods_map[resize_method]
# return a resize method based on batch position in a round-robin fashion.
  # Materialize the dict view so it can be indexed below (required on Python 3).
  resize_methods = list(resize_methods_map.values())
def lookup(index):
return resize_methods[index]
def resize_method_0():
return utils.smart_cond(batch_position % len(resize_methods) == 0,
lambda: lookup(0), resize_method_1)
def resize_method_1():
return utils.smart_cond(batch_position % len(resize_methods) == 1,
lambda: lookup(1), resize_method_2)
def resize_method_2():
return utils.smart_cond(batch_position % len(resize_methods) == 2,
lambda: lookup(2), lambda: lookup(3))
# NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here
# because TF would not be able to construct a finite graph.
return resize_method_0()
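# Example (sketch, assuming the insertion-ordered dict of Python 3.7+):
#   get_image_resize_method('bicubic', 7)       -> ResizeMethod.BICUBIC
#   get_image_resize_method('round_robin', 0)   -> ResizeMethod.NEAREST_NEIGHBOR
#   get_image_resize_method('round_robin', 1)   -> ResizeMethod.BILINEAR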
def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
# with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
# with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
with tf.compat.v1.name_scope(scope or 'decode_jpeg'):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_jpeg(image_buffer, channels=3) # ,
# fancy_upscaling=False,
# dct_method='INTEGER_FAST')
# image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1]; otherwise it will be converted to tf.float32, assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for the
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.compat.v1.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_fraction:
image = tf.image.central_crop(image,
central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize(image, [height, width],
method=tf.image.ResizeMethod.BILINEAR)
image = tf.squeeze(image, [0])
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random.uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
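# Sketch of the call pattern used further down in this file: pick one of
# `num_cases` resize methods at graph-execution time (sizes are illustrative):
#   image = apply_with_random_selector(
#       image, lambda x, m: tf.image.resize(x, [224, 224], m), num_cases=4)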
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.compat.v1.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.compat.v1.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
image_size=tf.shape(input=image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image, height, width, bbox,
batch_position,
fast_mode=True,
scope=None,
add_image_summaries=True):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
  of the image that do not affect the label.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1]; otherwise it will be converted to tf.float32, assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for the
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
batch_position: position of the image in a batch, which affects how images
are distorted and resized. NOTE: this argument can be an integer or a
tensor
scope: Optional scope for op_scope.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.compat.v1.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
if add_image_summaries:
tf.compat.v1.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image,
bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
if add_image_summaries:
tf.compat.v1.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize(x, [height, width],
method),
num_cases=num_resize_cases)
if add_image_summaries:
tf.compat.v1.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 1 or 4 ways to do it.
num_distort_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=num_distort_cases)
if add_image_summaries:
tf.compat.v1.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.subtract(distorted_image, 0.5)
distorted_image = tf.multiply(distorted_image, 2.0)
return distorted_image
# NOTE: this second distort_color definition shadows the earlier one; by the time
# preprocess_for_train runs, its distort_color calls resolve to this batch-position
# variant (the fast_mode argument passed there lands in distort_color_in_yiq here).
def distort_color(image, batch_position=0, distort_color_in_yiq=False,
                  scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
of color ops based on the position of the image in a batch.
Args:
image: float32 Tensor containing single image. Tensor values should be in
range [0, 1].
batch_position: the position of the image in a batch. NOTE: this argument
can be an integer or a tensor
distort_color_in_yiq: distort color of input images in YIQ space.
scope: Optional scope for op_scope.
Returns:
color-distorted image
"""
with tf.compat.v1.name_scope(scope or 'distort_color'):
def distort_fn_0(image=image):
"""Variant 0 of distort function."""
image = tf.image.random_brightness(image, max_delta=32. / 255.)
# if distort_color_in_yiq:
# image = distort_image_ops.random_hsv_in_yiq(
# image, lower_saturation=0.5, upper_saturation=1.5,
# max_delta_hue=0.2 * math.pi)
# else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
return image
def distort_fn_1(image=image):
"""Variant 1 of distort function."""
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
# if distort_color_in_yiq:
# image = distort_image_ops.random_hsv_in_yiq(
# image, lower_saturation=0.5, upper_saturation=1.5,
# max_delta_hue=0.2 * math.pi)
# else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
return image
image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0,
distort_fn_1)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
class RecordInputImagePreprocessor(object):
"""Preprocessor for images with RecordInput format."""
def __init__(self,
height,
width,
batch_size,
num_splits,
dtype,
train,
distortions=False,
resize_method="bilinear",
shift_ratio=0,
summary_verbosity=1,
distort_color_in_yiq=False,
fuse_decode_and_crop=False):
self.height = height
self.width = width
self.batch_size = batch_size
self.num_splits = num_splits
self.dtype = dtype
self.train = train
self.resize_method = resize_method
self.shift_ratio = shift_ratio
self.distortions = distortions
self.distort_color_in_yiq = distort_color_in_yiq
self.fuse_decode_and_crop = fuse_decode_and_crop
if self.batch_size % self.num_splits != 0:
raise ValueError(
('batch_size must be a multiple of num_splits: '
'batch_size %d, num_splits: %d') %
(self.batch_size, self.num_splits))
self.batch_size_per_split = self.batch_size // self.num_splits
self.summary_verbosity = summary_verbosity
def image_preprocess(self, image_buffer, bbox, batch_position):
"""Preprocessing image_buffer as a function of its batch position."""
if self.train:
image_buffer = tf.image.decode_jpeg(
image_buffer, channels=3, dct_method='INTEGER_FAST')
image = preprocess_for_train(image_buffer, self.height, self.width,
bbox,
batch_position)
else:
image = tf.image.decode_jpeg(
image_buffer, channels=3, dct_method='INTEGER_FAST')
image = preprocess_for_eval(image, self.height, self.width)
return image
def parse_and_preprocess(self, value, batch_position):
image_buffer, label_index, bbox, _ = parse_example_proto(value)
image = self.image_preprocess(image_buffer, bbox, batch_position)
return (label_index, image)
def minibatch(self, dataset, subset, use_datasets, cache_data,
shift_ratio=-1):
if shift_ratio < 0:
shift_ratio = self.shift_ratio
with tf.compat.v1.name_scope('batch_processing'):
# Build final results per split.
images = [[] for _ in range(self.num_splits)]
labels = [[] for _ in range(self.num_splits)]
if use_datasets:
glob_pattern = dataset.tf_record_pattern(subset)
file_names = gfile.Glob(glob_pattern)
if not file_names:
raise ValueError(
'Found no files in --data_dir matching: {}'
.format(glob_pattern))
ds = tf.data.TFRecordDataset.list_files(file_names)
ds = ds.apply(
#interleave_ops.parallel_interleave(
parallel_interleave( #
tf.data.TFRecordDataset, cycle_length=10))
if cache_data:
ds = ds.take(1).cache().repeat()
counter = tf.data.Dataset.range(self.batch_size)
counter = counter.repeat()
ds = tf.data.Dataset.zip((ds, counter))
ds = ds.prefetch(buffer_size=self.batch_size)
ds = ds.shuffle(buffer_size=10000)
ds = ds.repeat()
ds = ds.apply(
#batching.map_and_batch(
map_and_batch( ###
map_func=self.parse_and_preprocess,
batch_size=self.batch_size_per_split,
num_parallel_batches=self.num_splits))
ds = ds.prefetch(buffer_size=self.num_splits)
ds_iterator = tf.compat.v1.data.make_one_shot_iterator(ds)
for d in xrange(self.num_splits):
labels[d], images[d] = ds_iterator.get_next()
else:
record_input = data_flow_ops.RecordInput(
file_pattern=dataset.tf_record_pattern(subset),
seed=301,
parallelism=64,
buffer_size=10000,
batch_size=self.batch_size,
shift_ratio=shift_ratio,
name='record_input')
records = record_input.get_yield_op()
records = tf.split(records, self.batch_size, 0)
records = [tf.reshape(record, []) for record in records]
for idx in xrange(self.batch_size):
value = records[idx]
(label, image) = self.parse_and_preprocess(value, idx)
split_index = idx % self.num_splits
labels[split_index].append(label)
images[split_index].append(image)
for split_index in xrange(self.num_splits):
if not use_datasets:
images[split_index] = tf.parallel_stack(
images[split_index])
labels[split_index] = tf.concat(labels[split_index], 0)
images[split_index] = tf.cast(images[split_index], self.dtype)
depth = 3
images[split_index] = tf.reshape(
images[split_index],
shape=[self.batch_size_per_split, self.height, self.width,
depth])
labels[split_index] = tf.reshape(labels[split_index],
[self.batch_size_per_split])
return images, labels
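# Illustrative construction (values are hypothetical; `dataset` is expected to
# expose tf_record_pattern(subset) as in tf_cnn_benchmarks):
#
#   preprocessor = RecordInputImagePreprocessor(
#       height=224, width=224, batch_size=256, num_splits=4,
#       dtype=tf.float32, train=True, distortions=True)
#   images, labels = preprocessor.minibatch(dataset, 'train',
#                                           use_datasets=True, cache_data=False)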
|
py | b40e76672805e7adab1ddcf72861a77b6dd6b506 | #!/usr/bin/env python
#
# CLI tool to enable/disable private tenants
#
# Marco Caimi <[email protected]>
import keystoneclient
# client v2
from keystoneclient.v2_0 import client as keystone_client
from keystoneauth1.identity import v2 as auth_v2
# client v3
from keystoneclient import client as keystone_client_v3
from keystoneauth1.identity import v3 as auth_v3
# keystone exceptions
from keystoneclient.exceptions import AuthorizationFailure, Unauthorized, Forbidden
# keystone session support
from keystoneauth1 import session as keystone_session
import sys, os
from argparse import ArgumentParser
import requests
import json
MANDATORY_ENV_VARS = ['OS_IDENTITY_API_VERSION']
ENV_VARS_V2 = ['OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME', 'OS_AUTH_URL']
ENV_VARS_V3 = ['OS_USERNAME', 'OS_PASSWORD', 'OS_PROJECT_NAME', 'OS_USER_DOMAIN_NAME', 'OS_PROJECT_DOMAIN_NAME', 'OS_AUTH_URL']
def assert_parameters(environment_variables):
for entry in environment_variables:
if not entry in os.environ.keys():
print("Missing environment variable %s. Please load your OpenStack RC File" % entry)
sys.exit(-1)
assert_parameters(MANDATORY_ENV_VARS)
api_version = int(os.environ['OS_IDENTITY_API_VERSION'])
if api_version == 3:
assert_parameters(ENV_VARS_V3)
username = os.environ['OS_USERNAME']
password = os.environ['OS_PASSWORD']
project_name = os.environ['OS_PROJECT_NAME']
user_domain = os.environ['OS_USER_DOMAIN_NAME']
project_domain = os.environ['OS_PROJECT_DOMAIN_NAME']
auth_url = os.environ['OS_AUTH_URL']
api_endpoint = "projects"
else:
assert_parameters(ENV_VARS_V2)
username = os.environ['OS_USERNAME']
password = os.environ['OS_PASSWORD']
project_name = os.environ['OS_TENANT_NAME']
auth_url = os.environ['OS_AUTH_URL']
api_endpoint = "tenants"
# get params
aparser = ArgumentParser(prog="private_tenant_ctl.py", usage="%(prog)s [options] TENANT_ID", description="Tool to enable/disable the private_iaas metadata key in keystone")
aparser.add_argument("-e", "--enable", action='store_true', help="Set the private_iaas key to True for specified tenant")
aparser.add_argument("-d", "--disable", action='store_true', help="Set the private_iaas key to False for specified tenant")
aparser.add_argument("-l", "--list", action='store_true', help="List tenant IDs.")
aparser.add_argument("tenantid", type=str)
opts = aparser.parse_args(args=sys.argv[1:])
try:
# sanity check
    if (not (bool(opts.enable) ^ bool(opts.disable))) ^ opts.list:
        print("Syntax Error: specify exactly one of '--enable' or '--disable', or use '--list' by itself.")
sys.exit(-1)
except:
aparser.print_help()
sys.exit(-1)
try:
if api_version == 2:
print("PRIVATEIAAS: Initializing v2.0 API session")
# create an admin session object
admin_auth = auth_v2.Password(username=username,
password=password,
tenant_name=project_name,
auth_url=auth_url)
admin_session = keystone_session.Session(auth=admin_auth)
else:
print("PRIVATEIAAS: Initializing v3 API session")
admin_auth = auth_v3.Password(username=username,
password=password,
project_name=project_name,
user_domain_name=user_domain,
project_domain_name=project_domain,
auth_url=auth_url)
admin_session = keystone_session.Session(auth=admin_auth)
try:
print("PRIVATEIAAS: Spawning ADMIN CLIENT")
# admin session
if api_version == 2:
keystoneclient = keystone_client.Client(session=admin_session)
else:
keystoneclient = keystone_client_v3.Client(session=admin_session)
except AuthorizationFailure as user_failed_auth_step:
print(user_failed_auth_step.message)
sys.exit(-1)
except Unauthorized as user_unauthorized:
print(user_unauthorized.message)
sys.exit(-1)
except Exception as e: # Catch superclass, so we can intercept every kind of error.
print("Exception caught while calling client.Client(): \n[%s]" % e)
sys.exit(-1)
# tenant manager reference
if hasattr(keystoneclient, "projects"):
tenant_manager = getattr(keystoneclient, "projects")
else:
tenant_manager = getattr(keystoneclient, "tenants")
if (opts.list):
print("Tenant IDs:")
for tid in tenant_manager.list():
print("ID: %s\t NAME: %s" % (tid.id, tid.name))
sys.exit(0)
try:
tid = tenant_manager.get(opts.tenantid)
print("Starting operation on tenant id -> %s [%s]" % (getattr(tid, "name", "undef"), opts.tenantid))
except Forbidden as e:
print("Keystone exception caught: \n[%s]" % e)
sys.exit(-1)
# API request wrapper object
class APIRequest():
def __init__(self, keystone_client_object=None, keystone_session_object=None, tenant=None):
        if keystone_client_object is None or keystone_session_object is None:
            raise Exception("Missing Parameter: keystone_client_object cannot be 'None'")
        if tenant is None:
            raise Exception("Missing Parameter: tenant object cannot be 'None'")
self.keystone_client = keystone_client_object
self.auth_token = keystone_session_object.get_token()
self.tid = tenant
self.request_body_template_v2 = { "tenant":
{ "private_iaas": False,
"enabled": True,
"description": "placeholder",
"id": "placeholder",
"name": "placeholder"
}
}
self.request_body_template_v3 = { "project":
{ "private_iaas": False,
"enabled": True,
"description": "placeholder",
"project_id": "placeholder",
"name": "placeholder"
}
}
if not self.assert_valid():
raise Exception("Auth token invalid!!")
# assert authentication token validity
def assert_valid(self):
return self.keystone_client.tokens.validate(self.auth_token)
# build request header hash
def build_request_header(self):
return { 'Content-Type': 'application/json',
'User-Agent': 'python-keystoneclient',
'X-Auth-Token': self.auth_token,
'Accept': 'application/json' }
# build request body hash
def build_request_body(self, private_tenant=False):
if api_version == 2:
self.request_body_template_v2['tenant']['private_iaas'] = private_tenant
self.request_body_template_v2['tenant']['description'] = self.tid.description
self.request_body_template_v2['tenant']['id'] = self.tid.id
self.request_body_template_v2['tenant']['name'] = self.tid.name
self.request_body_template = self.request_body_template_v2
else:
self.request_body_template_v3['project']['private_iaas'] = private_tenant
self.request_body_template_v3['project']['description'] = self.tid.description
self.request_body_template_v3['project']['project_id'] = self.tid.id
self.request_body_template_v3['project']['name'] = self.tid.name
self.request_body_template = self.request_body_template_v3
return self.request_body_template
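    # Illustrative v3 request body produced above (values are placeholders):
    #   {"project": {"private_iaas": true, "enabled": true,
    #                "description": "...", "project_id": "<tenant id>",
    #                "name": "<tenant name>"}}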
# enable or disable private_iaas property.
# if key is False and switch --enable is true, switch key logical state
private_iaas_key_state = getattr(tid, "private_iaas", False)
# instantiate API Wrapper...
try:
apiwrapper = APIRequest(keystone_client_object=keystoneclient, keystone_session_object=admin_session, tenant=tid)
except Exception as e:
print(e)
sys.exit(-1)
try:
if not(private_iaas_key_state) and opts.enable:
# flip private_iaas key state to true
print("Enabling private_iaas metadata property...")
response_from_api = requests.patch("%s/%s/%s" % (os.environ["OS_AUTH_URL"], api_endpoint, tid.id),
headers=apiwrapper.build_request_header(),
data=json.dumps(apiwrapper.build_request_body(private_tenant=True)))
response_from_api.raise_for_status()
print(response_from_api.text)
pass
#otherwise, if private_iaas key is true and switch --disable is true, switch key state to false
elif private_iaas_key_state and opts.disable:
# flip private_iaas key state to false
print("Disabling private_iaas metadata property...")
response_from_api = requests.patch("%s/%s/%s" % (os.environ["OS_AUTH_URL"], api_endpoint, tid.id),
headers=apiwrapper.build_request_header(),
data=json.dumps(apiwrapper.build_request_body(private_tenant=False)))
response_from_api.raise_for_status()
print(response_from_api.text)
pass
else:
print("Tenant left unchanged.\nTID: %s" % tid)
sys.exit(0)
except Exception as e:
print(e)
sys.exit(-1)
#
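# Example invocations (run after sourcing an OpenStack RC file; the positional
# tenant id argument is required even with --list):
#   python private_tenant_ctl.py --list <any_tenant_id>
#   python private_tenant_ctl.py --enable <tenant_id>
#   python private_tenant_ctl.py --disable <tenant_id>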
|
py | b40e76d5b6bd22bd8c4214a610478f528e9bd871 | import sys
import platform
import timeit
from tests.performance import xml_sax
from tests.performance import xml_dom_minidom
from tests.performance import xml_etree
from tests.performance import ecoxipy_pyxom_output
from tests.performance import ecoxipy_string_output
from tests.performance import ecoxipy_dom_output
from tests.performance import ecoxipy_etree_output
LOREM_IPSUM = u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
if __name__ == '__main__':
if len(sys.argv) not in (4, 5):
print('''\
arguments: <string output> <repetitions> <data_count> [<file path>]
<string output> If this is `true`, a byte string will be created.
<repetitions> Specifies how often the tests should be run by `timeit`.
<data count> Determines the length of the document, a linear increase
of this value yields exponential test document size
increase.
<CSV output path> If this argument is given, the results are written to a
file of this name as a comma-separated table. Otherwise
the results are printed to the console.
''')
sys.exit(1)
else:
string_output = sys.argv[1].lower() == 'true'
repetitions = int(sys.argv[2])
data_count = int(sys.argv[3])
if string_output:
method_postfix = '_string'
else:
method_postfix = ''
create_test_run = lambda module: (
"{0}.create_testdoc{2}(u'Test Page', u'Hello World!', {1}, u'{3}')".format(
module.__name__, data_count, method_postfix, LOREM_IPSUM)
)
create_test_setup = lambda module: (
"import {0}; {0}.create_testdoc{2}(u'startup', u'startup', {1}, u'{3}')".format(
module.__name__, data_count, method_postfix, LOREM_IPSUM)
)
timeit_run = lambda module: timeit.timeit(
create_test_run(module),
setup=create_test_setup(module),
number=repetitions)
sax_time = timeit_run(xml_sax)
dom_time = timeit_run(xml_dom_minidom)
etree_time = timeit_run(xml_etree)
element_out_time = timeit_run(ecoxipy_pyxom_output)
string_out_time = timeit_run(ecoxipy_string_output)
dom_out_time = timeit_run(ecoxipy_dom_output)
etree_out_time = timeit_run(ecoxipy_etree_output)
python_version = platform.python_version()
python_platform = platform.python_implementation()
try:
pypy_version_info = sys.pypy_version_info
except AttributeError:
pass
else:
python_platform = '{} {}.{}.{}'.format(python_platform,
pypy_version_info.major, pypy_version_info.minor,
pypy_version_info.micro)
output_name = 'Bytes' if string_output else 'Native'
if len(sys.argv) < 5:
        min_time = min(sax_time, dom_time, etree_time, element_out_time,
                       string_out_time, dom_out_time, etree_out_time)
        max_time = max(sax_time, dom_time, etree_time, element_out_time,
                       string_out_time, dom_out_time, etree_out_time)
create_percent = lambda t: '| {: >6.3f} secs | {: >6.3f} secs ({: >6.2f} %) |'.format(
t, t-min_time, (t-min_time)/(max_time-min_time)*100)
print('''\
# ECoXiPy Performance Tests
Python: Version {} on {}
Output: {}
Number of repetitions: {}
Number of data elements: {}
## Run Time Results
Minimum: {: >6.3f} secs
Maximum: {: >6.3f} secs
Difference: {: >6.3f} secs
Running Times:
| API | absolute | relative |
|------------------------|-------------|------------------------|
| xml.sax {}
| xml.dom.minidom {}
| xml.etree {}
| ecoxipy.pyxom.output {}
| ecoxipy.string_output {}
| ecoxipy.dom_output {}
| ecoxipy.etree_output {}
\
'''.format(
python_version, python_platform,
output_name, repetitions, data_count,
min_time, max_time, max_time - min_time,
create_percent(sax_time),
create_percent(dom_time),
create_percent(etree_time),
create_percent(element_out_time),
create_percent(string_out_time),
create_percent(dom_out_time),
create_percent(etree_out_time),
))
else:
path = sys.argv[4]
import os.path
if not os.path.isfile(path):
with open(path, 'w') as f:
f.write('Output,Python Platform,Python Version,xml.sax,xml.etree,xml.dom.minidom,ecoxipy.dom_output,ecoxipy.etree_output,ecoxipy.pyxom.output,ecoxipy.string_output,Repetitions,Data Count\n')
with open(path, 'a') as f:
f.write('{},{},{},{},{},{},{},{},{},{},{},{}\n'.format(
output_name, python_platform, python_version,
sax_time, etree_time, dom_time, dom_out_time, etree_out_time,
element_out_time, string_out_time, repetitions, data_count))
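# Example invocations (the script filename below is hypothetical):
#   python perf_tests.py true 1000 10              # print the report to stdout
#   python perf_tests.py false 1000 10 out.csv     # append one CSV row instead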
|
py | b40e7784cfada604034b2ed0084cbf5ba05d1657 | __________________________________________________________________________________________________
sample 152 ms submission
class Solution:
def maxSumAfterPartitioning(self, A: List[int], K: int) -> int:
dp = [0]*len(A)
max_val = 0
for i in range(K):
if max_val < A[i]:
max_val = A[i]
dp[i] = max_val * (i+1)
for i in range(K, len(A)):
max_val=0
for p in range(0, K):
if A[i-p] > max_val:
max_val = A[i-p]
update = dp[i-p-1] + max_val * (p+1)
if update > dp[i]:
dp[i] = update
return dp[-1]
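# Worked example for the DP above (the standard LeetCode 1043 case):
#   A = [1, 15, 7, 9, 2, 5, 10], K = 3
#   dp = [1, 30, 45, 54, 63, 72, 84]   -> answer 84
# dp[i] is the best sum for A[:i+1]; each step tries last-partition lengths
# 1..K ending at i and replaces that partition with its maximum value.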
__________________________________________________________________________________________________
sample 204 ms submission
class Solution:
def helper(self, A, K, i, mem):
if i== len(A):
return
if i < K:
mem.append(max(A[:i+1])*(i+1))
self.helper(A, K, i+1, mem)
else:
best = 0
best_index = None
running = 0
for j in range(i, i-K,-1):
running = max(running,A[j])
r = running*(i-j+1)
l = mem[j-1]
if r+l > best:
best = r+l
best_index = j
mem.append(best)
self.helper(A, K, i+1, mem)
def maxSumAfterPartitioning(self, A: List[int], K: int) -> int:
if len(A) == 0:
return 0
mem = []
self.helper(A, K, 0 ,mem)
return mem[-1]
__________________________________________________________________________________________________
sample 232 ms submission
class Solution:
def maxSumAfterPartitioning(self, A: List[int], K: int) -> int:
dp_array = []
#First part
max_so_far = A[0]
for i in range(K):
max_so_far = max(max_so_far, A[i])
dp_array.append(max_so_far * (i + 1))
#Second part
for i in range(K, len(A)):
best_option = 0
max_so_far = A[i]
for j in range(i - 1, i - K - 1, -1):
this_option = dp_array[j] + (max_so_far * (i - j))
best_option = max(this_option, best_option)
max_so_far = max(max_so_far, A[j])
dp_array.append(best_option)
return dp_array[-1] |
py | b40e77dce21f4786107f4736ba247cbd2990f008 | #!/usr/bin/env python3
import unittest
def parse(s):
lines = [line.strip() for line in s.strip().splitlines()]
res = []
for line in lines:
li = []
for e in line:
if e == '*':
li.append(None)
else:
li.append(int(e))
res.append(li)
return res
def sudoku(s):
    '''Solve the board in place; return True if s was completely filled, False otherwise.'''
def pick_one(s):
for i in range(9):
for j in range(9):
if s[i][j] == None:
return i,j
def is_sudoku_complete(s):
for line in s:
if None in line:
return False
return True
def is_possible(s, i, j, v):
if v in s[i]:
return False
for r in range(9):
if s[r][j] == v:
return False
r_start = (i//3)*3
c_start = (j//3)*3
for r in range(r_start, r_start+3):
for c in range(c_start, c_start+3):
if s[r][c]==v:
return False
return True
    if is_sudoku_complete(s):
        return True
    i,j = pick_one(s)
for v in range(1,10):
if not is_possible(s,i,j,v):
continue
s[i][j] = v
if is_sudoku_complete(s):
return True
if sudoku(s):
return True
s[i][j] = None
return False
class SudokuTest(unittest.TestCase):
def setUp(self):
self.sample = '''
128**547*
**5*8*39*
9*36428**
4**51*68*
78*4*3*59
*36*98**1
**43791*8
*69*2*5**
*178**924'''
self.sample2 = '''
*2*63****
6**4***1*
****5**7*
**39****4
**8***6**
7****35**
*4**2****
*5***8**9
****91*3*
'''
self.sample3 = '''
*********
*********
*********
*********
*********
*********
*********
*********
*********
'''
def test_parse(self):
s = parse(self.sample)
self.assertEqual(len(s), 9)
for i in range(9):
self.assertEqual(len(s[i]), 9)
def test_sudoku(self):
s = parse(self.sample3)
succeed = sudoku(s)
self.assertTrue(succeed)
import pprint
pprint.pprint(s)
for line in s:
self.assertEqual(sum(line), 45)
for col in range(9):
col_sum = 0
for row in range(9):
col_sum += s[row][col]
self.assertEqual(col_sum, 45)
for r in [0,3,6]:
for c in [0,3,6]:
self.assertEqual(sum(s[x][y] for x in [r,r+1,r+2] for y in [c,c+1,c+2]), 45)
if __name__=="__main__":
unittest.main()
|
py | b40e77f3225d433757eb82901a20fa89b677c5d0 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from multiprocessing.pool import ThreadPool
from internal_device_glue import InternalDeviceGlue
from ..abstract_device_api import AbstractDeviceApi
from .base_module_or_device_api import BaseModuleOrDeviceApi
device_object_list = []
class DeviceApi(BaseModuleOrDeviceApi, AbstractDeviceApi):
def __init__(self):
self.glue = InternalDeviceGlue()
self.pool = ThreadPool()
def connect(self, transport, connection_string, ca_certificate):
device_object_list.append(self)
if "cert" in ca_certificate:
cert = ca_certificate["cert"]
else:
cert = None
self.glue.connect(transport, connection_string, cert)
def disconnect(self):
if self in device_object_list:
device_object_list.remove(self)
self.glue.disconnect()
self.glue = None
def enable_c2d(self):
self.glue.enable_c2d()
def wait_for_c2d_message_async(self):
return self.pool.apply_async(self.glue.wait_for_c2d_message)
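# Rough usage sketch (transport name and connection string are placeholders):
#
#   api = DeviceApi()
#   api.connect('mqtt', '<device connection string>', {})
#   api.enable_c2d()
#   result = api.wait_for_c2d_message_async()   # multiprocessing AsyncResult
#   message = result.get(timeout=60)
#   api.disconnect()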
|
py | b40e78a7411fd28e0023e180efab7548ff29633d | from typing import Callable
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions
from pages import waits
from pages.page import Component
from pages.waits import web_element_locator, dynamic_web_element_locator, button_locator
class ConfirmMakeMainModal(Component):
CONFIRM = '#hook_FormButton_button_cover_confirm'
CANCEL = '#hook_Form_PopLayerSetAltGroupAlbumCoverForm a'
def __init__(self, driver, on_confirm: Callable = None, on_cancel: Callable = None):
super().__init__(driver)
self.on_confirm = on_confirm
self.on_cancel = on_cancel
def confirm(self):
self.confirm_button.click()
if self.on_confirm is not None:
self.on_confirm()
def cancel(self):
self.cancel_button.click()
if self.on_cancel is not None:
self.on_cancel()
@property
@button_locator((By.CSS_SELECTOR, CANCEL))
def cancel_button(self):
return self.driver.find_element_by_css_selector(self.CANCEL)
@property
@button_locator((By.CSS_SELECTOR, CONFIRM))
def confirm_button(self):
return self.driver.find_element_by_css_selector(self.CONFIRM)
class ExpandedImageCard(Component):
DESCRIPTION = 'span[data-link-source="photo-desc"]'
@property
@web_element_locator((By.CSS_SELECTOR, DESCRIPTION))
def description(self) -> str:
return self.driver.find_element_by_css_selector(self.DESCRIPTION).text
class ImageCard(Component):
EDIT_TEMPLATE: str = '//div[@id="trigger_{}"]'
DELETE_BUTTON_TEMPLATE: str = '#popup_{} .ic_delete'
MAKE_MAIN_TEMPLATE: str = '#popup_{} .ic_make-main'
EDIT_DESCRIPTION_TEMPLATE: str = '//textarea[@id="descrInp{}"]'
IMAGE_TEMPLATE: str = '#img_{}'
RESTORE_BUTTON_TEMPLATE: str = '#hook_Block_DeleteRestorePhotoMRB{} .photo-sc_i_utility_undo-delete'
CHECK_BUTTON_TEMPLATE: str = '#hook_Block_PhotoCardV2Block{} .selectable-card_ic'
CARD_TEMPLATE: str = 'span.photo-card_cnt #img_{}'
def __init__(self, driver, img_id: str):
super().__init__(driver)
self.id: str = img_id
self.IMAGE = self.IMAGE_TEMPLATE.format(self.id)
self.EDIT_DESCRIPTION: str = self.EDIT_DESCRIPTION_TEMPLATE.format(self.id)
self.EDIT: str = self.EDIT_TEMPLATE.format(self.id)
self.DELETE: str = self.DELETE_BUTTON_TEMPLATE.format(self.id)
self.MAKE_MAIN: str = self.MAKE_MAIN_TEMPLATE.format(self.id)
self.RESTORE: str = self.RESTORE_BUTTON_TEMPLATE.format(self.id)
self.CHECK_BUTTON: str = self.CHECK_BUTTON_TEMPLATE.format(self.id)
self.CARD_TEMPLATE: str = self.CARD_TEMPLATE.format(self.id)
@property
def description(self) -> str:
return self.driver.find_element_by_xpath(self.EDIT_DESCRIPTION).get_attribute('value')
@property
def edit(self) -> WebElement:
return self.driver.find_element_by_xpath(self.EDIT)
@property
def delete_button(self) -> WebElement:
return self.driver.find_element_by_css_selector(self.DELETE)
@description.setter
def description(self, value) -> None:
self.driver.find_element_by_xpath(self.EDIT_DESCRIPTION).send_keys(value)
@property
def image_src(self) -> WebElement:
return self.driver.find_element_by_css_selector(self.IMAGE)
@property
def check_button(self) -> WebElement:
return self.driver.find_element_by_css_selector(self.CHECK_BUTTON)
@property
@dynamic_web_element_locator(lambda self: (By.CSS_SELECTOR, self.RESTORE))
def restore_button(self) -> WebElement:
return self.driver.find_element_by_css_selector(self.RESTORE)
def restore(self) -> None:
restore_button = self.restore_button
if restore_button is None:
return
restore_button.click()
def check(self):
self.check_button.click()
@dynamic_web_element_locator(lambda self: (By.CSS_SELECTOR, self.MAKE_MAIN))
def make_main(self):
self.driver.execute_script('''
document.querySelector(`{}`).click()
'''.format(self.MAKE_MAIN))
return ConfirmMakeMainModal(self.driver)
def delete_image_card(self) -> None:
self.driver.execute_script('''
document.querySelector(`{}`).click()
'''.format(self.DELETE))
waits.wait(self.driver).until(
expected_conditions.presence_of_element_located((By.CSS_SELECTOR, self.RESTORE))
)
def expand(self) -> ExpandedImageCard:
self.image_src.click()
return ExpandedImageCard(self.driver)
def take_position(self, other) -> None:
ActionChains(self.driver) \
.move_to_element(self.image_src) \
.click_and_hold() \
.pause(1) \
.move_to_element(other.image_src) \
.release() \
.perform()
|
py | b40e7928998b9b3e0074126156591a18856ea996 | # Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
from oslo_utils import strutils
import six
from six.moves.urllib import parse
from .._i18n import _
from . import exceptions
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param args: args to be passed to every hook function
:param kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
class BaseManager(HookableMixin):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, client):
"""Initializes BaseManager with `client`.
:param client: instance of BaseClient descendant for HTTP requests
"""
super(BaseManager, self).__init__()
self.client = client
def _list(self, url, response_key=None, obj_class=None, json=None):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
"""
if json:
body = self.client.post(url, json=json).json()
else:
body = self.client.get(url).json()
if obj_class is None:
obj_class = self.resource_class
data = body[response_key] if response_key is not None else body
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key=None):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'. If response_key is None - all response body
will be used.
"""
body = self.client.get(url).json()
data = body[response_key] if response_key is not None else body
return self.resource_class(self, data, loaded=True)
def _head(self, url):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
"""
resp = self.client.head(url)
return resp.status_code == 204
def _post(self, url, json, response_key=None, return_raw=False):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'. If response_key is None - all response body
will be used.
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
"""
body = self.client.post(url, json=json).json()
data = body[response_key] if response_key is not None else body
if return_raw:
return data
return self.resource_class(self, data)
def _put(self, url, json=None, response_key=None):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
"""
resp = self.client.put(url, json=json)
# PUT requests may not return a body
if resp.content:
body = resp.json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, json=None, response_key=None):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
"""
body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
class CrudManager(BaseManager):
"""Base manager class for manipulating entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
used to refer collections in both URL's (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
def build_url(self, base_url=None, **kwargs):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
`key = 'entity'`, the following URL's could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
:param base_url: if provided, the generated URL will be appended to it
"""
url = base_url if base_url is not None else ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = kwargs.get('%s_id' % self.key)
if entity_id is not None:
url += '/%s' % entity_id
return url
def _filter_kwargs(self, kwargs):
"""Drop null values and handle ids."""
for key, ref in six.iteritems(kwargs.copy()):
if ref is None:
kwargs.pop(key)
else:
if isinstance(ref, Resource):
kwargs.pop(key)
kwargs['%s_id' % key] = getid(ref)
return kwargs
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._post(
self.build_url(**kwargs),
{self.key: kwargs},
self.key)
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs),
self.key)
def head(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._head(self.build_url(**kwargs))
def list(self, base_url=None, **kwargs):
"""List the collection.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
def put(self, base_url=None, **kwargs):
"""Update an element.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._put(self.build_url(base_url=base_url, **kwargs))
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._patch(
self.build_url(**kwargs),
{self.key: params},
self.key)
def delete(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._delete(
self.build_url(**kwargs))
def find(self, base_url=None, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
rl = self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
num = len(rl)
if num == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
class Extension(HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
manager_class = None
def __init__(self, name, module):
super(Extension, self).__init__()
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in self.module.__dict__.items():
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
else:
try:
if issubclass(attr_value, BaseManager):
self.manager_class = attr_value
except TypeError:
pass
def __repr__(self):
return "<Extension '%s'>" % self.name
class Resource(object):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = 'name'
def __init__(self, manager, info, loaded=False):
"""Populate and bind to a manager.
:param manager: BaseManager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def __repr__(self):
reprkeys = sorted(k
for k in self.__dict__.keys()
if k[0] != '_' and k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
@property
def human_id(self):
"""Human-readable ID which can be used for bash completion.
"""
if self.HUMAN_ID:
name = getattr(self, self.NAME_ATTR, None)
if name is not None:
return strutils.to_slug(name)
return None
def _add_details(self, info):
for (k, v) in six.iteritems(info):
try:
setattr(self, k, v)
self._info[k] = v
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def get(self):
"""Support for lazy loading details.
        Some clients, such as bearclient, have the option to lazy-load the
        details; those details can be loaded with this function.
"""
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
def __eq__(self, other):
if not isinstance(other, Resource):
return NotImplemented
# two resources of different types are not equal
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
|
py | b40e795aa68defe515406477f5b1774ad40025eb | import weakref
import copy
import logging
import tempfile
import os
import uuid
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
import pytest
from brian2 import (Clock, Network, ms, us, second, BrianObject, defaultclock,
run, stop, NetworkOperation, network_operation,
MagicError, Synapses,
NeuronGroup, StateMonitor, SpikeMonitor,
SpikeGeneratorGroup,
PopulationRateMonitor, MagicNetwork, magic_network,
PoissonGroup, Hz, collect, store, restore, BrianLogger,
start_scope, prefs, profiling_summary, Quantity, TimedArray)
from brian2.core.network import schedule_propagation_offset, scheduling_summary
from brian2.devices.device import (reinit_and_delete, Device, all_devices,
set_device, get_device, reset_device, device,
RuntimeDevice)
from brian2.utils.logger import catch_logs
from brian2.tests.utils import assert_allclose
@pytest.mark.codegen_independent
def test_incorrect_network_use():
"""Test some wrong uses of `Network` and `MagicNetwork`"""
with pytest.raises(TypeError):
Network(name='mynet', anotherkwd='does not exist')
with pytest.raises(TypeError):
Network('not a BrianObject')
net = Network()
with pytest.raises(TypeError):
net.add('not a BrianObject')
with pytest.raises(ValueError):
MagicNetwork()
G = NeuronGroup(10, 'v:1')
net.add(G)
with pytest.raises(TypeError):
net.remove(object())
with pytest.raises(MagicError):
magic_network.add(G)
with pytest.raises(MagicError):
magic_network.remove(G)
@pytest.mark.codegen_independent
def test_network_contains():
"""
Test `Network.__contains__`.
"""
G = NeuronGroup(1, 'v:1', name='mygroup')
net = Network(G)
assert 'mygroup' in net
assert 'neurongroup' not in net
@pytest.mark.codegen_independent
def test_empty_network():
# Check that an empty network functions correctly
net = Network()
net.run(1*second)
class Counter(BrianObject):
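    # Minimal BrianObject used throughout these tests: it counts how many time
    # steps its run() method gets called and exposes a toy state dictionary
    # through get_states/set_states.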
add_to_magic_network = True
def __init__(self, **kwds):
super(Counter, self).__init__(**kwds)
self.count = 0
self.state = {'state': 0}
def get_states(self, *args, **kwds):
return dict(self.state)
def set_states(self, values, *args, **kwds):
for k, v in values.items():
self.state[k] = v
def run(self):
self.count += 1
class CounterWithContained(Counter):
add_to_magic_network = True
def __init__(self, **kwds):
super(CounterWithContained, self).__init__(**kwds)
self.sub_counter = Counter()
self.contained_objects.append(self.sub_counter)
@pytest.mark.codegen_independent
def test_network_single_object():
# Check that a network with a single object functions correctly
x = Counter()
net = Network(x)
net.run(1*ms)
assert_equal(x.count, 10)
@pytest.mark.codegen_independent
def test_network_two_objects():
    # Check that a network with two objects and the same clock functions correctly
x = Counter(order=5)
y = Counter(order=6)
net = Network()
    net.add([x, [y]]) # check that a funky way of adding objects works correctly
net.run(1*ms)
assert_equal(len(net.objects), 2)
assert_equal(x.count, 10)
assert_equal(y.count, 10)
@pytest.mark.codegen_independent
def test_network_from_dict():
# Check that a network from a dictionary works
x = Counter()
y = Counter()
d = dict(a=x, b=y)
net = Network()
net.add(d)
net.run(1*ms)
assert_equal(len(net.objects), 2)
assert_equal(x.count, 10)
assert_equal(y.count, 10)
class NameLister(BrianObject):
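    # Appends its own name to a shared class-level list on every time step, so
    # tests can check clock and scheduling order from the concatenated names.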
add_to_magic_network = True
updates = []
def __init__(self, **kwds):
super(NameLister, self).__init__(**kwds)
def run(self):
NameLister.updates.append(self.name)
@pytest.mark.codegen_independent
def test_network_different_clocks():
NameLister.updates[:] = []
# Check that a network with two different clocks functions correctly
x = NameLister(name='x', dt=.1*ms, order=0)
y = NameLister(name='y', dt=1*ms, order=1)
net = Network(x, y)
net.run(100*second+defaultclock.dt, report='text')
updates = ''.join(NameLister.updates)[2:] # ignore the first time step
assert updates == ('xxxxxxxxxxy'*100000)
@pytest.mark.codegen_independent
def test_network_different_when():
# Check that a network with different when attributes functions correctly
NameLister.updates[:] = []
x = NameLister(name='x', when='start')
y = NameLister(name='y', when='end')
net = Network(x, y)
net.run(0.3*ms)
assert_equal(''.join(NameLister.updates), 'xyxyxy')
@pytest.mark.codegen_independent
def test_network_default_schedule():
net = Network()
assert net.schedule == ['start', 'groups', 'thresholds', 'synapses', 'resets', 'end']
# Set the preference and check that the change is taken into account
prefs.core.network.default_schedule = list(reversed(['start', 'groups', 'thresholds', 'synapses', 'resets', 'end']))
assert net.schedule == list(reversed(['start', 'groups', 'thresholds', 'synapses', 'resets', 'end']))
@pytest.mark.codegen_independent
def test_network_schedule_change():
# Check that a changed schedule is taken into account correctly
NameLister.updates[:] = []
x = NameLister(name='x', when='thresholds')
y = NameLister(name='y', when='resets')
net = Network(x, y)
net.run(0.3*ms)
assert_equal(''.join(NameLister.updates), 'xyxyxy')
NameLister.updates[:] = []
net.schedule = ['start', 'groups', 'synapses', 'resets', 'thresholds', 'end']
net.run(0.3*ms)
assert_equal(''.join(NameLister.updates), 'yxyxyx')
@pytest.mark.codegen_independent
def test_network_before_after_schedule():
# Test that before... and after... slot names can be used
NameLister.updates[:] = []
x = NameLister(name='x', when='before_resets')
y = NameLister(name='y', when='after_thresholds')
net = Network(x, y)
net.schedule = ['thresholds', 'resets', 'end']
net.run(0.3*ms)
assert_equal(''.join(NameLister.updates), 'yxyxyx')
@pytest.mark.codegen_independent
def test_network_custom_slots():
# Check that custom slots can be inserted into the schedule
NameLister.updates[:] = []
x = NameLister(name='x', when='thresholds')
y = NameLister(name='y', when='in_between')
z = NameLister(name='z', when='resets')
net = Network(x, y, z)
net.schedule = ['start', 'groups', 'thresholds', 'in_between', 'synapses', 'resets', 'end']
net.run(0.3*ms)
assert_equal(''.join(NameLister.updates), 'xyzxyzxyz')
@pytest.mark.codegen_independent
def test_network_incorrect_schedule():
# Test that incorrect arguments provided to schedule raise errors
net = Network()
# net.schedule = object()
with pytest.raises(TypeError):
setattr(net, 'schedule', object())
# net.schedule = 1
with pytest.raises(TypeError):
setattr(net, 'schedule', 1)
# net.schedule = {'slot1', 'slot2'}
with pytest.raises(TypeError):
setattr(net, 'schedule', {'slot1', 'slot2'})
# net.schedule = ['slot', 1]
with pytest.raises(TypeError):
setattr(net, 'schedule', ['slot', 1])
# net.schedule = ['start', 'after_start']
with pytest.raises(ValueError):
setattr(net, 'schedule', ['start', 'after_start'])
# net.schedule = ['before_start', 'start']
with pytest.raises(ValueError):
setattr(net, 'schedule', ['before_start', 'start'])
@pytest.mark.codegen_independent
def test_schedule_warning():
previous_device = get_device()
from uuid import uuid4
# TestDevice1 supports arbitrary schedules, TestDevice2 does not
class TestDevice1(Device):
# These functions are needed during the setup of the defaultclock
def get_value(self, var):
return np.array([0.0001])
def add_array(self, var):
pass
def init_with_zeros(self, var, dtype):
pass
def fill_with_array(self, var, arr):
pass
class TestDevice2(TestDevice1):
def __init__(self):
super(TestDevice2, self).__init__()
self.network_schedule = ['start', 'groups', 'synapses',
'thresholds', 'resets', 'end']
# Unique names are important for getting the warnings again for multiple
# runs of the test suite
name1 = f"testdevice_{str(uuid4())}"
name2 = f"testdevice_{str(uuid4())}"
all_devices[name1] = TestDevice1()
all_devices[name2] = TestDevice2()
set_device(name1)
assert schedule_propagation_offset() == 0*ms
net = Network()
assert schedule_propagation_offset(net) == 0*ms
# Any schedule should work
net.schedule = list(reversed(net.schedule))
with catch_logs() as l:
net.run(0*ms)
assert len(l) == 0, 'did not expect a warning'
assert schedule_propagation_offset(net) == defaultclock.dt
set_device(name2)
assert schedule_propagation_offset() == defaultclock.dt
# Using the correct schedule should work
net.schedule = ['start', 'groups', 'synapses', 'thresholds', 'resets', 'end']
with catch_logs() as l:
net.run(0*ms)
assert len(l) == 0, 'did not expect a warning'
assert schedule_propagation_offset(net) == defaultclock.dt
# Using another (e.g. the default) schedule should raise a warning
net.schedule = None
with catch_logs() as l:
net.run(0*ms)
assert len(l) == 1 and l[0][1].endswith('schedule_conflict')
reset_device(previous_device)
@pytest.mark.codegen_independent
def test_scheduling_summary_magic():
basename = f"name{str(uuid.uuid4()).replace('-', '_')}"
group = NeuronGroup(10, 'dv/dt = -v/(10*ms) : 1', threshold='v>1',
reset='v=1', name=basename)
group.run_regularly('v = rand()', dt=defaultclock.dt*10, when='end')
state_mon = StateMonitor(group, 'v', record=True, name=f"{basename}_sm")
inactive_state_mon = StateMonitor(group, 'v', record=True,
name=f"{basename}_sm_ia", when='after_end')
inactive_state_mon.active = False
summary_before = scheduling_summary()
assert [entry.name for entry in summary_before.entries] == [f"{basename}_sm",
f"{basename}_stateupdater",
f"{basename}_spike_thresholder",
f"{basename}_spike_resetter",
f"{basename}_run_regularly",
f"{basename}_sm_ia"]
assert [entry.when for entry in summary_before.entries] == ['start',
'groups',
'thresholds',
'resets',
'end',
'after_end']
assert [entry.dt for entry in summary_before.entries] == [defaultclock.dt,
defaultclock.dt,
defaultclock.dt,
defaultclock.dt,
defaultclock.dt*10,
defaultclock.dt]
assert [entry.active for entry in summary_before.entries] == [True,
True,
True,
True,
True,
False]
assert len(str(summary_before))
assert len(summary_before._repr_html_())
run(defaultclock.dt)
summary_after = scheduling_summary()
assert str(summary_after) == str(summary_before)
assert summary_after._repr_html_() == summary_before._repr_html_()
@pytest.mark.codegen_independent
def test_scheduling_summary():
basename = f"name{str(uuid.uuid4()).replace('-', '_')}"
group = NeuronGroup(10, 'dv/dt = -v/(10*ms) : 1', threshold='v>1',
reset='v=1', name=basename)
group.run_regularly('v = rand()', dt=defaultclock.dt * 10, when='end')
state_mon = StateMonitor(group, 'v', record=True, name=f"{basename}_sm")
inactive_state_mon = StateMonitor(group, 'v', record=True,
name=f"{basename}_sm_ia",
when='after_end')
inactive_state_mon.active = False
@network_operation(name=f"{basename}_net_op", when='before_end')
def foo():
pass
net = Network(group, state_mon, inactive_state_mon, foo)
summary_before = scheduling_summary(net)
assert [entry.name for entry in summary_before.entries] == [f"{basename}_sm",
f"{basename}_stateupdater",
f"{basename}_spike_thresholder",
f"{basename}_spike_resetter",
f"{basename}_net_op",
f"{basename}_run_regularly",
f"{basename}_sm_ia"]
assert [entry.when for entry in summary_before.entries] == ['start',
'groups',
'thresholds',
'resets',
'before_end',
'end',
'after_end']
assert [entry.dt for entry in summary_before.entries] == [defaultclock.dt,
defaultclock.dt,
defaultclock.dt,
defaultclock.dt,
defaultclock.dt,
defaultclock.dt*10,
defaultclock.dt]
assert [entry.active for entry in summary_before.entries] == [True,
True,
True,
True,
True,
True,
False]
assert len(str(summary_before))
assert len(summary_before._repr_html_())
run(defaultclock.dt)
summary_after = scheduling_summary(net)
assert str(summary_after) == str(summary_before)
assert summary_after._repr_html_() == summary_before._repr_html_()
class Preparer(BrianObject):
add_to_magic_network = True
def __init__(self, **kwds):
super(Preparer, self).__init__(**kwds)
self.did_reinit = False
self.did_pre_run = False
self.did_post_run = False
def reinit(self, level=0):
self.did_reinit = True
def before_run(self, namespace=None, level=0):
self.did_pre_run = True
def after_run(self):
self.did_post_run = True
@pytest.mark.codegen_independent
def test_magic_network():
# test that magic network functions correctly
x = Counter()
y = Counter()
run(10*ms)
assert_equal(x.count, 100)
assert_equal(y.count, 100)
assert len(repr(magic_network)) # very basic test...
assert len(str(magic_network)) # very basic test...
class Stopper(BrianObject):
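    # Calls the given stop function once `stoptime` time steps have elapsed.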
add_to_magic_network = True
def __init__(self, stoptime, stopfunc, **kwds):
super(Stopper, self).__init__(**kwds)
self.stoptime = stoptime
self.stopfunc = stopfunc
def run(self):
self.stoptime -= 1
if self.stoptime<=0:
self.stopfunc()
@pytest.mark.codegen_independent
def test_network_stop():
# test that Network.stop and global stop() work correctly
net = Network()
x = Stopper(10, net.stop)
net.add(x)
net.run(10*ms)
assert_equal(defaultclock.t, 1*ms)
x = Stopper(10, stop)
net = Network(x)
net.run(10*ms)
assert_equal(defaultclock.t, 1*ms)
@pytest.mark.codegen_independent
def test_network_operations():
# test NetworkOperation and network_operation
seq = []
def f1():
seq.append('a')
op1 = NetworkOperation(f1, when='start', order=1)
@network_operation
def f2():
seq.append('b')
@network_operation(when='end', order=1)
def f3():
seq.append('c')
# In complex frameworks, network operations might be object methods that
# access some common data
class Container(object):
def __init__(self):
self.g1_data = 'B'
self.g2_data = 'C'
def g1(self):
seq.append(self.g1_data)
def g2(self):
seq.append(self.g2_data)
c = Container()
c_op1 = NetworkOperation(c.g1)
c_op2 = NetworkOperation(c.g2, when='end', order=1)
net = Network(op1, f2, f3, c_op1, c_op2)
net.run(1*ms)
assert_equal(''.join(seq), 'bBacC'*10)
@pytest.mark.codegen_independent
def test_incorrect_network_operations():
# Network operations with more than one argument are not allowed
def func(x, y):
pass
class Container(object):
def func(self, x, y):
pass
c = Container()
with pytest.raises(TypeError):
NetworkOperation(func)
with pytest.raises(TypeError):
NetworkOperation(c.func)
# Incorrect use of @network_operation -- it does not work on an instance
# method
try:
class Container(object):
@network_operation
def func(self):
pass
raise AssertionError("expected a TypeError")
except TypeError:
pass # this is what we expected
@pytest.mark.codegen_independent
def test_network_operations_name():
# test NetworkOperation name input
seq = []
def f1():
seq.append('a')
def f2():
seq.append('b')
def x():
pass
op = NetworkOperation(lambda: x)
assert_equal(op.name, 'networkoperation')
op0 = NetworkOperation(lambda: x, name='named_network')
assert_equal(op0.name, 'named_network')
op1 = NetworkOperation(f1, name='networkoperation_1')
op2 = NetworkOperation(f1, name='networkoperation_3')
op3 = NetworkOperation(f2, name='networkoperation_2')
net = Network(op1, op2, op3)
net.run(1*ms)
assert_equal(''.join(seq), 'aba'*10)
@pytest.mark.codegen_independent
def test_network_active_flag():
# test that the BrianObject.active flag is recognised by Network.run
x = Counter()
y = Counter()
y.active = False
run(1*ms)
assert_equal(x.count, 10)
assert_equal(y.count, 0)
@pytest.mark.standalone_compatible
@pytest.mark.multiple_runs
def test_spikes_after_deactivating():
# Make sure that a spike in the last time step gets cleared. See #1319
always_spike = NeuronGroup(1, '', threshold='True', reset='')
spike_mon = SpikeMonitor(always_spike)
run(defaultclock.dt)
always_spike.active = False
run(defaultclock.dt)
device.build(direct_call=False, **device.build_options)
assert_equal(spike_mon.t[:], [0]*second)
@pytest.mark.codegen_independent
def test_network_t():
# test that Network.t works as expected
x = Counter(dt=1*ms)
y = Counter(dt=2*ms)
net = Network(x, y)
net.run(4*ms)
assert_equal(net.t, 4*ms)
net.run(1*ms)
assert_equal(net.t, 5*ms)
assert_equal(x.count, 5)
assert_equal(y.count, 3)
net.run(0.5*ms) # should only update x
assert_equal(net.t, 5.5*ms)
assert_equal(x.count, 6)
assert_equal(y.count, 3)
net.run(0.5*ms) # shouldn't do anything
assert_equal(net.t, 6*ms)
assert_equal(x.count, 6)
assert_equal(y.count, 3)
net.run(0.5*ms) # should update x and y
assert_equal(net.t, 6.5*ms)
assert_equal(x.count, 7)
assert_equal(y.count, 4)
del x, y, net
# now test with magic run
x = Counter(dt=1*ms)
y = Counter(dt=2*ms)
run(4*ms)
assert_equal(magic_network.t, 4*ms)
assert_equal(x.count, 4)
assert_equal(y.count, 2)
run(4*ms)
assert_equal(magic_network.t, 8*ms)
assert_equal(x.count, 8)
assert_equal(y.count, 4)
run(1*ms)
assert_equal(magic_network.t, 9*ms)
assert_equal(x.count, 9)
assert_equal(y.count, 5)
@pytest.mark.codegen_independent
def test_incorrect_dt_defaultclock():
defaultclock.dt = 0.5*ms
G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
net = Network(G)
net.run(0.5*ms)
defaultclock.dt = 1*ms
with pytest.raises(ValueError):
net.run(0*ms)
@pytest.mark.codegen_independent
def test_incorrect_dt_custom_clock():
clock = Clock(dt=0.5*ms)
G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1', clock=clock)
net = Network(G)
net.run(0.5*ms)
clock.dt = 1*ms
with pytest.raises(ValueError):
net.run(0*ms)
@pytest.mark.codegen_independent
def test_network_remove():
x = Counter()
y = Counter()
net = Network(x, y)
net.remove(y)
net.run(1*ms)
assert_equal(x.count, 10)
assert_equal(y.count, 0)
    # The relevance of this test: when we use weakref.proxy objects in
    # Network.objects, we should be able to add and remove these from
    # the Network just as we can the original objects
# TODO: Does this test make sense now that Network does not store weak
# references by default?
for obj in copy.copy(net.objects):
net.remove(obj)
net.run(1*ms)
assert_equal(x.count, 10)
assert_equal(y.count, 0)
@pytest.mark.codegen_independent
def test_contained_objects():
obj = CounterWithContained()
net = Network(obj)
# The contained object should not be stored explicitly
assert len(net.objects) == 1
# It should be accessible via the network interface, though
assert len(net) == 2
net.run(defaultclock.dt)
# The contained object should be executed during the run
assert obj.count == 1
assert obj.sub_counter.count == 1
# contained objects should be accessible via get_states/set_states
states = net.get_states()
assert len(states) == 2
assert set(states.keys()) == {obj.name, obj.sub_counter.name}
assert set(states[obj.name].keys()) == {'state'}
assert set(states[obj.sub_counter.name].keys()) == {'state'}
net[obj.name].set_states({'state': 1})
net[obj.sub_counter.name].set_states({'state': 2})
net.remove(obj)
assert len(net.objects) == 0
assert len(net) == 0
assert len(net.get_states()) == 0
net.run(defaultclock.dt)
assert obj.count == 1
assert obj.sub_counter.count == 1
class NoninvalidatingCounter(Counter):
add_to_magic_network = True
invalidates_magic_network = False
@pytest.mark.codegen_independent
def test_invalid_magic_network():
x = Counter()
run(1*ms)
assert_equal(x.count, 10)
y = Counter()
try:
run(1*ms)
raise AssertionError("Expected a MagicError")
except MagicError:
pass # this is expected
del x, y
x = Counter()
run(1*ms)
y = NoninvalidatingCounter()
run(1*ms)
assert_equal(x.count, 20)
assert_equal(y.count, 10)
del y
run(1*ms)
assert_equal(x.count, 30)
del x
x = Counter()
run(1*ms)
assert_equal(magic_network.t, 1*ms)
del x
x = Counter()
y = Counter()
run(1*ms)
assert_equal(x.count, 10)
assert_equal(y.count, 10)
@pytest.mark.codegen_independent
def test_multiple_networks_invalid():
x = Counter()
net = Network(x)
net.run(1*ms)
try:
run(1*ms)
raise AssertionError("Expected a RuntimeError")
except RuntimeError:
pass # this is expected
try:
net2 = Network(x)
raise AssertionError("Expected a RuntimeError")
except RuntimeError:
pass # this is expected
@pytest.mark.codegen_independent
def test_magic_weak_reference():
"""
Test that holding a weak reference to an object does not make it get
simulated."""
G1 = NeuronGroup(1, 'v:1')
# this object should not be included
G2 = weakref.ref(NeuronGroup(1, 'v:1'))
with catch_logs(log_level=logging.DEBUG) as l:
run(1*ms)
# Check the debug messages for the number of included objects
magic_objects = [msg[2] for msg in l
if msg[1] == 'brian2.core.magic.magic_objects'][0]
assert '2 objects' in magic_objects, f'Unexpected log message: {magic_objects}'
@pytest.mark.codegen_independent
def test_magic_unused_object():
"""Test that creating unused objects does not affect the magic system."""
def create_group():
# Produce two objects but return only one
G1 = NeuronGroup(1, 'v:1') # no Thresholder or Resetter
G2 = NeuronGroup(1, 'v:1') # This object should be garbage collected
return G1
G = create_group()
with catch_logs(log_level=logging.DEBUG) as l:
run(1*ms)
# Check the debug messages for the number of included objects
magic_objects = [msg[2] for msg in l
if msg[1] == 'brian2.core.magic.magic_objects'][0]
assert '2 objects' in magic_objects, f'Unexpected log message: {magic_objects}'
@pytest.mark.codegen_independent
def test_network_access():
x = Counter(name='counter')
net = Network(x)
assert len(net) == 1
assert len(repr(net)) # very basic test...
assert len(str(net)) # very basic test...
# accessing objects
assert net['counter'] is x
with pytest.raises(TypeError):
net[123]
with pytest.raises(TypeError):
net[1:3]
with pytest.raises(KeyError):
net['non-existing']
objects = [obj for obj in net]
assert set(objects) == set(net.objects)
# deleting objects
del net['counter']
with pytest.raises(TypeError):
net.__delitem__(123)
with pytest.raises(TypeError):
net.__delitem__(slice(1, 3))
with pytest.raises(KeyError):
net.__delitem__('counter')
@pytest.mark.codegen_independent
def test_dependency_check():
def create_net():
G = NeuronGroup(10, 'v: 1', threshold='False')
dependent_objects = [
StateMonitor(G, 'v', record=True),
SpikeMonitor(G),
PopulationRateMonitor(G),
Synapses(G, G, on_pre='v+=1')
]
return dependent_objects
dependent_objects = create_net()
# Trying to simulate the monitors/synapses without the group should fail
for obj in dependent_objects:
with pytest.raises(ValueError):
Network(obj).run(0*ms)
# simulation with a magic network should work when we have an explicit
# reference to one of the objects, but the object should be inactive and
# we should get a warning
assert all(obj.active for obj in dependent_objects)
for obj in dependent_objects: # obj is our explicit reference
with catch_logs() as l:
run(0*ms)
dependency_warnings = [msg[2] for msg in l
if msg[1] == 'brian2.core.magic.dependency_warning']
assert len(dependency_warnings) == 1
assert not obj.active
def test_loop():
"""
Somewhat realistic test with a loop of magic networks
"""
def run_simulation():
G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1',
reset='v=0', threshold='v>1')
G.v = np.linspace(0, 1, 10)
run(1*ms)
# We return potentially problematic references to a VariableView
return G.v
# First run
with catch_logs(log_level=logging.DEBUG) as l:
v = run_simulation()
assert v[0] == 0 and 0 < v[-1] < 1
# Check the debug messages for the number of included objects
magic_objects = [msg[2] for msg in l
if msg[1] == 'brian2.core.magic.magic_objects'][0]
assert '4 objects' in magic_objects
# Second run
with catch_logs(log_level=logging.DEBUG) as l:
v = run_simulation()
assert v[0] == 0 and 0 < v[-1] < 1
# Check the debug messages for the number of included objects
magic_objects = [msg[2] for msg in l
if msg[1] == 'brian2.core.magic.magic_objects'][0]
assert '4 objects' in magic_objects
@pytest.mark.codegen_independent
def test_magic_collect():
"""
Make sure all expected objects are collected in a magic network
"""
P = PoissonGroup(10, rates=100*Hz)
G = NeuronGroup(10, 'v:1', threshold='False')
S = Synapses(G, G, '')
state_mon = StateMonitor(G, 'v', record=True)
spike_mon = SpikeMonitor(G)
rate_mon = PopulationRateMonitor(G)
objects = collect()
assert len(objects) == 6, (f'expected {int(6)} objects, got {len(objects)}')
from contextlib import contextmanager
from io import StringIO, BytesIO
import sys
@contextmanager
def captured_output():
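    """Temporarily redirect stdout and stderr to StringIO buffers so that
    tests can inspect what a run printed."""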
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
@pytest.mark.codegen_independent
def test_progress_report():
"""
Very basic test of progress reporting
"""
G = NeuronGroup(1, '')
net = Network(G)
# No output
with captured_output() as (out, err):
net.run(1*ms, report=None)
    # With report=None there should be no output at all
out, err = out.getvalue(), err.getvalue()
assert len(out) == 0 and len(err) == 0
with captured_output() as (out, err):
net.run(1*ms)
    # The default is not to report, so there should again be no output
out, err = out.getvalue(), err.getvalue()
assert len(out) == 0 and len(err) == 0
# Progress should go to stdout
with captured_output() as (out, err):
net.run(1*ms, report='text')
# There should be at least two lines of output
out, err = out.getvalue(), err.getvalue()
assert len(out.split('\n')) >= 2 and len(err) == 0
with captured_output() as (out, err):
net.run(1*ms, report='stdout')
# There should be at least two lines of output
out, err = out.getvalue(), err.getvalue()
assert len(out.split('\n')) >= 2 and len(err) == 0
# Progress should go to stderr
with captured_output() as (out, err):
net.run(1*ms, report='stderr')
# There should be at least two lines of output
out, err = out.getvalue(), err.getvalue()
assert len(err.split('\n')) >= 2 and len(out) == 0
# Custom function
calls = []
def capture_progress(elapsed, complete, start, duration):
calls.append((elapsed, complete, start, duration))
with captured_output() as (out, err):
net.run(1*ms, report=capture_progress)
out, err = out.getvalue(), err.getvalue()
assert len(err) == 0 and len(out) == 0
# There should be at least a call for the start and the end
assert len(calls) >= 2 and calls[0][1] == 0.0 and calls[-1][1] == 1.0
@pytest.mark.codegen_independent
def test_progress_report_incorrect():
"""
Test wrong use of the report option
"""
G = NeuronGroup(1, '')
net = Network(G)
with pytest.raises(ValueError):
net.run(1*ms, report='unknown')
with pytest.raises(TypeError):
net.run(1*ms, report=object())
@pytest.mark.standalone_compatible
@pytest.mark.multiple_runs
def test_multiple_runs_report_standalone():
group = NeuronGroup(1, 'dv/dt = 1*Hz : 1')
run(1*ms, report='text')
run(1*ms)
device.build(direct_call=False, **device.build_options)
@pytest.mark.standalone_compatible
@pytest.mark.multiple_runs
def test_multiple_runs_report_standalone_2():
group = NeuronGroup(1, 'dv/dt = 1*Hz : 1')
run(1*ms)
run(1*ms, report='text')
device.build(direct_call=False, **device.build_options)
@pytest.mark.standalone_compatible
@pytest.mark.multiple_runs
def test_multiple_runs_report_standalone_3():
group = NeuronGroup(1, 'dv/dt = 1*Hz : 1')
run(1*ms, report='text')
run(1*ms, report='text')
device.build(direct_call=False, **device.build_options)
# This tests a specific limitation of the C++ standalone mode (cannot mix
# multiple report methods)
@pytest.mark.cpp_standalone
@pytest.mark.standalone_only
def test_multiple_runs_report_standalone_incorrect():
set_device('cpp_standalone', build_on_run=False)
group = NeuronGroup(1, 'dv/dt = 1*Hz : 1')
run(1*ms, report='text')
with pytest.raises(NotImplementedError):
run(1*ms, report='stderr')
@pytest.mark.codegen_independent
def test_store_restore():
source = NeuronGroup(10, """dv/dt = rates : 1
rates : Hz""", threshold='v>1', reset='v=0')
source.rates = 'i*100*Hz'
target = NeuronGroup(10, 'v:1')
synapses = Synapses(source, target, model='w:1', on_pre='v+=w')
synapses.connect(j='i')
synapses.w = 'i*1.0'
synapses.delay = 'i*ms'
state_mon = StateMonitor(target, 'v', record=True)
spike_mon = SpikeMonitor(source)
net = Network(source, target, synapses, state_mon, spike_mon)
net.store() # default time slot
net.run(10*ms)
net.store('second')
net.run(10*ms)
v_values = state_mon.v[:, :]
spike_indices, spike_times = spike_mon.it_
net.restore() # Go back to beginning
assert defaultclock.t == 0*ms
assert net.t == 0*ms
net.run(20*ms)
assert_equal(v_values, state_mon.v[:, :])
assert_equal(spike_indices, spike_mon.i[:])
assert_equal(spike_times, spike_mon.t_[:])
# Go back to middle
net.restore('second')
assert defaultclock.t == 10*ms
assert net.t == 10*ms
net.run(10*ms)
assert_equal(v_values, state_mon.v[:, :])
assert_equal(spike_indices, spike_mon.i[:])
assert_equal(spike_times, spike_mon.t_[:])
# Go back again (see github issue #681)
net.restore('second')
assert defaultclock.t == 10 * ms
assert net.t == 10 * ms
@pytest.mark.codegen_independent
def test_store_restore_to_file():
filename = tempfile.mktemp(suffix='state', prefix='brian_test')
source = NeuronGroup(10, """dv/dt = rates : 1
rates : Hz""", threshold='v>1', reset='v=0')
source.rates = 'i*100*Hz'
target = NeuronGroup(10, 'v:1')
synapses = Synapses(source, target, model='w:1', on_pre='v+=w')
synapses.connect(j='i')
synapses.w = 'i*1.0'
synapses.delay = 'i*ms'
state_mon = StateMonitor(target, 'v', record=True)
spike_mon = SpikeMonitor(source)
net = Network(source, target, synapses, state_mon, spike_mon)
net.store(filename=filename) # default time slot
net.run(10*ms)
net.store('second', filename=filename)
net.run(10*ms)
v_values = state_mon.v[:, :]
spike_indices, spike_times = spike_mon.it_
net.restore(filename=filename) # Go back to beginning
assert defaultclock.t == 0*ms
assert net.t == 0*ms
net.run(20*ms)
assert_equal(v_values, state_mon.v[:, :])
assert_equal(spike_indices, spike_mon.i[:])
assert_equal(spike_times, spike_mon.t_[:])
# Go back to middle
net.restore('second', filename=filename)
assert defaultclock.t == 10*ms
assert net.t == 10*ms
net.run(10*ms)
assert_equal(v_values, state_mon.v[:, :])
assert_equal(spike_indices, spike_mon.i[:])
assert_equal(spike_times, spike_mon.t_[:])
try:
os.remove(filename)
except OSError:
pass
@pytest.mark.codegen_independent
def test_store_restore_to_file_new_objects():
# A more realistic test where the objects are completely re-created
filename = tempfile.mktemp(suffix='state', prefix='brian_test')
def create_net():
# Use a bit of a complicated spike and connection pattern with
# heterogeneous delays
        # Note: it is important that all objects have the same name; this would
        # be the case if we were running this in a new process, but to avoid
        # relying on garbage collection we assign explicit names here
source = SpikeGeneratorGroup(5, np.arange(5).repeat(3),
[3, 4, 1, 2, 3, 7, 5, 4, 1, 0, 5, 9, 7, 8, 9]*ms,
name='source')
target = NeuronGroup(10, 'v:1', name='target')
synapses = Synapses(source, target, model='w:1', on_pre='v+=w',
name='synapses')
synapses.connect('j>=i')
synapses.w = 'i*1.0 + j*2.0'
synapses.delay = '(5-i)*ms'
state_mon = StateMonitor(target, 'v', record=True, name='statemonitor')
input_spikes = SpikeMonitor(source, name='input_spikes')
net = Network(source, target, synapses, state_mon, input_spikes)
return net
net = create_net()
net.store(filename=filename) # default time slot
net.run(5*ms)
net.store('second', filename=filename)
net.run(5*ms)
input_spike_indices = np.array(net['input_spikes'].i)
input_spike_times = Quantity(net['input_spikes'].t, copy=True)
v_values_full_sim = Quantity(net['statemonitor'].v[:, :], copy=True)
net = create_net()
net.restore(filename=filename) # Go back to beginning
net.run(10*ms)
assert_equal(input_spike_indices, net['input_spikes'].i)
assert_equal(input_spike_times, net['input_spikes'].t)
assert_equal(v_values_full_sim, net['statemonitor'].v[:, :])
net = create_net()
net.restore('second', filename=filename) # Go back to middle
net.run(5*ms)
assert_equal(input_spike_indices, net['input_spikes'].i)
assert_equal(input_spike_times, net['input_spikes'].t)
assert_equal(v_values_full_sim, net['statemonitor'].v[:, :])
try:
os.remove(filename)
except OSError:
pass
@pytest.mark.codegen_independent
def test_store_restore_to_file_differing_nets():
# Check that the store/restore mechanism is not used with differing
# networks
filename = tempfile.mktemp(suffix='state', prefix='brian_test')
source = SpikeGeneratorGroup(5, [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]*ms,
name='source_1')
mon = SpikeMonitor(source, name='monitor')
net = Network(source, mon)
net.store(filename=filename)
source_2 = SpikeGeneratorGroup(5, [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]*ms,
name='source_2')
mon = SpikeMonitor(source_2, name='monitor')
net = Network(source_2, mon)
with pytest.raises(KeyError):
net.restore(filename=filename)
net = Network(source) # Without the monitor
with pytest.raises(KeyError):
net.restore(filename=filename)
@pytest.mark.codegen_independent
def test_store_restore_magic():
source = NeuronGroup(10, """dv/dt = rates : 1
rates : Hz""", threshold='v>1', reset='v=0')
source.rates = 'i*100*Hz'
target = NeuronGroup(10, 'v:1')
synapses = Synapses(source, target, model='w:1', on_pre='v+=w')
synapses.connect(j='i')
synapses.w = 'i*1.0'
synapses.delay = 'i*ms'
state_mon = StateMonitor(target, 'v', record=True)
spike_mon = SpikeMonitor(source)
store() # default time slot
run(10*ms)
store('second')
run(10*ms)
v_values = state_mon.v[:, :]
spike_indices, spike_times = spike_mon.it_
restore() # Go back to beginning
assert magic_network.t == 0*ms
run(20*ms)
assert defaultclock.t == 20*ms
assert_equal(v_values, state_mon.v[:, :])
assert_equal(spike_indices, spike_mon.i[:])
assert_equal(spike_times, spike_mon.t_[:])
# Go back to middle
restore('second')
assert magic_network.t == 10*ms
run(10*ms)
assert defaultclock.t == 20*ms
assert_equal(v_values, state_mon.v[:, :])
assert_equal(spike_indices, spike_mon.i[:])
assert_equal(spike_times, spike_mon.t_[:])
@pytest.mark.codegen_independent
def test_store_restore_magic_to_file():
filename = tempfile.mktemp(suffix='state', prefix='brian_test')
source = NeuronGroup(10, """dv/dt = rates : 1
rates : Hz""", threshold='v>1', reset='v=0')
source.rates = 'i*100*Hz'
target = NeuronGroup(10, 'v:1')
synapses = Synapses(source, target, model='w:1', on_pre='v+=w')
synapses.connect(j='i')
synapses.w = 'i*1.0'
synapses.delay = 'i*ms'
state_mon = StateMonitor(target, 'v', record=True)
spike_mon = SpikeMonitor(source)
store(filename=filename) # default time slot
run(10*ms)
store('second', filename=filename)
run(10*ms)
v_values = state_mon.v[:, :]
spike_indices, spike_times = spike_mon.it_
restore(filename=filename) # Go back to beginning
assert magic_network.t == 0*ms
run(20*ms)
assert defaultclock.t == 20*ms
assert_equal(v_values, state_mon.v[:, :])
assert_equal(spike_indices, spike_mon.i[:])
assert_equal(spike_times, spike_mon.t_[:])
# Go back to middle
restore('second', filename=filename)
assert magic_network.t == 10*ms
run(10*ms)
assert defaultclock.t == 20*ms
assert_equal(v_values, state_mon.v[:, :])
assert_equal(spike_indices, spike_mon.i[:])
assert_equal(spike_times, spike_mon.t_[:])
try:
os.remove(filename)
except OSError:
pass
@pytest.mark.codegen_independent
def test_store_restore_spikequeue():
# See github issue #938
source = SpikeGeneratorGroup(1, [0], [0] * ms)
target = NeuronGroup(1, 'v : 1')
conn = Synapses(source, target, on_pre='v += 1', delay=2 * defaultclock.dt)
conn.connect()
run(defaultclock.dt) # Spike is not yet delivered
store()
run(2 * defaultclock.dt)
assert target.v[0] == 1
restore()
run(2 * defaultclock.dt)
assert target.v[0] == 1
restore()
run(2 * defaultclock.dt)
assert target.v[0] == 1
@pytest.mark.skipif(not isinstance(get_device(), RuntimeDevice),
reason='Getting/setting random number state only supported '
'for runtime device.')
def test_restore_with_random_state():
group = NeuronGroup(10, 'dv/dt = -v/(10*ms) + (10*ms)**-0.5*xi : 1',
method='euler')
group.v = 'rand()'
mon = StateMonitor(group, 'v', record=True)
store()
run(10*ms)
old_v = np.array(group.v)
restore() # Random state is not restored
run(10 * ms)
assert np.var(old_v - group.v) > 0 # very basic test for difference
restore(restore_random_state=True) # Random state is restored
run(10 * ms)
assert_equal(old_v, group.v)
@pytest.mark.codegen_independent
def test_store_restore_restore_synapses():
group = NeuronGroup(10, 'x : 1', threshold='False', reset='', name='group')
synapses = Synapses(group, group, on_pre='x += 1', name='synapses')
synapses.connect(i=[1, 3, 5], j=[6, 4, 2])
net = Network(group, synapses)
tmp_file = tempfile.mktemp()
net.store(filename=tmp_file)
# clear up
del net
del synapses
del group
# Recreate the network without connecting the synapses
group = NeuronGroup(10, 'x: 1', threshold='False', reset='', name='group')
synapses = Synapses(group, group, '', on_pre='x += 1', name='synapses')
net = Network(group, synapses)
try:
net.restore(filename=tmp_file)
assert len(synapses) == 3
assert_array_equal(synapses.i, [1, 3, 5])
assert_array_equal(synapses.j, [6, 4, 2])
        # Running the network should not raise an error, despite the lack
        # of a Synapses.connect call
net.run(0*ms)
finally:
os.remove(tmp_file)
@pytest.mark.codegen_independent
def test_defaultclock_dt_changes():
BrianLogger.suppress_name('resolution_conflict')
for dt in [0.1*ms, 0.01*ms, 0.5*ms, 1*ms, 3.3*ms]:
defaultclock.dt = dt
G = NeuronGroup(1, 'v:1')
mon = StateMonitor(G, 'v', record=True)
net = Network(G, mon)
net.run(2*dt)
assert_equal(mon.t[:], [0, dt/ms]*ms)
@pytest.mark.standalone_compatible
@pytest.mark.multiple_runs
def test_dt_changes_between_runs():
defaultclock.dt = 0.1*ms
G = NeuronGroup(1, 'v:1')
mon = StateMonitor(G, 'v', record=True)
run(.5*ms)
defaultclock.dt = .5*ms
run(.5*ms)
defaultclock.dt = 0.1*ms
run(.5*ms)
device.build(direct_call=False, **device.build_options)
assert len(mon.t[:]) == 5 + 1 + 5
assert_allclose(mon.t[:],
[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 1., 1.1, 1.2, 1.3, 1.4]*ms)
@pytest.mark.codegen_independent
def test_dt_restore():
defaultclock.dt = 0.5*ms
G = NeuronGroup(1, 'dv/dt = -v/(10*ms) : 1')
mon = StateMonitor(G, 'v', record=True)
net = Network(G, mon)
net.store()
net.run(1*ms)
assert_equal(mon.t[:], [0, 0.5]*ms)
defaultclock.dt = 1*ms
net.run(2*ms)
assert_equal(mon.t[:], [0, 0.5, 1, 2]*ms)
net.restore()
assert_equal(mon.t[:], [])
net.run(1*ms)
assert defaultclock.dt == 0.5*ms
assert_equal(mon.t[:], [0, 0.5]*ms)
@pytest.mark.codegen_independent
def test_continuation():
defaultclock.dt = 1*ms
G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
G.v = 1
mon = StateMonitor(G, 'v', record=True)
net = Network(G, mon)
net.run(2*ms)
# Run the same simulation but with two runs that use sub-dt run times
G2 = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
G2.v = 1
mon2 = StateMonitor(G2, 'v', record=True)
net2 = Network(G2, mon2)
net2.run(0.5*ms)
net2.run(1.5*ms)
assert_equal(mon.t[:], mon2.t[:])
assert_equal(mon.v[:], mon2.v[:])
@pytest.mark.codegen_independent
def test_get_set_states():
G = NeuronGroup(10, 'v:1', name='a_neurongroup')
G.v = 'i'
net = Network(G)
states1 = net.get_states()
states2 = magic_network.get_states()
states3 = net.get_states(read_only_variables=False)
assert set(states1.keys()) == set(states2.keys()) == set(states3.keys()) == {'a_neurongroup'}
assert (set(states1['a_neurongroup'].keys()) ==
set(states2['a_neurongroup'].keys()) ==
{'i', 'dt', 'N', 't', 'v', 't_in_timesteps'})
assert set(states3['a_neurongroup']) == {'v'}
# Try re-setting the state
G.v = 0
net.set_states(states3)
assert_equal(G.v, np.arange(10))
@pytest.mark.codegen_independent
def test_multiple_runs_defaultclock():
defaultclock.dt = 0.1*ms
G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
net = Network(G)
net.run(0.5*ms)
# The new dt is not compatible with the previous time but it should not
# raise an error because we start a new simulation at time 0
defaultclock.dt = 1*ms
G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
net = Network(G)
net.run(1*ms)
@pytest.mark.codegen_independent
def test_multiple_runs_defaultclock_incorrect():
defaultclock.dt = 0.1*ms
G = NeuronGroup(1, 'dv/dt = -v / (10*ms) : 1')
net = Network(G)
net.run(0.5*ms)
# The new dt is not compatible with the previous time since we cannot
# continue at 0.5ms with a dt of 1ms
defaultclock.dt = 1*ms
with pytest.raises(ValueError):
net.run(1*ms)
@pytest.mark.standalone_compatible
def test_profile():
G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1', threshold='v>1',
reset='v=0', name='profile_test')
G.v = 1.1
net = Network(G)
net.run(1*ms, profile=True)
    # There should be four simulated CodeObjects: one for the group and one
    # each for state update, threshold and reset (+ 1 for the clock)
info = net.profiling_info
info_dict = dict(info)
# Standalone does not include the NeuronGroup object (which is not doing
# anything during the run) in the profiling information, while runtime
# does
assert 3 <= len(info) <= 4
assert len(info) == 3 or 'profile_test' in info_dict
for obj in ['stateupdater', 'spike_thresholder', 'spike_resetter']:
name = f"profile_test_{obj}"
assert name in info_dict or f"{name}_codeobject" in info_dict
assert all([t>=0*second for _, t in info])
@pytest.mark.standalone_compatible
def test_profile_off():
G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1', threshold='v>1',
reset='v=0', name='profile_test')
net = Network(G)
net.run(1*ms, profile=False)
with pytest.raises(ValueError):
profiling_summary(net)
@pytest.mark.codegen_independent
def test_profile_ipython_html():
G = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1', threshold='v>1',
reset='v=0', name='profile_test')
G.v = 1.1
net = Network(G)
net.run(1*ms, profile=True)
summary = profiling_summary(net)
assert len(summary._repr_html_())
@pytest.mark.codegen_independent
def test_magic_scope():
"""
Check that `start_scope` works as expected.
"""
G1 = NeuronGroup(1, 'v:1', name='G1')
G2 = NeuronGroup(1, 'v:1', name='G2')
objs1 = {obj.name for obj in collect()}
start_scope()
G3 = NeuronGroup(1, 'v:1', name='G3')
G4 = NeuronGroup(1, 'v:1', name='G4')
objs2 = {obj.name for obj in collect()}
assert objs1=={'G1', 'G2'}
assert objs2=={'G3', 'G4'}
@pytest.mark.standalone_compatible
def test_runtime_rounding():
# Test that runtime and standalone round in the same way, see github issue
# #695 for details
defaultclock.dt = 20.000000000020002 * us
G = NeuronGroup(1, 'v:1')
mon = StateMonitor(G, 'v', record=True)
run(defaultclock.dt * 250)
assert len(mon.t) == 250
@pytest.mark.codegen_independent
def test_small_runs():
# One long run and multiple small runs should give the same results
group_1 = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1')
group_1.v = '(i + 1) / N'
mon_1 = StateMonitor(group_1, 'v', record=True)
net_1 = Network(group_1, mon_1)
net_1.run(1*second)
group_2 = NeuronGroup(10, 'dv/dt = -v / (10*ms) : 1')
group_2.v = '(i + 1) / N'
mon_2 = StateMonitor(group_2, 'v', record=True)
net_2 = Network(group_2, mon_2)
runtime = 1*ms
while True:
runtime *= 3
runtime = min([runtime, 1*second - net_2.t])
net_2.run(runtime)
if net_2.t >= 1*second:
break
assert_allclose(mon_1.t_[:], mon_2.t_[:])
assert_allclose(mon_1.v_[:], mon_2.v_[:])
@pytest.mark.codegen_independent
def test_both_equal():
    # check that all objects added by Network.add() also have their contained_objects added to the Network
tau = 10*ms
diff_eqn="""dv/dt = (1-v)/tau : 1"""
chg_code="""v = 2*v"""
Ng = NeuronGroup(1, diff_eqn, method='exact')
M1 = StateMonitor(Ng, 'v', record=True)
netObj = Network(Ng, M1)
Ng.run_regularly(chg_code, dt=20*ms)
netObj.run(100*ms)
start_scope()
Ng = NeuronGroup(1, diff_eqn, method='exact')
M2 = StateMonitor(Ng, 'v', record=True)
Ng.run_regularly(chg_code, dt=20*ms)
run(100*ms)
assert (M1.v == M2.v).all()
@pytest.mark.codegen_independent
def test_long_run_dt_change():
# Check that the dt check is not too restrictive, see issue #730 for details
group = NeuronGroup(1, '') # does nothing...
defaultclock.dt = 0.1*ms
run(100*second)
# print profiling_summary()
defaultclock.dt = 0.01*ms
run(1*second)
@pytest.mark.standalone_compatible
@pytest.mark.multiple_runs
def test_multiple_runs_constant_change():
const_v = 1
group = NeuronGroup(1, 'v = const_v : 1')
mon = StateMonitor(group, 'v', record=0)
run(defaultclock.dt)
const_v = 2
run(defaultclock.dt)
device.build(direct_call=False, **device.build_options)
assert_equal(mon.v[0], [1, 2])
@pytest.mark.standalone_compatible
@pytest.mark.multiple_runs
def test_multiple_runs_function_change():
inp = TimedArray([1, 2], dt=defaultclock.dt)
group = NeuronGroup(1, 'v = inp(t) : 1')
mon = StateMonitor(group, 'v', record=0)
run(2*defaultclock.dt)
inp = TimedArray([0, 0, 3, 4], dt=defaultclock.dt)
run(2*defaultclock.dt)
device.build(direct_call=False, **device.build_options)
assert_equal(mon.v[0], [1, 2, 3, 4])
if __name__ == '__main__':
BrianLogger.log_level_warn()
for t in [
test_incorrect_network_use,
test_network_contains,
test_empty_network,
test_network_single_object,
test_network_two_objects,
test_network_from_dict,
test_network_different_clocks,
test_network_different_when,
test_network_default_schedule,
test_network_schedule_change,
test_network_before_after_schedule,
test_network_custom_slots,
test_network_incorrect_schedule,
test_schedule_warning,
test_scheduling_summary_magic,
test_scheduling_summary,
test_magic_network,
test_network_stop,
test_network_operations,
test_incorrect_network_operations,
test_network_operations_name,
test_network_active_flag,
test_network_t,
test_incorrect_dt_defaultclock,
test_incorrect_dt_custom_clock,
test_network_remove,
test_magic_weak_reference,
test_magic_unused_object,
test_invalid_magic_network,
test_multiple_networks_invalid,
test_network_access,
test_loop,
test_magic_collect,
test_progress_report,
test_progress_report_incorrect,
test_multiple_runs_report_standalone,
test_multiple_runs_report_standalone_2,
test_multiple_runs_report_standalone_3,
test_multiple_runs_report_standalone_incorrect,
test_store_restore,
test_store_restore_to_file,
test_store_restore_to_file_new_objects,
test_store_restore_to_file_differing_nets,
test_store_restore_magic,
test_store_restore_magic_to_file,
test_store_restore_spikequeue,
test_store_restore_restore_synapses,
test_defaultclock_dt_changes,
test_dt_changes_between_runs,
test_dt_restore,
test_continuation,
test_get_set_states,
test_multiple_runs_defaultclock,
test_multiple_runs_defaultclock_incorrect,
test_profile,
test_profile_off,
test_profile_ipython_html,
test_magic_scope,
test_runtime_rounding,
test_small_runs,
test_both_equal,
test_long_run_dt_change,
test_multiple_runs_constant_change,
test_multiple_runs_function_change
]:
set_device(all_devices['runtime'])
t()
reinit_and_delete()
|
py | b40e7ba885878c6d700bf7303c5411864301ae92 | from django.forms import CharField, ComboField, EmailField, ValidationError
from django.test import SimpleTestCase
class ComboFieldTest(SimpleTestCase):
def test_combofield_1(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()])
self.assertEqual('[email protected]', f.clean('[email protected]'))
with self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'"):
f.clean('[email protected]')
with self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'"):
f.clean('not an email')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
def test_combofield_2(self):
f = ComboField(fields=[CharField(max_length=20), EmailField()], required=False)
self.assertEqual('[email protected]', f.clean('[email protected]'))
with self.assertRaisesMessage(ValidationError, "'Ensure this value has at most 20 characters (it has 28).'"):
f.clean('[email protected]')
with self.assertRaisesMessage(ValidationError, "'Enter a valid email address.'"):
f.clean('not an email')
self.assertEqual('', f.clean(''))
self.assertEqual('', f.clean(None))
|
py | b40e7c6aa1fbebc9adaca85bbc66deaa31639fd8 | import cv2
import numpy as np
class CardDetector():
def __init__(self, background_thresh = 100, card_min_area_ratio = 500, card_max_area_ratio = 4, card_height_to_width_ratio = 1.55):
self.background_thresh = background_thresh
self.card_min_area_ratio = card_min_area_ratio
self.card_max_area_ratio = card_max_area_ratio
self.card_height_to_width_ratio = card_height_to_width_ratio
def __call__(self):
pass
def detect_cards(self, img):
"""Returns contours for the detected cards"""
_, contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
img_area = img.shape[0] * img.shape[1]
        card_contours = []
        for c in contours:
            area = cv2.contourArea(c)
            if area < img_area // self.card_min_area_ratio or area > img_area // self.card_max_area_ratio:
                continue
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.015 * peri, True)
            if len(approx) == 4:
                card_contours.append(approx)
        return card_contours
def project_cards(self, img, contours):
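        # Warps each detected quadrilateral to an upright card image of fixed
        # size via a homography; the point order is rotated so that the longer
        # card edge maps onto the destination height.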
card_img_width = img.shape[1] // 2
card_img_height = int(card_img_width * self.card_height_to_width_ratio)
        # flattened card corner points in anticlockwise order, top-left corner first
card_projected_contour = np.zeros_like(contours[0])
card_projected_contour[1, :] = np.array([0, card_img_height])
card_projected_contour[2, :] = np.array([card_img_width, card_img_height])
card_projected_contour[3, :] = np.array([card_img_width, 0])
card_img_list = []
homographies = []
for c in contours:
first_edge_length = np.linalg.norm(c[0, :, :] - c[1, :, :])
second_edge_length = np.linalg.norm(c[1, :, :] - c[2, :, :])
if second_edge_length > first_edge_length:
c = np.roll(c, 1, axis = 0)
h, _ = cv2.findHomography(c, card_projected_contour)
card_img = cv2.warpPerspective(img, h, (card_img_width, card_img_height))
card_img_list.append(card_img)
homographies.append(h)
return card_img_list, homographies
def preprocess_image(self, image):
"""Returns a grayed, blurred, and adaptively thresholded camera image."""
width, height, _ = image.shape
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
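        # Estimate the background brightness from the darker pixels and set the
        # binarisation threshold a fixed offset above it.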
bkg_level = np.percentile(blur, 25)
thresh_level = bkg_level + self.background_thresh
_, thresh = cv2.threshold(blur, thresh_level, 255, cv2.THRESH_BINARY)
return thresh
|
py | b40e7cc6589f1307ff08ff9cf369d7c633cb4cd2 | from __future__ import unicode_literals
import re, frappe
def resolve_redirect(path):
'''
Resolve redirects from hooks
Example:
		website_redirects = [
# absolute location
{"source": "/from", "target": "https://mysite/from"},
# relative location
{"source": "/from", "target": "/main"},
# use regex
{"source": "/from/(.*)", "target": "/main/\1"}
]
'''
redirects = frappe.get_hooks('website_redirects')
if not redirects: return
redirect_to = frappe.cache().hget('website_redirects', path)
if redirect_to:
frappe.flags.redirect_location = redirect_to
raise frappe.Redirect
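	# No cached entry: try each configured rule in turn; the first pattern
	# matching the full path wins, and the rewritten target is cached before
	# redirecting.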
for rule in redirects:
pattern = rule['source'].strip('/ ') + '$'
if re.match(pattern, path):
redirect_to = re.sub(pattern, rule['target'].replace('\\', '\\\\'), path)
frappe.flags.redirect_location = redirect_to
frappe.cache().hset('website_redirects', path, redirect_to)
raise frappe.Redirect
|
py | b40e7cfe9b1b93895e1c58349faa7be27e0af84e | """Minimal setup script to appease buildout for Melange.
"""
import os
import re
from setuptools import setup, find_packages
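# Read the release version from app/app.yaml.template; fall back to "UNKNOWN"
# if the file is missing or contains no version entry.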
match_version = re.compile(r"version: ([0-9\-]+)")
try:
appyaml = open(os.path.join("app", "app.yaml.template"))
version = match_version.findall(appyaml.read())[0]
except Exception:
version = "UNKNOWN"
setup(
name = 'melange',
description=("The goal of this project is to create a framework for "
"representing Open Source contribution workflows, such as"
" the existing Google Summer of Code TM (GSoC) program."),
version = version,
package_dir = {'':'src'},
packages=find_packages('src'),
author=open("AUTHORS").read(),
url='http://code.google.com/p/soc',
license='Apache2',
install_requires = [
'PyYAML',
'WebOb',
'zope.testbrowser',
'pylint',
'nose',
'Django==1.1.0',
'fixture',
],
tests_require=[
],
entry_points = {'console_scripts': ['run-tests = tests.run:main',
'gen-app-yaml = scripts.gen_app_yaml:main',
],
},
include_package_data = True,
zip_safe = False,
)
|
py | b40e7d82f74ea518cd36dedad957b9908da921df | from bson import ObjectId
import simplejson as json
from eve.tests import TestBase
from eve.tests.test_settings import MONGO_DBNAME
from eve.tests.utils import DummyEvent
from eve import STATUS_OK, LAST_UPDATED, ID_FIELD, ISSUES, STATUS, ETAG
from eve.methods.patch import patch_internal
# @unittest.skip("don't need no freakin' tests!")
class TestPatch(TestBase):
def test_patch_to_resource_endpoint(self):
_, status = self.patch(self.known_resource_url, data={})
self.assert405(status)
def test_readonly_resource(self):
_, status = self.patch(self.readonly_id_url, data={})
self.assert405(status)
def test_unknown_id(self):
_, status = self.patch(self.unknown_item_id_url,
data={"key1": 'value1'})
self.assert404(status)
def test_unknown_id_different_resource(self):
# patching a 'user' with a valid 'contact' id will 404
_, status = self.patch('%s/%s/' % (self.different_resource,
self.item_id),
data={"key1": "value1"})
self.assert404(status)
# of course we can still patch a 'user'
_, status = self.patch('%s/%s/' % (self.different_resource,
self.user_id),
data={'key1': '{"username": "username1"}'},
headers=[('If-Match', self.user_etag)])
self.assert200(status)
def test_by_name(self):
_, status = self.patch(self.item_name_url, data={'key1': 'value1'})
self.assert405(status)
def test_ifmatch_missing(self):
_, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert403(status)
def test_ifmatch_disabled(self):
self.app.config['IF_MATCH'] = False
r, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert200(status)
self.assertTrue(ETAG not in r)
def test_ifmatch_bad_etag(self):
_, status = self.patch(self.item_id_url,
data={'key1': 'value1'},
headers=[('If-Match', 'not-quite-right')])
self.assert412(status)
def test_unique_value(self):
# TODO
# for the time being we are happy with testing only Eve's custom
# validation. We rely on Cerberus' own test suite for other validation
# unit tests. This test also makes sure that response status is
        # syntactically correct in case of validation issues.
# We should probably test every single case as well (seems overkill).
r, status = self.patch(self.item_id_url,
data={"ref": "%s" % self.alt_ref},
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'ref': "value '%s' is not unique" %
self.alt_ref})
def test_patch_string(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_integer(self):
field = "prog"
test_value = 9999
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_list_as_array(self):
field = "role"
test_value = ["vendor", "client"]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertTrue(set(test_value).issubset(db_value))
def test_patch_rows(self):
field = "rows"
test_value = [
{'sku': 'AT1234', 'price': 99},
{'sku': 'XF9876', 'price': 9999}
]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
for test_item in test_value:
self.assertTrue(test_item in db_value)
def test_patch_list(self):
field = "alist"
test_value = ["a_string", 99]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_dict(self):
field = "location"
test_value = {'address': 'an address', 'city': 'a city'}
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_datetime(self):
field = "born"
test_value = "Tue, 06 Nov 2012 10:33:31 GMT"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_objectid(self):
field = "tid"
test_value = "4f71c129c88e2018d4000000"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_null_objectid(self):
# verify that #341 is fixed.
field = "tid"
test_value = None
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_defaults(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title', r)
def test_patch_defaults_with_post_override(self):
field = "ref"
test_value = "1234567890123456789012345"
r = self.perform_patch_with_post_override(field, test_value)
self.assert200(r.status_code)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title',
json.loads(r.get_data()))
def test_patch_multiple_fields(self):
fields = ['ref', 'prog', 'role']
test_values = ["9876543210987654321054321", 123, ["agent"]]
changes = {"ref": test_values[0], "prog": test_values[1],
"role": test_values[2]}
r = self.perform_patch(changes)
db_values = self.compare_patch_with_get(fields, r)
for i in range(len(db_values)):
self.assertEqual(db_values[i], test_values[i])
def test_patch_with_post_override(self):
# a POST request with PATCH override turns into a PATCH request
r = self.perform_patch_with_post_override('prog', 1)
self.assert200(r.status_code)
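    # For reference, a hedged sketch of the raw HTTP exchange the override test above
    # performs (the header names are real; URL, etag and body values are illustrative):
    #
    #   POST /contacts/<item_id> HTTP/1.1
    #   Content-Type: application/json
    #   X-HTTP-Method-Override: PATCH
    #   If-Match: 7776cdb01f44354af8bfa4db0c56eebcb1378975
    #
    #   {"prog": 1}
    #
    # Eve then processes the request as a PATCH rather than a POST.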
def test_patch_internal(self):
# test that patch_internal is available and working properly.
test_field = 'ref'
test_value = "9876543210987654321098765"
data = {test_field: test_value}
with self.app.test_request_context(self.item_id_url):
r, _, _, status = patch_internal(
self.known_resource, data, concurrency_check=False,
**{'_id': self.item_id})
db_value = self.compare_patch_with_get(test_field, r)
self.assertEqual(db_value, test_value)
self.assert200(status)
def perform_patch(self, changes):
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
return r
def perform_patch_with_post_override(self, field, value):
headers = [('X-HTTP-Method-Override', 'PATCH'),
('If-Match', self.item_etag),
('Content-Type', 'application/json')]
return self.test_client.post(self.item_id_url,
data=json.dumps({field: value}),
headers=headers)
def compare_patch_with_get(self, fields, patch_response):
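        # Re-GET the patched document, check that its ETag header matches the one
        # returned in the PATCH response, then return the stored value(s) for the
        # requested field(s).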
raw_r = self.test_client.get(self.item_id_url)
r, status = self.parse_response(raw_r)
self.assert200(status)
self.assertEqual(raw_r.headers.get('ETag'),
patch_response[ETAG])
if isinstance(fields, str):
return r[fields]
else:
return [r[field] for field in fields]
def test_patch_allow_unknown(self):
changes = {"unknown": "unknown"}
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'unknown': 'unknown field'})
self.app.config['DOMAIN'][self.known_resource]['allow_unknown'] = True
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
def test_patch_x_www_form_urlencoded(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
headers = [('If-Match', self.item_etag)]
r, status = self.parse_response(self.test_client.patch(
self.item_id_url, data=changes, headers=headers))
self.assert200(status)
self.assertTrue('OK' in r[STATUS])
def test_patch_referential_integrity(self):
data = {"person": self.unknown_item_id}
headers = [('If-Match', self.invoice_etag)]
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.app.config['ID_FIELD']))
self.assertValidationError(r, {'person': expected})
data = {"person": self.item_id}
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(r, self.invoice_id)
def test_patch_write_concern_success(self):
# 0 and 1 are the only valid values for 'w' on our mongod instance (1
# is the default)
self.domain['contacts']['mongo_write_concern'] = {'w': 0}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
def test_patch_write_concern_fail(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert500(status)
def test_patch_missing_standard_date_fields(self):
"""Documents created outside the API context could be lacking the
LAST_UPDATED and/or DATE_CREATED fields.
"""
        # directly insert a document, without DATE_CREATED and LAST_UPDATED
# values.
contacts = self.random_contacts(1, False)
ref = 'test_update_field'
contacts[0]['ref'] = ref
_db = self.connection[MONGO_DBNAME]
_db.contacts.insert(contacts)
# now retrieve same document via API and get its etag, which is
        # supposed to be computed on default DATE_CREATED and LAST_UPDATED
# values.
response, status = self.get(self.known_resource, item=ref)
etag = response[ETAG]
_id = response['_id']
# attempt a PATCH with the new etag.
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch('%s/%s' % (self.known_resource_url, _id),
data=changes, headers=[('If-Match', etag)])
self.assert200(status)
def test_patch_subresource(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# GET all invoices by new contact
response, status = self.get('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id))
etag = response[ETAG]
data = {"inv_number": "new_number"}
headers = [('If-Match', etag)]
response, status = self.patch('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id),
data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(response, self.invoice_id)
def test_patch_bandwidth_saver(self):
changes = {'ref': '1234567890123456789012345'}
# bandwidth_saver is on by default
self.assertTrue(self.app.config['BANDWIDTH_SAVER'])
r = self.perform_patch(changes)
self.assertFalse('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
self.item_etag = r[self.app.config['ETAG']]
# test return all fields (bandwidth_saver off)
self.app.config['BANDWIDTH_SAVER'] = False
r = self.perform_patch(changes)
self.assertTrue('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
def test_patch_readonly_field_with_previous_document(self):
schema = self.domain['contacts']['schema']
del(schema['ref']['required'])
# disable read-only on the field so we can store a value which is
        # also different from its default value.
schema['read_only_field']['readonly'] = False
changes = {'read_only_field': 'value'}
r = self.perform_patch(changes)
# resume read-only status for the field
self.domain['contacts']['schema']['read_only_field']['readonly'] = True
# test that if the read-only field is included with the payload and its
# value is equal to the one stored with the document, validation
# succeeds (#479).
etag = r['_etag']
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
# test that if the read-only field is included with the payload and its
# value is different from the stored document, validation fails.
etag = r['_etag']
changes = {'read_only_field': 'another value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert422(status)
self.assertTrue('is read-only' in r['_issues']['read_only_field'])
def assertPatchResponse(self, response, item_id):
self.assertTrue(STATUS in response)
self.assertTrue(STATUS_OK in response[STATUS])
self.assertFalse(ISSUES in response)
self.assertTrue(ID_FIELD in response)
self.assertEqual(response[ID_FIELD], item_id)
self.assertTrue(LAST_UPDATED in response)
self.assertTrue(ETAG in response)
self.assertTrue('_links' in response)
self.assertItemLink(response['_links'], item_id)
    def patch(self, url, data, headers=None):
        # avoid mutating a shared default list between calls
        headers = list(headers or []) + [('Content-Type', 'application/json')]
r = self.test_client.patch(url,
data=json.dumps(data),
headers=headers)
return self.parse_response(r)
class TestEvents(TestBase):
new_ref = "0123456789012345678901234"
def test_on_pre_PATCH(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_pre_PATCH_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_PATCH_dynamic_filter(self):
def filter_this(resource, request, lookup):
lookup["_id"] = self.unknown_item_id
self.app.on_pre_PATCH += filter_this
# Would normally patch the known document; will return 404 instead.
r, s = self.parse_response(self.patch())
self.assert404(s)
def test_on_post_PATCH(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(200, devent.called[2].status_code)
self.assertEqual(3, len(devent.called))
def test_on_post_PATCH_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH_contacts += devent
self.patch()
self.assertEqual(200, devent.called[1].status_code)
self.assertEqual(2, len(devent.called))
def test_on_update(self):
devent = DummyEvent(self.before_update)
self.app.on_update += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_update_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_update_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_updated(self):
devent = DummyEvent(self.after_update)
self.app.on_updated += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_updated_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_updated_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def before_update(self):
db = self.connection[MONGO_DBNAME]
contact = db.contacts.find_one(ObjectId(self.item_id))
return contact['ref'] == self.item_name
def after_update(self):
return not self.before_update()
def patch(self):
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
data = json.dumps({"ref": self.new_ref})
return self.test_client.patch(
self.item_id_url, data=data, headers=headers)
|
py | b40e7dda951cfff210ca530228e3643fd9f93fd9 | # azurerm unit tests - resource groups
# to run tests: python -m unittest resource_groups_test.py
import sys
import unittest
from haikunator import Haikunator
import json
import time
import azurerm
class TestAzurermPy(unittest.TestCase):
def setUp(self):
# Load Azure app defaults
try:
with open('azurermconfig.json') as configFile:
configData = json.load(configFile)
except FileNotFoundError:
print("Error: Expecting vmssConfig.json in current folder")
sys.exit()
tenant_id = configData['tenantId']
app_id = configData['appId']
app_secret = configData['appSecret']
self.subscription_id = configData['subscriptionId']
self.access_token = azurerm.get_access_token(
tenant_id, app_id, app_secret)
self.location = configData['location']
h = Haikunator()
self.rgname = h.haikunate()
def tearDown(self):
pass
def test_resource_groups(self):
# create resource group
print('Creating resource group: ' + self.rgname)
response = azurerm.create_resource_group(self.access_token, self.subscription_id,
self.rgname, self.location)
self.assertEqual(response.status_code, 201)
# get resource group
print('Getting resource group: ' + self.rgname)
response = azurerm.get_resource_group(
self.access_token, self.subscription_id, self.rgname)
self.assertEqual(response['name'], self.rgname)
# export resource group
print('Exporting resource group: ' + self.rgname)
response = azurerm.export_template(
self.access_token, self.subscription_id, self.rgname)
self.assertEqual(response.status_code, 200)
# get resource group resources
print('Getting resources for resource group: ' + self.rgname)
response = azurerm.get_resource_group_resources(self.access_token, self.subscription_id,
self.rgname)
#print(json.dumps(response, sort_keys=False, indent=2, separators=(',', ': ')))
self.assertTrue('value' in response)
# list resource groups
print('List resource groups: ' + self.rgname)
response = azurerm.list_resource_groups(
self.access_token, self.subscription_id)
self.assertTrue('value' in response)
# delete resource group
print('Deleting resource group: ' + self.rgname)
response = azurerm.delete_resource_group(
self.access_token, self.subscription_id, self.rgname)
self.assertEqual(response.status_code, 202)
if __name__ == '__main__':
unittest.main()
|
py | b40e7fdeede134d3747ce11e8c0ffe4d844c77d2 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testsettings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv) |
py | b40e80c5ea76da949b6a6e9a7fcdea8e53c7d448 | import os
import pprint
import re
import jinja2
from zppy.utils import checkStatus, getComponent, getTasks, getYears, submitScript
# -----------------------------------------------------------------------------
def ts(config, scriptDir):
# --- Initialize jinja2 template engine ---
templateLoader = jinja2.FileSystemLoader(
searchpath=config["default"]["templateDir"]
)
templateEnv = jinja2.Environment(loader=templateLoader)
template = templateEnv.get_template("ts.bash")
# --- List of tasks ---
tasks = getTasks(config, "ts")
if len(tasks) == 0:
return
# --- Generate and submit ts scripts ---
for c in tasks:
# Grid name (if not explicitly defined)
# 'native' if no remapping
# or extracted from mapping filename
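        # For illustration (hypothetical filename, not taken from this repo): a
        # mapping file named "map_ne30np4_to_fv129x256_aave.20150901.nc" has the
        # trailing ".20150901.nc" stripped, is split on "_", and yields the grid
        # name "fv129x256_aave" (the last two tokens joined by "_").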
if c["grid"] == "":
if c["mapping_file"] == "":
c["grid"] = "native"
elif c["mapping_file"] == "glb":
c["grid"] = "glb"
else:
tmp = os.path.basename(c["mapping_file"])
                tmp = re.sub(r"\.[^.]*\.nc$", "", tmp)
tmp = tmp.split("_")
if tmp[0] == "map":
c["grid"] = "%s_%s" % (tmp[-2], tmp[-1])
else:
raise Exception(
"Cannot extract target grid name from mapping file %s"
% (c["mapping_file"])
)
# Component
c["component"] = getComponent(c["input_files"])
# Loop over year sets
year_sets = getYears(c["years"])
for s in year_sets:
c["yr_start"] = s[0]
c["yr_end"] = s[1]
c["ypf"] = s[1] - s[0] + 1
c["scriptDir"] = scriptDir
if c["subsection"]:
sub = c["subsection"]
else:
sub = c["grid"]
prefix = "ts_%s_%04d-%04d-%04d" % (
sub,
c["yr_start"],
c["yr_end"],
c["ypf"],
)
print(prefix)
c["prefix"] = prefix
scriptFile = os.path.join(scriptDir, "%s.bash" % (prefix))
statusFile = os.path.join(scriptDir, "%s.status" % (prefix))
settingsFile = os.path.join(scriptDir, "%s.settings" % (prefix))
skip = checkStatus(statusFile)
if skip:
continue
# Create script
with open(scriptFile, "w") as f:
f.write(template.render(**c))
with open(settingsFile, "w") as sf:
p = pprint.PrettyPrinter(indent=2, stream=sf)
p.pprint(c)
p.pprint(s)
if not c["dry_run"]:
# Submit job
jobid = submitScript(scriptFile)
if jobid != -1:
# Update status file
with open(statusFile, "w") as f:
f.write("WAITING %d\n" % (jobid))
|
py | b40e80db6b8e43d400d8ffa8f007e06c4807fbd7 | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
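# Minimal usage sketch (assumes a configured Django project; the field values are
# placeholders): the standard DRF flow of validating incoming data and saving it.
#   serializer = UserSerializer(data={'email': 'a@b.c', 'password': 'secret12', 'name': 'A'})
#   if serializer.is_valid():
#       user = serializer.save()  # calls create(), which stores an encrypted password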
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False,
)
def validate(self, attrs):
"""Validates and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
|
py | b40e82d49651961a21fcbecb463fa2ed23d5ada9 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import seaborn as sns
import pandas as pd
import numpy as np
import pickle
import os
# Read and split data
df = pd.read_csv("Real_estate.csv")
X = df.iloc[:,1:-1]
y = df.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.05,random_state = 0)
# Training Model
reg = LinearRegression()
reg.fit (X_train, y_train)
# Testing Model
y_pred = reg.predict(X_test)
# Evaluate model
def eval_metrics(actual, pred):
rmse = np.sqrt(mean_squared_error(actual, pred))
mae = mean_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
(rmse, mae, r2) = eval_metrics(y_test, y_pred)
print(" RMSE: %s" % rmse)
print(" MAE: %s" % mae)
print(" R2 score: %s" % r2)
with open("metrics.txt", 'w') as outfile:
outfile.write("RMSE: " + str(rmse) + "\n")
outfile.write("MAE: " + str(mae) + "\n")
outfile.write("R2 score: " + str(r2) + "\n")
# Draw, show, and save regplot
ax = sns.regplot(x=y_test,y=y_pred,fit_reg=True)
ax.figure.savefig('regplot.png')
# Create (Pickle file) and save regression model to disk
with open('dockerfille_dev/model.pkl', 'wb') as model_file:
    pickle.dump(reg, model_file)
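# To reuse the saved model later (sketch, same path as above):
#   with open('dockerfille_dev/model.pkl', 'rb') as f:
#       loaded_reg = pickle.load(f)
#   loaded_reg.predict(X_test)  # e.g. on the held-out features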
# Copy the updated files (ML model and Flask web API) and restart the service container (in the background)
#os.system("docker cp dockerfille_dev/flask_api.py real_estat_price_container:/usr/app/flask_api.py\
#&& docker cp dockerfille_dev/model.pkl real_estat_price_container:/usr/app/model.pkl\
#&& docker restart real_estat_price_container")
|
py | b40e8353d6315be6449ece2973c621e0c478bfdb | import tensorflow as tf
import tensorbayes as tb
from codebase.args import args
from codebase.datasets import PseudoData, get_info
from .utils import delete_existing, save_value, save_model
import os
import sys
import numpy as np
def update_dict(M, feed_dict, src=None, trg=None, bs=100):
"""Update feed_dict with new mini-batch
M - (TensorDict) the model
feed_dict - (dict) tensorflow feed dict
src - (obj) source domain. Contains train/test Data obj
trg - (obj) target domain. Contains train/test Data obj
bs - (int) batch size
"""
if src:
src_x, src_y = src.train.next_batch(bs)
feed_dict.update({M.src_x: src_x, M.src_y: src_y})
if trg:
trg_x, trg_y = trg.train.next_batch(bs)
feed_dict.update({M.trg_x: trg_x, M.trg_y: trg_y})
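# Usage sketch (mirrors how train() below calls it): an empty dict is refilled with a
# fresh mini-batch before every optimizer step.
#   feed_dict = {}
#   update_dict(M, feed_dict, src, trg, bs=20)  # fills the M.src_x/M.src_y and M.trg_x/M.trg_y placeholders
#   M.sess.run(M.ops_main, feed_dict)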
def train(M, src=None, trg=None, has_disc=True, saver=None, model_name=None):
"""Main training function
Creates log file, manages datasets, trains model
M - (TensorDict) the model
src - (obj) source domain. Contains train/test Data obj
trg - (obj) target domain. Contains train/test Data obj
has_disc - (bool) whether model requires a discriminator update
saver - (Saver) saves models during training
model_name - (str) name of the model being run with relevant parms info
"""
# Training settings
bs = 20
iterep = 1000
itersave = 20000
n_epoch = 80
epoch = 0
feed_dict = {}
# Create a log directory and FileWriter
log_dir = os.path.join(args.logdir, model_name)
delete_existing(log_dir)
train_writer = tf.summary.FileWriter(log_dir)
# Create a save directory
if saver:
model_dir = os.path.join('checkpoints', model_name)
delete_existing(model_dir)
os.makedirs(model_dir)
    # Replace src domain with pseudolabeled trg
if args.dirt > 0:
print("Setting backup and updating backup model")
src = PseudoData(args.trg, trg, M.teacher)
M.sess.run(M.update_teacher)
# Sanity check model
print_list = []
if src:
save_value(M.fn_ema_acc, 'test/src_test_ema_1k',
src.test, train_writer, 0, print_list, full=False)
if trg:
save_value(M.fn_ema_acc, 'test/trg_test_ema',
trg.test, train_writer, 0, print_list)
save_value(M.fn_ema_acc, 'test/trg_train_ema_1k',
trg.train, train_writer, 0, print_list, full=False)
print(print_list)
if src: get_info(args.src, src)
if trg: get_info(args.trg, trg)
print("Batch size:", bs)
print("Iterep:", iterep)
print("Total iterations:", n_epoch * iterep)
print("Log directory:", log_dir)
for i in range(n_epoch * iterep):
# Run discriminator optimizer
if has_disc:
update_dict(M, feed_dict, src, trg, bs)
summary, _ = M.sess.run(M.ops_disc, feed_dict)
train_writer.add_summary(summary, i + 1)
# Run main optimizer
update_dict(M, feed_dict, src, trg, bs)
summary, _ = M.sess.run(M.ops_main, feed_dict)
train_writer.add_summary(summary, i + 1)
train_writer.flush()
end_epoch, epoch = tb.utils.progbar(i, iterep,
message='{}/{}'.format(epoch, i),
display=args.run >= 999)
# Update pseudolabeler
if args.dirt and (i + 1) % args.dirt == 0:
print("Updating teacher model")
M.sess.run(M.update_teacher)
# Log end-of-epoch values
if end_epoch:
print_list = M.sess.run(M.ops_print, feed_dict)
if src:
save_value(M.fn_ema_acc, 'test/src_test_ema_1k',
src.test, train_writer, i + 1, print_list, full=False)
if trg:
save_value(M.fn_ema_acc, 'test/trg_test_ema',
trg.test, train_writer, i + 1, print_list)
save_value(M.fn_ema_acc, 'test/trg_train_ema_1k',
trg.train, train_writer, i + 1, print_list, full=False)
print_list += ['epoch', epoch]
print(print_list)
if saver and (i + 1) % itersave == 0:
save_model(saver, M, model_dir, i + 1)
# Saving final model
if saver:
save_model(saver, M, model_dir, i + 1)
|
py | b40e842775e838d2dd3b29cf00e1cf5d19eb008c | """plot metrics with matplotlib"""
import os.path
import shutil
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from ._numpy import NumpyMetric
__all__ = ['PlotMetric']
class PlotMetric(NumpyMetric):
"""Plot graphs of metrics. See :class:`NumpyMetric <._numpy.NumpyMetric>` for usage.
:cvar outdir: directory to save plots in. Defaults to ``./instrument_plots``.
"""
instances = {}
outdir = os.path.abspath("instrument_plots")
@classmethod
def _pre_dump(cls):
"""Output all recorded stats"""
shutil.rmtree(cls.outdir, ignore_errors=True)
os.makedirs(cls.outdir)
super(PlotMetric, cls)._pre_dump()
def _cleanup(self):
plt.clf()
plt.close()
super(PlotMetric, self)._cleanup()
def _output(self):
plt.figure(1, figsize = (8, 18))
plt.subplot(3, 1, 1)
self._histogram('count', self.count_mean, self.count_std, self.count_arr)
plt.subplot(3, 1, 2)
self._histogram('elapsed', self.elapsed_mean, self.elapsed_std, self.elapsed_arr)
plt.subplot(3, 1, 3)
self._scatter()
plt.savefig(os.path.join(self.outdir, ".".join((self.name, 'png'))),
bbox_inches="tight")
super(PlotMetric, self)._output()
def _histogram(self, which, mu, sigma, data):
"""plot a histogram. For internal use only"""
weights = np.ones_like(data)/len(data) # make bar heights sum to 100%
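        # each sample contributes 1/N, so the bin heights sum to 1.0; the percent
        # formatter on the y-axis below then renders them as percentages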
n, bins, patches = plt.hist(data, bins=25, weights=weights, facecolor='blue', alpha=0.5)
plt.title(r'%s %s: $\mu=%.2f$, $\sigma=%.2f$' % (self.name, which.capitalize(), mu, sigma))
plt.xlabel('Items' if which == 'count' else 'Seconds')
plt.ylabel('Frequency')
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, position: "{:.1f}%".format(y*100)))
def _scatter(self):
"""plot a scatter plot of count vs. elapsed. For internal use only"""
plt.scatter(self.count_arr, self.elapsed_arr)
plt.title('{}: Count vs. Elapsed'.format(self.name))
plt.xlabel('Items')
plt.ylabel('Seconds')
|
py | b40e842810cd42d8023d8dc6d739a3c8b6704f3e | # -*- coding: utf-8 -*-
"""
tests.test_timer
~~~~~~~~~~~~~~~~
Test timer feature.
"""
import re
import time
from yaspin import yaspin
def test_no_timer():
sp = yaspin(timer=False)
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}.\d{2}\)", sp._last_frame) is None
def test_timer_idle():
sp = yaspin(timer=True)
assert sp.elapsed_time == 0
sp._freeze("")
assert "(0:00:00.00)" in sp._last_frame
def test_timer_in_progress():
sp = yaspin(timer=True)
sp.start()
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
sp.stop()
assert t2 - t1 >= 0.001
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}.\d{2}\)", sp._last_frame) is not None
def test_timer_rounding_down():
sp = yaspin(timer=True)
sp.start()
sp.stop()
sp._stop_time = sp._start_time + 0.994
sp._freeze("")
assert "(0:00:00.99)" in sp._last_frame
def test_timer_rounding_up():
sp = yaspin(timer=True)
sp.start()
sp.stop()
sp._stop_time = sp._start_time + 0.996
sp._freeze("")
assert "(0:00:01.00)" in sp._last_frame
def test_timer_finished():
sp = yaspin(timer=True)
sp.start()
time.sleep(0.001)
sp.stop()
assert sp.elapsed_time >= 0.001
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
assert t1 == t2
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}.\d{2}\)", sp._last_frame) is not None
|
py | b40e8542665fbc2e64401c73ca6ac4056ba559f2 | # Generated by Django 3.0.4 on 2020-03-15 02:08
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import modelcluster.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('wagtailcore', '0045_assign_unlock_grouppagepermission'),
]
operations = [
migrations.CreateModel(
name='Nav',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='NavItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_title', models.CharField(blank=True, max_length=100, null=True)),
('link_url', models.CharField(blank=True, max_length=500)),
('open_in_new_tab', models.BooleanField(blank=True, default=False)),
('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='nav_items', to='navigator.Nav')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
py | b40e857259bdea89e449f53706a1cea1ea07c1eb | import os
import time
import sys
import math
import numpy as np
import functools
import re
import logging
import glob
import paddle
import paddle.fluid as fluid
from models.resnet import ResNet101
from datasets.readers import ReaderConfig
# import cv2
# import skimage
# import matplotlib.pyplot as plt
# from paddle.fluid.core import PaddleTensor
# from paddle.fluid.core import AnalysisConfig
# from paddle.fluid.core import create_paddle_predictor
from args import args
from datasets.data_path import global_data_path
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
if args.seed is not None:
np.random.seed(args.seed)
print(os.environ.get('LD_LIBRARY_PATH', None))
print(os.environ.get('PATH', None))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
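# Typical use (sketch), matching the training loops below:
#   meter = AverageMeter()
#   meter.update(batch_loss, n=batch_size)  # running sum weighted by sample count
#   print(meter.avg)                        # mean over everything seen so far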
def load_vars_by_dict(executor, name_var_dict, main_program=None):
from paddle.fluid.framework import Program, Variable
from paddle.fluid import core
load_prog = Program()
load_block = load_prog.global_block()
if main_program is None:
main_program = fluid.default_main_program()
if not isinstance(main_program, Program):
raise TypeError("program should be as Program type or None")
for each_var_name in name_var_dict.keys():
assert isinstance(name_var_dict[each_var_name], Variable)
if name_var_dict[each_var_name].type == core.VarDesc.VarType.RAW:
continue
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [name_var_dict[each_var_name]]},
attrs={'file_path': each_var_name})
executor.run(load_prog)
def get_model_id():
prefix = ''
if args.prefix is not None:
prefix = args.prefix + '-' # for some notes.
model_id = prefix + args.dataset + \
'-epo_' + str(args.num_epoch) + \
'-b_' + str(args.batch_size) + \
'-reg_' + str(args.delta_reg) + \
'-wd_' + str(args.wd_rate)
return model_id
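# Example of the resulting id (argument values are made up): running with
# --dataset flowers102 --num_epoch 20 --batch_size 64 --delta_reg 0.1 --wd_rate 0.0001
# produces "flowers102-epo_20-b_64-reg_0.1-wd_0.0001" (plus any --prefix).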
def train():
dataset = args.dataset
image_shape = [3, 224, 224]
pretrained_model = args.pretrained_model
class_map_path = f'{global_data_path}/{dataset}/readable_label.txt'
if os.path.exists(class_map_path):
logger.info(
"The map of readable label and numerical label has been found!")
with open(class_map_path) as f:
label_dict = {}
strinfo = re.compile(r"\d+ ")
for item in f.readlines():
key = int(item.split(" ")[0])
value = [
strinfo.sub("", l).replace("\n", "")
for l in item.split(", ")
]
label_dict[key] = value[0]
assert os.path.isdir(
pretrained_model), "please load right pretrained model path for infer"
# data reader
batch_size = args.batch_size
reader_config = ReaderConfig(f'{global_data_path}/{dataset}', is_test=False)
reader = reader_config.get_reader()
train_reader = paddle.batch(
paddle.reader.shuffle(reader, buf_size=batch_size),
batch_size,
drop_last=True)
# model ops
image = fluid.data(
name='image', shape=[None] + image_shape, dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
model = ResNet101(is_test=False)
features, logits = model.net(
input=image, class_dim=reader_config.num_classes)
out = fluid.layers.softmax(logits)
# loss, metric
cost = fluid.layers.mean(fluid.layers.cross_entropy(out, label))
accuracy = fluid.layers.accuracy(input=out, label=label)
# delta regularization
# teacher model pre-trained on Imagenet, 1000 classes.
global_name = 't_'
t_model = ResNet101(is_test=True, global_name=global_name)
t_features, _ = t_model.net(input=image, class_dim=1000)
for f in t_features.keys():
t_features[f].stop_gradient = True
    # delta loss. The layer name is hard-coded; it is the layer just before global pooling.
delta_loss = fluid.layers.square(t_features['t_res5c.add.output.5.tmp_0'] -
features['res5c.add.output.5.tmp_0'])
delta_loss = fluid.layers.reduce_mean(delta_loss)
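    # In other words, a sketch of the regularizer: the mean squared difference
    # between teacher and student pre-pooling feature maps,
    #   delta_loss = mean((t_feat - s_feat) ** 2)
    # later added to the task loss scaled by args.delta_reg (see optimizer.minimize below).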
params = fluid.default_main_program().global_block().all_parameters()
parameters = []
for param in params:
if param.trainable:
if global_name in param.name:
print('\tfixing', param.name)
else:
print('\ttraining', param.name)
parameters.append(param.name)
# optimizer, with piecewise_decay learning rate.
total_steps = len(reader_config.image_paths) * args.num_epoch // batch_size
boundaries = [int(total_steps * 2 / 3)]
print('\ttotal learning steps:', total_steps)
print('\tlr decays at:', boundaries)
values = [0.01, 0.001]
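    # e.g. (illustrative numbers): with 30,000 total steps the learning rate stays
    # at 0.01 until step 20,000 and then drops to 0.001 for the remainder.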
optimizer = fluid.optimizer.Momentum(
learning_rate=fluid.layers.piecewise_decay(
boundaries=boundaries, values=values),
momentum=0.9,
parameter_list=parameters,
regularization=fluid.regularizer.L2Decay(args.wd_rate))
cur_lr = optimizer._global_learning_rate()
optimizer.minimize(
cost + args.delta_reg * delta_loss, parameter_list=parameters)
    # feed order
feed_order = ['image', 'label']
# executor (session)
place = fluid.CUDAPlace(
args.use_cuda) if args.use_cuda >= 0 else fluid.CPUPlace()
exe = fluid.Executor(place)
# running
main_program = fluid.default_main_program()
start_program = fluid.default_startup_program()
feed_var_list_loop = [
main_program.global_block().var(var_name) for var_name in feed_order
]
feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place)
exe.run(start_program)
loading_parameters = {}
t_loading_parameters = {}
for p in main_program.all_parameters():
if 'fc' not in p.name:
if global_name in p.name:
new_name = os.path.join(pretrained_model,
p.name.split(global_name)[-1])
t_loading_parameters[new_name] = p
print(new_name, p.name)
else:
name = os.path.join(pretrained_model, p.name)
loading_parameters[name] = p
print(name, p.name)
else:
print(f'not loading {p.name}')
load_vars_by_dict(exe, loading_parameters, main_program=main_program)
load_vars_by_dict(exe, t_loading_parameters, main_program=main_program)
step = 0
# test_data = reader_creator_all_in_memory('./datasets/PetImages', is_test=True)
for e_id in range(args.num_epoch):
avg_delta_loss = AverageMeter()
avg_loss = AverageMeter()
avg_accuracy = AverageMeter()
batch_time = AverageMeter()
end = time.time()
for step_id, data_train in enumerate(train_reader()):
wrapped_results = exe.run(
main_program,
feed=feeder.feed(data_train),
fetch_list=[cost, accuracy, delta_loss, cur_lr])
# print(avg_loss_value[2])
batch_time.update(time.time() - end)
end = time.time()
avg_loss.update(wrapped_results[0][0], len(data_train))
avg_accuracy.update(wrapped_results[1][0], len(data_train))
avg_delta_loss.update(wrapped_results[2][0], len(data_train))
if step % 100 == 0:
print(
f"\tEpoch {e_id}, Global_Step {step}, Batch_Time {batch_time.avg: .2f},"
f" LR {wrapped_results[3][0]}, "
f"Loss {avg_loss.avg: .4f}, Acc {avg_accuracy.avg: .4f}, Delta_Loss {avg_delta_loss.avg: .4f}"
)
step += 1
if args.outdir is not None:
try:
os.makedirs(args.outdir, exist_ok=True)
fluid.io.save_params(
executor=exe, dirname=args.outdir + '/' + get_model_id())
except:
print('\t Not saving trained parameters.')
if e_id == args.num_epoch - 1:
print("kpis\ttrain_cost\t%f" % avg_loss.avg)
print("kpis\ttrain_acc\t%f" % avg_accuracy.avg)
def test():
image_shape = [3, 224, 224]
pretrained_model = args.outdir + '/' + get_model_id()
# data reader
batch_size = args.batch_size
reader_config = ReaderConfig(
f'{global_data_path}/{args.dataset}', is_test=True)
reader = reader_config.get_reader()
test_reader = paddle.batch(reader, batch_size)
# model ops
image = fluid.data(
name='image', shape=[None] + image_shape, dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
model = ResNet101(is_test=True)
_, logits = model.net(input=image, class_dim=reader_config.num_classes)
out = fluid.layers.softmax(logits)
# loss, metric
cost = fluid.layers.mean(fluid.layers.cross_entropy(out, label))
accuracy = fluid.layers.accuracy(input=out, label=label)
    # feed order
feed_order = ['image', 'label']
# executor (session)
place = fluid.CUDAPlace(
args.use_cuda) if args.use_cuda >= 0 else fluid.CPUPlace()
exe = fluid.Executor(place)
# running
main_program = fluid.default_main_program()
start_program = fluid.default_startup_program()
feed_var_list_loop = [
main_program.global_block().var(var_name) for var_name in feed_order
]
feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place)
exe.run(start_program)
fluid.io.load_params(exe, pretrained_model)
step = 0
avg_loss = AverageMeter()
avg_accuracy = AverageMeter()
for step_id, data_train in enumerate(test_reader()):
avg_loss_value = exe.run(
main_program,
feed=feeder.feed(data_train),
fetch_list=[cost, accuracy])
avg_loss.update(avg_loss_value[0], len(data_train))
avg_accuracy.update(avg_loss_value[1], len(data_train))
if step_id % 10 == 0:
print("\nBatch %d, Loss %f, Acc %f" % (step_id, avg_loss.avg,
avg_accuracy.avg))
step += 1
print("test counts:", avg_loss.count)
print("test_cost\t%f" % avg_loss.avg)
print("test_acc\t%f" % avg_accuracy.avg)
if __name__ == '__main__':
print(args)
train()
test()
|
py | b40e859a40a9e011e0cb7777091cc4e8d9f8d552 | #this was initiated by atom(conan)
#partially modified by opkr
import os
import math
from cereal import car, log
from common.params import Params
from selfdrive.car.hyundai.spdcontroller import SpdController
import common.log as trace1
from selfdrive.controls.lib.events import Events
EventName = car.CarEvent.EventName
class SpdctrlRelaxed(SpdController):
def __init__(self, CP=None):
super().__init__( CP )
self.cv_Raio = 0.45
self.cv_Dist = -5
self.steer_mode = ""
self.cruise_gap = 0.0
self.cut_in = False
self.map_enable = False
self.map_spdlimit_offset = 0
self.target_speed = 0
self.target_speed_camera = 0
self.target_speed_map = 0.0
self.target_speed_map_counter = 0
self.target_speed_map_counter1 = 0
self.target_speed_map_counter2 = 0
self.hesitant_status = False
self.hesitant_timer = 0
self.map_decel_only = False
self.map_spdlimit_offset = int(Params().get("OpkrSpeedLimitOffset", encoding="utf8"))
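        # e.g. (sketch, assuming safetySign is the posted speed limit in km/h): a
        # 60 km/h sign with a 10% offset yields a 66 km/h camera target in update_lead below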
def update_lead(self, sm, CS, dRel, yRel, vRel, CC):
self.map_decel_only = CS.out.cruiseState.modeSel == 4
plan = sm['longitudinalPlan']
dRele = plan.dRel1 #EON Lead
yRele = plan.yRel1 #EON Lead
vRele = plan.vRel1 * 3.6 + 0.5 #EON Lead
dRelef = plan.dRel2 #EON Lead
yRelef = plan.yRel2 #EON Lead
vRelef = plan.vRel2 * 3.6 + 0.5 #EON Lead
lead2_status = plan.status2
        self.target_speed_camera = CS.out.safetySign + round(CS.out.safetySign*0.01*self.map_spdlimit_offset)
if self.target_speed_camera <= 29:
self.map_enable = False
self.target_speed = 0
elif self.target_speed_camera > 29:
self.target_speed = self.target_speed_camera
self.map_enable = True
else:
self.target_speed = 0
lead_set_speed = int(round(self.cruise_set_speed_kph))
lead_wait_cmd = 250
dRel = 150
vRel = 0
dRel2 = 140
vRel2 = 0
#dRel, yRel, vRel = self.get_lead( sm, CS )
if 1 < dRele < 149:
            dRel = int(dRele) # use dRele (EON lead gap) value
            vRel = int(vRele)
        elif 1 < CS.lead_distance < 149:
            dRel = int(CS.lead_distance) # use CS.lead_distance (radar lead gap) value
vRel = int(CS.lead_objspd)
else:
dRel = 150
vRel = 0
if 1 < dRelef < 140:
dRel2 = int(dRelef)
vRel2 = int(vRelef) # for cut-in detection??
        dst_lead_distance = int(CS.clu_Vanz*self.cv_Raio) # target following distance
        dst_lead_distance2 = int(CS.clu_Vanz*0.45) # target following distance
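        # rough worked example (assuming clu_Vanz is in km/h): at 100 km/h with
        # cv_Raio = 0.45 the target gap comes out to about 45 m, capped at 100 below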
if dst_lead_distance > 100:
dst_lead_distance = 100
#elif dst_lead_distance < 15:
#dst_lead_distance = 15
        if 1 < dRel < 149: # if the gap to the lead car is under 150 m, i.e. a lead car is detected
            self.time_no_lean = 0
            d_delta = dRel - dst_lead_distance # d_delta = lead gap (EON value) - target distance
            lead_objspd = vRel # relative speed of the lead vehicle
else:
d_delta = 0
lead_objspd = 0
if 1 < dRel2 < 140:
d_delta2 = dRel2 - dst_lead_distance2
else:
d_delta2 = 0
        if CS.driverAcc_time and not self.map_decel_only: # when the driver presses the accelerator, sync the cruise set speed to current speed + 1
if int(CS.VSetDis) < int(round(CS.clu_Vanz)):
lead_set_speed = int(round(CS.clu_Vanz)) + 1
self.seq_step_debug = "운전자가속"
lead_wait_cmd = 8
elif int(round(self.target_speed)) < int(CS.VSetDis) and self.map_enable and ((int(round(self.target_speed)) < int(round(self.cruise_set_speed_kph))) and self.target_speed != 0):
self.seq_step_debug = "맵기반감속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif CC.res_speed != 0 and CC.res_speed < int(CS.VSetDis):
self.seq_step_debug = "RES속도조정"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
        # distance-keeping condition
        elif d_delta < 0 or d_delta2 < 0 and not self.map_decel_only: # closer than the target following distance (current speed * 0.4)
if (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and dRele - dRelef > 3 and lead2_status:
self.seq_step_debug = "끼어들기감지"
#lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 15, -5)
self.cut_in = True
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-7) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.3 and int(CS.clu_Vanz) > 80:
self.seq_step_debug = "거리확보3"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-5) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.35 and int(CS.clu_Vanz) > 50:
self.seq_step_debug = "거리확보2"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-3) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.4 and int(CS.clu_Vanz) > 20:
self.seq_step_debug = "거리확보1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd <= 0 and self.cut_in == True and (int(CS.clu_Vanz)-4) <= int(CS.VSetDis):
self.seq_step_debug = "끼어들기감속중"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
            elif lead_objspd < -30 or (dRel < 60 and CS.clu_Vanz > 60 and lead_objspd < -5) and (int(CS.clu_Vanz)-6) <= int(CS.VSetDis): # when the cut-in car is braking hard
self.seq_step_debug = "기준내,-5"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -5)
self.cut_in = False
            elif lead_objspd < -20 or (dRel < 80 and CS.clu_Vanz > 80 and lead_objspd < -5) and (int(CS.clu_Vanz)-5) <= int(CS.VSetDis): # when the cut-in car is braking hard
self.seq_step_debug = "기준내,-4"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -4)
self.cut_in = False
elif lead_objspd < 0 and int(CS.clu_Vanz)//abs(lead_objspd) <= int(CS.VSetDis)//abs(lead_objspd):
self.seq_step_debug = "기준내-가변"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, max(8, 120-(abs(lead_objspd**3))), -2)
self.cut_in = False
elif lead_objspd > 3 and int(CS.clu_Vanz) <= int(CS.VSetDis):
self.seq_step_debug = "기준내,앞차가속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 60, 1)
self.cut_in = False
elif lead_objspd >= 0 and int(CS.clu_Vanz) <= int(CS.VSetDis):
self.seq_step_debug = "기준내,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 240, -1)
self.cut_in = False
else:
self.seq_step_debug = "거리유지"
self.cut_in = False
        # deceleration condition while the lead vehicle is still far away
        elif 20 <= dRel < 149 and lead_objspd < -15 and not self.map_decel_only: # when a stopped or hard-braking vehicle is detected
self.cut_in = False
if int(CS.clu_Vanz//abs(lead_objspd)) <= int(CS.VSetDis//abs(lead_objspd)):
self.seq_step_debug = "정차차량 감속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -20)
        elif self.cruise_set_speed_kph > int(round((CS.clu_Vanz))) and not self.map_decel_only: # when the EON set speed is higher than the vehicle speed
self.cut_in = False
if 10 > dRel > 3 and lead_objspd <= 0 and 1 < int(CS.clu_Vanz) <= 7 and CS.VSetDis < 45 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "출발속도조정"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 5)
elif 20 > dRel > 3 and lead_objspd > 5 and CS.clu_Vanz <= 25 and CS.VSetDis < 55 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "SS>VS,출발"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 100, 1)
            #elif lead_objspd > 9 and CS.clu_Vanz > 20 and CS.VSetDis < 45: # bump the set speed a lot when the lead car accelerates hard right after pulling away
# self.seq_step_debug = "SS>VS,초가"
# lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 10, 5)
            #elif lead_objspd > 8 and CS.clu_Vanz > 45 and CS.VSetDis < 60: # bump the set speed a lot when the lead car accelerates hard at medium speed
# self.seq_step_debug = "SS>VS,중가"
# lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 5)
#elif lead_objspd > 7 and CS.clu_Vanz > 65 and CS.VSetDis < 80:
# self.seq_step_debug = "SS>VS,종가"
# lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 5)
elif lead_objspd > 0 and int(CS.clu_Vanz//lead_objspd) >= int(CS.VSetDis//lead_objspd) and int(CS.clu_Vanz*0.4) < dRel < 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "SS>VS,++1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 1)
elif lead_objspd > 0 and int(CS.clu_Vanz)+lead_objspd >= int(CS.VSetDis) and int(CS.clu_Vanz*0.4) < dRel < 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "SS>VS,+1"
if int(CS.VSetDis) > int(CS.clu_Vanz)+14:
self.hesitant_status = True
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 1)
            elif CS.clu_Vanz > 80 and lead_objspd < -1 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*1.6 and 1 < dRel < 149: # decel outside the keep-distance range: while the lead car brakes and the gap closes below about half the current speed, ease off gradually based on relative speed
self.seq_step_debug = "SS>VS,v>80,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(8, 50+(lead_objspd*2)), -1)
            elif CS.clu_Vanz > 60 and lead_objspd < -1 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*1.8 and 1 < dRel < 149: # decel outside the keep-distance range: while the lead car brakes and the gap closes below about half the current speed, ease off gradually based on relative speed
self.seq_step_debug = "SS>VS,v>60,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(8, 50+(lead_objspd*2)), -1)
            elif CS.clu_Vanz > 40 and lead_objspd < -1 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*2.1 and 1 < dRel < 149: # decel outside the keep-distance range: while the lead car brakes and the gap closes below about half the current speed, ease off gradually based on relative speed
self.seq_step_debug = "SS>VS,v>40,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(8, 50+(lead_objspd*2)), -1)
elif 70 > CS.clu_Vanz > 30 and lead_objspd < -1 and int(CS.clu_Vanz)//abs(lead_objspd*2.2) <= int(CS.VSetDis)//abs(lead_objspd*2.2) and int(CS.clu_Vanz) >= dRel*0.8 and 1 < dRel < 149:
self.seq_step_debug = "SS>VS,70>v>30,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(8, 120-(abs(lead_objspd**3))), -2)
elif 7 < int(CS.clu_Vanz) < 30 and lead_objspd < 0 and CS.VSetDis > 30:
self.seq_step_debug = "SS>VS,30이하"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, -5)
            elif lead_objspd == 0 and int(CS.clu_Vanz)+3 <= int(CS.VSetDis) and int(CS.clu_Vanz) > 40 and 1 < dRel < 149: # when matching the lead car's speed, hold the cruise set speed at current speed + 5
self.seq_step_debug = "SS>VS,vRel=0"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, -1)
elif d_delta == 0 and lead_objspd == 0 and int(CS.clu_Vanz//10) >= int(CS.VSetDis//10) and dRel > 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "선행차없음"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 5)
elif d_delta == 0 and lead_objspd == 0 and self.cruise_set_speed_kph > int(CS.VSetDis) and int(CS.clu_Vanz//10) >= int(CS.VSetDis//10) and dRel > 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "점진가속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 1)
elif lead_objspd == 0 and int(CS.clu_Vanz) == 0 and dRel <= 6:
self.seq_step_debug = "출발대기"
else:
self.seq_step_debug = "SS>VS,거리유지"
if self.hesitant_status and self.hesitant_timer > 150:
self.hesitant_status = False
self.hesitant_timer = 0
elif self.hesitant_status:
self.hesitant_timer += 1
        # gradual deceleration outside the keep-distance range
elif 20 <= dRel < int(CS.clu_Vanz*0.75) and lead_objspd < -1 and not self.map_decel_only:
self.cut_in = False
if int(CS.clu_Vanz//abs(lead_objspd)) <= int(CS.VSetDis//abs(lead_objspd)):
self.seq_step_debug = "점진감속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, max(8, 200-(abs(lead_objspd**3))), -1)
elif lead_objspd >= 0 and CS.clu_Vanz >= int(CS.VSetDis) and int(CS.clu_Vanz * 0.5) < dRel < 149 and not self.map_decel_only:
self.cut_in = False
self.seq_step_debug = "속도유지"
elif self.map_decel_only and self.cruise_set_speed_kph > int(round(CS.VSetDis)) and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "속도원복"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 1)
else:
self.cut_in = False
self.seq_step_debug = "속도유지"
return lead_wait_cmd, lead_set_speed
def update_curv(self, CS, sm, curve_speed):
wait_time_cmd = 0
set_speed = self.cruise_set_speed_kph
        # 2. Curve deceleration.
#if self.cruise_set_speed_kph >= 100:
if CS.out.cruiseState.modeSel == 1 and Events().names not in [EventName.laneChangeManual, EventName.laneChange] and not (CS.left_blinker_flash or CS.right_blinker_flash)and not self.map_decel_only:
if curve_speed < 25 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(45, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.3))
self.seq_step_debug = "커브감속-5"
wait_time_cmd = 8
elif curve_speed < 40 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(55, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.25))
self.seq_step_debug = "커브감속-4"
wait_time_cmd = 8
elif curve_speed < 60 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(65, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.2))
self.seq_step_debug = "커브감속-3"
wait_time_cmd = 8
elif curve_speed < 75 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(75, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.15))
self.seq_step_debug = "커브감속-2"
wait_time_cmd = 8
elif curve_speed < 90 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(85, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.1))
self.seq_step_debug = "커브감속-1"
wait_time_cmd = 8
return wait_time_cmd, set_speed
def update_log(self, CS, set_speed, target_set_speed, long_wait_cmd ):
if CS.out.cruiseState.modeSel == 0:
self.steer_mode = "오파모드"
elif CS.out.cruiseState.modeSel == 1:
self.steer_mode = "차간+커브"
elif CS.out.cruiseState.modeSel == 2:
self.steer_mode = "차간ONLY"
elif CS.out.cruiseState.modeSel == 3:
self.steer_mode = "편도1차선"
elif CS.out.cruiseState.modeSel == 4:
self.steer_mode = "맵감속ONLY"
if self.cruise_gap != CS.cruiseGapSet:
self.cruise_gap = CS.cruiseGapSet
str3 = 'MODE={:s} VL={:03.0f}/{:03.0f} TM={:03.0f}/{:03.0f} TS={:03.0f}'.format( self.steer_mode, set_speed, CS.VSetDis, long_wait_cmd, self.long_curv_timer, int(round(self.target_speed)) )
str4 = ' RD=D:{:03.0f}/V:{:03.0f} CG={:1.0f} DG={:s}'.format( CS.lead_distance, CS.lead_objspd, self.cruise_gap, self.seq_step_debug )
str5 = str3 + str4
trace1.printf2( str5 )
|
py | b40e88785bc724d583a03a3b3c6c686f334fe00b | """empty message
Revision ID: 2357b6b3d76
Revises: fecca96b9d
Create Date: 2015-10-27 10:26:52.074526
"""
# revision identifiers, used by Alembic.
revision = '2357b6b3d76'
down_revision = 'fecca96b9d'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('citizen_complaints', sa.Column('service_type', sa.String(length=255), nullable=True))
op.add_column('citizen_complaints', sa.Column('source', sa.String(length=255), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('citizen_complaints', 'source')
op.drop_column('citizen_complaints', 'service_type')
### end Alembic commands ###
|
py | b40e88a405d98b614b8dbf8b9b585040f64df113 | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import pytorch_lightning as pl
import torch
from pytorch_lightning import Trainer
from solo.methods import MeanShift
from .utils import DATA_KWARGS, gen_base_kwargs, gen_batch, prepare_dummy_dataloaders
def test_meanshift():
method_kwargs = {
"proj_output_dim": 512,
"proj_hidden_dim": 4096,
"pred_hidden_dim": 4096,
"momentum_classifier": True,
"num_neighbors": 5,
"queue_size": 65536,
}
BASE_KWARGS = gen_base_kwargs(cifar=False, momentum=True, batch_size=8)
kwargs = {**BASE_KWARGS, **DATA_KWARGS, **method_kwargs}
model = MeanShift(**kwargs, disable_knn_eval=True)
# test arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
assert model.add_model_specific_args(parser) is not None
# test parameters
assert model.learnable_params is not None
# test forward
batch, _ = gen_batch(BASE_KWARGS["batch_size"], BASE_KWARGS["num_classes"], "imagenet100")
out = model(batch[1][0])
assert (
"logits" in out
and isinstance(out["logits"], torch.Tensor)
and out["logits"].size() == (BASE_KWARGS["batch_size"], BASE_KWARGS["num_classes"])
)
assert (
"feats" in out
and isinstance(out["feats"], torch.Tensor)
and out["feats"].size() == (BASE_KWARGS["batch_size"], model.features_dim)
)
assert (
"z" in out
and isinstance(out["z"], torch.Tensor)
and out["z"].size() == (BASE_KWARGS["batch_size"], method_kwargs["proj_output_dim"])
)
assert (
"p" in out
and isinstance(out["p"], torch.Tensor)
and out["p"].size() == (BASE_KWARGS["batch_size"], method_kwargs["proj_output_dim"])
)
# imagenet
BASE_KWARGS = gen_base_kwargs(cifar=False, momentum=True, batch_size=8)
kwargs = {**BASE_KWARGS, **DATA_KWARGS, **method_kwargs}
model = MeanShift(**kwargs, disable_knn_eval=True)
args = argparse.Namespace(**kwargs)
trainer = Trainer.from_argparse_args(args, fast_dev_run=True)
train_dl, val_dl = prepare_dummy_dataloaders(
"imagenet100",
num_large_crops=BASE_KWARGS["num_large_crops"],
num_small_crops=0,
num_classes=BASE_KWARGS["num_classes"],
multicrop=False,
batch_size=BASE_KWARGS["batch_size"],
)
trainer.fit(model, train_dl, val_dl)
# cifar
BASE_KWARGS = gen_base_kwargs(cifar=False, momentum=True, batch_size=8)
kwargs = {**BASE_KWARGS, **DATA_KWARGS, **method_kwargs}
model = MeanShift(**kwargs, disable_knn_eval=True)
args = argparse.Namespace(**kwargs)
trainer = Trainer.from_argparse_args(args, fast_dev_run=True)
train_dl, val_dl = prepare_dummy_dataloaders(
"cifar10",
num_large_crops=BASE_KWARGS["num_large_crops"],
num_small_crops=0,
num_classes=BASE_KWARGS["num_classes"],
multicrop=False,
batch_size=BASE_KWARGS["batch_size"],
)
trainer.fit(model, train_dl, val_dl)
|
py | b40e88eb6e11478674f78c384f3ce6bc59da04a8 | import os
import sys
import numpy as np
import pytest
from google.protobuf.json_format import MessageToJson, MessageToDict
from jina import Document, Flow
from jina.clients.request import request_generator
from jina.clients.request.helper import _new_doc_from_data
from jina.enums import DataInputType
from jina.excepts import BadDocType
from jina.proto import jina_pb2
from jina.proto.jina_pb2 import DocumentProto
from jina.types.ndarray.generic import NdArray
@pytest.mark.skipif(
sys.version_info < (3, 8, 0),
reason='somehow this does not work on Github workflow with Py3.7, '
'but Py 3.8 is fine, local Py3.7 is fine',
)
def test_on_bad_iterator():
    # this should not get the server stuck, as request_generator's error is handled on the client side
f = Flow().add()
with f:
f.index([1, 2, 3])
@pytest.mark.parametrize(
'builder',
[
lambda x: x.SerializeToString(),
lambda x: MessageToJson(x),
lambda x: MessageToDict(x),
lambda x: Document(x),
],
)
def test_data_type_builder_doc(builder):
a = DocumentProto()
a.id = 'a236cbb0eda62d58'
d, t = _new_doc_from_data(builder(a), DataInputType.DOCUMENT)
assert d.id == a.id
assert t == DataInputType.DOCUMENT
def test_data_type_builder_doc_bad():
a = DocumentProto()
a.id = 'a236cbb0eda62d58'
with pytest.raises(BadDocType):
_new_doc_from_data(b'BREAKIT!' + a.SerializeToString(), DataInputType.DOCUMENT)
with pytest.raises(BadDocType):
_new_doc_from_data(MessageToJson(a) + '🍔', DataInputType.DOCUMENT)
@pytest.mark.parametrize('input_type', [DataInputType.AUTO, DataInputType.CONTENT])
def test_data_type_builder_auto(input_type):
if 'JINA_ARRAY_QUANT' in os.environ:
print(f'quant is on: {os.environ["JINA_ARRAY_QUANT"]}')
del os.environ['JINA_ARRAY_QUANT']
d, t = _new_doc_from_data('123', input_type)
assert d.text == '123'
assert t == DataInputType.CONTENT
d, t = _new_doc_from_data(b'45678', input_type)
assert t == DataInputType.CONTENT
assert d.buffer == b'45678'
d, t = _new_doc_from_data(b'123', input_type)
assert t == DataInputType.CONTENT
assert d.buffer == b'123'
c = np.random.random([10, 10])
d, t = _new_doc_from_data(c, input_type)
np.testing.assert_equal(d.blob, c)
assert t == DataInputType.CONTENT
def test_request_generate_lines():
def random_lines(num_lines):
for j in range(1, num_lines + 1):
yield f'i\'m dummy doc {j}'
req = request_generator(data=random_lines(100), request_size=100)
request = next(req)
assert len(request.index.docs) == 100
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 100
assert doc.mime_type == 'text/plain'
assert doc.text == f'i\'m dummy doc {index}'
def test_request_generate_lines_from_list():
def random_lines(num_lines):
return [f'i\'m dummy doc {j}' for j in range(1, num_lines + 1)]
req = request_generator(data=random_lines(100), request_size=100)
request = next(req)
assert len(request.index.docs) == 100
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 100
assert doc.mime_type == 'text/plain'
assert doc.text == f'i\'m dummy doc {index}'
def test_request_generate_lines_with_fake_url():
def random_lines(num_lines):
for j in range(1, num_lines + 1):
yield f'https://github.com i\'m dummy doc {j}'
req = request_generator(data=random_lines(100), request_size=100)
request = next(req)
assert len(request.index.docs) == 100
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 100
assert doc.mime_type == 'text/plain'
assert doc.text == f'https://github.com i\'m dummy doc {index}'
def test_request_generate_bytes():
def random_lines(num_lines):
for j in range(1, num_lines + 1):
yield f'i\'m dummy doc {j}'
req = request_generator(data=random_lines(100), request_size=100)
request = next(req)
assert len(request.index.docs) == 100
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 100
assert doc.text == f'i\'m dummy doc {index}'
assert doc.mime_type == 'text/plain'
def test_request_generate_docs():
def random_docs(num_docs):
for j in range(1, num_docs + 1):
doc = jina_pb2.DocumentProto()
doc.text = f'i\'m dummy doc {j}'
doc.offset = 1000
doc.tags['id'] = 1000 # this will be ignored
doc.mime_type = 'mime_type'
yield doc
req = request_generator(data=random_docs(100), request_size=100)
request = next(req)
assert len(request.index.docs) == 100
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 100
assert doc.mime_type == 'mime_type'
assert doc.text == f'i\'m dummy doc {index}'
assert doc.offset == 1000
def test_request_generate_dict():
def random_docs(num_docs):
for j in range(1, num_docs + 1):
doc = {
'text': f'i\'m dummy doc {j}',
'offset': 1000,
'tags': {'id': 1000},
'chunks': [
{'text': f'i\'m chunk 1', 'modality': 'text'},
{'text': f'i\'m chunk 2', 'modality': 'image'},
],
}
yield doc
req = request_generator(data=random_docs(100), request_size=100)
request = next(req)
assert len(request.index.docs) == 100
for index, doc in enumerate(request.index.docs, 1):
assert doc.text == f'i\'m dummy doc {index}'
assert doc.offset == 1000
assert doc.tags['id'] == 1000
assert len(doc.chunks) == 2
assert doc.chunks[0].modality == 'text'
assert doc.chunks[0].text == f'i\'m chunk 1'
assert doc.chunks[1].modality == 'image'
assert doc.chunks[1].text == f'i\'m chunk 2'
def test_request_generate_dict_str():
import json
def random_docs(num_docs):
for j in range(1, num_docs + 1):
doc = {
'text': f'i\'m dummy doc {j}',
'offset': 1000,
'tags': {'id': 1000},
'chunks': [
{'text': f'i\'m chunk 1', 'modality': 'text'},
{'text': f'i\'m chunk 2', 'modality': 'image'},
],
}
yield json.dumps(doc)
req = request_generator(data=random_docs(100), request_size=100)
request = next(req)
assert len(request.index.docs) == 100
for index, doc in enumerate(request.index.docs, 1):
assert doc.text == f'i\'m dummy doc {index}'
assert doc.offset == 1000
assert doc.tags['id'] == 1000
assert len(doc.chunks) == 2
assert doc.chunks[0].modality == 'text'
assert doc.chunks[0].text == f'i\'m chunk 1'
assert doc.chunks[1].modality == 'image'
assert doc.chunks[1].text == f'i\'m chunk 2'
def test_request_generate_numpy_arrays():
input_array = np.random.random([10, 10])
req = request_generator(data=input_array, request_size=5)
request = next(req)
assert len(request.index.docs) == 5
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 5
assert NdArray(doc.blob).value.shape == (10,)
request = next(req)
assert len(request.index.docs) == 5
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 5
assert NdArray(doc.blob).value.shape == (10,)
def test_request_generate_numpy_arrays_iterator():
input_array = np.random.random([10, 10])
def generator():
for array in input_array:
yield array
req = request_generator(data=generator(), request_size=5)
request = next(req)
assert len(request.index.docs) == 5
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 5
assert NdArray(doc.blob).value.shape == (10,)
request = next(req)
assert len(request.index.docs) == 5
for index, doc in enumerate(request.index.docs, 1):
assert doc.length == 5
assert NdArray(doc.blob).value.shape == (10,)
|
py | b40e8b06400e29852887658f7286d466ac60f83e | """Support for Airthings sensors."""
from __future__ import annotations
from airthings import AirthingsDevice
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
SensorEntity,
SensorEntityDescription,
StateType,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CO2,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
PRESSURE_MBAR,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import DOMAIN
SENSORS: dict[str, SensorEntityDescription] = {
"radonShortTermAvg": SensorEntityDescription(
key="radonShortTermAvg",
native_unit_of_measurement="Bq/m³",
name="Radon",
),
"temp": SensorEntityDescription(
key="temp",
device_class=DEVICE_CLASS_TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
name="Temperature",
),
"humidity": SensorEntityDescription(
key="humidity",
device_class=DEVICE_CLASS_HUMIDITY,
native_unit_of_measurement=PERCENTAGE,
name="Humidity",
),
"pressure": SensorEntityDescription(
key="pressure",
device_class=DEVICE_CLASS_PRESSURE,
native_unit_of_measurement=PRESSURE_MBAR,
name="Pressure",
),
"battery": SensorEntityDescription(
key="battery",
device_class=DEVICE_CLASS_BATTERY,
native_unit_of_measurement=PERCENTAGE,
name="Battery",
),
"co2": SensorEntityDescription(
key="co2",
device_class=DEVICE_CLASS_CO2,
native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
name="CO2",
),
"voc": SensorEntityDescription(
key="voc",
native_unit_of_measurement=CONCENTRATION_PARTS_PER_BILLION,
name="VOC",
),
}
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Airthings sensor."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
entities = [
AirthingsHeaterEnergySensor(
coordinator,
airthings_device,
SENSORS[sensor_types],
)
for airthings_device in coordinator.data.values()
for sensor_types in airthings_device.sensor_types
if sensor_types in SENSORS
]
async_add_entities(entities)
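# Clarifying note (added; the sensor list below is only an example, not taken
# from the integration): an entity is created solely for sensor types present
# in both the device's sensor_types and the SENSORS map above, so a device
# reporting ["temp", "voc", "someUnknownType"] would yield two entities.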
class AirthingsHeaterEnergySensor(CoordinatorEntity, SensorEntity):
"""Representation of a Airthings Sensor device."""
_attr_state_class = STATE_CLASS_MEASUREMENT
def __init__(
self,
coordinator: DataUpdateCoordinator,
airthings_device: AirthingsDevice,
entity_description: SensorEntityDescription,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator)
self.entity_description = entity_description
self._attr_name = f"{airthings_device.name} {entity_description.name}"
self._attr_unique_id = f"{airthings_device.device_id}_{entity_description.key}"
self._id = airthings_device.device_id
self._attr_device_info = {
"identifiers": {(DOMAIN, airthings_device.device_id)},
"name": self.name,
"manufacturer": "Airthings",
}
@property
def native_value(self) -> StateType:
"""Return the value reported by the sensor."""
return self.coordinator.data[self._id].sensors[self.entity_description.key]
|
py | b40e8b4f08f0da622d85dea0c56edf856debb4c6 | from phraseModelMgr import *
def main():
pmm = PhraseModelMgr()
test_phrases = [("HI MYNA ME IS ROB BOYER", 1.0),
("HI MY NAME IS ROB BOYER", 0.66),
("HI MY SAME IS ROB BOYER", 0.33),
("I MY NAME IS ROB BOYER", 0.1),
("I MYNA ME IS ROB BOYER", 0.05)]
print('Best when empty: %s, Conf=%.2f' % pmm.get_best_phrase())
for (phrase, conf) in test_phrases:
print(phrase, pmm.add_phrase(phrase, conf))
print('Best: %s, Conf=%.2f' % pmm.get_best_phrase())
pmm.dump_phrases()
pmm.reset()
for (phrase, conf) in test_phrases:
print(phrase, pmm.add_phrase(phrase, conf))
print('Best: %s, Conf=%.2f' % pmm.get_best_phrase())
pmm.dump_phrases()
pmm.reset()
if __name__ == '__main__':
main()
|
py | b40e8cbae37e44b82b2a53d569b74b8827407319 | from unittest.mock import patch
from django.core.exceptions import ValidationError
from django.test import TestCase
from feeds.forms import FeedAdminAddForm, FeedAdminChangeForm
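# Clarifying note (added): Feed.fetch_and_set_feed_details is patched at class
# level so that form.save() in these tests never triggers a real feed fetch;
# the mock also lets each test assert whether a fetch would have happened.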
@patch('feeds.models.Feed.fetch_and_set_feed_details')
class FeedAdminAddFormTest(TestCase):
def test_invalid_when_link_is_missing(self, mock_fetch):
form = FeedAdminAddForm(data={})
self.assertFalse(form.is_valid())
def test_valid_link(self, mock_fetch):
data = {'link': 'https://some-feed.com/'}
form = FeedAdminAddForm(data=data)
self.assertTrue(form.is_valid())
def test_saving_new_feed_calls_fetch(self, mock_fetch):
data = {'link': 'https://some-feed.com/'}
form = FeedAdminAddForm(data=data)
form.save()
self.assertTrue(mock_fetch.called)
@patch('feeds.models.Feed.fetch_and_set_feed_details')
class FeedAdminChangeFormTest(TestCase):
def test_invalid_when_link_is_missing(self, mock_fetch):
form = FeedAdminChangeForm(data={})
self.assertFalse(form.is_valid())
def test_ok_even_if_missing_title_and_description(self, mock_fetch):
data = {'link': 'https://some-feed.com/'}
form = FeedAdminChangeForm(data=data)
self.assertTrue(form.is_valid())
def test_updating_existing_feed(self, mock_fetch):
with patch('feeds.models.Feed.objects.get') as mock_get:
mock_get.return_value = True
data = {
'link': 'https://some-feed.com/',
'title': 'Title',
'description': 'Description'
}
form = FeedAdminAddForm(data=data)
form.save()
self.assertFalse(mock_fetch.called)
|
py | b40e8d90aacb2f2888a251a71e1dbd377b993216 | from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from questionnaire.models import Landing
class Book(models.Model):
title = models.CharField(max_length=1000, default="")
landings = GenericRelation(Landing, related_query_name='items')
def __unicode__(self):
return self.title
__str__ = __unicode__
|
py | b40e8e3d62a10c5cbd67fd7613e804c50190e6a6 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetRouteFilterRuleResult',
'AwaitableGetRouteFilterRuleResult',
'get_route_filter_rule',
]
@pulumi.output_type
class GetRouteFilterRuleResult:
"""
Route Filter Rule Resource
"""
def __init__(__self__, access=None, communities=None, etag=None, id=None, location=None, name=None, provisioning_state=None, route_filter_rule_type=None):
if access and not isinstance(access, str):
raise TypeError("Expected argument 'access' to be a str")
pulumi.set(__self__, "access", access)
if communities and not isinstance(communities, list):
raise TypeError("Expected argument 'communities' to be a list")
pulumi.set(__self__, "communities", communities)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if route_filter_rule_type and not isinstance(route_filter_rule_type, str):
raise TypeError("Expected argument 'route_filter_rule_type' to be a str")
pulumi.set(__self__, "route_filter_rule_type", route_filter_rule_type)
@property
@pulumi.getter
def access(self) -> str:
"""
The access type of the rule.
"""
return pulumi.get(self, "access")
@property
@pulumi.getter
def communities(self) -> Sequence[str]:
"""
The collection for bgp community values to filter on. e.g. ['12076:5010','12076:5020']
"""
return pulumi.get(self, "communities")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource. Possible values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="routeFilterRuleType")
def route_filter_rule_type(self) -> str:
"""
The rule type of the rule. Valid value is: 'Community'
"""
return pulumi.get(self, "route_filter_rule_type")
class AwaitableGetRouteFilterRuleResult(GetRouteFilterRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRouteFilterRuleResult(
access=self.access,
communities=self.communities,
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
route_filter_rule_type=self.route_filter_rule_type)
def get_route_filter_rule(resource_group_name: Optional[str] = None,
route_filter_name: Optional[str] = None,
rule_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteFilterRuleResult:
"""
Route Filter Rule Resource
:param str resource_group_name: The name of the resource group.
:param str route_filter_name: The name of the route filter.
:param str rule_name: The name of the rule.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['routeFilterName'] = route_filter_name
__args__['ruleName'] = rule_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20190201:getRouteFilterRule', __args__, opts=opts, typ=GetRouteFilterRuleResult).value
return AwaitableGetRouteFilterRuleResult(
access=__ret__.access,
communities=__ret__.communities,
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
route_filter_rule_type=__ret__.route_filter_rule_type)
|
py | b40e8ef0ff6799da2eea46d67ac7b72042ca2f96 | import logging
import pickle
import re
from itertools import starmap
from typing import Dict
import ujson
from redis.client import Redis
from scrapy.http.cookies import CookieJar
from scrapy.settings import Settings
from scrapy.spiders import Spider
from scrapy_cookies.storage import BaseStorage
logger = logging.getLogger(__name__)
pattern = re.compile("^COOKIES_REDIS_(?P<kwargs>(?!KWARGS).*)$")
def get_arguments(var):
return {str: {"name": var}, dict: var}[type(var)]
def write_cookiejar(cookiejar):
return {
"cookiejar": pickle.dumps(cookiejar),
"cookies": ujson.dumps(cookiejar._cookies),
}
def read_cookiejar(document):
try:
return pickle.loads(document["cookiejar"])
except (TypeError, KeyError):
return None
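# Illustrative sketch (added; `jar` is assumed to be a scrapy CookieJar): the
# two helpers above are inverses used by RedisStorage below to map a CookieJar
# to a Redis hash and back, e.g.
#   stored = write_cookiejar(jar)   # {'cookiejar': <pickled jar>, 'cookies': <json>}
#   jar2 = read_cookiejar(stored)   # unpickled CookieJar, or None on bad input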
class RedisStorage(BaseStorage):
def __init__(self, settings: Settings):
super(RedisStorage, self).__init__(settings)
self.redis_settings: Dict[str, str] = dict(
starmap(
lambda k, v: (pattern.sub(lambda x: x.group(1).lower(), k), v),
filter(
lambda pair: pattern.match(pair[0]), settings.copy_to_dict().items()
),
)
)
self.r: Redis = None
@classmethod
def from_middleware(cls, middleware):
obj = cls(middleware.settings)
return obj
def open_spider(self, spider: Spider):
self.r: Redis = Redis(**self.redis_settings)
def close_spider(self, spider: Spider):
pass
def __missing__(self, k) -> CookieJar:
cookiejar: CookieJar = CookieJar()
self[k] = cookiejar
return cookiejar
def __delitem__(self, v):
self.r.delete(v)
def __getitem__(self, k) -> CookieJar:
v: CookieJar = read_cookiejar(self.r.hgetall(k))
if isinstance(v, CookieJar):
return v
if hasattr(self.__class__, "__missing__"):
return self.__class__.__missing__(self, k)
raise KeyError(k)
def __iter__(self):
return self.r.scan_iter()
def __len__(self) -> int:
return self.r.dbsize()
def __setitem__(self, k, v: CookieJar):
self.r.hmset(name=k, mapping=write_cookiejar(v))
|
py | b40e8f38d89fff370c9ddffa5891818886e7cb4c | #!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""Main module of django"""
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timevortex.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | b40e8ff8855e6d1994f7d2b6522a0649f9a40252 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Module under test
import bokeh.colors.named as bcn # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
COLORS = (
("aliceblue", 240, 248, 255),
("antiquewhite", 250, 235, 215),
("aqua", 0, 255, 255),
("aquamarine", 127, 255, 212),
("azure", 240, 255, 255),
("beige", 245, 245, 220),
("bisque", 255, 228, 196),
("black", 0, 0, 0),
("blanchedalmond", 255, 235, 205),
("blue", 0, 0, 255),
("blueviolet", 138, 43, 226),
("brown", 165, 42, 42),
("burlywood", 222, 184, 135),
("cadetblue", 95, 158, 160),
("chartreuse", 127, 255, 0),
("chocolate", 210, 105, 30),
("coral", 255, 127, 80),
("cornflowerblue", 100, 149, 237),
("cornsilk", 255, 248, 220),
("crimson", 220, 20, 60),
("cyan", 0, 255, 255),
("darkblue", 0, 0, 139),
("darkcyan", 0, 139, 139),
("darkgoldenrod", 184, 134, 11),
("darkgray", 169, 169, 169),
("darkgreen", 0, 100, 0),
("darkgrey", 169, 169, 169),
("darkkhaki", 189, 183, 107),
("darkmagenta", 139, 0, 139),
("darkolivegreen", 85, 107, 47),
("darkorange", 255, 140, 0),
("darkorchid", 153, 50, 204),
("darkred", 139, 0, 0),
("darksalmon", 233, 150, 122),
("darkseagreen", 143, 188, 143),
("darkslateblue", 72, 61, 139),
("darkslategray", 47, 79, 79),
("darkslategrey", 47, 79, 79),
("darkturquoise", 0, 206, 209),
("darkviolet", 148, 0, 211),
("deeppink", 255, 20, 147),
("deepskyblue", 0, 191, 255),
("dimgray", 105, 105, 105),
("dimgrey", 105, 105, 105),
("dodgerblue", 30, 144, 255),
("firebrick", 178, 34, 34),
("floralwhite", 255, 250, 240),
("forestgreen", 34, 139, 34),
("fuchsia", 255, 0, 255),
("gainsboro", 220, 220, 220),
("ghostwhite", 248, 248, 255),
("gold", 255, 215, 0),
("goldenrod", 218, 165, 32),
("gray", 128, 128, 128),
("green", 0, 128, 0),
("greenyellow", 173, 255, 47),
("grey", 128, 128, 128),
("honeydew", 240, 255, 240),
("hotpink", 255, 105, 180),
("indianred", 205, 92, 92),
("indigo", 75, 0, 130),
("ivory", 255, 255, 240),
("khaki", 240, 230, 140),
("lavender", 230, 230, 250),
("lavenderblush", 255, 240, 245),
("lawngreen", 124, 252, 0),
("lemonchiffon", 255, 250, 205),
("lightblue", 173, 216, 230),
("lightcoral", 240, 128, 128),
("lightcyan", 224, 255, 255),
("lightgoldenrodyellow", 250, 250, 210),
("lightgray", 211, 211, 211),
("lightgreen", 144, 238, 144),
("lightgrey", 211, 211, 211),
("lightpink", 255, 182, 193),
("lightsalmon", 255, 160, 122),
("lightseagreen", 32, 178, 170),
("lightskyblue", 135, 206, 250),
("lightslategray", 119, 136, 153),
("lightslategrey", 119, 136, 153),
("lightsteelblue", 176, 196, 222),
("lightyellow", 255, 255, 224),
("lime", 0, 255, 0),
("limegreen", 50, 205, 50),
("linen", 250, 240, 230),
("magenta", 255, 0, 255),
("maroon", 128, 0, 0),
("mediumaquamarine", 102, 205, 170),
("mediumblue", 0, 0, 205),
("mediumorchid", 186, 85, 211),
("mediumpurple", 147, 112, 219),
("mediumseagreen", 60, 179, 113),
("mediumslateblue", 123, 104, 238),
("mediumspringgreen", 0, 250, 154),
("mediumturquoise", 72, 209, 204),
("mediumvioletred", 199, 21, 133),
("midnightblue", 25, 25, 112),
("mintcream", 245, 255, 250),
("mistyrose", 255, 228, 225),
("moccasin", 255, 228, 181),
("navajowhite", 255, 222, 173),
("navy", 0, 0, 128),
("oldlace", 253, 245, 230),
("olive", 128, 128, 0),
("olivedrab", 107, 142, 35),
("orange", 255, 165, 0),
("orangered", 255, 69, 0),
("orchid", 218, 112, 214),
("palegoldenrod", 238, 232, 170),
("palegreen", 152, 251, 152),
("paleturquoise", 175, 238, 238),
("palevioletred", 219, 112, 147),
("papayawhip", 255, 239, 213),
("peachpuff", 255, 218, 185),
("peru", 205, 133, 63),
("pink", 255, 192, 203),
("plum", 221, 160, 221),
("powderblue", 176, 224, 230),
("purple", 128, 0, 128),
("rebeccapurple", 102, 51, 153),
("red", 255, 0, 0),
("rosybrown", 188, 143, 143),
("royalblue", 65, 105, 225),
("saddlebrown", 139, 69, 19),
("salmon", 250, 128, 114),
("sandybrown", 244, 164, 96),
("seagreen", 46, 139, 87),
("seashell", 255, 245, 238),
("sienna", 160, 82, 45),
("silver", 192, 192, 192),
("skyblue", 135, 206, 235),
("slateblue", 106, 90, 205),
("slategray", 112, 128, 144),
("slategrey", 112, 128, 144),
("snow", 255, 250, 250),
("springgreen", 0, 255, 127),
("steelblue", 70, 130, 180),
("tan", 210, 180, 140),
("teal", 0, 128, 128),
("thistle", 216, 191, 216),
("tomato", 255, 99, 71),
("turquoise", 64, 224, 208),
("violet", 238, 130, 238),
("wheat", 245, 222, 179),
("white", 255, 255, 255),
("whitesmoke", 245, 245, 245),
("yellow", 255, 255, 0),
("yellowgreen", 154, 205, 50),
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test__all__() -> None:
assert len(bcn.__all__) == 148
@pytest.mark.parametrize('name,R,G,B', COLORS)
def test_color(name, R, G, B) -> None:
assert name in bcn.__all__
c = getattr(bcn, name)
assert (c.r, c.g, c.b) == (R, G, B)
assert c.a == 1.0
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
py | b40e9050b050955954bc14eb04f90d8fe7110d2b | # -*- test-case-name: vumi.components.tests.test_tagpool -*-
# -*- coding: utf-8 -*-
"""Tag pool manager."""
import json
import time
from twisted.internet.defer import returnValue
from vumi.errors import VumiError
from vumi.persist.redis_base import Manager
class TagpoolError(VumiError):
"""An error occurred during an operation on a tag pool."""
class TagpoolManager(object):
"""Manage a set of tag pools.
:param redis:
An instance of :class:`vumi.persist.redis_base.Manager`.
"""
encoding = "UTF-8"
def __init__(self, redis):
self.redis = redis
self.manager = redis # TODO: This is a bit of a hack to make the
# calls_manager decorator work
def _encode(self, unicode_text):
return unicode_text.encode(self.encoding)
def _decode(self, binary_data):
return binary_data.decode(self.encoding)
@Manager.calls_manager
def acquire_tag(self, pool, owner=None, reason=None):
local_tag = yield self._acquire_tag(pool, owner, reason)
returnValue((pool, local_tag) if local_tag is not None else None)
@Manager.calls_manager
def acquire_specific_tag(self, tag, owner=None, reason=None):
pool, local_tag = tag
acquired = yield self._acquire_specific_tag(pool, local_tag,
owner, reason)
if acquired:
returnValue(tag)
returnValue(None)
@Manager.calls_manager
def release_tag(self, tag):
pool, local_tag = tag
yield self._release_tag(pool, local_tag)
@Manager.calls_manager
def declare_tags(self, tags):
pools = {}
for pool, local_tag in tags:
pools.setdefault(pool, []).append(local_tag)
for pool, local_tags in pools.items():
yield self._register_pool(pool)
yield self._declare_tags(pool, local_tags)
@Manager.calls_manager
def get_metadata(self, pool):
metadata_key = self._tag_pool_metadata_key(pool)
metadata = yield self.redis.hgetall(metadata_key)
metadata = dict((self._decode(k), json.loads(v))
for k, v in metadata.iteritems())
returnValue(metadata)
@Manager.calls_manager
def set_metadata(self, pool, metadata):
metadata_key = self._tag_pool_metadata_key(pool)
metadata = dict((self._encode(k), json.dumps(v))
for k, v in metadata.iteritems())
yield self._register_pool(pool)
yield self.redis.delete(metadata_key)
yield self.redis.hmset(metadata_key, metadata)
@Manager.calls_manager
def purge_pool(self, pool):
free_list_key, free_set_key, inuse_set_key = self._tag_pool_keys(pool)
metadata_key = self._tag_pool_metadata_key(pool)
in_use_count = yield self.redis.scard(inuse_set_key)
if in_use_count:
raise TagpoolError('%s tags of pool %s still in use.' % (
in_use_count, pool))
else:
yield self.redis.delete(free_set_key)
yield self.redis.delete(free_list_key)
yield self.redis.delete(inuse_set_key)
yield self.redis.delete(metadata_key)
yield self._unregister_pool(pool)
@Manager.calls_manager
def list_pools(self):
pool_list_key = self._pool_list_key()
pools = yield self.redis.smembers(pool_list_key)
returnValue(set(self._decode(pool) for pool in pools))
@Manager.calls_manager
def free_tags(self, pool):
_free_list, free_set_key, _inuse_set = self._tag_pool_keys(pool)
free_tags = yield self.redis.smembers(free_set_key)
returnValue([(pool, self._decode(local_tag))
for local_tag in free_tags])
@Manager.calls_manager
def inuse_tags(self, pool):
_free_list, _free_set, inuse_set_key = self._tag_pool_keys(pool)
inuse_tags = yield self.redis.smembers(inuse_set_key)
returnValue([(pool, self._decode(local_tag))
for local_tag in inuse_tags])
@Manager.calls_manager
def acquired_by(self, tag):
pool, local_tag = tag
local_tag = self._encode(local_tag)
reason_hash_key = self._tag_pool_reason_key(pool)
raw_reason = yield self.redis.hget(reason_hash_key, local_tag)
if raw_reason is not None:
reason = json.loads(raw_reason)
owner = reason.get('owner')
else:
reason, owner = None, None
returnValue((owner, reason))
@Manager.calls_manager
def owned_tags(self, owner):
owner_tag_list_key = self._owner_tag_list_key(owner)
owned_tags = yield self.redis.smembers(owner_tag_list_key)
returnValue([json.loads(raw_tag) for raw_tag in owned_tags])
def _pool_list_key(self):
return ":".join(["tagpools", "list"])
@Manager.calls_manager
def _register_pool(self, pool):
"""Add a pool to list of pools."""
pool = self._encode(pool)
pool_list_key = self._pool_list_key()
yield self.redis.sadd(pool_list_key, pool)
@Manager.calls_manager
def _unregister_pool(self, pool):
"""Remove a pool to list of pools."""
pool = self._encode(pool)
pool_list_key = self._pool_list_key()
yield self.redis.srem(pool_list_key, pool)
def _tag_pool_keys(self, pool):
pool = self._encode(pool)
return tuple(":".join(["tagpools", pool, state])
for state in ("free:list", "free:set", "inuse:set"))
def _tag_pool_metadata_key(self, pool):
pool = self._encode(pool)
return ":".join(["tagpools", pool, "metadata"])
@Manager.calls_manager
def _acquire_tag(self, pool, owner, reason):
free_list_key, free_set_key, inuse_set_key = self._tag_pool_keys(pool)
tag = yield self.redis.lpop(free_list_key)
if tag is not None:
yield self.redis.smove(free_set_key, inuse_set_key, tag)
yield self._store_reason(pool, tag, owner, reason)
returnValue(self._decode(tag) if tag is not None else None)
@Manager.calls_manager
def _acquire_specific_tag(self, pool, local_tag, owner, reason):
local_tag = self._encode(local_tag)
free_list_key, free_set_key, inuse_set_key = self._tag_pool_keys(pool)
moved = yield self.redis.lrem(free_list_key, local_tag, num=1)
if moved:
yield self.redis.smove(free_set_key, inuse_set_key, local_tag)
yield self._store_reason(pool, local_tag, owner, reason)
returnValue(moved)
@Manager.calls_manager
def _release_tag(self, pool, local_tag):
local_tag = self._encode(local_tag)
free_list_key, free_set_key, inuse_set_key = self._tag_pool_keys(pool)
count = yield self.redis.smove(inuse_set_key, free_set_key, local_tag)
if count == 1:
yield self.redis.rpush(free_list_key, local_tag)
yield self._remove_reason(pool, local_tag)
@Manager.calls_manager
def _declare_tags(self, pool, local_tags):
free_list_key, free_set_key, inuse_set_key = self._tag_pool_keys(pool)
new_tags = set(self._encode(tag) for tag in local_tags)
old_tags = yield self.redis.sunion(free_set_key, inuse_set_key)
old_tags = set(old_tags)
for tag in sorted(new_tags - old_tags):
yield self.redis.sadd(free_set_key, tag)
yield self.redis.rpush(free_list_key, tag)
def _tag_pool_reason_key(self, pool):
pool = self._encode(pool)
return ":".join(["tagpools", pool, "reason:hash"])
def _owner_tag_list_key(self, owner):
if owner is None:
return ":".join(["tagpools", "unowned", "tags"])
owner = self._encode(owner)
return ":".join(["tagpools", "owners", owner, "tags"])
@Manager.calls_manager
def _store_reason(self, pool, local_tag, owner, reason):
if reason is None:
reason = {}
reason['timestamp'] = time.time()
reason['owner'] = owner
reason_hash_key = self._tag_pool_reason_key(pool)
yield self.redis.hset(reason_hash_key, local_tag, json.dumps(reason))
owner_tag_list_key = self._owner_tag_list_key(owner)
yield self.redis.sadd(owner_tag_list_key,
json.dumps([pool, self._decode(local_tag)]))
@Manager.calls_manager
def _remove_reason(self, pool, local_tag):
reason_hash_key = self._tag_pool_reason_key(pool)
reason = yield self.redis.hget(reason_hash_key, local_tag)
if reason is not None:
reason = json.loads(reason)
owner = reason.get('owner')
owner_tag_list_key = self._owner_tag_list_key(owner)
self.redis.srem(owner_tag_list_key,
json.dumps([pool, self._decode(local_tag)]))
|
py | b40e905e927b177e92efbbb2c3050aa72d437573 | import matplotlib.pyplot as plt
from datetime import datetime
from poker_deck import Deck, display_cards
class Game:
def __init__(self, players, server):
self.players = players
self.server = server
self.num_games = 0
def start_new_game(self):
self.num_games += 1
# Create new deck of shuffled cards
deck = Deck()
# Draw 2 cards for each player
for player in self.players:
card1 = deck.draw()
card2 = deck.draw()
subject = 'Texas Poker Game # {:d} @ {:s}'.format(self.num_games, datetime.now().strftime('%H:%M:%S'))
self.server.send_message(player, subject, [card1, card2])
# Game starts
common_cards = []
# Preflop
if input('Preflop betting starts from person after big blind.\nGame ends? Y/N ') == 'Y':
return
# Flop
common_cards = [deck.draw(), deck.draw(), deck.draw()]
fig = display_cards(common_cards)
if input('Flop betting starts from small blind.\nGame ends? Y/N ') == 'Y':
return
# Turn
common_cards.append(deck.draw())
plt.close(fig)
fig = display_cards(common_cards)
if input('Turn betting starts from small blind.\nGame ends? Y/N ') == 'Y':
return
# River
common_cards.append(deck.draw())
plt.close(fig)
fig = display_cards(common_cards)
print('River betting starts from small blind.')
# End
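# Hedged usage sketch (added; the player list and mail server below are
# assumptions, neither is defined in this module):
#   game = Game(players=[...], server=some_mail_server)
#   game.start_new_game()   # sends two hole cards per player, then prompts per street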
|
py | b40e910899ce5d0e5b1875ed7e42b66c7976c0bd | """ Tests for gracefully reloading the caches """
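# Clarifying note (added): "graceful reload" means reload_from_storage()
# reconciles the cache with the storage backend in place: adding packages that
# exist only in storage and dropping cache entries whose files are gone, rather
# than wiping and rebuilding the cache. That behaviour is what these tests cover.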
import unittest
from datetime import timedelta
import redis
import transaction
from mock import MagicMock
from pyramid.testing import DummyRequest
from sqlalchemy.exc import OperationalError
from pypicloud.cache import RedisCache, SQLCache
from pypicloud.cache.dynamo import DynamoCache, DynamoPackage, PackageSummary
from pypicloud.cache.sql import SQLPackage
from pypicloud.dateutil import utcnow
from pypicloud.storage import IStorage
from . import make_package
from .db_utils import get_mysql_url, get_postgres_url, get_sqlite_url
# pylint: disable=W0707
class TestDynamoCache(unittest.TestCase):
"""Tests for the DynamoCache"""
dynamo = None
@classmethod
def setUpClass(cls):
super(TestDynamoCache, cls).setUpClass()
host = cls.dynamo.host[cls.dynamo.host.index("//") + 2 :]
host, port = host.split(":")
settings = {
"pypi.storage": "tests.DummyStorage",
"db.region_name": "us-east-1",
"db.host": host,
"db.port": port,
"db.namespace": "test.",
"db.aws_access_key_id": "",
"db.aws_secret_access_key": "",
"db.graceful_reload": True,
}
cls.kwargs = DynamoCache.configure(settings)
cls.engine = cls.kwargs["engine"]
@classmethod
def tearDownClass(cls):
super(TestDynamoCache, cls).tearDownClass()
cls.engine.delete_schema()
def setUp(self):
super(TestDynamoCache, self).setUp()
self.db = DynamoCache(DummyRequest(), **self.kwargs)
self.storage = self.db.storage = MagicMock(spec=IStorage)
def tearDown(self):
super(TestDynamoCache, self).tearDown()
for model in (DynamoPackage, PackageSummary):
self.engine.scan(model).delete()
def _save_pkgs(self, *pkgs):
"""Save a DynamoPackage to the db"""
for pkg in pkgs:
self.engine.save(pkg)
summary = PackageSummary(pkg)
self.engine.save(summary, overwrite=True)
def test_add_missing(self):
"""Add missing packages to cache"""
keys = [make_package(factory=DynamoPackage)]
self.storage.list.return_value = keys
self.db.reload_from_storage()
all_pkgs = self.engine.scan(DynamoPackage).all()
self.assertCountEqual(all_pkgs, keys)
all_summaries = self.engine.scan(PackageSummary).all()
self.assertEqual(len(all_summaries), 1)
def test_remove_extra(self):
"""Remove extra packages from cache"""
keys = [
make_package(factory=DynamoPackage),
make_package("mypkg2", "1.3.4", factory=DynamoPackage),
]
self.db.save(keys[0])
self.db.save(keys[1])
self.storage.list.return_value = keys[:1]
self.db.reload_from_storage()
all_pkgs = self.engine.scan(DynamoPackage).all()
self.assertCountEqual(all_pkgs, keys[:1])
# It should have removed the summary as well
self.assertEqual(self.engine.scan(PackageSummary).count(), 1)
def test_remove_extra_leave_concurrent(self):
"""Removing extra packages will leave packages that were uploaded concurrently"""
pkgs = [
make_package(factory=DynamoPackage),
make_package("mypkg2", factory=DynamoPackage),
]
self.db.save(pkgs[0])
self.db.save(pkgs[1])
# Return first pkgs[1], then pkgs[1:] because the second time we list
# we will have "uploaded" pkgs[2]
return_values = [lambda: pkgs[1:2], lambda: pkgs[1:]]
def list_storage(factory):
"""mocked method for listing storage packages"""
# The first time we list from storage, concurrently "upload"
# pkgs[2]
if len(return_values) == 2:
pkg = make_package("mypkg3", factory=DynamoPackage)
pkgs.append(pkg)
self.db.save(pkg)
return return_values.pop(0)()
self.storage.list.side_effect = list_storage
self.db.reload_from_storage()
all_pkgs = self.engine.scan(DynamoPackage).all()
self.assertCountEqual(all_pkgs, pkgs[1:])
self.assertEqual(self.engine.scan(PackageSummary).count(), 2)
def test_remove_extra_concurrent_deletes(self):
"""Remove packages from cache that were concurrently deleted"""
pkgs = [
make_package(factory=DynamoPackage),
make_package("mypkg2", factory=DynamoPackage),
]
self.db.save(pkgs[0])
# Return first pkgs[:], then pkgs[:1] because the second time we list
# we will have "deleted" pkgs[1]
return_values = [pkgs[:], pkgs[:1]]
self.storage.list.side_effect = lambda _: return_values.pop(0)
self.db.reload_from_storage()
all_pkgs = self.engine.scan(DynamoPackage).all()
self.assertCountEqual(all_pkgs, pkgs[:1])
self.assertEqual(self.engine.scan(PackageSummary).count(), 1)
def test_add_missing_more_recent(self):
"""If we sync a more recent package, update the summary"""
pkgs = [
make_package(
last_modified=utcnow() - timedelta(hours=1),
factory=DynamoPackage,
),
make_package(version="1.5", factory=DynamoPackage),
]
self.db.save(pkgs[0])
self.storage.list.return_value = pkgs
self.db.reload_from_storage()
all_pkgs = self.engine.scan(DynamoPackage).all()
self.assertCountEqual(all_pkgs, pkgs)
summaries = self.db.summary()
self.assertEqual(len(summaries), 1)
summary = summaries[0]
self.assertEqual(summary["last_modified"], pkgs[1].last_modified)
def test_same_package_name_version(self):
"""Storage can have packages with the same name and version (different filename)"""
pkgs = [
make_package(filename="mypkg-1.1-win32.whl", factory=DynamoPackage),
make_package(filename="mypkg-1.1-macosx.whl", factory=DynamoPackage),
make_package(filename="mypkg-1.1-x86_64.whl", factory=DynamoPackage),
]
self.storage.list.return_value = pkgs
self.db.reload_from_storage()
all_pkgs = self.engine.scan(DynamoPackage).all()
self.assertCountEqual(all_pkgs, pkgs)
summaries = self.db.summary()
self.assertEqual(len(summaries), 1)
class TestRedisCache(unittest.TestCase):
"""Tests for the RedisCache"""
@classmethod
def setUpClass(cls):
super(TestRedisCache, cls).setUpClass()
settings = {
"pypi.storage": "tests.DummyStorage",
"db.url": "redis://localhost",
"db.graceful_reload": True,
}
cls.kwargs = RedisCache.configure(settings)
cls.redis = cls.kwargs["db"]
try:
cls.redis.flushdb()
except redis.exceptions.ConnectionError:
msg = "Redis not found on port 6379"
setattr(cls, "setUp", lambda cls: unittest.TestCase.skipTest(cls, msg))
@classmethod
def tearDownClass(cls):
super(TestRedisCache, cls).tearDownClass()
def setUp(self):
super(TestRedisCache, self).setUp()
self.db = RedisCache(DummyRequest(), **self.kwargs)
self.storage = self.db.storage = MagicMock(spec=IStorage)
def tearDown(self):
super(TestRedisCache, self).tearDown()
self.redis.flushdb()
def _save_pkgs(self, *pkgs):
"""Save packages to the db"""
pipe = self.redis.pipeline()
for pkg in pkgs:
self.db.save(pkg, pipe)
pipe.execute()
def test_add_missing(self):
"""Add missing packages to cache"""
keys = [make_package()]
self.storage.list.return_value = keys
self.db.reload_from_storage()
all_pkgs = self.db._load_all_packages()
self.assertCountEqual(all_pkgs, keys)
self.assertEqual(len(self.db.summary()), 1)
def test_remove_extra(self):
"""Remove extra packages from cache"""
keys = [make_package(), make_package("mypkg2", "1.3.4")]
self.db.save(keys[0])
self.db.save(keys[1])
self.storage.list.return_value = keys[:1]
self.db.reload_from_storage()
all_pkgs = self.db._load_all_packages()
self.assertCountEqual(all_pkgs, keys[:1])
# It should have removed the summary as well
self.assertEqual(len(self.db.summary()), 1)
def test_remove_extra_leave_concurrent(self):
"""Removing extra packages will leave packages that were uploaded concurrently"""
pkgs = [make_package(), make_package("mypkg2")]
self.db.save(pkgs[0])
self.db.save(pkgs[1])
# Return first pkgs[1], then pkgs[1:] because the second time we list
# we will have "uploaded" pkgs[2]
return_values = [lambda: pkgs[1:2], lambda: pkgs[1:]]
def list_storage(factory):
"""mocked method for listing storage packages"""
# The first time we list from storage, concurrently "upload"
# pkgs[2]
if len(return_values) == 2:
pkg = make_package("mypkg3")
pkgs.append(pkg)
self.db.save(pkg)
return return_values.pop(0)()
self.storage.list.side_effect = list_storage
self.db.reload_from_storage()
all_pkgs = self.db._load_all_packages()
self.assertCountEqual(all_pkgs, pkgs[1:])
self.assertEqual(len(self.db.summary()), 2)
def test_remove_extra_concurrent_deletes(self):
"""Remove packages from cache that were concurrently deleted"""
pkgs = [make_package(), make_package("mypkg2")]
self.db.save(pkgs[0])
# Return first pkgs[:], then pkgs[:1] because the second time we list
# we will have "deleted" pkgs[1]
return_values = [pkgs[:], pkgs[:1]]
self.storage.list.side_effect = lambda _: return_values.pop(0)
self.db.reload_from_storage()
all_pkgs = self.db._load_all_packages()
self.assertCountEqual(all_pkgs, pkgs[:1])
self.assertEqual(len(self.db.summary()), 1)
def test_add_missing_more_recent(self):
"""If we sync a more recent package, update the summary"""
pkgs = [
make_package(last_modified=utcnow() - timedelta(hours=1)),
make_package(version="1.5"),
]
self.db.save(pkgs[0])
self.storage.list.return_value = pkgs
self.db.reload_from_storage()
all_pkgs = self.db._load_all_packages()
self.assertCountEqual(all_pkgs, pkgs)
summaries = self.db.summary()
self.assertEqual(len(summaries), 1)
summary = summaries[0]
self.assertEqual(summary["last_modified"].hour, pkgs[1].last_modified.hour)
def test_same_package_name_version(self):
"""Storage can have packages with the same name and version (different filename)"""
pkgs = [
make_package(filename="mypkg-1.1-win32.whl"),
make_package(filename="mypkg-1.1-macosx.whl"),
make_package(filename="mypkg-1.1-x86_64.whl"),
]
self.storage.list.return_value = pkgs
self.db.reload_from_storage()
all_pkgs = self.db._load_all_packages()
self.assertCountEqual(all_pkgs, pkgs)
summaries = self.db.summary()
self.assertEqual(len(summaries), 1)
class TestSQLiteCache(unittest.TestCase):
"""Tests for the SQLCache"""
@classmethod
def get_db_url(cls) -> str:
return get_sqlite_url()
@classmethod
def setUpClass(cls):
super(TestSQLiteCache, cls).setUpClass()
db_url = cls.get_db_url()
settings = {
"pypi.storage": "tests.DummyStorage",
"db.url": db_url,
"db.graceful_reload": True,
}
try:
cls.kwargs = SQLCache.configure(settings)
except OperationalError:
raise unittest.SkipTest(f"Couldn't connect to database {db_url}")
def setUp(self):
super(TestSQLiteCache, self).setUp()
transaction.begin()
self.request = DummyRequest()
self.request.tm = transaction.manager
self.db = SQLCache(self.request, **self.kwargs)
self.sql = self.db.db
self.storage = self.db.storage = MagicMock(spec=IStorage)
def tearDown(self):
super(TestSQLiteCache, self).tearDown()
transaction.abort()
self.sql.query(SQLPackage).delete()
transaction.commit()
self.request._process_finished_callbacks()
def _make_package(self, *args, **kwargs):
"""Wrapper around make_package"""
# Some SQL dbs round the timestamps (looking at you, MySQL >:|),
# which is a problem if they round UP to the future, as our
# calculations depend on the timestamps being monotonically increasing.
now = utcnow() - timedelta(seconds=1)
kwargs.setdefault("last_modified", now)
kwargs.setdefault("factory", SQLPackage)
return make_package(*args, **kwargs)
def test_add_missing(self):
"""Add missing packages to cache"""
keys = [self._make_package()]
self.storage.list.return_value = keys
self.db.reload_from_storage()
all_pkgs = self.sql.query(SQLPackage).all()
self.assertCountEqual(all_pkgs, keys)
def test_remove_extra(self):
"""Remove extra packages from cache"""
keys = [self._make_package(), self._make_package("mypkg2", "1.3.4")]
self.db.save(keys[0])
self.db.save(keys[1])
self.storage.list.return_value = keys[:1]
self.db.reload_from_storage()
all_pkgs = self.sql.query(SQLPackage).all()
self.assertCountEqual(all_pkgs, keys[:1])
def test_remove_extra_leave_concurrent(self):
"""Removing extra packages will leave packages that were uploaded concurrently"""
pkgs = [self._make_package(), self._make_package("mypkg2")]
self.db.save(pkgs[0])
self.db.save(pkgs[1])
# Return first pkgs[1], then pkgs[1:] because the second time we list
# we will have "uploaded" pkgs[2]
return_values = [lambda: pkgs[1:2], lambda: pkgs[1:]]
def list_storage(factory):
"""mocked method for listing storage packages"""
# The first time we list from storage, concurrently "upload"
# pkgs[2]
if len(return_values) == 2:
nowish = utcnow() + timedelta(seconds=1)
pkg = self._make_package("mypkg3", last_modified=nowish)
pkgs.append(pkg)
self.db.save(pkg)
return return_values.pop(0)()
self.storage.list.side_effect = list_storage
self.db.reload_from_storage()
all_pkgs = self.sql.query(SQLPackage).all()
self.assertCountEqual(all_pkgs, pkgs[1:])
def test_remove_extra_concurrent_deletes(self):
"""Remove packages from cache that were concurrently deleted"""
pkgs = [self._make_package(), self._make_package("mypkg2")]
self.db.save(pkgs[0])
# Return first pkgs[:], then pkgs[:1] because the second time we list
# we will have "deleted" pkgs[1]
return_values = [pkgs[:], pkgs[:1]]
self.storage.list.side_effect = lambda _: return_values.pop(0)
self.db.reload_from_storage()
all_pkgs = self.sql.query(SQLPackage).all()
self.assertCountEqual(all_pkgs, pkgs[:1])
def test_add_missing_more_recent(self):
"""If we sync a more recent package, update the summary"""
pkgs = [
self._make_package(last_modified=utcnow() - timedelta(hours=1)),
self._make_package(version="1.5"),
]
self.db.save(pkgs[0])
self.storage.list.return_value = pkgs
self.db.reload_from_storage()
all_pkgs = self.sql.query(SQLPackage).all()
self.assertCountEqual(all_pkgs, pkgs)
def test_same_package_name_version(self):
"""Storage can have packages with the same name and version (different filename)"""
pkgs = [
self._make_package(filename="mypkg-1.1-win32.whl"),
self._make_package(filename="mypkg-1.1-macosx.whl"),
self._make_package(filename="mypkg-1.1-x86_64.whl"),
]
self.storage.list.return_value = pkgs
self.db.reload_from_storage()
all_pkgs = self.sql.query(SQLPackage).all()
self.assertCountEqual(all_pkgs, pkgs)
class TestMySQLCache(TestSQLiteCache):
"""Test the SQLAlchemy cache on a MySQL DB"""
@classmethod
def get_db_url(cls) -> str:
return get_mysql_url()
class TestPostgresCache(TestSQLiteCache):
"""Test the SQLAlchemy cache on a Postgres DB"""
@classmethod
def get_db_url(cls) -> str:
return get_postgres_url()
|
py | b40e912af6fe61e8567301731ec0ecff9a70d083 | from django import forms
from django.conf import settings
from material import Layout, Row, Column, Span4
from web.grant_management.models import GrantManagementProcess
VERIFY_CHOICES = ((True, 'Accept'), (False, 'Reject'))
VERIFY_LABEL = 'Do you accept or reject the applicant’s answer?'
SCORE_LABEL = 'How would you assess the applicant’s response?'
RATIONALE_LABEL = 'Rationale'
RATIONALE_PLACEHOLDER = 'Enter your rationale here'
def str_to_bool(value):
if str(value).lower() in ['true', 't', '1']:
return True
elif str(value).lower() in ['false', 'f', '0']:
return False
raise ValueError(f'Cannot convert {value} to boolean')
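# Illustrative examples (added; the literal values are only for demonstration):
#   str_to_bool('True') -> True
#   str_to_bool('f')    -> False
#   str_to_bool('maybe') raises ValueError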
class VerifyPreviousApplicationsForm(forms.ModelForm):
class Meta:
model = GrantManagementProcess
fields = ['previous_applications_is_verified']
previous_applications_is_verified = forms.TypedChoiceField(
label=VERIFY_LABEL,
coerce=str_to_bool,
choices=VERIFY_CHOICES,
widget=forms.RadioSelect
)
class VerifyEventCommitmentForm(forms.ModelForm):
class Meta:
model = GrantManagementProcess
fields = ['event_commitment_is_verified']
event_commitment_is_verified = forms.TypedChoiceField(
label=VERIFY_LABEL,
coerce=str_to_bool,
choices=VERIFY_CHOICES,
widget=forms.RadioSelect
)
class VerifyBusinessEntityForm(forms.ModelForm):
class Meta:
model = GrantManagementProcess
fields = ['business_entity_is_verified']
business_entity_is_verified = forms.TypedChoiceField(
label=VERIFY_LABEL,
coerce=str_to_bool,
choices=VERIFY_CHOICES,
widget=forms.RadioSelect
)
class VerifyStateAidForm(forms.ModelForm):
class Meta:
model = GrantManagementProcess
fields = ['state_aid_is_verified']
state_aid_is_verified = forms.TypedChoiceField(
label=VERIFY_LABEL,
coerce=str_to_bool,
choices=VERIFY_CHOICES,
widget=forms.RadioSelect
)
class EventBookingDocumentRenewForm(forms.ModelForm):
class Meta:
model = GrantManagementProcess
fields = ['event_booking_document_renew_decision']
event_booking_document_renew_decision = forms.CharField(
widget=forms.RadioSelect(choices=GrantManagementProcess.Decision.choices)
)
class ProductsAndServicesForm(forms.ModelForm):
layout = Layout(
Row('products_and_services_score'),
Row(Span4('products_and_services_justification'), Column())
)
class Meta:
model = GrantManagementProcess
fields = ['products_and_services_score', 'products_and_services_justification']
products_and_services_score = forms.IntegerField(
label=SCORE_LABEL,
widget=forms.RadioSelect(choices=GrantManagementProcess.ScoreChoices.choices)
)
products_and_services_justification = forms.CharField(
label=RATIONALE_LABEL,
widget=forms.Textarea(
attrs={'placeholder': RATIONALE_PLACEHOLDER}
)
)
class ProductsAndServicesCompetitorsForm(forms.ModelForm):
layout = Layout(
Row('products_and_services_competitors_score'),
Row(Span4('products_and_services_competitors_justification'), Column())
)
class Meta:
model = GrantManagementProcess
fields = [
'products_and_services_competitors_score',
'products_and_services_competitors_justification'
]
products_and_services_competitors_score = forms.IntegerField(
label='Score',
widget=forms.RadioSelect(choices=GrantManagementProcess.ScoreChoices.choices)
)
products_and_services_competitors_justification = forms.CharField(
label=RATIONALE_LABEL,
widget=forms.Textarea(
attrs={'placeholder': RATIONALE_PLACEHOLDER}
)
)
class ExportStrategyForm(forms.ModelForm):
layout = Layout(
Row('export_strategy_score'),
Row(Span4('export_strategy_justification'), Column())
)
class Meta:
model = GrantManagementProcess
fields = ['export_strategy_score', 'export_strategy_justification']
export_strategy_score = forms.IntegerField(
label='Score',
widget=forms.RadioSelect(choices=GrantManagementProcess.ScoreChoices.choices)
)
export_strategy_justification = forms.CharField(
label=RATIONALE_LABEL,
widget=forms.Textarea(
attrs={'placeholder': RATIONALE_PLACEHOLDER}
)
)
class EventIsAppropriateForm(forms.ModelForm):
class Meta:
model = GrantManagementProcess
fields = ['event_is_appropriate']
event_is_appropriate = forms.TypedChoiceField(
label='Is the trade show appropriate?',
widget=forms.RadioSelect,
coerce=str_to_bool,
choices=settings.BOOLEAN_CHOICES
)
class DecisionForm(forms.ModelForm):
class Meta:
model = GrantManagementProcess
fields = ['decision']
decision = forms.CharField(
widget=forms.RadioSelect(choices=GrantManagementProcess.Decision.choices)
)
|
py | b40e9142bdbd32e9889261269aa913108d359ba9 | import os
from time import strftime as now
FORMAT = '%Y-%m-%d %H:%M:%S'
if __name__ == '__main__':
print(now(FORMAT), os.path.dirname(__file__))
s = input()
while s:
with open('log.txt', 'a+') as f:
f.write(now(FORMAT) + ' ' + s + '\n')
s = input() |
py | b40e926b85f37805d1ea660315d858426f8545ee | """
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
import os
def _main():
annotation_path = os.path.join(os.getcwd(), '../helmet_data/yolo_annotations')
log_dir = 'logs/000/'
classes_path = os.path.join(os.getcwd(), '../helmet_data/helmet_classes.txt')
anchors_path = 'model_data/yolo_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)
input_shape = (416,416) # multiple of 32, hw
is_tiny_version = len(anchors)==6 # default setting
if is_tiny_version:
model = create_tiny_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
else:
model = create_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/yolo.h5') # make sure you know what you freeze
logging = TensorBoard(log_dir=log_dir)
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
# Train with frozen layers first, to get a stable loss.
# Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
if True:
model.compile(optimizer=Adam(lr=1e-3), loss={
# use custom yolo_loss Lambda layer.
'yolo_loss': lambda y_true, y_pred: y_pred})
batch_size = 4
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=50,
initial_epoch=0,
callbacks=[logging, checkpoint])
model.save_weights(log_dir + 'trained_weights_stage_1.h5')
# Unfreeze and continue training, to fine-tune.
# Train longer if the result is not good.
if True:
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
print('Unfreeze all of the layers.')
batch_size = 4 # note that more GPU memory is required after unfreezing the body
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=100,
initial_epoch=50,
callbacks=[logging, checkpoint, reduce_lr, early_stopping])
model.save_weights(log_dir + 'trained_weights_final.h5')
# Further training if needed.
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
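# Illustrative note (added; these are the stock YOLOv3 anchors and may differ
# for a custom dataset): yolo_anchors.txt holds a single comma-separated line
# such as "10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326",
# which get_anchors reshapes into a (9, 2) array of (width, height) pairs.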
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/tiny_yolo_weights.h5'):
'''create the training model, for Tiny YOLOv3'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze the darknet body or freeze all but 2 output layers.
num = (20, len(model_body.layers)-2)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=True)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
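        # The real YOLO loss is computed inside the 'yolo_loss' Lambda layer
        # (the model is compiled with an identity lambda as its loss), so the
        # generator yields dummy zeros as the Keras target for each batch.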
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
_main()
|
py | b40e927488272c69bd2511acf200cda021dd3a2e | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
:copyright: © 2012-2013 by the SaltStack Team, see AUTHORS for more details
:license: Apache 2.0, see LICENSE for more details.
bonneville.utils.parsers
    ~~~~~~~~~~~~~~~~~~~~~~~~
    This is where all the black magic happens on all of salt's CLI tools.
'''
# Import python libs
import os
import sys
import getpass
import logging
import optparse
import traceback
from functools import partial
# Import bonneville libs
import bonneville.config as config
import bonneville.loader as loader
import bonneville.utils as utils
import bonneville.version as version
import bonneville.syspaths as syspaths
import bonneville.log.setup as log
import bonneville.utils
from bonneville.utils.validate.path import is_writeable
def _sorted(mixins_or_funcs):
return sorted(
mixins_or_funcs, key=lambda mf: getattr(mf, '_mixin_prio_', 1000)
)
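# Illustrative example (not part of the original module): with the priorities
# defined below -- e.g. ConfigDirMixIn (-10), LogLevelMixIn (10) and
# MergeConfigMixIn (sys.maxsize) -- _sorted() orders the collected hook functions
# so that config-dir handling runs first and the CLI/config merge runs last;
# anything without an explicit _mixin_prio_ falls back to 1000.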
class MixInMeta(type):
# This attribute here won't actually do anything. But, if you need to
# specify an order or a dependency within the mix-ins, please define the
# attribute on your own MixIn
_mixin_prio_ = 0
def __new__(mcs, name, bases, attrs):
instance = super(MixInMeta, mcs).__new__(mcs, name, bases, attrs)
if not hasattr(instance, '_mixin_setup'):
raise RuntimeError(
'Don\'t subclass {0} in {1} if you\'re not going to use it '
'as a salt parser mix-in.'.format(mcs.__name__, name)
)
return instance
class OptionParserMeta(MixInMeta):
def __new__(mcs, name, bases, attrs):
instance = super(OptionParserMeta, mcs).__new__(mcs,
name,
bases,
attrs)
if not hasattr(instance, '_mixin_setup_funcs'):
instance._mixin_setup_funcs = []
if not hasattr(instance, '_mixin_process_funcs'):
instance._mixin_process_funcs = []
if not hasattr(instance, '_mixin_after_parsed_funcs'):
instance._mixin_after_parsed_funcs = []
for base in _sorted(bases + (instance,)):
func = getattr(base, '_mixin_setup', None)
if func is not None and func not in instance._mixin_setup_funcs:
instance._mixin_setup_funcs.append(func)
func = getattr(base, '_mixin_after_parsed', None)
if func is not None and func not in \
instance._mixin_after_parsed_funcs:
instance._mixin_after_parsed_funcs.append(func)
# Mark process_<opt> functions with the base priority for sorting
for func in dir(base):
if not func.startswith('process_'):
continue
func = getattr(base, func)
if getattr(func, '_mixin_prio_', None) is not None:
# Function already has the attribute set, don't override it
continue
func._mixin_prio_ = getattr(base, '_mixin_prio_', 1000)
return instance
class OptionParser(optparse.OptionParser):
VERSION = version.__version__
usage = '%prog'
epilog = ('You can find additional help about %prog issuing "man %prog" '
'or on http://docs.saltstack.org')
description = None
# Private attributes
_mixin_prio_ = 100
def __init__(self, *args, **kwargs):
kwargs.setdefault('version', '%prog {0}'.format(self.VERSION))
kwargs.setdefault('usage', self.usage)
if self.description:
kwargs.setdefault('description', self.description)
if self.epilog:
kwargs.setdefault('epilog', self.epilog)
optparse.OptionParser.__init__(self, *args, **kwargs)
if self.epilog and '%prog' in self.epilog:
self.epilog = self.epilog.replace('%prog', self.get_prog_name())
def parse_args(self, args=None, values=None):
options, args = optparse.OptionParser.parse_args(self, args, values)
if options.versions_report:
self.print_versions_report()
self.options, self.args = options, args
# Let's get some proper sys.stderr logging as soon as possible!!!
# This logging handler will be removed once the proper console or
# logfile logging is setup.
log.setup_temp_logger(
getattr(self.options, 'log_level', 'error')
)
# Gather and run the process_<option> functions in the proper order
process_option_funcs = []
for option_key in options.__dict__.keys():
process_option_func = getattr(
self, 'process_{0}'.format(option_key), None
)
if process_option_func is not None:
process_option_funcs.append(process_option_func)
for process_option_func in _sorted(process_option_funcs):
try:
process_option_func()
except Exception as err:
logging.getLogger(__name__).exception(err)
self.error(
'Error while processing {0}: {1}'.format(
process_option_func, traceback.format_exc(err)
)
)
# Run the functions on self._mixin_after_parsed_funcs
for mixin_after_parsed_func in self._mixin_after_parsed_funcs:
try:
mixin_after_parsed_func(self)
except Exception as err:
logging.getLogger(__name__).exception(err)
self.error(
'Error while processing {0}: {1}'.format(
mixin_after_parsed_func, traceback.format_exc(err)
)
)
if self.config.get('conf_file', None) is not None:
logging.getLogger(__name__).debug(
'Configuration file path: {0}'.format(
self.config['conf_file']
)
)
# Retain the standard behaviour of optparse to return options and args
return options, args
def _populate_option_list(self, option_list, add_help=True):
optparse.OptionParser._populate_option_list(
self, option_list, add_help=add_help
)
for mixin_setup_func in self._mixin_setup_funcs:
mixin_setup_func(self)
def _add_version_option(self):
optparse.OptionParser._add_version_option(self)
self.add_option(
'--versions-report', action='store_true',
help='show program\'s dependencies version number and exit'
)
def print_versions_report(self, file=sys.stdout):
        print('\n'.join(version.versions_report()), file=file)
self.exit()
class MergeConfigMixIn(object):
'''
This mix-in will simply merge the CLI-passed options, by overriding the
configuration file loaded settings.
This mix-in should run last.
'''
__metaclass__ = MixInMeta
_mixin_prio_ = sys.maxsize
def _mixin_setup(self):
if not hasattr(self, 'setup_config') and not hasattr(self, 'config'):
# No configuration was loaded on this parser.
# There's nothing to do here.
return
# Add an additional function that will merge the shell options with
# the config options and if needed override them
self._mixin_after_parsed_funcs.append(self.__merge_config_with_cli)
def __merge_config_with_cli(self, *args):
# Merge parser options
for option in self.option_list:
if option.dest is None:
# --version does not have dest attribute set for example.
# All options defined by us, even if not explicitly(by kwarg),
# will have the dest attribute set
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if option.dest not in self.config:
# There's no value in the configuration file
if value is not None:
# There's an actual value, add it to the config
self.config[option.dest] = value
elif value is not None and value != default:
# Only set the value in the config file IF it's not the default
# value, this allows to tweak settings on the configuration
# files bypassing the shell option flags
self.config[option.dest] = value
elif option.dest in self.config:
# Let's update the option value with the one from the
# configuration file. This allows the parsers to make use of
# the updated value by using self.options.<option>
setattr(self.options, option.dest, self.config[option.dest])
# Merge parser group options if any
for group in self.option_groups:
for option in group.option_list:
if option.dest is None:
continue
# Get the passed value from shell. If empty get the default one
default = self.defaults.get(option.dest)
value = getattr(self.options, option.dest, default)
if option.dest not in self.config:
# There's no value in the configuration file
if value is not None:
# There's an actual value, add it to the config
self.config[option.dest] = value
elif value is not None and value != default:
# Only set the value in the config file IF it's not the
# default value, this allows to tweak settings on the
# configuration files bypassing the shell option flags
self.config[option.dest] = value
elif option.dest in self.config:
# Let's update the option value with the one from the
# configuration file. This allows the parsers to make use
# of the updated value by using self.options.<option>
setattr(self.options,
option.dest,
self.config[option.dest])
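    # Worked example (illustrative, not part of the original module): for an
    # option such as --timeout with parser default 5,
    #   - passing "-t 10" on the CLI wins: self.config['timeout'] becomes 10;
    #   - otherwise a "timeout: 20" entry in the config file wins and
    #     self.options.timeout is updated to 20;
    #   - if neither is given, the parser default (5) ends up in self.config.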
class ConfigDirMixIn(object):
__metaclass__ = MixInMeta
_mixin_prio_ = -10
_config_filename_ = None
def _mixin_setup(self):
self.add_option(
'-c', '--config-dir', default=syspaths.CONFIG_DIR,
help=('Pass in an alternative configuration directory. Default: '
'%default')
)
def process_config_dir(self):
if not os.path.isdir(self.options.config_dir):
# No logging is configured yet
sys.stderr.write(
'WARNING: {0!r} directory does not exist.\n'.format(
self.options.config_dir
)
)
# Make sure we have an absolute path
self.options.config_dir = os.path.abspath(self.options.config_dir)
if hasattr(self, 'setup_config'):
self.config = self.setup_config()
def get_config_file_path(self, configfile=None):
if configfile is None:
configfile = self._config_filename_
return os.path.join(self.options.config_dir, configfile)
class LogLevelMixIn(object):
__metaclass__ = MixInMeta
_mixin_prio_ = 10
_default_logging_level_ = 'warning'
_default_logging_logfile_ = None
_logfile_config_setting_name_ = 'log_file'
_loglevel_config_setting_name_ = 'log_level'
_logfile_loglevel_config_setting_name_ = 'log_level_logfile'
_skip_console_logging_config_ = False
def _mixin_setup(self):
if self._default_logging_logfile_ is None:
# This is an attribute available for programmers, so, raise a
# RuntimeError to let them know about the proper usage.
raise RuntimeError(
'Please set {0}._default_logging_logfile_'.format(
self.__class__.__name__
)
)
group = self.logging_options_group = optparse.OptionGroup(
self, 'Logging Options',
'Logging options which override any settings defined on the '
'configuration files.'
)
self.add_option_group(group)
if not getattr(self, '_skip_console_logging_config_', False):
group.add_option(
'-l', '--log-level',
choices=list(log.LOG_LEVELS),
help='Console logging log level. One of {0}. '
'Default: \'{1}\'.'.format(
', '.join([repr(l) for l in log.SORTED_LEVEL_NAMES]),
getattr(self, '_default_logging_level_', 'warning')
)
)
group.add_option(
'--log-file',
default=None,
help='Log file path. Default: {0}.'.format(
self._default_logging_logfile_
)
)
group.add_option(
'--log-file-level',
dest=self._logfile_loglevel_config_setting_name_,
choices=list(log.LOG_LEVELS),
help='Logfile logging log level. One of {0}. '
'Default: \'{1}\'.'.format(
', '.join([repr(l) for l in log.SORTED_LEVEL_NAMES]),
getattr(self, '_default_logging_level_', 'warning')
)
)
def process_log_level(self):
if not self.options.log_level:
cli_log_level = 'cli_{0}_log_level'.format(
self.get_prog_name().replace('-', '_')
)
if self.config.get(cli_log_level, None) is not None:
self.options.log_level = self.config.get(cli_log_level)
elif self.config.get(self._loglevel_config_setting_name_, None):
self.options.log_level = self.config.get(
self._loglevel_config_setting_name_
)
else:
self.options.log_level = self._default_logging_level_
# Setup extended logging right before the last step
self._mixin_after_parsed_funcs.append(self.__setup_extended_logging)
# Setup the console as the last _mixin_after_parsed_func to run
self._mixin_after_parsed_funcs.append(self.__setup_console_logger)
def process_log_file(self):
if not self.options.log_file:
cli_setting_name = 'cli_{0}_log_file'.format(
self.get_prog_name().replace('-', '_')
)
if self.config.get(cli_setting_name, None) is not None:
# There's a configuration setting defining this log file path,
# ie, `key_log_file` if the cli tool is `salt-key`
self.options.log_file = self.config.get(cli_setting_name)
elif self.config.get(self._logfile_config_setting_name_, None):
# Is the regular log file setting set?
self.options.log_file = self.config.get(
self._logfile_config_setting_name_
)
else:
# Nothing is set on the configuration? Let's use the cli tool
# defined default
self.options.log_file = self._default_logging_logfile_
def process_log_file_level(self):
if not self.options.log_file_level:
cli_setting_name = 'cli_{0}_log_file_level'.format(
self.get_prog_name().replace('-', '_')
)
if self.config.get(cli_setting_name, None) is not None:
# There's a configuration setting defining this log file
# logging level, ie, `key_log_file_level` if the cli tool is
# `salt-key`
self.options.log_file_level = self.config.get(cli_setting_name)
elif self.config.get(
self._logfile_loglevel_config_setting_name_, None):
# Is the regular log file level setting set?
self.options.log_file_level = self.config.get(
self._logfile_loglevel_config_setting_name_
)
else:
# Nothing is set on the configuration? Let's use the cli tool
# defined default
                self.options.log_file_level = self._default_logging_level_
def setup_logfile_logger(self):
if self._logfile_loglevel_config_setting_name_ in self.config and not \
self.config.get(self._logfile_loglevel_config_setting_name_):
# Remove it from config so it inherits from log_level
self.config.pop(self._logfile_loglevel_config_setting_name_)
loglevel = self.config.get(
self._logfile_loglevel_config_setting_name_,
self.config.get(
# From the config setting
self._loglevel_config_setting_name_,
# From the console setting
self.config['log_level']
)
)
cli_log_path = 'cli_{0}_log_file'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_path in self.config and not self.config.get(cli_log_path):
# Remove it from config so it inherits from log_level_logfile
self.config.pop(cli_log_path)
if self._logfile_config_setting_name_ in self.config and not \
self.config.get(self._logfile_config_setting_name_):
# Remove it from config so it inherits from log_file
self.config.pop(self._logfile_config_setting_name_)
logfile = self.config.get(
# First from the config cli setting
cli_log_path,
self.config.get(
# From the config setting
self._logfile_config_setting_name_,
# From the default setting
self._default_logging_logfile_
)
)
cli_log_file_fmt = 'cli_{0}_log_file_fmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_file_fmt in self.config and not \
self.config.get(cli_log_file_fmt):
# Remove it from config so it inherits from log_fmt_logfile
self.config.pop(cli_log_file_fmt)
if self.config.get('log_fmt_logfile', None) is None:
# Remove it from config so it inherits from log_fmt_console
self.config.pop('log_fmt_logfile', None)
log_file_fmt = self.config.get(
cli_log_file_fmt,
self.config.get(
'cli_{0}_log_fmt'.format(
self.get_prog_name().replace('-', '_')
),
self.config.get(
'log_fmt_logfile',
self.config.get(
'log_fmt_console',
self.config.get(
'log_fmt',
config._DFLT_LOG_FMT_CONSOLE
)
)
)
)
)
cli_log_file_datefmt = 'cli_{0}_log_file_datefmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_file_datefmt in self.config and not \
self.config.get(cli_log_file_datefmt):
# Remove it from config so it inherits from log_datefmt_logfile
self.config.pop(cli_log_file_datefmt)
if self.config.get('log_datefmt_logfile', None) is None:
# Remove it from config so it inherits from log_datefmt_console
self.config.pop('log_datefmt_logfile', None)
if self.config.get('log_datefmt_console', None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop('log_datefmt_console', None)
log_file_datefmt = self.config.get(
cli_log_file_datefmt,
self.config.get(
'cli_{0}_log_datefmt'.format(
self.get_prog_name().replace('-', '_')
),
self.config.get(
'log_datefmt_logfile',
self.config.get(
'log_datefmt_console',
self.config.get(
'log_datefmt',
'%Y-%m-%d %H:%M:%S'
)
)
)
)
)
if not is_writeable(logfile, check_parent=True):
            # Since we're not able to write to the log file or its parent
            # directory (if the log file does not exist), are we the same user
            # as the one defined in the configuration file?
current_user = getpass.getuser()
if self.config['user'] != current_user:
# Yep, not the same user!
# Is the current user in ACL?
if current_user in self.config.get('client_acl', {}).keys():
# Yep, the user is in ACL!
                    # Let's write the logfile to the user's home directory instead.
user_salt_dir = os.path.expanduser('~/.salt')
if not os.path.isdir(user_salt_dir):
os.makedirs(user_salt_dir, 0o750)
logfile_basename = os.path.basename(
self._default_logging_logfile_
)
logging.getLogger(__name__).debug(
'The user {0!r} is not allowed to write to {1!r}. '
'The log file will be stored in '
'\'~/.salt/{2!r}.log\''.format(
current_user,
logfile,
logfile_basename
)
)
logfile = os.path.join(
user_salt_dir, '{0}.log'.format(logfile_basename)
)
# If we haven't changed the logfile path and it's not writeable,
# salt will fail once we try to setup the logfile logging.
log.setup_logfile_logger(
logfile,
loglevel,
log_format=log_file_fmt,
date_format=log_file_datefmt
)
for name, level in self.config['log_granular_levels'].items():
log.set_logger_level(name, level)
def __setup_extended_logging(self, *args):
log.setup_extended_logging(self.config)
def __setup_console_logger(self, *args):
# If daemon is set force console logger to quiet
if getattr(self.options, 'daemon', False) is True:
return
# Since we're not going to be a daemon, setup the console logger
cli_log_fmt = 'cli_{0}_log_fmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_fmt in self.config and not self.config.get(cli_log_fmt):
# Remove it from config so it inherits from log_fmt_console
self.config.pop(cli_log_fmt)
logfmt = self.config.get(
cli_log_fmt, self.config.get(
'log_fmt_console',
self.config.get(
'log_fmt',
config._DFLT_LOG_FMT_CONSOLE
)
)
)
cli_log_datefmt = 'cli_{0}_log_datefmt'.format(
self.get_prog_name().replace('-', '_')
)
if cli_log_datefmt in self.config and not \
self.config.get(cli_log_datefmt):
# Remove it from config so it inherits from log_datefmt_console
self.config.pop(cli_log_datefmt)
if self.config.get('log_datefmt_console', None) is None:
# Remove it from config so it inherits from log_datefmt
self.config.pop('log_datefmt_console', None)
datefmt = self.config.get(
cli_log_datefmt,
self.config.get(
'log_datefmt_console',
self.config.get(
'log_datefmt',
'%Y-%m-%d %H:%M:%S'
)
)
)
log.setup_console_logger(
self.config['log_level'], log_format=logfmt, date_format=datefmt
)
for name, level in self.config['log_granular_levels'].items():
log.set_logger_level(name, level)
class RunUserMixin(object):
__metaclass__ = MixInMeta
_mixin_prio_ = 20
def _mixin_setup(self):
self.add_option(
'-u', '--user',
help='Specify user to run {0}'.format(self.get_prog_name())
)
class DaemonMixIn(object):
__metaclass__ = MixInMeta
_mixin_prio_ = 30
def _mixin_setup(self):
self.add_option(
'-d', '--daemon',
default=False,
action='store_true',
help='Run the {0} as a daemon'.format(self.get_prog_name())
)
def daemonize_if_required(self):
if self.options.daemon:
# Late import so logging works correctly
import bonneville.utils
bonneville.utils.daemonize()
class PidfileMixin(object):
__metaclass__ = MixInMeta
_mixin_prio_ = 40
def _mixin_setup(self):
self.add_option(
'--pid-file', dest='pidfile',
default=os.path.join(
syspaths.PIDFILE_DIR, '{0}.pid'.format(self.get_prog_name())
),
help=('Specify the location of the pidfile. Default: %default')
)
def set_pidfile(self):
from bonneville.utils.process import set_pidfile
set_pidfile(self.config['pidfile'], self.config['user'])
class TargetOptionsMixIn(object):
__metaclass__ = MixInMeta
_mixin_prio_ = 20
selected_target_option = None
def _mixin_setup(self):
group = self.target_options_group = optparse.OptionGroup(
self, 'Target Options', 'Target Selection Options'
)
self.add_option_group(group)
group.add_option(
'-E', '--pcre',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'servers, use pcre regular expressions')
)
group.add_option(
'-L', '--list',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'servers, take a comma or space delimited list of '
'servers.')
)
group.add_option(
'-G', '--grain',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a grain value to identify targets, the syntax '
                  'for the target is the grain key followed by a glob '
'expression:\n"os:Arch*"')
)
group.add_option(
'--grain-pcre',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a grain value to identify targets, the syntax '
'for the target is the grain key followed by a pcre '
'regular expression:\n"os:Arch.*"')
)
group.add_option(
'-N', '--nodegroup',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use one of the predefined nodegroups to identify a '
'list of targets.')
)
group.add_option(
'-R', '--range',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a range expression to identify targets. '
'Range expressions look like %cluster')
)
self._create_process_functions()
def _create_process_functions(self):
for option in self.target_options_group.option_list:
def process(opt):
if getattr(self.options, opt.dest):
self.selected_target_option = opt.dest
funcname = 'process_{0}'.format(option.dest)
if not hasattr(self, funcname):
setattr(self, funcname, partial(process, option))
def _mixin_after_parsed(self):
group_options_selected = filter(
lambda option: getattr(self.options, option.dest) is True,
self.target_options_group.option_list
)
if len(list(group_options_selected)) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format('/'.join(
[option.get_opt_string()
for option in group_options_selected]))
)
self.config['selected_target_option'] = self.selected_target_option
class ExtendedTargetOptionsMixIn(TargetOptionsMixIn):
def _mixin_setup(self):
TargetOptionsMixIn._mixin_setup(self)
group = self.target_options_group
group.add_option(
'-C', '--compound',
default=False,
action='store_true',
help=('The compound target option allows for multiple target '
'types to be evaluated, allowing for greater granularity in '
'target matching. The compound target is space delimited, '
'targets other than globs are preceded with an identifier '
'matching the specific targets argument type: salt '
'\'G@os:RedHat and webser* or E@database.*\'')
)
group.add_option(
'-X', '--exsel',
default=False,
action='store_true',
help=('Instead of using shell globs use the return code of '
'a function.')
)
group.add_option(
'-I', '--pillar',
default=False,
action='store_true',
help=('Instead of using shell globs to evaluate the target '
'use a pillar value to identify targets, the syntax '
                  'for the target is the pillar key followed by a glob '
'expression:\n"role:production*"')
)
group.add_option(
'-S', '--ipcidr',
default=False,
action='store_true',
help=('Match based on Subnet (CIDR notation) or IPv4 address.')
)
self._create_process_functions()
class TimeoutMixIn(object):
__metaclass__ = MixInMeta
_mixin_prio_ = 10
def _mixin_setup(self):
if not hasattr(self, 'default_timeout'):
raise RuntimeError(
'You need to define the \'default_timeout\' attribute '
'on {0}'.format(self.__class__.__name__)
)
self.add_option(
'-t', '--timeout',
type=int,
default=self.default_timeout,
help=('Change the timeout, if applicable, for the running '
'command; default=%default')
)
class OutputOptionsMixIn(object):
__metaclass__ = MixInMeta
_mixin_prio_ = 40
_include_text_out_ = False
selected_output_option = None
def _mixin_setup(self):
group = self.output_options_group = optparse.OptionGroup(
self, 'Output Options', 'Configure your preferred output format'
)
self.add_option_group(group)
outputters = loader.outputters(
config.minion_config(None)
)
group.add_option(
'--out', '--output',
dest='output',
help=(
'Print the output from the {0!r} command using the '
'specified outputter. The builtins are {1}.'.format(
self.get_prog_name(),
', '.join([repr(k) for k in outputters])
)
)
)
group.add_option(
'--out-indent', '--output-indent',
dest='output_indent',
default=None,
type=int,
help=('Print the output indented by the provided value in spaces. '
                  'Negative values disable indentation. Only applicable in '
'outputters that support indentation.')
)
group.add_option(
'--out-file', '--output-file',
dest='output_file',
default=None,
help='Write the output to the specified file'
)
group.add_option(
'--no-color', '--no-colour',
default=False,
action='store_true',
help='Disable all colored output'
)
group.add_option(
'--force-color', '--force-colour',
default=False,
action='store_true',
help='Force colored output'
)
for option in self.output_options_group.option_list:
def process(opt):
default = self.defaults.get(opt.dest)
if getattr(self.options, opt.dest, default) is False:
return
self.selected_output_option = opt.dest
funcname = 'process_{0}'.format(option.dest)
if not hasattr(self, funcname):
setattr(self, funcname, partial(process, option))
def process_output(self):
self.selected_output_option = self.options.output
def process_output_file(self):
if self.options.output_file is not None:
if os.path.isfile(self.options.output_file):
try:
os.remove(self.options.output_file)
except (IOError, OSError) as exc:
self.error(
'{0}: Access denied: {1}'.format(
self.options.output_file,
exc
)
)
def _mixin_after_parsed(self):
group_options_selected = filter(
lambda option: (
getattr(self.options, option.dest) and
(option.dest.endswith('_out') or option.dest == 'output')
),
self.output_options_group.option_list
)
if len(list(group_options_selected)) > 1:
self.error(
'The options {0} are mutually exclusive. Please only choose '
'one of them'.format('/'.join([
option.get_opt_string() for
option in group_options_selected
]))
)
self.config['selected_output_option'] = self.selected_output_option
class OutputOptionsWithTextMixIn(OutputOptionsMixIn):
# This should also be removed
_include_text_out_ = True
def __new__(cls, *args, **kwargs):
instance = super(OutputOptionsWithTextMixIn, cls).__new__(
cls, *args, **kwargs
)
utils.warn_until(
(0, 19),
'\'OutputOptionsWithTextMixIn\' has been deprecated. Please '
'start using \'OutputOptionsMixIn\'; your code should not need '
'any further changes.'
)
return instance
class MasterOptionParser(OptionParser, ConfigDirMixIn, MergeConfigMixIn,
LogLevelMixIn, RunUserMixin, DaemonMixIn,
PidfileMixin):
__metaclass__ = OptionParserMeta
description = 'The Salt master, used to control the Salt minions.'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
def setup_config(self):
return config.master_config(self.get_config_file_path())
class MinionOptionParser(MasterOptionParser):
__metaclass__ = OptionParserMeta
description = (
'The Salt minion, receives commands from a remote Salt master.'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'minion'
# LogLevelMixIn attributes
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'minion')
def setup_config(self):
return config.minion_config(self.get_config_file_path(),
minion_id=True)
class SyndicOptionParser(OptionParser, ConfigDirMixIn, MergeConfigMixIn,
LogLevelMixIn, RunUserMixin, DaemonMixIn,
PidfileMixin):
__metaclass__ = OptionParserMeta
description = (
'A seamless master of masters. Scale Salt to thousands of hosts or '
'across many different networks.'
)
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
def setup_config(self):
return config.syndic_config(
self.get_config_file_path(),
self.get_config_file_path('minion'))
class SaltCMDOptionParser(OptionParser, ConfigDirMixIn, MergeConfigMixIn,
TimeoutMixIn, ExtendedTargetOptionsMixIn,
OutputOptionsMixIn, LogLevelMixIn):
__metaclass__ = OptionParserMeta
default_timeout = 5
usage = '%prog [options] \'<target>\' <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = 'warning'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
_loglevel_config_setting_name_ = 'cli_salt_log_file'
def _mixin_setup(self):
self.add_option(
'-s', '--static',
default=False,
action='store_true',
help=('Return the data from minions as a group after they '
'all return.')
)
self.add_option(
'--async',
default=False,
dest='async',
action='store_true',
help=('Run the salt command but don\'t wait for a reply')
)
self.add_option(
'--state-output', '--state_output',
default='full',
help=('Override the configured state_output value for minion output'
'. Default: full')
)
self.add_option(
'--subset',
default=0,
type=int,
help=('Execute the routine on a random subset of the targeted '
                  'minions. The minions are verified to have the named '
                  'function before executing.')
)
self.add_option(
'-v', '--verbose',
default=False,
action='store_true',
help=('Turn on command verbosity, display jid and active job '
'queries')
)
self.add_option(
'--show-timeout',
default=False,
action='store_true',
help=('Display minions that timeout')
)
self.add_option(
'-b', '--batch',
'--batch-size',
default='',
dest='batch',
help=('Execute the salt job in batch mode, pass either the number '
'of minions to batch at a time, or the percentage of '
'minions to have running')
)
self.add_option(
'-a', '--auth', '--eauth', '--extended-auth',
default='',
dest='eauth',
help=('Specify an extended authentication system to use.')
)
self.add_option(
'-T', '--make-token',
default=False,
dest='mktoken',
action='store_true',
            help=('Generate and save an authentication token for re-use. The '
'token is generated and made available for the period '
'defined in the Salt Master.')
)
self.add_option(
'--return',
default='',
metavar='RETURNER',
help=('Set an alternative return method. By default salt will '
'send the return data from the command back to the master, '
'but the return data can be redirected into any number of '
'systems, databases or applications.')
)
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Return the documentation for the specified module or for '
'all modules if none are specified.')
)
self.add_option(
'--args-separator',
dest='args_separator',
default=',',
help=('Set the special argument used as a delimiter between '
'command arguments of compound commands. This is useful '
'when one wants to pass commas as arguments to '
'some of the commands in a compound command.')
)
def _mixin_after_parsed(self):
if len(self.args) <= 1 and not self.options.doc:
try:
self.print_help()
except Exception:
# We get an argument that Python's optparser just can't
# deal with. Perhaps stdout was redirected, or a file
# glob was passed in. Regardless, we're in an unknown
# state here.
sys.stdout.write("Invalid options passed. Please try -h for help.") # Try to warn if we can.
sys.exit(1)
if self.options.doc:
# Include the target
if not self.args:
self.args.insert(0, '*')
if len(self.args) < 2:
# Include the function
self.args.insert(1, 'sys.doc')
if self.args[1] != 'sys.doc':
self.args.insert(1, 'sys.doc')
self.args[2] = self.args[2]
if self.options.list:
try:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].split(',')
else:
self.config['tgt'] = self.args[0].split()
except IndexError:
self.exit(42, '\nCannot execute command without defining a target.\n\n')
else:
try:
self.config['tgt'] = self.args[0]
except IndexError:
self.exit(42, '\nCannot execute command without defining a target.\n\n')
# Detect compound command and set up the data for it
if self.args:
try:
if ',' in self.args[1]:
self.config['fun'] = self.args[1].split(',')
self.config['arg'] = [[]]
cmd_index = 0
if (self.args[2:].count(self.options.args_separator) ==
len(self.config['fun']) - 1):
# new style parsing: standalone argument separator
for arg in self.args[2:]:
if arg == self.options.args_separator:
cmd_index += 1
self.config['arg'].append([])
else:
self.config['arg'][cmd_index].append(arg)
else:
# old style parsing: argument separator can be inside args
for arg in self.args[2:]:
if self.options.args_separator in arg:
sub_args = arg.split(self.options.args_separator)
for sub_arg_index, sub_arg in enumerate(sub_args):
if sub_arg:
self.config['arg'][cmd_index].append(sub_arg)
if sub_arg_index != len(sub_args) - 1:
cmd_index += 1
self.config['arg'].append([])
else:
self.config['arg'][cmd_index].append(arg)
if len(self.config['fun']) != len(self.config['arg']):
self.exit(42, 'Cannot execute compound command without '
'defining all arguments.')
except IndexError:
self.exit(42, '\nIncomplete options passed.\n\n')
else:
self.config['fun'] = self.args[1]
self.config['arg'] = self.args[2:]
def setup_config(self):
return config.client_config(self.get_config_file_path())
class SaltCPOptionParser(OptionParser, ConfigDirMixIn, MergeConfigMixIn,
TimeoutMixIn, TargetOptionsMixIn, LogLevelMixIn):
__metaclass__ = OptionParserMeta
description = (
'salt-cp is NOT intended to broadcast large files, it is intended to '
'handle text files.\nsalt-cp can be used to distribute configuration '
'files.'
)
default_timeout = 5
usage = '%prog [options] \'<target>\' SOURCE DEST'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = 'warning'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
_loglevel_config_setting_name_ = 'cli_salt_cp_log_file'
def _mixin_after_parsed(self):
# salt-cp needs arguments
if len(self.args) <= 1:
self.print_help()
self.exit(1)
if self.options.list:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].split(',')
else:
self.config['tgt'] = self.args[0].split()
else:
self.config['tgt'] = self.args[0]
self.config['src'] = self.args[1:-1]
self.config['dest'] = self.args[-1]
def setup_config(self):
return config.master_config(self.get_config_file_path())
class SaltKeyOptionParser(OptionParser, ConfigDirMixIn, MergeConfigMixIn,
LogLevelMixIn, OutputOptionsMixIn):
__metaclass__ = OptionParserMeta
description = 'Salt key is used to manage Salt authentication keys'
usage = '%prog [options]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_skip_console_logging_config_ = True
_logfile_config_setting_name_ = 'key_logfile'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'key')
def _mixin_setup(self):
# XXX: Remove '--key-logfile' support in 0.18.0
utils.warn_until((0, 18), '', _dont_call_warnings=True)
self.logging_options_group.add_option(
'--key-logfile',
default=None,
help='Send all output to a file. Default is {0!r}'.format(
self._default_logging_logfile_
)
)
actions_group = optparse.OptionGroup(self, 'Actions')
actions_group.add_option(
'-l', '--list',
default='',
metavar='ARG',
help=('List the public keys. The args '
'"pre", "un", and "unaccepted" will list '
'unaccepted/unsigned keys. '
'"acc" or "accepted" will list accepted/signed keys. '
'"rej" or "rejected" will list rejected keys. '
'Finally, "all" will list all keys.')
)
actions_group.add_option(
'-L', '--list-all',
default=False,
action='store_true',
help='List all public keys. Deprecated: use "--list all"'
)
actions_group.add_option(
'-a', '--accept',
default='',
help='Accept the specified public key (use --include-all to '
'match rejected keys in addition to pending keys)'
)
actions_group.add_option(
'-A', '--accept-all',
default=False,
action='store_true',
help='Accept all pending keys'
)
actions_group.add_option(
'-r', '--reject',
default='',
help='Reject the specified public key (use --include-all to '
'match accepted keys in addition to pending keys)'
)
actions_group.add_option(
'-R', '--reject-all',
default=False,
action='store_true',
help='Reject all pending keys'
)
actions_group.add_option(
'--include-all',
default=False,
action='store_true',
help='Include non-pending keys when accepting/rejecting'
)
actions_group.add_option(
'-p', '--print',
default='',
help='Print the specified public key'
)
actions_group.add_option(
'-P', '--print-all',
default=False,
action='store_true',
help='Print all public keys'
)
actions_group.add_option(
'-d', '--delete',
default='',
help='Delete the named key'
)
actions_group.add_option(
'-D', '--delete-all',
default=False,
action='store_true',
help='Delete all keys'
)
actions_group.add_option(
'-f', '--finger',
default='',
help='Print the named key\'s fingerprint'
)
actions_group.add_option(
'-F', '--finger-all',
default=False,
action='store_true',
help='Print all key\'s fingerprints'
)
self.add_option_group(actions_group)
self.add_option(
'-q', '--quiet',
default=False,
action='store_true',
help='Suppress output'
)
self.add_option(
'-y', '--yes',
default=False,
action='store_true',
help='Answer Yes to all questions presented, defaults to False'
)
key_options_group = optparse.OptionGroup(
self, 'Key Generation Options'
)
self.add_option_group(key_options_group)
key_options_group.add_option(
'--gen-keys',
default='',
help='Set a name to generate a keypair for use with salt'
)
key_options_group.add_option(
'--gen-keys-dir',
default='.',
help=('Set the directory to save the generated keypair, only '
'works with "gen_keys_dir" option; default=.')
)
key_options_group.add_option(
'--keysize',
default=2048,
type=int,
help=('Set the keysize for the generated key, only works with '
'the "--gen-keys" option, the key size must be 2048 or '
                  'higher, otherwise it will be rounded up to 2048; '
                  'default=%default')
)
def process_config_dir(self):
if self.options.gen_keys:
# We're generating keys, override the default behaviour of this
# function if we don't have any access to the configuration
# directory.
if not os.access(self.options.config_dir, os.R_OK):
if not os.path.isdir(self.options.gen_keys_dir):
                    # This would be done at a later stage, but we need it now
# so no errors are thrown
os.makedirs(self.options.gen_keys_dir)
self.options.config_dir = self.options.gen_keys_dir
super(SaltKeyOptionParser, self).process_config_dir()
    # Don't change its mixin priority!
process_config_dir._mixin_prio_ = ConfigDirMixIn._mixin_prio_
def setup_config(self):
keys_config = config.master_config(self.get_config_file_path())
if self.options.gen_keys:
# Since we're generating the keys, some defaults can be assumed
# or tweaked
keys_config['key_logfile'] = os.devnull
keys_config['pki_dir'] = self.options.gen_keys_dir
return keys_config
def process_keysize(self):
if self.options.keysize < 2048:
self.error('The minimum value for keysize is 2048')
elif self.options.keysize > 32768:
self.error('The maximum value for keysize is 32768')
def process_gen_keys_dir(self):
# Schedule __create_keys_dir() to run if there's a value for
        # --gen-keys-dir
self._mixin_after_parsed_funcs.append(self.__create_keys_dir)
def process_key_logfile(self):
if self.options.key_logfile:
# XXX: Remove '--key-logfile' support in 0.18.0
# In < 0.18.0 error out
utils.warn_until((0, 18), '', _dont_call_warnings=True)
self.error(
'The \'--key-logfile\' option has been deprecated in favour '
'of \'--log-file\''
)
def _mixin_after_parsed(self):
# It was decided to always set this to info, since it really all is
# info or error.
self.config['loglevel'] = 'info'
def __create_keys_dir(self, *args):
if not os.path.isdir(self.config['gen_keys_dir']):
os.makedirs(self.config['gen_keys_dir'])
class SaltCallOptionParser(OptionParser, ConfigDirMixIn, MergeConfigMixIn,
LogLevelMixIn, OutputOptionsMixIn,
metaclass=OptionParserMeta):
description = ('Salt call is used to execute module functions locally '
'on a minion')
usage = '%prog [options] <function> [arguments]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'minion'
# LogLevelMixIn attributes
_default_logging_level_ = 'info'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'minion')
def _mixin_setup(self):
self.add_option(
'-g', '--grains',
dest='grains_run',
default=False,
action='store_true',
help='Return the information generated by the salt grains'
)
self.add_option(
'-m', '--module-dirs',
default=[],
action='append',
help=('Specify an additional directory to pull modules from. '
'Multiple directories can be provided by passing '
'`-m/--module-dirs` multiple times.')
)
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Return the documentation for the specified module or for '
'all modules if none are specified.')
)
self.add_option(
'--master',
default='',
dest='master',
help=('Specify the master to use. The minion must be '
'authenticated with the master. If this option is omitted, '
'the master options from the minion config will be used. '
'If multi masters are set up the first listed master that '
'responds will be used.')
)
self.add_option(
'--return',
default='',
metavar='RETURNER',
help=('Set salt-call to pass the return data to one or many '
'returner interfaces.')
)
self.add_option(
'--local',
default=False,
action='store_true',
help='Run salt-call locally, as if there was no master running.'
)
self.add_option(
'--retcode-passthrough',
default=False,
action='store_true',
help=('Exit with the salt call retcode and not the salt binary '
'retcode')
)
self.add_option(
'--id',
default='',
dest='id',
help=('Specify the minion id to use. If this option is omitted, '
'the id option from the minion config will be used.')
)
def _mixin_after_parsed(self):
if not self.args and not self.options.grains_run \
and not self.options.doc:
self.print_help()
self.exit(1)
elif len(self.args) >= 1:
if self.options.grains_run:
self.error('-g/--grains does not accept any arguments')
self.config['fun'] = self.args[0]
self.config['arg'] = self.args[1:]
def setup_config(self):
return config.minion_config(self.get_config_file_path(),
minion_id=True)
def process_module_dirs(self):
for module_dir in self.options.module_dirs:
# Provide some backwards compatibility with previous comma
# delimited format
if ',' in module_dir:
self.config.setdefault('module_dirs', []).extend(
os.path.abspath(x) for x in module_dir.split(','))
continue
self.config.setdefault('module_dirs',
[]).append(os.path.abspath(module_dir))
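    # Illustrative usage (hypothetical paths, not part of the original module):
    # both invocations below are expected to yield
    # module_dirs == ['/srv/modules/a', '/srv/modules/b']:
    #   salt-call -m /srv/modules/a -m /srv/modules/b test.ping
    #   salt-call -m /srv/modules/a,/srv/modules/b test.ping   (legacy comma form)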
class SaltRunOptionParser(OptionParser, ConfigDirMixIn, MergeConfigMixIn,
TimeoutMixIn, LogLevelMixIn):
__metaclass__ = OptionParserMeta
default_timeout = 1
usage = '%prog [options]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = 'warning'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'master')
_loglevel_config_setting_name_ = 'cli_salt_run_log_file'
def _mixin_setup(self):
self.add_option(
'-d', '--doc', '--documentation',
dest='doc',
default=False,
action='store_true',
help=('Display documentation for runners, pass a module or a '
'runner to see documentation on only that module/runner.')
)
def _mixin_after_parsed(self):
if len(self.args) > 0:
self.config['fun'] = self.args[0]
else:
self.config['fun'] = ''
if len(self.args) > 1:
self.config['arg'] = self.args[1:]
else:
self.config['arg'] = []
def setup_config(self):
return config.master_config(self.get_config_file_path())
class SaltSSHOptionParser(OptionParser, ConfigDirMixIn, MergeConfigMixIn,
LogLevelMixIn, TargetOptionsMixIn,
OutputOptionsMixIn):
__metaclass__ = OptionParserMeta
usage = '%prog [options]'
# ConfigDirMixIn config filename attribute
_config_filename_ = 'master'
# LogLevelMixIn attributes
_default_logging_level_ = 'warning'
_default_logging_logfile_ = os.path.join(syspaths.LOGS_DIR, 'ssh')
_loglevel_config_setting_name_ = 'cli_salt_run_log_file'
def _mixin_setup(self):
self.add_option(
'-r', '--raw', '--raw-shell',
dest='raw_shell',
default=False,
action='store_true',
help=('Don\'t execute a salt routine on the targets, execute a '
'raw shell command')
)
self.add_option(
'--priv',
dest='ssh_priv',
help=('Ssh private key file'))
self.add_option(
'--roster',
dest='roster',
default='',
help=('Define which roster system to use'))
self.add_option(
'--refresh', '--refresh-cache',
dest='refresh_cache',
default=False,
action='store_true',
help=('Force a refresh of the master side data cache of the '
'target\'s data. This is needed if a target\'s grains have '
'been changed and the auto refresh timeframe has not been '
'reached.'))
self.add_option(
'--max-procs',
dest='ssh_max_procs',
default=25,
type=int,
help='Set the number of concurrent minions to communicate with. '
'This value defines how many processes are opened up at a '
                 'time to manage connections; the more running processes, the '
                 'faster communication should be. Default: 25')
self.add_option(
'-i',
'--ignore-host-keys',
dest='ignore_host_keys',
default=False,
action='store_true',
help='By default ssh host keys are honored and connections will '
'ask for approval')
self.add_option(
'--passwd',
dest='ssh_passwd',
default='',
help='Set the default password to attempt to use when '
'authenticating')
self.add_option(
'--key-deploy',
dest='ssh_key_deploy',
default=False,
action='store_true',
            help='Set this flag to attempt to deploy the authorized ssh key '
'with all minions. This combined with --passwd can make '
'initial deployment of keys very fast and easy')
def _mixin_after_parsed(self):
if not self.args:
self.print_help()
self.exit(1)
if self.options.list:
if ',' in self.args[0]:
self.config['tgt'] = self.args[0].split(',')
else:
self.config['tgt'] = self.args[0].split()
else:
self.config['tgt'] = self.args[0]
if len(self.args) > 0:
self.config['arg_str'] = ' '.join(self.args[1:])
def setup_config(self):
return config.master_config(self.get_config_file_path())
|
py | b40e92a93b94e3af3e7bffd8d1d971d756a9f02c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2018-09-11 23:58:02
# @Author : Racter ([email protected])
# @Link : https://racterub.me
from flask import request, flash, session, render_template, redirect, url_for, make_response, send_from_directory
from website import app
from lxml import etree
import requests
from website.views.lib.crawler import login, get_term_score, get_history_pr
import json
#Initial globs
uid = u''
commit = ''
def getLastCommit():
global commit
r = requests.get('https://api.github.com/repos/racterub/tyshscore-web/commits/master')
data = json.loads(r.text)
commit = data['sha'][:6]
@app.route('/', methods=['POST', 'GET'])
def index():
global uid, commit
if not commit:
getLastCommit()
if request.method == "POST":
if request.form:
stdid = request.form['stdid']
stdpwd = request.form['stdpwd']
status = login(stdid, stdpwd)
if status:
uid = request.form['stdid']
session['user'] = request.form['stdid']
flash(u"登入成功")
return render_template('index.jinja.html', stdid=uid, commit=commit)
else:
info = u"帳號密碼錯誤,請再次確認"
return render_template('index.jinja.html', info=info, commit=commit)
else:
error_header = u"資料無法處理"
error_context = u"您的登入資料無法處理,請重新登入"
return render_template('error.jinja.html', stdid=uid, error_header=error_header, error_context=error_context, commit=commit),400
else:
if 'redirect' in session:
info = session['redirect']
session.pop('redirect', None)
return render_template('index.jinja.html', info=info, commit=commit)
elif 'logout' in session:
info = session['logout']
session.pop('logout', None)
return render_template('index.jinja.html', info=info, commit=commit)
else:
return render_template('index.jinja.html', stdid=uid, commit=commit)
@app.route('/scoreboard/<int:counter>')
def scoreboard(counter):
global uid
if 'user' in session:
if not commit:
getLastCommit()
        if counter <= 0 or counter > 5:
error_header = u"資料無法處理"
error_context = u"您所選的資料目前無法處理或是校方系統資料已清空,請稍後再試"
return render_template('error.jinja.html',
stdid=uid,
error_header=error_header,
error_context=error_context,
commit=commit), 400
exam_score_type, exam_score, below_subject = get_term_score()
        if exam_score is False:
error_header = u"資料無法處理"
error_context = u"您所選的資料目前無法處理或是校方系統資料已清空,請稍後再試"
            return render_template('error.jinja.html', stdid=uid, error_header=error_header, error_context=error_context, commit=commit)
body = []
if exam_score_type == 2:
if counter == 4:
subject = u'平時成績'
head = [u'科目', u'成績']
for i in exam_score:
body.append(i[3])
elif counter == 5:
subject = u'補考'
head = [u'科目', u'學期成績', u'最後成績', u'第1次補考成績']
body = below_subject
else:
subject = '第'+str(counter)+'次段考'
for i in exam_score:
body.append(i[counter-1])
head = [u'科目', u'成績', u'全班平均', u'班級排名', u'班級人數']
return render_template('scoreboard.jinja.html',
head=head,
body=body,
stdid=uid,
count=counter,
commit=commit,
subject=subject)
else:
if counter == 3:
error_header = u"資料無法處理"
error_context = u"高三並無第三次段考"
return render_template('error.jinja.html', error_context=error_context, error_header=error_header, commit=commit)
else:
if counter == 4:
subject = u'平時成績'
head = [u'科目', u'成績']
for i in exam_score:
body.append(i[2])
elif counter == 5:
subject = u'補考'
head = [u'科目', u'學期成績', u'最後成績', u'第1次補考成績']
body = below_subject
else:
subject = '第'+str(counter)+'次段考'
for i in exam_score:
body.append(i[counter-1])
head = [u'科目', u'成績', u'全班平均', u'班級排名', u'班級人數']
return render_template('scoreboard.jinja.html',
head=head,
body=body,
stdid=uid,
count=counter,
commit=commit,
subject=subject)
else:
session['redirect'] = u'請先登入系統'
return redirect(url_for('index'))
@app.route('/logout/')
def logout():
global uid
uid = ''
session.pop('user', None)
session['logout'] = u'已登出系統'
return redirect(url_for('index'))
@app.route('/robots.txt')
def robotstxt():
return send_from_directory('static', 'robots.txt')
@app.route('/history_pr/')
def history_pr():
global uid
if 'user' in session:
if not commit:
getLastCommit()
        pr_rew_chart_data, pr_pen_chart_data, pr_chart_total, d_pr_rew_chart_data, d_pr_pen_chart_data = get_history_pr()
pen_result = [int(i) for i in pr_chart_total[5:-1]]
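        # Illustrative note: the reversed penalty counts are weighted 1, 3, 9, ...
        # (base 3); assuming three penalty columns, the threshold of 27 used below
        # is equivalent to three of the most heavily weighted penalty type.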
sumcheck = 0
for i in range(len(pen_result)):
sumcheck += pen_result[::-1][i] * (3**i)
if sumcheck >= 27:
pen_check = False
else:
pen_check = True
pr_rew_chart_header=[u'年度', u'學期', u'大功', u'小功', u'嘉獎', u'優點']
pr_pen_chart_header=[u'年度', u'學期', u'大過', u'小過', u'警告', u'缺點']
return render_template('history_pr.jinja.html',
stdid=uid,
pr_rew_chart_header=pr_rew_chart_header,
pr_pen_chart_header=pr_pen_chart_header,
pr_rew_chart_data=pr_rew_chart_data,
pr_pen_chart_data=pr_pen_chart_data,
d_pr_rew_chart_header=d_pr_rew_chart_data[0],
d_pr_rew_chart_data=d_pr_rew_chart_data[1:],
d_pr_pen_chart_header=d_pr_pen_chart_data[0],
d_pr_pen_chart_data=d_pr_pen_chart_data[1:],
pen_check=pen_check,
t_p=pen_result[0],
s_p=pen_result[1],
f_p=pen_result[2],
commit=commit,
subject=u'歷年獎懲')
else:
session['redirect'] = u'請先登入系統'
return redirect(url_for('index'))
|
py | b40e9374bd1ee0966a36463da8f3404e159aca28 | import os
import numpy as np
try:
import seaborn as sns
seaborn_plot = True
except ImportError:
import warnings
warnings.warn("Seaborn is not installed. Plot with matplotlib.")
seaborn_plot = False
import matplotlib.pyplot as plt
from matplotlib import rcParams
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['image.cmap'] = 'gnuplot2'
plt.rcParams["font.serif"] = "Times New Roman"
rcParams.update({'xtick.major.pad': '5.0'})
rcParams.update({'xtick.major.size': '4'})
rcParams.update({'xtick.major.width': '1.'})
rcParams.update({'xtick.minor.pad': '5.0'})
rcParams.update({'xtick.minor.size': '4'})
rcParams.update({'xtick.minor.width': '0.8'})
rcParams.update({'ytick.major.pad': '5.0'})
rcParams.update({'ytick.major.size': '4'})
rcParams.update({'ytick.major.width': '1.'})
rcParams.update({'ytick.minor.pad': '5.0'})
rcParams.update({'ytick.minor.size': '4'})
rcParams.update({'ytick.minor.width': '0.8'})
rcParams.update({'axes.labelsize': 16})
rcParams.update({'font.size': 16})
from mpl_toolkits.axes_grid1 import make_axes_locatable
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import LogStretch, AsinhStretch, HistEqStretch
from astropy.stats import mad_std
from photutils import CircularAperture
### Plotting Helpers ###
def LogNorm():
return ImageNormalize(stretch=LogStretch())
def AsinhNorm(a=0.1):
return ImageNormalize(stretch=AsinhStretch(a=a))
def HistEqNorm(data):
return ImageNormalize(stretch=HistEqStretch(data))
def vmin_3mad(img):
""" lower limit of visual imshow defined by 3 mad above median """
return np.median(img)-3*mad_std(img)
def vmax_2sig(img):
""" upper limit of visual imshow defined by 2 sigma above median """
return np.median(img)+2*np.std(img)
def colorbar(mappable, pad=0.2, size="5%", loc="right", color_nan='gray', **args):
""" Customized colorbar """
ax = mappable.axes
fig = ax.figure
divider = make_axes_locatable(ax)
if loc=="bottom":
orent = "horizontal"
pad = 1.5*pad
rot = 75
else:
orent = "vertical"
rot = 0
cax = divider.append_axes(loc, size=size, pad=pad)
cb = fig.colorbar(mappable, cax=cax, orientation=orent, **args)
cb.ax.set_xticklabels(cb.ax.get_xticklabels(),rotation=rot)
cmap = cb.get_cmap()
cmap.set_bad(color=color_nan, alpha=0.3)
return cb
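# Hypothetical usage sketch (not part of the original module):
#   fig, ax = plt.subplots()
#   im = ax.imshow(np.random.rand(64, 64), norm=LogNorm())
#   cb = colorbar(im, pad=0.1, size="4%", loc="right")
# i.e. pass the mappable returned by imshow; the helper appends an axis on the
# requested side of the parent axes and draws the colorbar there.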
def make_rand_cmap(n_label, rand_state = 12345):
from photutils.utils import make_random_cmap
rand_cmap = make_random_cmap(n_label, random_state=rand_state)
rand_cmap.set_under(color='black')
rand_cmap.set_over(color='white')
return rand_cmap
def make_rand_color(n_color, seed=1234,
colour = ["indianred", "plum", "seagreen", "lightcyan",
"orchid", 'gray', 'orange', 'yellow', "brown" ]):
import random
random.seed(seed)
rand_colours = [random.choice(colour) for i in range(n_color)]
return rand_colours
def draw_mask_map(image, seg_map, mask_deep, stars,
r_core=None, r_out=None, vmin=None, vmax=None,
pad=0, save=False, save_dir='./'):
""" Visualize mask map """
from matplotlib import patches
mu = np.nanmedian(image)
std = mad_std(image)
if vmin is None:
vmin = mu - std
if vmax is None:
vmax = mu + 10*std
fig, (ax1,ax2,ax3) = plt.subplots(ncols=3, nrows=1, figsize=(20,6))
im1 = ax1.imshow(image, cmap='gray', norm=LogNorm(), vmin=vmin, vmax=1e4)
ax1.set_title("Image")
n_label = seg_map.max()
ax2.imshow(seg_map, vmin=1, vmax=n_label-2, cmap=make_rand_cmap(n_label))
ax2.set_title("Deep Mask")
image2 = image.copy()
image2[mask_deep] = 0
im3 = ax3.imshow(image2, norm=LogNorm(), vmin=vmin, vmax=vmax, aspect='auto')
ax3.set_title("'Sky'")
colorbar(im3)
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core, r_core]
star_pos_A = stars.star_pos_verybright + pad
star_pos_B = stars.star_pos_medbright + pad
aper = CircularAperture(star_pos_A, r=r_core[0])
aper.plot(color='lime',lw=2,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_core[1])
aper.plot(color='c',lw=2,label="",alpha=0.7, axes=ax3)
if r_out is not None:
aper = CircularAperture(star_pos_A, r=r_out[0])
aper.plot(color='lime',lw=1.5,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_out[1])
aper.plot(color='c',lw=1.5,label="",alpha=0.7, axes=ax3)
patch_size = image.shape[0] - pad * 2
rec = patches.Rectangle((pad, pad), patch_size, patch_size, facecolor='none',
edgecolor='w', linewidth=2, linestyle='--',alpha=0.8)
ax3.add_patch(rec)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Mask_dual.png"), dpi=120)
plt.show()
plt.close()
else:
plt.show()
def draw_mask_map_strip(image, seg_comb, mask_comb, stars,
ma_example=None, r_core=None, vmin=None, vmax=None,
pad=0, save=False, save_dir='./'):
""" Visualize mask map w/ strips """
from matplotlib import patches
star_pos_A = stars.star_pos_verybright + pad
star_pos_B = stars.star_pos_medbright + pad
if ma_example is not None:
mask_strip, mask_cross = ma_example
mu = np.nanmedian(image)
std = mad_std(image)
if vmin is None:
vmin = mu - std
if vmax is None:
vmax = mu + 10*std
fig, (ax1,ax2,ax3) = plt.subplots(ncols=3, nrows=1, figsize=(20,6))
    if ma_example is not None:
        # Guard: mask_strip/mask_cross are only defined when an example mask
        # pair was passed in via ma_example.
        mask_strip[mask_cross.astype(bool)] = 0.5
        ax1.imshow(mask_strip, cmap="gray_r")
        ax1.plot(star_pos_A[0][0], star_pos_A[0][1], "r*", ms=18)
    ax1.set_title("Strip/Cross")
n_label = seg_comb.max()
ax2.imshow(seg_comb, vmin=1, vmax=n_label-3, cmap=make_rand_cmap(n_label))
ax2.plot(star_pos_A[:,0], star_pos_A[:,1], "r*",ms=18)
ax2.set_title("Mask Comb.")
image3 = image.copy()
image3[mask_comb] = 0
im3 = ax3.imshow(image3, norm=LogNorm(), aspect='auto', vmin=vmin, vmax=vmax)
ax3.plot(star_pos_A[:,0], star_pos_A[:,1], "r*",ms=18)
ax3.set_title("'Sky'")
colorbar(im3)
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core, r_core]
aper = CircularAperture(star_pos_A, r=r_core[0])
aper.plot(color='lime',lw=2,label="",alpha=0.9, axes=ax3)
aper = CircularAperture(star_pos_B, r=r_core[1])
aper.plot(color='c',lw=2,label="",alpha=0.7, axes=ax3)
size = image.shape[0] - pad * 2
rec = patches.Rectangle((pad, pad), size, size, facecolor='none',
edgecolor='w', linewidth=2, linestyle='--',alpha=0.8)
ax3.add_patch(rec)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Mask_strip.png"), dpi=120)
plt.show()
plt.close()
else:
plt.show()
def Fit_background_distribution(image, mask_deep):
# Check background, fit with gaussian and exp-gaussian distribution
from scipy import stats
plt.figure(figsize=(6,4))
z_sky = image[~mask_deep]
if seaborn_plot:
sns.distplot(z_sky, label='Data', hist_kws={'alpha':0.3})
else:
plt.hist(z_sky, label='Data', alpha=0.3)
mu_fit, std_fit = stats.norm.fit(z_sky)
print(mu_fit, std_fit)
d_mod = stats.norm(loc=mu_fit, scale=std_fit)
x = np.linspace(d_mod.ppf(0.001), d_mod.ppf(0.999), 100)
plt.plot(x, d_mod.pdf(x), 'g-', lw=2, alpha=0.6, label='Norm Fit')
K_fit, mu_fit, std_fit = stats.exponnorm.fit(z_sky)
print(K_fit, mu_fit, std_fit)
d_mod2 = stats.exponnorm(loc=mu_fit, scale=std_fit, K=K_fit)
x = np.linspace(d_mod2.ppf(0.001), d_mod2.ppf(0.9999), 100)
plt.plot(x, d_mod2.pdf(x), 'r-', lw=2, alpha=0.6, label='Exp-Norm Fit')
plt.legend(fontsize=12)
def plot_PSF_model_1D(frac, f_core, f_aureole, psf_range=400,
yunit='Intensity', label='combined', log_scale=True,
ZP=27.1, pixel_scale=2.5, decompose=True):
from .utils import Intensity2SB
r = np.logspace(0, np.log10(psf_range), 100)
I_core = (1-frac) * f_core(r)
I_aureole = frac * f_aureole(r)
I_tot = I_core + I_aureole
if log_scale:
I_core, I_aureole, I_tot = np.log10(I_core), np.log10(I_aureole), np.log10(I_tot)
if yunit=='Intensity':
plt.semilogx(r, I_tot,
ls="-", lw=3,alpha=0.9, zorder=5, label=label)
if decompose:
plt.semilogx(r, I_core,
ls="--", lw=3, alpha=0.9, zorder=1, label='core')
plt.semilogx(r, I_aureole,
ls="--", lw=3, alpha=0.9, label='aureole')
plt.ylabel('log Intensity', fontsize=14)
plt.ylim(I_aureole.min(), I_tot.max()+0.25)
elif yunit=='SB':
plt.semilogx(r, -14.5+Intensity2SB(I=I_tot, BKG=0,
                                           ZP=ZP, pixel_scale=pixel_scale),
ls="-", lw=3,alpha=0.9, zorder=5, label=label)
if decompose:
plt.semilogx(r, -14.5+Intensity2SB(I=I_core, BKG=0,
                                               ZP=ZP, pixel_scale=pixel_scale),
ls="--", lw=3, alpha=0.9, zorder=1, label='core')
plt.semilogx(r, -14.5+Intensity2SB(I=I_aureole, BKG=0,
                                               ZP=ZP, pixel_scale=pixel_scale),
ls="--", lw=3, alpha=0.9, label='aureole')
plt.ylabel("Surface Brightness [mag/arcsec$^2$]")
plt.ylim(31,17)
plt.legend(loc=1, fontsize=12)
plt.xlabel('r [pix]', fontsize=14)
def plot_PSF_model_galsim(psf, image_size=800, contrast=None,
figsize=(7,6), save=False, save_dir='.'):
""" Plot and 1D PSF model and Galsim 2D model averaged in 1D """
from .utils import Intensity2SB, cal_profile_1d
pixel_scale = psf.pixel_scale
frac = psf.frac
psf_core = psf.psf_core
psf_aureole = psf.psf_aureole
psf_star = psf.psf_star
img_core = psf_core.drawImage(scale=pixel_scale, method="no_pixel")
img_aureole = psf_aureole.drawImage(nx=201, ny=201, scale=pixel_scale, method="no_pixel")
img_star = psf_star.drawImage(nx=image_size, ny=image_size, scale=pixel_scale, method="no_pixel")
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d(frac*img_aureole.array, color="g",
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label=psf.aureole_model)
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d((1-frac)*img_core.array, color="orange",
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label="Moffat")
r_rbin, z_rbin, logzerr_rbin = cal_profile_1d(img_star.array,
pixel_scale=pixel_scale,
core_undersample=True, mock=True,
xunit="pix", yunit="Intensity",
label="Combined")
plt.legend(loc=1, fontsize=12)
r = np.logspace(0, np.log10(image_size), 100)
comp1 = psf.f_core1D(r)
comp2 = psf.f_aureole1D(r)
plt.plot(r, np.log10((1-frac) * comp1 + comp2 * frac), ls="-", lw=3, zorder=5)
plt.plot(r, np.log10((1-frac) * comp1), ls="--", lw=3, zorder=1)
plt.plot(r, np.log10(comp2 * frac), ls="--", lw=3)
if psf.aureole_model == "multi-power":
for t in psf.theta_s_pix:
plt.axvline(t, ls="--", color="k",alpha=0.3, zorder=1)
if contrast is not None:
plt.axhline(np.log10(comp1.max()/contrast),color="k",ls="--")
plt.title("Model PSF",fontsize=14)
plt.ylim(-8.5, -0.5)
plt.xlim(r_rbin.min()*0.8, r_rbin.max()*1.2)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Model_PSF.png"), dpi=120)
plt.close()
return img_star
def plot_flux_dist(Flux, Flux_thresholds, ZP=None,
save=False, save_dir='.', figsize=None, **kwargs):
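    """ Plot the distribution of estimated (log) stellar fluxes, with the bright / very bright thresholds marked """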
import seaborn as sns
F_bright, F_verybright = Flux_thresholds
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
plt.axvline(np.log10(F_bright), color="k", ls="-",alpha=0.7, zorder=1)
plt.axvline(np.log10(F_verybright), color="k", ls="--",alpha=0.7, zorder=1)
plt.axvspan(1, np.log10(F_bright),
color='gray', alpha=0.15, zorder=0)
plt.axvspan(np.log10(F_bright), np.log10(F_verybright),
color='seagreen', alpha=0.15, zorder=0)
plt.axvspan(np.log10(F_verybright), 9,
color='steelblue', alpha=0.15, zorder=0)
if seaborn_plot:
sns.distplot(np.log10(Flux), kde=False, **kwargs)
else:
plt.hist(np.log10(Flux), alpha=0.5)
plt.yscale('log')
plt.xlabel('Estimated log Flux$_{tot}$ / Mag', fontsize=15)
plt.ylabel('# of stars', fontsize=15)
plt.legend(loc=1)
if ZP is not None:
ax1 = plt.gca()
xticks1 = ax1.get_xticks()
ax2 = ax1.twiny()
ax2.set_xticks(xticks1)
ax2.set_xticklabels(np.around(-2.5*xticks1+ZP ,1))
ax2.set_xbound(ax1.get_xbound())
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Flux_dist.png"), dpi=80)
plt.show()
plt.close()
def draw_independent_priors(priors, xlabels=None, plabels=None,
save=False, save_dir='./'):
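    """ Plot the probability density of each independent prior distribution """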
x_s = [np.linspace(d.ppf(0.01), d.ppf(0.99), 100) for d in priors]
fig, axes = plt.subplots(1, len(priors), figsize=(15,4))
for k, ax in enumerate(axes):
ax.plot(x_s[k], priors[k].pdf(x_s[k]),'-', lw=5, alpha=0.6, label=plabels[k])
ax.legend()
if xlabels is not None:
ax.set_xlabel(xlabels[k], fontsize=12)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Prior.png"), dpi=100)
plt.close()
def draw_cornerplot(results, ndim, labels=None, truths=None, figsize=(16,14),
save=False, save_dir='.', suffix=''):
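    """ Draw a corner plot of the fitting samples using dynesty's plotting module """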
from dynesty import plotting as dyplot
fig = plt.subplots(ndim, ndim, figsize=figsize)
dyplot.cornerplot(results, truths=truths, labels=labels,
color="royalblue", truth_color="indianred",
title_kwargs={'fontsize':18, 'y': 1.04},
label_kwargs={'fontsize':16},
show_titles=True, fig=fig)
if save:
plt.savefig(os.path.join(save_dir, "Cornerplot%s.png"%suffix), dpi=150)
plt.show()
plt.close()
else:
plt.show()
def draw2D_fit_vs_truth_PSF_mpow(results, psf, stars, labels, image,
image_base=None, vmin=None, vmax=None,
avg_func='median', save=False, save_dir="."):
""" Compare 2D fit and truth image """
from .sampler import get_params_fit
N_n = len([lab for lab in labels if "n" in lab])
N_theta = len([lab for lab in labels if "theta" in lab])
pmed, pmean, pcov = get_params_fit(results)
fits = pmed if avg_func=='median' else pmean
print("Fitting (mean) : ", np.around(pmean,3))
print("Fitting (median) : ", np.around(pmed,3))
n_s_fit = fits[:N_n]
if N_theta > 0:
theta_s_fit = np.append([psf.theta_s[0]], 10**fits[N_n:N_n+N_theta])
else:
theta_s_fit = psf.theta_s
mu_fit, sigma_fit = fits[-2], 10**fits[-1]
noise_fit = make_noise_image(psf.image_size, sigma_fit)
psf_fit = psf.copy()
psf_fit.update({'n_s':n_s_fit, 'theta_s': theta_s_fit})
psf_range = psf.image_size * psf.pixel_scale
image_fit = generate_image_by_flux(psf_fit, stars, draw_real=True,
psf_range=[psf_range//2, psf_range])
image_fit = image_fit + mu_fit + noise_fit
if image_base is not None:
image_fit += image_base
if vmin is None:
vmin = mu_fit - 0.3 * sigma_fit
if vmax is None:
vmax = vmin + 11
fig, (ax1, ax2, ax3) = plt.subplots(1,3,figsize=(18,6))
im = ax1.imshow(image_fit, vmin=vmin, vmax=vmax, norm=LogNorm()); colorbar(im)
im = ax2.imshow(image, vmin=vmin, vmax=vmax, norm=LogNorm()); colorbar(im)
Diff = (image_fit-image)/image
im = ax3.imshow(Diff, vmin=-0.1, vmax=0.1, cmap='seismic'); colorbar(im)
ax1.set_title("Fit: I$_f$")
ax2.set_title("Original: I$_0$")
ax3.set_title("Frac.Diff: (I$_f$ - I$_0$) / I$_0$")
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir,
"Fit_vs_truth_image.png"), dpi=120)
plt.close()
def draw_comparison_2D(image_fit, data, mask, image_star, noise_fit=0,
r_core=None, vmin=None, vmax=None, cmap='gnuplot2', norm=None,
save=False, save_dir=".", suffix=""):
""" Compare data and fit in 2D """
mask_fit = getattr(mask, 'mask_comb', mask.mask_deep)
if vmin is None:
vmin = np.median(image_fit[~mask_fit]) - 1
if vmax is None:
vmax = vmin + 150
if norm is None:
norm1 = LogNorm()
norm2 = LogNorm()
else:
from copy import deepcopy
norm1 = norm
norm2 = deepcopy(norm1)
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2,3,figsize=(16,9))
im = ax1.imshow(data, vmin=vmin, vmax=vmax, norm=norm1, cmap=cmap)
ax1.set_title("Data [I$_0$]", fontsize=15); colorbar(im)
im = ax2.imshow(image_fit+noise_fit, vmin=vmin, vmax=vmax, norm=norm1, cmap=cmap)
ax2.set_title("Fit [I$_f$]", fontsize=15); colorbar(im)
im = ax3.imshow(image_star, vmin=0, vmax=vmax-vmin, norm=norm2, cmap=cmap)
ax3.set_title("Bright Stars [I$_{f,B}$]", fontsize=15); colorbar(im)
frac_diff = (image_fit-data)/data
# frac_diff[mask_fit] = 0
im = ax4.imshow(frac_diff, vmin=-0.1, vmax=0.1, cmap="seismic")
ax4.set_title("Frac. Diff. [(I$_f$ - I$_0$)/I$_0$]", fontsize=15); colorbar(im)
# noise = np.sqrt((data/0.37/618)**2+(2/0.37/618)**2)
# chi = (image_fit-data)/noise
# im = ax4.imshow(chi, vmin=-10, vmax=10, cmap="seismic")
# ax4.set_title("Chi. [(I$_f$ - I$_0$)/$\sigma_0$]", fontsize=15); colorbar(im)
residual = (data-image_star)
im = ax5.imshow(residual, vmin=vmin, vmax=vmax, norm=norm1, cmap=cmap)
ax5.set_title("Bright Subtracted [I$_0$ - I$_{f,B}$]", fontsize=15); colorbar(im)
residual[mask_fit] = 0
im = ax6.imshow(residual, vmin=vmin, vmax=vmax, norm=norm1, cmap=cmap)
ax6.set_title("Bright Subtracted (masked)"); colorbar(im)
if r_core is not None:
if np.ndim(r_core) == 0:
r_core = [r_core,r_core]
aper1 = CircularAperture(mask.stars.star_pos_verybright, r=r_core[0])
aper1.plot(color='lime',lw=2,alpha=0.9, axes=ax6)
aper2 = CircularAperture(mask.stars.star_pos_medbright, r=r_core[1])
aper2.plot(color='skyblue',lw=2,label="",alpha=0.7, axes=ax6)
plt.tight_layout()
if save:
plt.savefig(os.path.join(save_dir, "Comparison_fit_data2D%s.png"%suffix), dpi=120)
plt.show()
plt.close()
else:
plt.show()
def plot_fit_PSF1D(results, psf, n_spline=2,
n_bootstrap=500, truth=None,
Amp_max=None, r_core=None,
n_out=4, theta_out=1200, image_size=800,
save=False, save_dir="./",
suffix='', figsize=(7,6)):
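    """ Plot the 1D PSF profile recovered from the fitting, overlaying bootstrap samples drawn from the posterior to indicate the uncertainty """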
from astropy.stats import bootstrap
from .sampler import get_params_fit
pixel_scale = psf.pixel_scale
frac = psf.frac
if figsize is not None:
fig, ax = plt.subplots(1,1, figsize=figsize)
if truth is not None:
print("Truth : ", psf.params)
psf.plot1D(psf_range=600, decompose=False, label='Truth')
# read fitting results
pmed, pmean, pcov, samples_eq = get_params_fit(results, return_sample=True)
print("Fitting (mean) : ", np.around(pmean,3))
print("Fitting (median) : ", np.around(pmed,3))
samples_eq_bs = bootstrap(samples_eq, bootnum=1, samples=n_bootstrap)[0]
# Number of n and theta in the fitting
if psf.aureole_model != "moffat":
theta_0 = psf.theta_0
N_n = n_spline
N_theta = n_spline - 1
psf_fit = psf.copy()
r = np.logspace(0., np.log10(image_size), 100)
comp1 = psf.f_core1D(r)
# Sample distribution from joint PDF
for sample in samples_eq_bs:
frac_k = frac
if psf.aureole_model == "moffat":
gamma1_k = sample[0]
beta1_k = sample[1]
psf_fit.update({'gamma1':gamma1_k, 'beta1':beta1_k})
else:
if psf.aureole_model == "power":
n_k = sample[0]
psf_fit.update({'n':n_k})
elif psf.aureole_model == "multi-power":
n_s_k = np.concatenate([sample[:N_n], [n_out]])
theta_s_k = np.concatenate([[theta_0],
np.atleast_1d(10**sample[N_n:N_n+N_theta]),
[theta_out]])
psf_fit.update({'n_s':n_s_k, 'theta_s':theta_s_k})
comp2_k = psf_fit.f_aureole1D(r)
plt.semilogy(r, (1-frac_k) * comp1 + frac_k * comp2_k,
color="lightblue", lw=2,alpha=0.1,zorder=1)
# Median and mean fitting
for fits, c, ls, lab in zip([pmed, pmean], ["royalblue", "b"],
["-.","-"], ["mean", "med"]):
if psf.aureole_model == "moffat":
gamma1_fit = fits[0]
beta1_fit = fits[1]
            psf_fit.update({'gamma1':gamma1_fit, 'beta1':beta1_fit})
else:
if psf.aureole_model == "power":
n_fit = fits[0]
psf_fit.update({'n':n_fit})
elif psf.aureole_model == "multi-power":
n_s_fit = np.concatenate([fits[:N_n], [n_out]])
theta_s_fit = np.concatenate([[theta_0],
np.atleast_1d(10**fits[N_n:N_n+N_theta]),
[theta_out]])
psf_fit.update({'n_s':n_s_fit, 'theta_s':theta_s_fit})
comp2 = psf_fit.f_aureole1D(r)
y_fit = (1-frac) * comp1 + frac * comp2
plt.semilogy(r, y_fit, color=c, lw=2.5, ls=ls, alpha=0.8, label=lab+' comb.', zorder=4)
if lab=="med":
plt.semilogy(r, (1-frac) * comp1,
color="orange", lw=2, ls="--", alpha=0.7, label="med core",zorder=4)
plt.semilogy(r, frac * comp2,
color="seagreen", lw=2, ls="--", alpha=0.7, label="med aureole",zorder=4)
# if Amp_max is not None:
# std_fit = 10**fits[-1]
# contrast = Amp_max/(std_fit)
# y_min_contrast = y_fit.max()/contrast
# plt.axhline(y_min_contrast, color="k", ls="-.", alpha=0.5)
# plt.axhline(y_min_contrast*2, color="k", ls=":", alpha=0.5)
# plt.text(1, y_fit.max()/contrast*1.2, '1 $\sigma$', fontsize=10)
# plt.text(1, y_fit.max()/contrast*2.5, '2 $\sigma$', fontsize=10)
# r_max = r[np.argmin(abs(y_fit-y_fit.max()/contrast))]
# plt.xlim(0.9, 5*r_max)
# Draw boundaries etc.
if r_core is not None:
if figsize is not None:
plt.axvspan(np.atleast_1d(r_core).max(), theta_out/pixel_scale,
color='steelblue', alpha=0.15, zorder=1)
plt.axvspan(np.atleast_1d(r_core).min(), np.atleast_1d(r_core).max(),
color='seagreen', alpha=0.15, zorder=1)
plt.axvspan(plt.gca().get_xlim()[0], np.atleast_1d(r_core).min(),
color='gray', alpha=0.15, zorder=1)
if psf.aureole_model != "moffat":
for t in psf_fit.theta_s_pix:
plt.axvline(t, lw=2, ls='--', color='k', alpha=0.5)
plt.legend(loc=1, fontsize=12)
plt.xlabel(r"$\rm r\,[pix]$",fontsize=18)
plt.ylabel(r"$\rm Intensity$",fontsize=18)
plt.title("Recovered PSF from Fitting",fontsize=18)
plt.ylim(3e-9, 0.5)
plt.xscale("log")
plt.tight_layout()
if save:
plt.savefig("%s/Fit_PSF1D%s.png"%(save_dir, suffix),dpi=150)
plt.show()
plt.close()
def plot_bright_star_profile(tab_target, table_res_Rnorm, res_thumb,
bkg_sky=460, std_sky=2, pixel_scale=2.5, ZP=27.1,
mag_name='MAG_AUTO_corr', figsize=(8,6)):
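    """ Plot the radial surface brightness profiles of bright stars measured from the thumbnail cutouts """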
from .utils import Intensity2SB, cal_profile_1d
r = np.logspace(0.03,3,100)
z_mean_s, z_med_s = table_res_Rnorm['Imean'], table_res_Rnorm['Imed']
z_std_s, sky_mean_s = table_res_Rnorm['Istd'], table_res_Rnorm['Isky']
plt.figure(figsize=figsize)
ax = plt.subplot(111)
# adaptive colormap
cmap = plt.cm.plasma(np.linspace(0.01, 0.99, len(res_thumb)+np.sum(tab_target[mag_name]<10)+1))
ax.set_prop_cycle(plt.cycler('color', cmap))
for i, (num, sky_m, mag) in enumerate(zip(list(res_thumb.keys())[::-1],
sky_mean_s[::-1],tab_target[mag_name][::-1])):
if num in tab_target["NUMBER"]:
alpha = min(0.05*(18-mag), 0.8)
errorbar = True if mag<10 else False
ms = max((15-mag), 0)
lw = max((12-mag), 1.5)
else:
alpha = 0.5; errorbar=False
ms, lw = 3, 3
img, ma, cen = res_thumb[num]['image'], res_thumb[num]['mask'], res_thumb[num]['center']
r_rbin, I_rbin, _ = cal_profile_1d(img, cen=cen, mask=ma, dr=1.25,
ZP=ZP, sky_mean=bkg_sky, sky_std=std_sky,
xunit="pix", yunit="SB", errorbar=errorbar,
core_undersample=False,
color=None, lw=lw, markersize=ms, alpha=alpha)
if i==0:
plt.text(3, I_rbin[np.argmin(abs(r_rbin-10))], '%s mag'%np.around(mag, 1))
plt.text(14, I_rbin[np.argmin(abs(r_rbin-10))], '%s mag'%np.around(mag, 1))
I_sky = Intensity2SB(std_sky, 0, ZP=ZP, pixel_scale=pixel_scale)
plt.axhline(I_sky, color="k", ls="-.", alpha=0.5)
    plt.text(1.1, I_sky+0.5, r'1 $\sigma$', fontsize=10)
plt.ylim(30.5,16.5)
plt.xlim(1.,3e2)
plt.xscale('log')
plt.show()
|
py | b40e93fc8f5dc8f9ee6e96a3dbc444ce9a84f561 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 16 21:21:10 2018.
"""
import sys
data = []
n = int(input('Input Data range\n'))
for k in range(n):
a = len(data)
b = sys.getsizeof(data)
print('Length: {0:3d}; Size in bytes: {1:4d}'.format(a,b))
data.append(None)
|
py | b40e94a882b3a836b18824746f9752455bbdd836 | from abc import ABC, abstractmethod
from vision.domain.image import Image
class ICamera(ABC):
@abstractmethod
def take_picture(self) -> Image:
pass
|
py | b40e953427390e662d3de1145c83007254d651b7 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Support library for tests that start multiple brokers, e.g. HA or federation
import os, signal, string, tempfile, subprocess, socket, threading, time, imp, re
import qpid, traceback, signal
from qpid import connection, util
from qpid.compat import format_exc
from unittest import TestCase
from copy import copy
from threading import Thread, Lock, Condition
from logging import getLogger
from qpidtoollibs import BrokerAgent
# NOTE: Always import native client qpid.messaging, import swigged client
# qpid_messaging if possible. qpid_messaing is set to None if not available.
#
# qm is set to qpid_messaging if it is available, qpid.messaging if not.
# Use qm.X to specify names from the default messaging module.
#
# Set environment variable QPID_PY_NO_SWIG=1 to prevent qpid_messaging from loading.
#
# BrokerTest can be configured to determine which protocol is used by default:
#
# -DPROTOCOL="amqpX": Use protocol "amqpX". Defaults to amqp1.0 if available.
#
# The configured defaults can be over-ridden on BrokerTest.connect and some
# other methods by specifying native=True|False and protocol="amqpX"
#
import qpid.messaging
qm = qpid.messaging
qpid_messaging = None
def env_has_log_config():
"""True if there are qpid log configuratoin settings in the environment."""
return "QPID_LOG_ENABLE" in os.environ or "QPID_TRACE" in os.environ
if not os.environ.get("QPID_PY_NO_SWIG"):
try:
import qpid_messaging
from qpid.datatypes import uuid4
qm = qpid_messaging
# Silence warnings from swigged messaging library unless enabled in environment.
if not env_has_log_config():
qm.Logger.configure(["--log-enable=error"])
except ImportError:
print "Cannot load python SWIG bindings, falling back to native qpid.messaging."
log = getLogger("brokertest")
# Values for expected outcome of process at end of test
EXPECT_EXIT_OK=1 # Expect to exit with 0 status before end of test.
EXPECT_EXIT_FAIL=2 # Expect to exit with non-0 status before end of test.
EXPECT_RUNNING=3 # Expect to still be running at end of test
EXPECT_UNKNOWN=4 # No expectation, don't check exit status.
def find_exe(program):
"""Find an executable in the system PATH"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
mydir, name = os.path.split(program)
if mydir:
if is_exe(program): return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file): return exe_file
return None
def is_running(pid):
try:
os.kill(pid, 0)
return True
except:
return False
class BadProcessStatus(Exception):
pass
def error_line(filename, n=1):
"""Get the last n line(s) of filename for error messages"""
result = []
try:
f = open(filename)
try:
for l in f:
if len(result) == n: result.pop(0)
result.append(" "+l)
finally:
f.close()
except: return ""
return ":\n" + "".join(result)
def retry(function, timeout=10, delay=.001, max_delay=1):
"""Call function until it returns a true value or timeout expires.
Double the delay for each retry up to max_delay.
    Returns what function returns if true, False if timeout expires."""
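    # Typical usage elsewhere in this file: retry(lambda: self.poll() is not None)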
deadline = time.time() + timeout
ret = None
while True:
ret = function()
if ret: return ret
remaining = deadline - time.time()
if remaining <= 0: return False
delay = min(delay, remaining)
time.sleep(delay)
delay = min(delay*2, max_delay)
class AtomicCounter:
def __init__(self):
self.count = 0
self.lock = Lock()
def next(self):
self.lock.acquire();
ret = self.count
self.count += 1
self.lock.release();
return ret
_popen_id = AtomicCounter() # Popen identifier for use in output file names.
# Constants for file descriptor arguments to Popen
FILE = "FILE" # Write to file named after process
from subprocess import PIPE, STDOUT
class Popen(subprocess.Popen):
"""
Can set and verify expectation of process status at end of test.
Dumps command line, stdout, stderr to data dir for debugging.
"""
def __init__(self, cmd, expect=EXPECT_EXIT_OK, stdin=None, stdout=FILE, stderr=FILE):
"""Run cmd (should be a list of program and arguments)
expect - if set verify expectation at end of test.
stdout, stderr - can have the same values as for subprocess.Popen as well as
FILE (the default) which means write to a file named after the process.
        stdin - like subprocess.Popen (defaults to None here)
"""
self._clean = False
self._clean_lock = Lock()
if type(cmd) is type(""): cmd = [cmd] # Make it a list.
self.cmd = [ str(x) for x in cmd ]
self.expect = expect
self.id = _popen_id.next()
self.pname = "%s-%d" % (os.path.split(self.cmd[0])[1], self.id)
if stdout == FILE: stdout = open(self.outfile("out"), "w")
if stderr == FILE: stderr = open(self.outfile("err"), "w")
subprocess.Popen.__init__(self, self.cmd, bufsize=0, executable=None,
stdin=stdin, stdout=stdout, stderr=stderr)
f = open(self.outfile("cmd"), "w")
try: f.write("%s\n%d"%(self.cmd_str(), self.pid))
finally: f.close()
log.debug("Started process %s: %s" % (self.pname, " ".join(self.cmd)))
def __repr__(self): return "Popen<%s>"%(self.pname)
def outfile(self, ext): return "%s.%s" % (self.pname, ext)
def unexpected(self,msg):
err = error_line(self.outfile("err")) or error_line(self.outfile("out"))
raise BadProcessStatus("%s %s%s" % (self.pname, msg, err))
def teardown(self): # Clean up at end of test.
if self.expect == EXPECT_UNKNOWN:
try: self.kill() # Just make sure its dead
except: pass
elif self.expect == EXPECT_RUNNING:
if self.poll() != None:
self.unexpected("expected running, exit code %d" % self.returncode)
else:
try:
self.kill()
except Exception,e:
self.unexpected("exception from kill: %s" % str(e))
else:
retry(lambda: self.poll() is not None)
if self.returncode is None: # Still haven't stopped
self.kill()
self.unexpected("still running")
elif self.expect == EXPECT_EXIT_OK and self.returncode != 0:
self.unexpected("exit code %d" % self.returncode)
elif self.expect == EXPECT_EXIT_FAIL and self.returncode == 0:
self.unexpected("expected error")
self.wait()
def communicate(self, input=None):
ret = subprocess.Popen.communicate(self, input)
self._cleanup()
return ret
def is_running(self): return self.poll() is None
def assert_running(self):
if not self.is_running(): self.unexpected("Exit code %d" % self.returncode)
def wait(self):
ret = subprocess.Popen.wait(self)
self._cleanup()
return ret
def assert_exit_ok(self):
if self.wait() != 0: self.unexpected("Exit code %d" % self.returncode)
def terminate(self):
try: subprocess.Popen.terminate(self)
except AttributeError: # No terminate method
try:
os.kill( self.pid , signal.SIGTERM)
except AttributeError: # no os.kill, using taskkill.. (Windows only)
os.popen('TASKKILL /PID ' +str(self.pid) + ' /F')
self.wait()
def kill(self):
# Set to EXPECT_UNKNOWN, EXPECT_EXIT_FAIL creates a race condition
# if the process exits normally concurrent with the call to kill.
self.expect = EXPECT_UNKNOWN
try: subprocess.Popen.kill(self)
except AttributeError: # No terminate method
try:
os.kill( self.pid , signal.SIGKILL)
except AttributeError: # no os.kill, using taskkill.. (Windows only)
os.popen('TASKKILL /PID ' +str(self.pid) + ' /F')
self.wait()
def _cleanup(self):
"""Clean up after a dead process"""
self._clean_lock.acquire()
if not self._clean:
self._clean = True
try: self.stdin.close()
except: pass
try: self.stdout.close()
except: pass
try: self.stderr.close()
except: pass
self._clean_lock.release()
def cmd_str(self): return " ".join([str(s) for s in self.cmd])
def checkenv(name):
value = os.getenv(name)
if not value: raise Exception("Environment variable %s is not set" % name)
return value
def find_in_file(str, filename):
if not os.path.exists(filename): return False
f = open(filename)
try: return str in f.read()
finally: f.close()
class Broker(Popen):
"A broker process. Takes care of start, stop and logging."
_broker_count = 0
_log_count = 0
def __repr__(self): return "<Broker:%s:%d>"%(self.log, self.port())
def get_log(self):
return os.path.abspath(self.log)
def __init__(self, test, args=[], test_store=False, name=None, expect=EXPECT_RUNNING, port=0, wait=None, show_cmd=False):
"""Start a broker daemon. name determines the data-dir and log
file names."""
self.test = test
self._port=port
args = copy(args)
if BrokerTest.amqp_lib: args += ["--load-module", BrokerTest.amqp_lib]
if BrokerTest.store_lib and not test_store:
args += ['--load-module', BrokerTest.store_lib]
if BrokerTest.sql_store_lib:
args += ['--load-module', BrokerTest.sql_store_lib]
args += ['--catalog', BrokerTest.sql_catalog]
if BrokerTest.sql_clfs_store_lib:
args += ['--load-module', BrokerTest.sql_clfs_store_lib]
args += ['--catalog', BrokerTest.sql_catalog]
cmd = [BrokerTest.qpidd_exec, "--port", port, "--interface", "127.0.0.1", "--no-module-dir"] + args
if not "--auth" in args: cmd.append("--auth=no")
if wait != None:
cmd += ["--wait", str(wait)]
if name: self.name = name
else:
self.name = "broker%d" % Broker._broker_count
Broker._broker_count += 1
self.log = "%03d:%s.log" % (Broker._log_count, self.name)
self.store_log = "%03d:%s.store.log" % (Broker._log_count, self.name)
Broker._log_count += 1
cmd += ["--log-to-file", self.log]
cmd += ["--log-to-stderr=no"]
# Add default --log-enable arguments unless args already has --log arguments.
if not env_has_log_config() and not [l for l in args if l.startswith("--log")]:
args += ["--log-enable=info+"]
if test_store: cmd += ["--load-module", BrokerTest.test_store_lib,
"--test-store-events", self.store_log]
self.datadir = os.path.abspath(self.name)
cmd += ["--data-dir", self.datadir]
if show_cmd: print cmd
Popen.__init__(self, cmd, expect, stdout=PIPE)
test.teardown_add(self)
self._host = "127.0.0.1"
self._agent = None
log.debug("Started broker %s" % self)
def host(self): return self._host
def port(self):
# Read port from broker process stdout if not already read.
if (self._port == 0):
try: self._port = int(self.stdout.readline())
except ValueError, e:
raise Exception("Can't get port for broker %s (%s)%s: %s" %
(self.name, self.pname, error_line(self.log,5), e))
return self._port
def unexpected(self,msg):
raise BadProcessStatus("%s: %s (%s)" % (msg, self.name, self.pname))
def connect(self, timeout=5, native=False, **kwargs):
"""New API connection to the broker.
@param native if True force use of the native qpid.messaging client
even if swig client is available.
"""
if native: connection_class = qpid.messaging.Connection
else:
connection_class = qm.Connection
if (self.test.protocol and qm == qpid_messaging):
kwargs.setdefault("protocol", self.test.protocol)
return connection_class.establish(self.host_port(), timeout=timeout, **kwargs)
@property
def agent(self, **kwargs):
"""Return a BrokerAgent for this broker"""
if not self._agent: self._agent = BrokerAgent(self.connect(**kwargs))
return self._agent
def declare_queue(self, queue):
self.agent.addQueue(queue)
def _prep_sender(self, queue, durable, xprops):
s = queue + "; {create:always, node:{durable:" + str(durable)
if xprops != None: s += ", x-declare:{" + xprops + "}"
return s + "}}"
def send_message(self, queue, message, durable=True, xprops=None, session=None):
if session == None:
s = self.connect().session()
else:
s = session
s.sender(self._prep_sender(queue, durable, xprops)).send(message)
if session == None:
s.connection.close()
def send_messages(self, queue, messages, durable=True, xprops=None, session=None):
if session == None:
s = self.connect().session()
else:
s = session
sender = s.sender(self._prep_sender(queue, durable, xprops))
for m in messages: sender.send(m)
if session == None:
s.connection.close()
def get_message(self, queue):
s = self.connect().session()
m = s.receiver(queue+"; {create:always}", capacity=1).fetch(timeout=1)
s.acknowledge()
s.connection.close()
return m
def get_messages(self, queue, n):
s = self.connect().session()
receiver = s.receiver(queue+"; {create:always}", capacity=n)
m = [receiver.fetch(timeout=1) for i in range(n)]
s.acknowledge()
s.connection.close()
return m
def host_port(self): return "%s:%s" % (self.host(), self.port())
def ready(self, timeout=10, **kwargs):
"""Wait till broker is ready to serve clients"""
deadline = time.time()+timeout
while True:
try:
c = self.connect(timeout=timeout, **kwargs)
try:
c.session()
return # All good
finally: c.close()
except Exception,e: # Retry up to timeout
if time.time() > deadline:
raise RethrownException(
"Broker %s not responding: (%s)%s"%(
self.name,e,error_line(self.log, 5)))
def assert_log_clean(self, ignore=None):
log = open(self.get_log())
try:
error = re.compile("] error|] critical")
if ignore: ignore = re.compile(ignore)
else: ignore = re.compile("\000") # Won't match anything
for line in log.readlines():
assert not error.search(line) or ignore.search(line), "Errors in log file %s: %s"%(log, line)
finally: log.close()
def receiver_iter(receiver, timeout=0):
"""Make an iterator out of a receiver. Returns messages till Empty is raised."""
try:
while True:
yield receiver.fetch(timeout=timeout)
except qm.Empty:
pass
def browse(session, queue, timeout=0, transform=lambda m: m.content):
"""Return a list with the contents of each message on queue."""
r = session.receiver("%s;{mode:browse}"%(queue))
r.capacity = 100
try:
return [transform(m) for m in receiver_iter(r, timeout)]
finally:
r.close()
def assert_browse(session, queue, expect_contents, timeout=0, transform=lambda m: m.content, msg=None):
"""Assert that the contents of messages on queue (as retrieved
using session and timeout) exactly match the strings in
expect_contents"""
if msg is None: msg = "browse '%s' failed" % queue
actual_contents = browse(session, queue, timeout, transform=transform)
if msg: msg = "%s: %r != %r"%(msg, expect_contents, actual_contents)
assert expect_contents == actual_contents, msg
def assert_browse_retry(session, queue, expect_contents, timeout=1, delay=.001, transform=lambda m:m.content, msg="browse failed"):
"""Wait up to timeout for contents of queue to match expect_contents"""
test = lambda: browse(session, queue, 0, transform=transform) == expect_contents
retry(test, timeout, delay)
actual_contents = browse(session, queue, 0, transform=transform)
if msg: msg = "%s: %r != %r"%(msg, expect_contents, actual_contents)
assert expect_contents == actual_contents, msg
class BrokerTest(TestCase):
"""
Tracks processes started by test and kills at end of test.
Provides a well-known working directory for each test.
"""
def __init__(self, *args, **kwargs):
self.longMessage = True # Enable long messages for assert*(..., msg=xxx)
TestCase.__init__(self, *args, **kwargs)
# Environment settings.
qpidd_exec = "qpidd"
ha_lib = os.getenv("HA_LIB")
xml_lib = os.getenv("XML_LIB")
amqp_lib = os.getenv("AMQP_LIB")
qpid_config_exec = "qpid-config"
qpid_route_exec = "qpid-route"
receiver_exec = "receiver"
sender_exec = "sender"
sql_store_lib = os.getenv("STORE_SQL_LIB")
sql_clfs_store_lib = os.getenv("STORE_SQL_CLFS_LIB")
sql_catalog = os.getenv("STORE_CATALOG")
store_lib = os.getenv("STORE_LIB")
test_store_lib = os.getenv("TEST_STORE_LIB")
rootdir = os.getcwd()
try:
import proton
PN_VERSION = (proton.VERSION_MAJOR, proton.VERSION_MINOR)
except ImportError:
# proton not on path, can't determine version
PN_VERSION = (0, 0)
except AttributeError:
# prior to 0.8 proton did not expose version info
PN_VERSION = (0, 7)
PN_TX_VERSION = (0, 9)
amqp_tx_supported = PN_VERSION >= PN_TX_VERSION
def configure(self, config): self.config=config
def setUp(self):
defs = self.config.defines
outdir = defs.get("OUTDIR") or "brokertest.tmp"
self.dir = os.path.join(self.rootdir, outdir, self.id())
os.makedirs(self.dir)
os.chdir(self.dir)
self.teardown_list = [] # things to tear down at end of test
if qpid_messaging and self.amqp_lib: default_protocol="amqp1.0"
else: default_protocol="amqp0-10"
self.protocol = defs.get("PROTOCOL") or default_protocol
self.tx_protocol = self.protocol
if not self.amqp_tx_supported: self.tx_protocol = "amqp0-10"
def tearDown(self):
err = []
self.teardown_list.reverse() # Tear down in reverse order
for p in self.teardown_list:
log.debug("Tearing down %s", p)
try:
# Call the first of the methods that is available on p.
for m in ["teardown", "close"]:
a = getattr(p, m, None)
if a: a(); break
                else: raise Exception("Don't know how to tear down %s" % p)
except Exception, e:
if m != "close": # Ignore connection close errors.
err.append("%s: %s"%(e.__class__.__name__, str(e)))
self.teardown_list = [] # reset in case more processes start
os.chdir(self.rootdir)
if err: raise Exception("Unexpected process status:\n "+"\n ".join(err))
def teardown_add(self, thing):
"""Call thing.teardown() or thing.close() at end of test"""
self.teardown_list.append(thing)
def popen(self, cmd, expect=EXPECT_EXIT_OK, stdin=None, stdout=FILE, stderr=FILE):
"""Start a process that will be killed at end of test, in the test dir."""
os.chdir(self.dir)
p = Popen(cmd, expect, stdin=stdin, stdout=stdout, stderr=stderr)
self.teardown_add(p)
return p
def broker(self, args=[], name=None, expect=EXPECT_RUNNING, wait=True, port=0, show_cmd=False, **kw):
"""Create and return a broker ready for use"""
b = Broker(self, args=args, name=name, expect=expect, port=port, show_cmd=show_cmd, **kw)
if (wait):
try: b.ready()
except Exception, e:
raise RethrownException("Failed to start broker %s(%s): %s" % (b.name, b.log, e))
return b
def check_output(self, args, stdin=None):
p = self.popen(args, stdout=PIPE, stderr=STDOUT)
out = p.communicate(stdin)
if p.returncode != 0:
raise Exception("%s exit code %s, output:\n%s" % (args, p.returncode, out[0]))
return out[0]
def browse(self, *args, **kwargs): browse(*args, **kwargs)
def assert_browse(self, *args, **kwargs): assert_browse(*args, **kwargs)
def assert_browse_retry(self, *args, **kwargs): assert_browse_retry(*args, **kwargs)
def protocol_option(self, connection_options=""):
if "protocol" in connection_options: return connection_options
else: return ",".join(filter(None, [connection_options,"protocol:'%s'"%self.protocol]))
def join(thread, timeout=30):
thread.join(timeout)
if thread.isAlive(): raise Exception("Timed out joining thread %s"%thread)
class RethrownException(Exception):
"""Captures the stack trace of the current exception to be thrown later"""
def __init__(self, msg=""):
Exception.__init__(self, msg+"\n"+format_exc())
class StoppableThread(Thread):
"""
Base class for threads that do something in a loop and periodically check
to see if they have been stopped.
"""
def __init__(self):
self.stopped = False
self.error = None
Thread.__init__(self)
def stop(self):
self.stopped = True
join(self)
if self.error: raise self.error
# Options for a client that wants to reconnect automatically.
RECONNECT_OPTIONS="reconnect:true,reconnect-timeout:10,reconnect-urls-replace:true"
class NumberedSender(Thread):
"""
Thread to run a sender client and send numbered messages until stopped.
"""
def __init__(self, broker, max_depth=None, queue="test-queue",
connection_options=RECONNECT_OPTIONS,
failover_updates=False, url=None, args=[]):
"""
max_depth: enable flow control, ensure sent - received <= max_depth.
Requires self.notify_received(n) to be called each time messages are received.
"""
Thread.__init__(self)
cmd = ["qpid-send",
"--broker", url or broker.host_port(),
"--address", "%s;{create:always}"%queue,
"--connection-options", "{%s}"%(broker.test.protocol_option(connection_options)),
"--content-stdin"
] + args
if failover_updates: cmd += ["--failover-updates"]
self.sender = broker.test.popen(
cmd, expect=EXPECT_RUNNING, stdin=PIPE)
self.condition = Condition()
self.max = max_depth
self.received = 0
self.stopped = False
self.error = None
self.queue = queue
def write_message(self, n):
self.sender.stdin.write(str(n)+"\n")
self.sender.stdin.flush()
def run(self):
try:
self.sent = 0
while not self.stopped:
self.sender.assert_running()
if self.max:
self.condition.acquire()
while not self.stopped and self.sent - self.received > self.max:
self.condition.wait()
self.condition.release()
self.write_message(self.sent)
self.sent += 1
except Exception, e:
self.error = RethrownException(
"%s: (%s)%s"%(self.sender.pname,e,
error_line(self.sender.outfile("err"))))
def notify_received(self, count):
"""Called by receiver to enable flow control. count = messages received so far."""
self.condition.acquire()
self.received = count
self.condition.notify()
self.condition.release()
def stop(self):
self.condition.acquire()
try:
self.stopped = True
self.condition.notify()
finally: self.condition.release()
join(self)
self.write_message(-1) # end-of-messages marker.
if self.error: raise self.error
class NumberedReceiver(Thread):
"""
Thread to run a receiver client and verify it receives
sequentially numbered messages.
"""
def __init__(self, broker, sender=None, queue="test-queue",
connection_options=RECONNECT_OPTIONS,
failover_updates=False, url=None, args=[]):
"""
sender: enable flow control. Call sender.received(n) for each message received.
"""
Thread.__init__(self)
self.test = broker.test
cmd = ["qpid-receive",
"--broker", url or broker.host_port(),
"--address", "%s;{create:always}"%queue,
"--connection-options", "{%s}"%(broker.test.protocol_option(connection_options)),
"--forever"
]
if failover_updates: cmd += [ "--failover-updates" ]
cmd += args
self.receiver = self.test.popen(
cmd, expect=EXPECT_RUNNING, stdout=PIPE)
self.lock = Lock()
self.error = None
self.sender = sender
self.received = 0
self.queue = queue
def read_message(self):
n = int(self.receiver.stdout.readline())
return n
def run(self):
try:
m = self.read_message()
while m != -1:
self.receiver.assert_running()
assert m <= self.received, "%s missing message %s>%s"%(self.queue, m, self.received)
if (m == self.received): # Ignore duplicates
self.received += 1
if self.sender:
self.sender.notify_received(self.received)
m = self.read_message()
except Exception, e:
self.error = RethrownException(
"%s: (%s)%s"%(self.receiver.pname,e,
error_line(self.receiver.outfile("err"))))
def check(self):
"""Raise an exception if there has been an error"""
if self.error: raise self.error
def stop(self):
"""Returns when termination message is received"""
join(self)
self.check()
def import_script(path):
"""
Import executable script at path as a module.
Requires some trickery as scripts are not in standard module format
"""
f = open(path)
try:
name=os.path.split(path)[1].replace("-","_")
return imp.load_module(name, f, path, ("", "r", imp.PY_SOURCE))
finally: f.close()
|
py | b40e9592fe62c2017e79612d2b201dbc82a4fb4e | import os
import sys
import pathlib
from utilities import get_random_hash
from flask import Flask, flash, request, redirect, url_for, send_from_directory, jsonify, Response
UPLOAD_FOLDER = os.environ.get('UPLOAD_FOLDER') if os.environ.get('UPLOAD_FOLDER') else '/tmp'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
SECRET = os.environ.get('SECRET')
app = Flask(__name__)
app.config['SERVER_NAME'] = os.environ.get('SERVER_NAME')
def allowed_file(filename):
return '.' in filename and \
pathlib.Path(filename).suffix[1:] in ALLOWED_EXTENSIONS
def is_secret_valid(guess):
    return guess == SECRET
def verify_auth_headers():
if 'secret' in request.headers:
guess = request.headers['secret']
return is_secret_valid(guess)
return False
def upload_file_and_return_external_path(file):
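    # Store the upload under a random hash-based name; if the generated name
    # already exists, recurse to draw a new one before saving.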
extension = pathlib.Path(file.filename).suffix
filename = get_random_hash() + extension
filepath = os.path.join(UPLOAD_FOLDER, filename)
if os.path.exists(filepath):
        return upload_file_and_return_external_path(file)
else:
file.save(filepath)
return url_for('upload', filename=filename, _external=True)
@app.route('/')
def index():
return '''
<!doctype html>
'''
@app.route('/<filename>', methods=['GET'])
def upload(filename):
if allowed_file(filename):
return send_from_directory(UPLOAD_FOLDER, filename)
@app.route('/api/auth', methods=['GET'])
def api_auth():
if verify_auth_headers():
return jsonify(
success=True
)
return jsonify(
success=False,
message='Invalid secret'
)
@app.route('/api/upload', methods=['POST'])
def api_upload():
if verify_auth_headers():
# check if the post request has the file part
if 'file' not in request.files:
return jsonify(
success=False,
message='No file present'
)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
return jsonify(
success=False,
message='Filename missing'
)
if file and allowed_file(file.filename):
path = upload_file_and_return_external_path(file)
return jsonify(
success=True,
path=path
)
else:
return jsonify(
success=False,
message='File type not allowed'
)
return jsonify(
success=False,
message='Invalid secret'
)
|
py | b40e95bcb993a2077fc8935f9da74674d82dac0a | #!/usr/bin/env python
"""Script for creating release tarballs"""
import os
import sys
import glob
import shutil
import subprocess
import zipfile
exclude = [".svn", "*.pyc", "*~", "*.orig", "*.patch", "__basedir__/utils",
"__basedir__/setup_base.py", "*.prof", "#*", "__basedir__/build",
'__basedir__/tests', '*.out', '__basedir__/dist',
'__basedir__/html5lib.egg-info', '__basedir__/print-stats.py']
class Package(object):
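    """Build a release: copy the source tree minus excluded files, substitute
    the version/status placeholders in setup.py and __init__.py, run the
    release preprocessor and bundle the result into a zip file."""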
def __init__(self, inDir, outDir, version="0", status=4, installDir="~"):
#List of files to remove on exit
self.version = str(version)
self.status = str(status)
self.cleanupFiles = []
self.inDir = os.path.abspath(inDir)
self.outDir = os.path.abspath(outDir)
self.exclude = self.getExcludeList()
self.fileList = self.getFileList()
self.installDir = installDir
self.outFiles = []
def runall(self):
self.copyTestData()
self.copy()
self.makeInitFile()
self.makeSetupFile()
self.preprocess()
#if self.test():
self.makeZipFile()
self.cleanup()
def getExcludeList(self):
rv = []
for item in exclude:
rv.append(item.replace("__basedir__", self.inDir))
return rv
def copyTestData(self):
outDir = os.path.join(self.inDir, "tests/testdata")
print()
try:
os.mkdir(outDir)
except OSError:
#the directory already exists
if not os.path.exists(outDir):
raise
inBaseDir = os.path.abspath(os.path.join(self.inDir, "..", "testdata"))
dirWalker = os.walk(inBaseDir)
for (curDir, dirs, files) in dirWalker:
outDir = os.path.join(self.inDir, "tests", "testdata", curDir[len(inBaseDir)+1:])
for dir in dirs[:]:
if self.excludeItem(curDir, dir):
dirs.remove(dir)
else:
try:
os.mkdir(os.path.join(outDir, dir))
except OSError:
#the directory already exists
pass
for fn in files[:]:
if not self.excludeItem(curDir, fn):
newFn = os.path.join(outDir, fn)
shutil.copy(os.path.join(curDir, fn), newFn)
self.cleanupFiles.append(newFn)
def getFileList(self):
"""Get a list of files to copy"""
fileList = []
dirWalker = os.walk(self.inDir)
for (inDir, dirs, files) in dirWalker:
basePath = os.path.abspath(inDir)
for dir in dirs[:]:
if self.excludeItem(basePath, dir):
dirs.remove(dir)
else:
fileList.append(os.path.join(basePath, dir))
for fn in files[:]:
if self.excludeItem(basePath, fn):
files.remove(fn)
else:
fileList.append(os.path.join(basePath, fn))
return fileList
def excludeItem(self, baseName, filename):
rv = False
fn = os.path.join(baseName,filename)
for item in self.exclude:
for f in glob.glob(os.path.join(baseName, item)):
if os.path.samefile(f, fn):
rv = True
break
if rv:
break
return rv
def makeSetupFile(self):
statusStrings = {"1":"1 - Planning",
"2":"2 - Pre-Alpha",
"3":"3 - Alpha",
"4":"4 - Beta",
"5":"5 - Production/Stable",
"6":"6 - Mature",
"7":"7 - Inactive"}
inFile = open(os.path.join(self.outDir, "setup.py"))
text = "".join(inFile.readlines())
inFile.close()
outFile = open(os.path.join(self.outDir, "setup.py"), "w")
outFile.write(text%{"status":statusStrings[self.status],
"version":self.version})
def makeInitFile(self):
inFile = open(os.path.join(self.outDir, "src", "html5lib", "__init__.py"))
text = "".join(inFile.readlines())
outFile = open(os.path.join(self.outDir, "src", "html5lib", "__init__.py"),
"w")
outFile.write(text%{"version":self.version})
def copy(self):
if not os.path.exists(self.outDir):
os.mkdir(self.outDir)
for inPath in self.fileList:
filename = inPath[len(self.inDir)+1:]
outPath = os.path.join(self.outDir, filename)
self.outFiles.append(outPath)
if os.path.isdir(inPath):
try:
os.mkdir(outPath)
except OSError:
#File may already exist
pass
else:
shutil.copyfile(inPath, outPath)
def preprocess(self):
p = Preprocessor()
newOutFiles = []
for fn in self.outFiles:
if os.path.isfile(fn):
newOutFiles.append(p.process(fn, self.outDir))
self.outFiles = newOutFiles
def test(self):
dir = os.path.abspath(os.curdir)
os.chdir(self.outDir)
install = subprocess.call(("python", os.path.join(self.outDir,
"setup.py"),
"install", "--home="+self.installDir))
subprocess.call(("python", os.path.join(self.outDir, "setup.py"),
"clean"))
test = subprocess.call(("python", os.path.join(self.outDir, "tests",
"runtests.py")))
os.chdir(dir)
return install==0 and test==0
def makeZipFile(self):
z = zipfile.ZipFile(os.path.join(self.outDir,
"html5lib-%s.zip"%self.version), 'w',
zipfile.ZIP_DEFLATED)
for f in self.outFiles:
z.write(f, os.path.join("html5lib-%s"%self.version,
f[len(self.outDir)+1:]))
z.close()
def cleanup(self):
#Doesn't yet clean up everything
for f in self.outFiles:
os.remove(f)
class Preprocessor(object):
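    """Process #RELEASE directives embedded in the sources: 'remove' drops a
    section up to #END RELEASE, 'add' uncomments such a section, and 'move'
    relocates the output file to another directory."""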
def __init__(self):
self.instructions = {"remove":self.remove,
"add":self.add,
"move":self.move}
def process(self, fn, inDir):
self.inDir = inDir
self.outPath = fn
f = open(fn)
self.inData = f.readlines()
self.outData = []
while self.inData:
line = self.inData.pop(0)
instruction = self.getInstruction(line)
if instruction is not None:
self.instructions[instruction](line)
else:
self.outData.append(line)
#Write to the output file
f = open(self.outPath, 'w')
for line in self.outData:
f.write(line)
if self.outPath != fn:
os.remove(fn)
return self.outPath
def getInstruction(self, line):
rv = None
if line.startswith("#RELEASE"):
for item in list(self.instructions.keys()):
if line[len("#RELEASE"):].strip().startswith(item):
rv = item
break
return rv
def remove(self, line):
"""Remove a section of the input data"""
while self.inData:
data = self.inData.pop(0)
if data.startswith("#END RELEASE"):
break
def add(self, line):
while self.inData:
data = self.inData.pop(0)
if data.startswith("#END RELEASE"):
break
else:
self.outData.append(data.strip("#"))
def move(self, line):
self.outPath = os.path.abspath(os.path.join(self.inDir,
line[line.find("move")+4:].strip(),
self.outPath[len(self.inDir)+1:]))
dirName = os.path.dirname(self.outPath)
if not os.path.exists(dirName):
dirsToCreate = []
while not os.path.exists(dirName):
dirsToCreate.append(dirName)
dirName = os.path.dirname(dirName)
for item in dirsToCreate[::-1]:
os.mkdir(item)
|
py | b40e95ea2486e3320808db3ed1522a597c57a064 | # --------------------------------------------------------
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Copyright (c) 2017 ShanghaiTech PLUS Group
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Zheng Zhang
# Written by Songyang Zhang
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix='', withRelu=False, withBn=True, bn_mom=0.9, workspace=256):
conv = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad,
name='%s%s_conv2d' % (name, suffix), workspace=workspace)
if withBn:
conv = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='%s%s_bn' % (name, suffix))
if withRelu:
conv = mx.sym.Activation(data=conv, act_type='relu', name='%s%s_relu' % (name, suffix))
return conv
def Separable_Conv(data, num_in_channel, num_out_channel, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name=None, suffix='', depth_mult=1, withBn=True, bn_mom=0.9, workspace=256):
# original version of Separable Convolution
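    # A separable convolution factorizes a standard convolution into a per-channel
    # (depthwise) spatial convolution followed by a 1x1 (pointwise) convolution
    # that mixes channels; the two stages below implement exactly that.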
# depthwise convolution
#channels = mx.sym.split(data=data, axis=1, num_outputs=num_in_channel) # for new version of mxnet > 0.8
channels = mx.sym.SliceChannel(data=data, axis=1, num_outputs=num_in_channel) # for old version of mxnet <= 0.8
depthwise_outs = [mx.sym.Convolution(data=channels[i], num_filter=depth_mult, kernel=kernel,
stride=stride, pad=pad, name=name+'_depthwise_kernel_'+str(i), workspace=workspace)
for i in range(num_in_channel)]
depthwise_out = mx.sym.Concat(*depthwise_outs)
# pointwise convolution
pointwise_out = Conv(data=depthwise_out, num_filter=num_out_channel, name=name+'_pointwise_kernel', withBn=False, bn_mom=0.9, workspace=256)
if withBn:
pointwise_out = mx.sym.BatchNorm(data=pointwise_out, fix_gamma=False, eps=2e-5, momentum=bn_mom, name='%s%s_bn' % (name, suffix))
return pointwise_out
class xception_65_deeplab_v3_plus(Symbol):
"""Xception 65 Deeplab symbol
"""
def __init__(self):
"""
Use __init__ to define parameter network needs
"""
self.eps = 1e-5
self.use_global_stats = True
self.workspace = 4096
self.units = (3, 4, 23, 3) # use for 101
self.filter_list = [256, 512, 1024, 2048]
def get_resnet_conv(self, data):
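        # conv1 stem followed by ResNet-style bottleneck stages; each
        # branch2a/2b/2c triplet is a 1x1 -> 3x3 -> 1x1 bottleneck, with a 1x1
        # projection shortcut (branch1) at the first block of every stage.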
# Conv -- 1
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64,
pad=(3, 3), kernel=(7, 7), stride=(2, 2),
no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1,
use_global_stats=True, fix_gamma=False,
eps = self.eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1,
act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu,
pooling_convention='full', pad=(0, 0),
kernel=(3, 3),stride=(2, 2), pool_type='max')
# Conv -- 2
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1,
num_filter=256, pad=(0, 0),kernel=(1, 1),
stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1,
use_global_stats=True, fix_gamma=False, eps = self.eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1,
num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a,
use_global_stats=True, fix_gamma=False, eps = self.eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu',
data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b',
data=res2a_branch2a_relu, num_filter=64,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b,
use_global_stats=True, fix_gamma=False, eps = self.eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
# Conv -- 3
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
# Conv -- 4
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
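# --- Editorial sketch, not part of the original source. The res3b*/res4b* blocks above are
# structurally identical bottlenecks (1x1 -> 3x3 -> 1x1 plus an identity shortcut), so the
# same graph could be emitted by a small helper instead of being written out by hand. This
# is a minimal sketch under that assumption; `eps` stands in for self.eps and the naming
# scheme mirrors the hand-written symbols above.
import mxnet as mx

def bottleneck_unit(data, prefix, num_mid, num_out, eps, dilate=(1, 1)):
    """Identity-shortcut ResNet bottleneck with frozen BatchNorm, as used above."""
    def conv_bn(x, suffix, num_filter, kernel, pad, dilate=(1, 1)):
        conv = mx.symbol.Convolution(name='res%s_%s' % (prefix, suffix), data=x,
                                     num_filter=num_filter, pad=pad, kernel=kernel,
                                     stride=(1, 1), dilate=dilate, no_bias=True)
        return mx.symbol.BatchNorm(name='bn%s_%s' % (prefix, suffix), data=conv,
                                   use_global_stats=True, fix_gamma=False, eps=eps)
    branch2a = mx.symbol.Activation(name='res%s_branch2a_relu' % prefix, act_type='relu',
                                    data=conv_bn(data, 'branch2a', num_mid, (1, 1), (0, 0)))
    branch2b = mx.symbol.Activation(name='res%s_branch2b_relu' % prefix, act_type='relu',
                                    data=conv_bn(branch2a, 'branch2b', num_mid, (3, 3), dilate, dilate))
    branch2c = conv_bn(branch2b, 'branch2c', num_out, (1, 1), (0, 0))
    out = mx.symbol.broadcast_add(name='res%s' % prefix, *[data, branch2c])
    return mx.symbol.Activation(name='res%s_relu' % prefix, data=out, act_type='relu')

# The 22 identity units of conv4 above could then be generated as:
#   body = res4a_relu
#   for i in range(1, 23):
#       body = bottleneck_unit(body, '4b%d' % i, num_mid=256, num_out=1024, eps=self.eps)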
# Conv -- 5
###################### Conv5 BottleNeck 1
# 1x1 skip
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5a_branch1 = bn5a_branch1
# 1x1 conv
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
# 3x3 conv dilated
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512,
pad=(2, 2), kernel=(3, 3), dilate=(2, 2), stride=(1, 1), no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
# 1x1 conv
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5a_branch2c = bn5a_branch2c
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
###################### Conv5 BottleNeck 2
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512,
pad=(2, 2), kernel=(3, 3), dilate=(2, 2), stride=(1, 1), no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5b_branch2c = bn5b_branch2c
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
###################### Conv5 BottleNeck 3
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512,
pad=(2, 2), kernel=(3, 3), dilate=(2, 2), stride=(1, 1), no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True,
fix_gamma=False, eps = self.eps)
scale5c_branch2c = bn5c_branch2c
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res5c_relu
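# --- Editorial sketch, not part of the original method: why the dilated conv5 stage above
# keeps the feature map at stride 16 instead of halving it again. Along one axis an MXNet
# Convolution produces floor((H + 2*pad - dilate*(kernel - 1) - 1) / stride) + 1 outputs;
# with kernel=3, pad=2, dilate=2, stride=1 (the res5*_branch2b layers) this equals H, so
# spatial size is preserved while the receptive field grows.
def conv_out_size(h, kernel, pad, dilate=1, stride=1):
    """Output height/width of a convolution along one axis."""
    return (h + 2 * pad - dilate * (kernel - 1) - 1) // stride + 1

assert conv_out_size(32, kernel=3, pad=2, dilate=2) == 32   # dilated 3x3 of conv5
assert conv_out_size(32, kernel=3, pad=1) == 32             # ordinary 3x3 of conv2-conv4
assert conv_out_size(32, kernel=1, pad=0, stride=2) == 16   # stride-2 1x1 projection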
def get_xception_backbone(self, data):
# Unfinished placeholder in the original source; only the channel plan is sketched.
filter_list = [64, 64, 128, 364, 512, 768, 1024]  # smaller one
def get_entry_flow(self, data):
# Unfinished placeholder in the original source; the Convolution call still needs its
# data/kernel/num_filter arguments before it can run.
block1 = mx.symbol.Convolution()
def get_middle_flow(self, data):
pass
def get_exit_flow(self, data):
pass
def get_aspp_symbol(self, atrous_rate):
# Unfinished placeholder in the original source.
data = mx.symbol.Variable(name="data")
# num_filter was left blank in the original (a syntax error); 256 is an editorial
# assumption, chosen only so that the statement parses.
aspp_1 = mx.symbol.Convolution(name='aspp_conv1', data=data, num_filter=256,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
def get_train_symbol(self, num_classes):
"""Get symbol for training
Args:
num_classes:
number of classes
Return:
the symbol for training
"""
data = mx.symbol.Variable(name="data")
seg_cls_gt = mx.symbol.Variable(name='label')
# shared convolutional layers
conv_feat = self.get_resnet_conv(data)
# subsequent fc layers by haozhi
fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0)
fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0)
fc6 = mx.symbol.Convolution(data=conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="fc6",
bias=fc6_bias, weight=fc6_weight, workspace=self.workspace)
relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6')
score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0)
score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0)
score = mx.symbol.Convolution(data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name="score",
bias=score_bias, weight=score_weight, workspace=self.workspace)
upsampling = mx.symbol.Deconvolution(data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16),
num_group=num_classes, no_bias=True, name='upsampling',
attr={'lr_mult': '0.0'}, workspace=self.workspace)
croped_score = mx.symbol.Crop(*[upsampling, data], offset=(8, 8), name='croped_score')
softmax = mx.symbol.SoftmaxOutput(data=croped_score, label=seg_cls_gt, normalization='valid', multi_output=True,
use_ignore=True, ignore_label=255, name="softmax")
return softmax
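# --- Editorial sketch, not part of the original method: the arithmetic behind the
# upsampling head above. A Deconvolution with kernel 32, stride 16 and no padding maps an
# h x w score map to (16*h + 16) x (16*w + 16), so the Crop with offset (8, 8) against the
# input recovers exactly the data resolution before the softmax. Assumes the stride-16
# backbone turns a 512 x 512 input into a 32 x 32 score map.
def deconv_out_size(h, kernel=32, stride=16, pad=0):
    """Output size of a transposed convolution along one axis."""
    return (h - 1) * stride + kernel - 2 * pad

score_h = 512 // 16                    # 32
up_h = deconv_out_size(score_h)        # 16*32 + 16 = 528
assert up_h - 2 * 8 == 512             # offset (8, 8) centres the crop back onto the input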
def get_test_symbol(self, num_classes):
"""Get symbol for testing
Args:
num_classes:
number of classes
Return:
the symbol for testing
"""
data = mx.symbol.Variable(name="data")
# shared convolutional layers
conv_feat = self.get_resnet_conv(data)
fc6_bias = mx.symbol.Variable('fc6_bias', lr_mult=2.0)
fc6_weight = mx.symbol.Variable('fc6_weight', lr_mult=1.0)
fc6 = mx.symbol.Convolution(
data=conv_feat, kernel=(1, 1), pad=(0, 0), num_filter=1024, name="fc6", bias=fc6_bias, weight=fc6_weight,
workspace=self.workspace)
relu_fc6 = mx.sym.Activation(data=fc6, act_type='relu', name='relu_fc6')
score_bias = mx.symbol.Variable('score_bias', lr_mult=2.0)
score_weight = mx.symbol.Variable('score_weight', lr_mult=1.0)
score = mx.symbol.Convolution(
data=relu_fc6, kernel=(1, 1), pad=(0, 0), num_filter=num_classes, name="score", bias=score_bias,
weight=score_weight, workspace=self.workspace)
upsampling = mx.symbol.Deconvolution(
data=score, num_filter=num_classes, kernel=(32, 32), stride=(16, 16), num_group=num_classes, no_bias=True,
name='upsampling', attr={'lr_mult': '0.0'}, workspace=self.workspace)
croped_score = mx.symbol.Crop(*[upsampling, data], offset=(8, 8), name='croped_score')
softmax = mx.symbol.SoftmaxOutput(data=croped_score, normalization='valid', multi_output=True, use_ignore=True,
ignore_label=255, name="softmax")
return softmax
def get_symbol(self, cfg, is_train=True):
"""Return a generated symbol, it also need to be assigned to self.sym
Args:
Return:
"""
# config alias for convenient
num_classes = cfg.dataset.NUM_CLASSES
if is_train:
self.sym = self.get_train_symbol(num_classes=num_classes)
else:
self.sym = self.get_test_symbol(num_classes=num_classes)
return self.sym
def init_weights(self, cfg, arg_params, aux_params):
"""Initialize the weight
Args:
cfg:
configurations
arg_params:
weight parameters
aug_params:
auxliary paramters
"""
arg_params['fc6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc6_weight'])
arg_params['fc6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc6_bias'])
arg_params['score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['score_weight'])
arg_params['score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['score_bias'])
arg_params['upsampling_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['upsampling_weight'])
init = mx.init.Initializer()
init._init_bilinear('upsample_weight', arg_params['upsampling_weight'])
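# --- Editorial sketch, not part of the original method: the bilinear kernel that
# Initializer._init_bilinear is expected to write into `upsampling_weight` above, so the
# deconvolution starts out as plain bilinear interpolation. Reference only; MXNet's own
# implementation is authoritative.
import numpy as np

def bilinear_kernel(size):
    """size x size bilinear interpolation kernel (size=32 for the upsampling layer above)."""
    f = np.ceil(size / 2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    og = np.ogrid[:size, :size]
    return (1 - abs(og[0] / f - c)) * (1 - abs(og[1] / f - c))

# bilinear_kernel(4) ->
# [[0.0625 0.1875 0.1875 0.0625]
#  [0.1875 0.5625 0.5625 0.1875]
#  [0.1875 0.5625 0.5625 0.1875]
#  [0.0625 0.1875 0.1875 0.0625]]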
|
py | b40e9671db6e5ed74f9d8c1f36de09bc2245880b | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2013 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
# ****************************************************************************
# This module is part of the package MOM.DBW.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# MOM.DBW.Pid_Manager
#
# Purpose
# Base class for database backend specific manager for permanent ids
#
# Revision Dates
# 11-May-2010 (CT) Creation
# 11-May-2010 (MG) `__init__` added
# 12-May-2010 (CT) `retire` added
# 17-May-2010 (MG) `kw` added to `__call__`
# 4-Aug-2012 (CT) Remove implementation of `retire`
# 6-Jun-2013 (CT) Use `@subclass_responsibility`
# 26-Aug-2013 (CT) Move `__call__` to `HPS`
# ««revision-date»»···
#--
from _TFL import TFL
from _MOM import MOM
import _MOM._DBW
from _TFL.Decorator import subclass_responsibility
import _TFL._Meta.Object
class _Pid_Manager_ (TFL.Meta.Object) :
"""Base class for database backend specific manager for permanent ids."""
_real_name = "Pid_Manager"
def __init__ (self, ems, db_url) :
self.ems = ems
# end def __init__
@subclass_responsibility
def new (self, entity) :
"""Return a new `pid` to be used for `entity`."""
# end def new
@subclass_responsibility
def query (self, pid) :
"""Return entity with `pid`."""
# end def query
@subclass_responsibility
def reserve (self, entity, pid) :
"""Reserve `pid` for use for `entity.` `pid` must not be already used
for any other entity.
"""
# end def reserve
@subclass_responsibility
def retire (self, entity) :
"""Retire any resources held for `entity` (but `entity.pid` won't get
reused, ever).
"""
# end def retire
Pid_Manager = _Pid_Manager_ # end class
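### Editorial sketch, not part of MOM: a hypothetical, purely in-memory backend could
### satisfy the interface above roughly as follows. All names below are invented for
### illustration; a real backend must also persist the pid <-> entity association.
class _In_Memory_Pid_Manager_Sketch_ (Pid_Manager) :
    """Toy backend keeping the pid -> entity map in a plain dict (illustration only)."""
    def __init__ (self, ems, db_url) :
        self.__super.__init__ (ems, db_url)
        self._map = {}
        self._max = 0
    # end def __init__
    def new (self, entity) :
        self._max += 1
        self._map [self._max] = entity
        return self._max
    # end def new
    def query (self, pid) :
        return self._map [pid]
    # end def query
    def reserve (self, entity, pid) :
        assert pid not in self._map, pid
        self._map [pid] = entity
        self._max = max (self._max, pid)
        return pid
    # end def reserve
    def retire (self, entity) :
        self._map.pop (getattr (entity, "pid", None), None)
    # end def retire
# end class _In_Memory_Pid_Manager_Sketch_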
if __name__ != "__main__" :
MOM.DBW._Export ("*")
### __END__ MOM.DBW.Pid_Manager
|
py | b40e975276627e5b48367180fb55a2e933df40b7 | """
>>> import pydra.tasks.MRtrix3
"""
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
|
py | b40e975edcb61c30438c032b5b1c1e9d81ec7961 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('ToDo')
class TestToDo(unittest.TestCase):
def test_delete(self):
todo = frappe.get_doc(dict(doctype='ToDo', description='test todo',
assigned_by='Administrator')).insert()
frappe.db.sql('delete from `tabDeleted Document`')
todo.delete()
deleted = frappe.get_doc('Deleted Document', dict(deleted_doctype=todo.doctype, deleted_name=todo.name))
self.assertEqual(todo.as_json(), deleted.data)
def test_fetch(self):
todo = frappe.get_doc(dict(doctype='ToDo', description='test todo',
assigned_by='Administrator')).insert()
self.assertEqual(todo.assigned_by_full_name,
frappe.db.get_value('User', todo.assigned_by, 'full_name'))
def test_fetch_setup(self):
frappe.db.sql('delete from tabToDo')
todo_meta = frappe.get_doc('DocType', 'ToDo')
todo_meta.get('fields', dict(fieldname='assigned_by_full_name'))[0].fetch_from = ''
todo_meta.save()
frappe.clear_cache(doctype='ToDo')
todo = frappe.get_doc(dict(doctype='ToDo', description='test todo',
assigned_by='Administrator')).insert()
self.assertFalse(todo.assigned_by_full_name)
todo_meta = frappe.get_doc('DocType', 'ToDo')
todo_meta.get('fields', dict(fieldname='assigned_by_full_name'))[0].fetch_from = 'assigned_by.full_name'
todo_meta.save()
todo.reload()
self.assertEqual(todo.assigned_by_full_name,
frappe.db.get_value('User', todo.assigned_by, 'full_name'))
|
py | b40e97acc3e84a7dc7411a7ad3c3f8c1dc8171a6 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_client import Client
from paddle_serving_app.reader import *
import numpy as np
preprocess = Sequential([
File2Image(), BGR2RGB(), Div(255.0),
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], False),
Resize(800, 1333), Transpose((2, 0, 1)), PadStride(32)
])
postprocess = RCNNPostprocess("label_list.txt", "output")
client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9292'])
im = preprocess('000000570688.jpg')
fetch_map = client.predict(
feed={
"image": im,
"im_info": np.array(list(im.shape[1:]) + [1.0]),
"im_shape": np.array(list(im.shape[1:]) + [1.0])
},
fetch=["multiclass_nms_0.tmp_0"],
batch=False)
fetch_map["image"] = '000000570688.jpg'
print(fetch_map)
postprocess(fetch_map)
print(fetch_map)
|
py | b40e97c9b45e91eef909ef3792239703f3de432f | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions to detect NaN/Inf values. """
import logging
import torch
import torch.nn as nn
log = logging.getLogger(__name__)
def print_nan_gradients(model: nn.Module) -> None:
""" Iterates over model parameters and prints out parameter + gradient information if NaN. """
for param in model.parameters():
if (param.grad is not None) and torch.isnan(param.grad.float()).any():
log.info(param, param.grad)
def detect_nan_parameters(model: nn.Module) -> None:
"""
Iterates over model parameters and prints gradients if any parameter is not finite.
Raises:
ValueError:
If ``NaN`` or ``inf`` values are found
"""
for name, param in model.named_parameters():
if not torch.isfinite(param).all():
print_nan_gradients(model)
raise ValueError(
f'Detected nan and/or inf values in `{name}`.'
' Check your forward pass for numerically unstable operations.'
)
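# --- Editorial sketch, not part of the original module: minimal usage example. It poisons
# one parameter of a toy model with NaN and shows `detect_nan_parameters` raising.
if __name__ == "__main__":
    model = nn.Linear(4, 2)
    with torch.no_grad():
        model.weight[0, 0] = float("nan")   # simulate a numerically unstable update
    try:
        detect_nan_parameters(model)
    except ValueError as err:
        print(err)   # Detected nan and/or inf values in `weight`. ...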
|
py | b40e986e563d822a075cd60817c1fc6c1be971a3 | # -*- coding: utf-8 -*-
"""
Survey_LocalTangentPlane.py
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Leandro França'
__date__ = '2019-10-28'
__copyright__ = '(C) 2019, Leandro França'
from PyQt5.QtCore import *
from qgis.core import *
import processing
from numpy import sin, cos, sqrt, matrix, radians, arctan, pi, floor
from pyproj.crs import CRS
from lftools.geocapt.imgs import Imgs
from lftools.geocapt.topogeo import geod2geoc, geoc2geod, geoc2enu, enu2geoc, dd2dms, dms2dd
import os
from qgis.PyQt.QtGui import QIcon
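# --- Editorial sketch, not part of the plugin: the geodetic -> geocentric (ECEF) step that
# the imported geod2geoc helper is assumed to perform, kept here only as a reference for
# the math behind this algorithm (see the help text below). `a` is the semi-major axis in
# metres and `f` the flattening (1 / inverse_flattening).
def _geod2geoc_sketch(lon_deg, lat_deg, h, a, f):
    """Return geocentric (X, Y, Z) in metres for geodetic (λ, ϕ, h) on an ellipsoid (a, f)."""
    lon, lat = radians(lon_deg), radians(lat_deg)
    e2 = f * (2 - f)                          # first eccentricity squared
    N = a / sqrt(1 - e2 * sin(lat) ** 2)      # prime vertical radius of curvature
    X = (N + h) * cos(lat) * cos(lon)
    Y = (N + h) * cos(lat) * sin(lon)
    Z = (N * (1 - e2) + h) * sin(lat)
    return X, Y, Z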
class LocalTangentPlane(QgsProcessingAlgorithm):
INPUT = 'INPUT'
TABLE = 'TABLE'
TYPE = 'TYPE'
COORD1 = 'COORD1'
COORD2 = 'COORD2'
COORD3 = 'COORD3'
GRS = 'GRS'
LON_0 = 'LON_0'
LAT_0 = 'LAT_0'
H_0 = 'H_0'
OUTPUT = 'OUTPUT'
LOC = QgsApplication.locale()[:2]
def translate(self, string):
return QCoreApplication.translate('Processing', string)
def tr(self, *string):
# Translate into Portuguese: arg[0] - English (translate), arg[1] - Portuguese
if self.LOC == 'pt':
if len(string) == 2:
return string[1]
else:
return self.translate(string[0])
else:
return self.translate(string[0])
def createInstance(self):
return LocalTangentPlane()
def name(self):
return 'localtangentplane'
def displayName(self):
return self.tr('Local Geodetic System transform', 'Transformação para SGL')
def group(self):
return self.tr('Survey', 'Agrimensura')
def groupId(self):
return 'survey'
def tags(self):
return self.tr('survey,agrimensura,LGS,SGL,tangent,transform,geocentric,topocentric,ECEF,geodetic,geodesic,brazil').split(',')
def icon(self):
return QIcon(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'images/total_station.png'))
txt_en = '''
This algorithm transforms coordinates between the following reference systems:
- geodetic <b>(λ, ϕ, h)</b>;
- geocentric or ECEF <b>(X, Y, Z)</b>; and
- topocentric in a local tangent plane <b>(E, N, U)</b>.
Default values for the origin coordinates correspond to Recife, Brazil.'''
txt_pt = '''Este algoritmo transforma coordenadas entre os seguintes sistemas de referência:
- Geodésico <b>(λ, ϕ, h)</b>
- Geocêntrico ou ECEF <b>(X, Y, Z)</b>;
- Topocêntrico <b>(E, N, U)</b>.
Default: coordenadas de origem para Recife-PE, Brasil.'''
figure = 'images/tutorial/survey_LTP.jpg'
def shortHelpString(self):
social_BW = Imgs().social_BW
nota_en = '''Note: Example data obtained from Mendonça et al. (2010).
Learn more:'''
nota_pt = '''Nota: Dados de exemplo obtidos de Mendonça et al. (2010).
Saiba mais:'''
footer = '''<div align="center">
<img src="'''+ os.path.join(os.path.dirname(os.path.dirname(__file__)), self.figure) +'''">
</div>
<div align="right">
<div>''' + self.tr(nota_en, nota_pt) + '''
</div>
<p align="right">
<b><a href="https://geoone.com.br/sistema-geodesico-local/" target="_blank">'''+self.tr('Local Geodetic System (LGS)', 'Sistema Geodésico Local (SGL)') + '''</b>
''' +'</a><br><b>'+ self.tr('Author: Leandro Franca', 'Autor: Leandro França')+'''</b>
</p>'''+ social_BW + '''</div>
</div>'''
return self.tr(self.txt_en, self.txt_pt) + footer
def initAlgorithm(self, config=None):
# INPUT
self.addParameter(
QgsProcessingParameterFeatureSource(
self.TABLE,
self.tr('Table of coordinates', 'Tabela de coordenadas'),
[QgsProcessing.TypeVector]
)
)
types = [ self.tr('lon, lat, h'),
self.tr('X, Y, Z'),
self.tr('E, N, U')
]
self.addParameter(
QgsProcessingParameterEnum(
self.TYPE,
self.tr('Input Coordinates type', 'Tipo de Coordenadas de Entrada'),
options = types,
defaultValue= 0
)
)
self.addParameter(
QgsProcessingParameterField(
self.COORD1,
self.tr('Lon, X or E field', 'Campo Lon, X ou E'),
parentLayerParameterName=self.TABLE,
type=QgsProcessingParameterField.Numeric
)
)
self.addParameter(
QgsProcessingParameterField(
self.COORD2,
self.tr('Lat, Y or N field', 'Campo Lat, Y ou N'),
parentLayerParameterName=self.TABLE,
type=QgsProcessingParameterField.Numeric
)
)
self.addParameter(
QgsProcessingParameterField(
self.COORD3,
self.tr('h, Z or U field', 'Campo h, Z ou U'),
parentLayerParameterName=self.TABLE,
type=QgsProcessingParameterField.Numeric
)
)
self.addParameter(
QgsProcessingParameterCrs(
self.GRS,
                self.tr('Ellipsoid parameters', 'Parâmetros do Elipsoide'),
QgsCoordinateReferenceSystem('EPSG:4674')
)
)
self.addParameter(
QgsProcessingParameterString(
self.LON_0,
self.tr('Origin Longitude (λ)', 'Longitude (λ) da Origem'),
defaultValue = '''-34°57'05.45910"'''
)
)
self.addParameter(
QgsProcessingParameterString(
self.LAT_0,
self.tr('Origin Latitude (ϕ)', 'Latitude (ϕ) da Origem'),
defaultValue = '''-8°03'03.46970"'''
)
)
self.addParameter(
QgsProcessingParameterNumber(
self.H_0,
self.tr('Origin Elipsoid Height (h)', 'Altitude (h) da Origem'),
type=1, #Double = 1 and Integer = 0
defaultValue = 4.217
)
)
# OUTPUT
self.addParameter(
QgsProcessingParameterFeatureSink(
self.OUTPUT,
self.tr('Transformed Coordinates', 'Coordenadas Transformadas')
)
)
def processAlgorithm(self, parameters, context, feedback):
        # Coordinate table
table = self.parameterAsSource(
parameters,
self.TABLE,
context
)
if table is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.TABLE))
        # Coordinate type
tipo = self.parameterAsEnum(
parameters,
self.TYPE,
context
)
if tipo < 0 or tipo >2:
raise QgsProcessingException(self.invalidSourceError(parameters, self.TYPE))
        # Coordinate fields
coord1 = self.parameterAsFields(
parameters,
self.COORD1,
context
)
coord2 = self.parameterAsFields(
parameters,
self.COORD2,
context
)
coord3 = self.parameterAsFields(
parameters,
self.COORD3,
context
)
        # Geodetic Reference System
GRS = self.parameterAsCrs(
parameters,
self.GRS,
context
)
        # Origin coordinates (lon, lat, h)
lon0 = self.parameterAsString(
parameters,
self.LON_0,
context
)
lon0 = dms2dd(lon0)
if lon0 < -180 or lon0 >180:
raise QgsProcessingException('Invalid Longitude')
lat0 = self.parameterAsString(
parameters,
self.LAT_0,
context
)
lat0 = dms2dd(lat0)
if lat0 < -90 or lat0 >90:
raise QgsProcessingException('Invalid Latitude')
h0 = self.parameterAsDouble(
parameters,
self.H_0,
context
)
if h0 < -1e3 or h0 >1e4:
raise QgsProcessingException('Invalid Height')
# OUTPUT
        # Output layer
GeomType = QgsWkbTypes.Point
Fields = QgsFields()
itens = {
'lon' : QVariant.Double,
'lon_dms' : QVariant.String,
'lat': QVariant.Double,
'lat_dms': QVariant.String,
'h': QVariant.Double,
'X': QVariant.Double,
'Y': QVariant.Double,
'Z': QVariant.Double,
'E': QVariant.Double,
'N': QVariant.Double,
'U': QVariant.Double
}
field_list = []
for field in table.fields():
if field.name() not in itens:
Fields.append(field)
field_list += [field.name()]
for item in itens:
Fields.append(QgsField(item, itens[item]))
(sink, dest_id) = self.parameterAsSink(
parameters,
self.OUTPUT,
context,
Fields,
GeomType,
GRS
)
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
        # Ellipsoid parameters a and f
        EPSG = int(GRS.authid().split(':')[-1]) # get the EPSG code from the QGIS CRS
        proj_crs = CRS.from_epsg(EPSG) # convert to a pyproj CRS
a=proj_crs.ellipsoid.semi_major_metre
f_inv = proj_crs.ellipsoid.inverse_flattening
f=1/f_inv
feedback.pushInfo((self.tr('Semi major axis: {}', 'Semi-eixo maior: {}')).format(str(a)))
feedback.pushInfo((self.tr('Inverse flattening: {}', 'Achatamento (inverso): {}')).format(str(f_inv)))
Xo, Yo, Zo = geod2geoc(lon0, lat0, h0, a, f)
# Field index
coord1_id = table.fields().indexFromName(coord1[0])
coord2_id = table.fields().indexFromName(coord2[0])
coord3_id = table.fields().indexFromName(coord3[0])
        # Generate output
total = 100.0 / table.featureCount() if table.featureCount() else 0
for current, feature in enumerate(table.getFeatures()):
att = feature.attributes()
coord1 = att[coord1_id]
coord2 = att[coord2_id]
coord3 = att[coord3_id]
if tipo == 0: #(lon,lat,h)
lon, lat, h = coord1, coord2, coord3
X, Y, Z = geod2geoc(lon, lat, h, a, f)
E, N, U = geoc2enu(X, Y, Z, lon0, lat0, Xo, Yo, Zo)
elif tipo == 1: #(X,Y,Z)
X, Y, Z = coord1, coord2, coord3
lon, lat, h = geoc2geod(X, Y, Z, a, f)
E, N, U = geoc2enu(X, Y, Z, lon0, lat0, Xo, Yo, Zo)
elif tipo == 2: #(E,N,U)
E, N, U = coord1, coord2, coord3
X, Y, Z = enu2geoc(E, N, U, lon0, lat0, Xo, Yo, Zo)
lon, lat, h = geoc2geod(X, Y, Z, a, f)
feat = QgsFeature(Fields)
itens = {
'lon' : float(lon),
'lon_dms' : dd2dms(float(lon),5),
'lat': float(lat),
'lat_dms': dd2dms(float(lat),5),
'h': float(h),
'X': float(X),
'Y': float(Y),
'Z': float(Z),
'E': float(E),
'N': float(N),
'U': float(U)
}
for item in itens:
feat[item] = itens[item]
            for item in field_list: # original attributes
feat[item] = feature[item]
geom = QgsGeometry.fromPointXY(QgsPointXY(lon, lat))
feat.setGeometry(geom)
sink.addFeature(feat, QgsFeatureSink.FastInsert)
if feedback.isCanceled():
break
feedback.setProgress(int(current * total))
feedback.pushInfo(self.tr('Operation completed successfully!', 'Operação finalizada com sucesso!'))
feedback.pushInfo(self.tr('Leandro Franca - Cartographic Engineer','Leandro França - Eng Cart'))
return {self.OUTPUT: dest_id}
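# Hypothetical call from the QGIS Python console (a sketch, not part of the plugin: the
# provider prefix depends on how the plugin registers its processing provider, and the
# origin parameters are omitted so the Recife defaults declared above are used):
#   processing.run('<provider>:localtangentplane',
#                  {'TABLE': point_layer, 'TYPE': 0,
#                   'COORD1': 'lon', 'COORD2': 'lat', 'COORD3': 'h',
#                   'GRS': 'EPSG:4674', 'OUTPUT': 'TEMPORARY_OUTPUT'})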
|
py | b40e98d3c3e66f35c8ad62b8a83829a3eb749c0a | import tempfile
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras import backend as K
class LearningRateFinder:
def __init__(self, model, stopFactor=4, beta=0.98):
self.model = model
self.stopFactor = stopFactor
self.beta = beta
self.lrs = None
self.lrMult = None
self.losses = None
self.best_loss = None
self.smooth_loss = None
self.batch_num = None
def reset(self):
self.lrs = []
self.lrMult = 1
self.losses = []
self.best_loss = 1e9
self.smooth_loss = 0
self.batch_num = 0
def plot(self, start=None, end=None):
if start is None:
start = 0
if end is None:
end = len(self.losses)
plt.figure(figsize=(8, 6))
plt.subplot(2, 1, 1)
plt.plot(range(start, end), self.losses[start:end])
plt.ylabel("loss")
plt.subplot(2, 1, 2)
plt.plot(range(start, end), self.lrs[start:end])
plt.ylabel("learning rate")
plt.show()
def on_batch_end(self, batch, logs):
"""
Save learning rate in lrs
Update learning rate value
Calculate smooth loss and save in losses
"""
# Get current learning rate and save it
lr = K.get_value(self.model.optimizer.lr)
self.lrs.append(lr)
# Calculate smooth loss and save it
loss = logs["loss"]
self.batch_num += 1
self.smooth_loss = (self.beta * self.smooth_loss) + ((1 - self.beta) * loss)
correctedloss = self.smooth_loss / (1 - (self.beta ** self.batch_num))
self.losses.append(correctedloss)
# Calculate stop loss
stoploss = self.stopFactor * self.best_loss
if self.batch_num > 1 and self.smooth_loss > stoploss:
# Stop training
self.model.stop_training = True
return
if correctedloss < self.best_loss:
# Update best loss
self.best_loss = correctedloss
# Increase learning rate
lr *= self.lrMult
K.set_value(self.model.optimizer.lr, lr)
def find(self, training_data, start_lr=1e-10, end_lr=1e+1, batch_size=32, epochs=5, sample_size=None, verbose=1):
# Reset parameters
self.reset()
# If sample size is not defined, use length of training data
if sample_size is None:
sample_size = len(training_data[0])
# Calculate update rate for learning rate
updateTimePerEpoch = np.ceil(sample_size / float(batch_size))
updateTimeTotal = epochs * updateTimePerEpoch
self.lrMult = (end_lr / start_lr) ** (1.0 / updateTimeTotal)
# Save model weights and learning rate, so we can reset it later
weightsFile = tempfile.mkstemp()[1]
self.model.save_weights(weightsFile)
orig_lr = K.get_value(self.model.optimizer.lr)
# Create callback function to update learning rate every batch
callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))
# Run training
K.set_value(self.model.optimizer.lr, start_lr)
self.model.fit(training_data[0], training_data[1],
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=[callback])
# Load model weights back
self.model.load_weights(weightsFile)
K.set_value(self.model.optimizer.lr, orig_lr)
if __name__ == "__main__":
lr_finder = LearningRateFinder(None)
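    # Minimal usage sketch (assumes an already-compiled Keras model `model` and training
    # arrays `x_train`/`y_train`, none of which are defined in this module; the model must
    # expose model.optimizer.lr, as the class above relies on):
    #   finder = LearningRateFinder(model)
    #   finder.find((x_train, y_train), start_lr=1e-7, end_lr=1e+1, batch_size=32, epochs=3)
    #   finder.plot()  # inspect the loss curve and pick a learning rate just before it diverges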
|
py | b40e9a4e0aa4f21df2a3ec35c8a73421c8a44aa6 | import numpy as np
from src.core.utils import Direction
class Particle:
"""
Class for particle for the voronoi / flooding algorithm
"""
def __init__(self, position, agent_id, direction):
self.position = position
self.agent_id = agent_id
self.direction = direction
def voronoi(model, max_agent_id):
"""
Basic voronoi algorithm for every agent from the model
:param model: The model
:param max_agent_id: Id of the max agent
:return: voronoi cells, region_sizes (dict), is_endgame, all opponents that are in the same region
"""
is_endgame = True
opponent_ids = []
timestamp = model.schedule.steps
cells = model.cells
width, height = model.width, model.height
# format: (height, width, (id, timestamp))
    particle_cells = np.zeros((*cells.shape, 2), dtype=int)  # np.int is deprecated/removed in recent NumPy
particles = []
for agent in model.active_speed_agents:
particle = Particle(agent.pos, agent.unique_id, agent.direction)
particles.extend(surrounding_cells(particle, width, height))
while len(particles) != 0:
timestamp += 1
new_particles = []
for particle in particles:
pos = (particle.position[1], particle.position[0])
if cells[pos] == 0:
# no obstacle in cells
survived = True
if particle_cells[pos[0], pos[1], 1] == 0:
# first
particle_cells[pos[0], pos[1]] = [particle.agent_id, timestamp]
elif particle_cells[pos[0], pos[1], 1] == timestamp and \
particle_cells[pos[0], pos[1], 0] != particle.agent_id:
# battlefront
particle_cells[pos[0], pos[1]] = [-1, -1]
else:
survived = False
# Check for endgame here
if particle_cells[pos[0], pos[1], 1] != 0 and bool(particle.agent_id == max_agent_id) ^ \
bool(particle_cells[pos[0], pos[1], 0] == max_agent_id) and \
particle_cells[pos[0], pos[1], 0] != -1 and particle.agent_id != -1:
is_endgame = False
if particle_cells[pos[0], pos[1], 0] not in opponent_ids:
opponent_ids.append(particle_cells[pos[0], pos[1], 0])
if particle.agent_id not in opponent_ids:
opponent_ids.append(particle.agent_id)
if survived:
new_particles.extend(surrounding_cells(particle, width, height))
particles = new_particles
return particle_cells, dict(zip(*np.unique(particle_cells[:, :, 0], return_counts=True))), is_endgame, opponent_ids
def voronoi_for_reduced_opponents(model, max_agent_id, min_agent_id, is_endgame):
"""
Voronoi algorithm for the ClosestOpponentsAgent, calculates voronoi only for the max and min agent.
:param model: The model
:param max_agent_id: Id of the max agent
:param min_agent_id: Id of the max agent
:param is_endgame: is endgame
:return: voronoi cells, region_sizes (dict), is_endgame
"""
timestamp = model.schedule.steps
cells = model.cells
width, height = model.width, model.height
region_sizes = {max_agent_id: 0, min_agent_id: 0}
# format: (height, width, (id, timestamp))
    particle_cells = np.zeros((*cells.shape, 2), dtype=int)  # np.int is deprecated/removed in recent NumPy
particles = []
agents_list = [model.get_agent_by_id(max_agent_id), model.get_agent_by_id(min_agent_id)]
for agent in agents_list:
particle = Particle(agent.pos, agent.unique_id, agent.direction)
particles.extend(surrounding_cells(particle, width, height))
while len(particles) != 0:
timestamp += 1
new_particles = []
for particle in particles:
pos = (particle.position[1], particle.position[0])
if cells[pos] == 0:
# no obstacle in cells
survived = True
if particle_cells[pos[0], pos[1], 1] == 0:
# first
particle_cells[pos[0], pos[1]] = [particle.agent_id, timestamp]
region_sizes[particle.agent_id] += 1
elif particle_cells[pos[0], pos[1], 1] == timestamp and \
particle_cells[pos[0], pos[1], 0] != particle.agent_id:
# battlefront
region_sizes[particle_cells[pos[0], pos[1], 0]] -= 1 # decrease because it was falsely increased
particle_cells[pos[0], pos[1]] = [-1, -1]
else:
survived = False
# Check for endgame here
if particle_cells[pos[0], pos[1], 1] != 0 and bool(particle.agent_id == max_agent_id) ^ \
bool(particle_cells[pos[0], pos[1], 0] == max_agent_id):
is_endgame = False
if survived:
new_particles.extend(surrounding_cells(particle, width, height))
particles = new_particles
return particle_cells, region_sizes, is_endgame
def surrounding_cells(parent, width, height):
"""
Generates new particles from a parent particle.
:param parent: The parent Particle
:param width: Width of the field
:param height: Height of the field
:return: The new particles
"""
particles = []
x, y = parent.position
directions = [(-1, 0, Direction.LEFT), (1, 0, Direction.RIGHT), (0, -1, Direction.UP), (0, 1, Direction.DOWN)]
# remove direction behind agent
if parent.direction == Direction.UP:
directions.pop(3)
elif parent.direction == Direction.DOWN:
directions.pop(2)
elif parent.direction == Direction.RIGHT:
directions.pop(0)
elif parent.direction == Direction.LEFT:
directions.pop(1)
for d_x, d_y, direction in directions:
if 0 <= x + d_x < width and 0 <= y + d_y < height:
particles.append(Particle((x + d_x, y + d_y), parent.agent_id, direction))
return particles
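# Worked example for surrounding_cells (hypothetical values): a particle at (5, 5) owned by
# agent 1 and moving Direction.UP on a 10x10 field expands to LEFT, RIGHT and UP only,
# i.e. to Particles at (4, 5), (6, 5) and (5, 4), because the cell behind the agent (DOWN)
# is removed from the candidate directions.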
|
py | b40e9ae4ec0f297f381feed3f705ca9224155fda | from flask import Flask, request, session, redirect
import urllib.request
import json
app = Flask(__name__)
@app.route('/sms_received', methods=['POST'])
def smsReceived():
    # Fetch the full message by id from the internal SMS gateway, then acknowledge it to the sender.
    message_id = request.form.get('id')
    print(message_id)
    message = json.loads(urllib.request.urlopen(
        "http://sms-line1.internal.lambdanum.com/get_message.py?message_id={}".format(message_id)).read())
    print(urllib.request.urlopen(
        "http://sms-line1.internal.lambdanum.com/send_message.py?destination={}&message={}".format(
            message['chat_identifier'].strip('+'),
            "Thank%20you,%20your%20message%20has%20been%20received.%20%23{}.".format(message['id']))).read())
    return 'OK'
if __name__ == "__main__":
    app.run(port=5000, debug=False, threaded=False, host="0.0.0.0")
|
py | b40e9d01117fbbec05d3e7782392e331646abbe2 | # Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Collection of all environs related calls
"""
from deploy_board.webapp.helpers.deployclient import DeployClient
from deploy_board.settings import IS_PINTEREST
DEFAULT_ENV_SIZE = 30
BUILD_STAGE = 'BUILD'
DEPLOY_STAGE_VALUES = ['UNKNOWN', 'PRE_DOWNLOAD', 'DOWNLOADING', 'POST_DOWNLOAD', 'STAGING',
'PRE_RESTART', 'RESTARTING', 'POST_RESTART', 'SERVING_BUILD', 'STOPPING', 'STOPPED']
DEPLOY_PRIORITY_VALUES = ['LOWER', 'LOW', 'NORMAL', 'HIGH', 'HIGHER']
ACCEPTANCE_TYPE_VALUES = ['AUTO', 'MANUAL']
ACCEPTANCE_STATUS_VALUES = ['PENDING_DEPLOY', 'OUTSTANDING', 'PENDING_ACCEPT', 'ACCEPTED',
'REJECTED',
'TERMINATED']
AGENT_STATE_VALUES = ["NORMAL", "PAUSED_BY_SYSTEM", "PAUSED_BY_USER", "RESET", "DELETE",
"UNREACHABLE", "STOP"]
AGENT_STATUS_VALUES = ["SUCCEEDED", "UNKNOWN", "AGENT_FAILED", "RETRYABLE_AGENT_FAILED",
"SCRIPT_FAILED", "ABORTED_BY_SERVICE", "SCRIPT_TIMEOUT", "TOO_MANY_RETRY",
"RUNTIME_MISMATCH"]
PROMOTE_TYPE_VALUES = ['MANUAL', 'AUTO']
PROMOTE_FAILED_POLICY_VALUES = ['CONTINUE', 'DISABLE', 'ROLLBACK']
PROMOTE_DISABLE_POLICY_VALUES = ['MANUAL', 'AUTO']
OVERRIDE_POLICY_VALUES = ['OVERRIDE', 'WARN']
DEPLOY_CONSTRAINT_TYPES = ['GROUP_BY_GROUP', 'ALL_GROUPS_IN_PARALLEL']
deployclient = DeployClient()
if IS_PINTEREST:
from deploy_board.webapp.helpers.nimbusclient import NimbusClient
nimbusclient = NimbusClient()
# Nimbus-related helpers
def get_nimbus_identifier(name):
return nimbusclient.get_one_identifier(name)
def create_nimbus_identifier(data):
return nimbusclient.create_one_identifier(data)
def delete_nimbus_identifier(name):
return nimbusclient.delete_one_identifier(name)
# Teletraan Deploy client helpers
def set_external_id_on_stage(request, env_name, stage_name, external_id):
return deployclient.post("/envs/{}/{}/external_id".format(env_name, stage_name), request.teletraan_user_id.token, data=external_id)
def get_all_env_names(request, name_filter=None, name_only=True, index=1, size=DEFAULT_ENV_SIZE):
params = [('pageIndex', index), ('pageSize', size)]
if name_filter:
params.append(('nameFilter', name_filter))
return deployclient.get("/envs/names", request.teletraan_user_id.token, params=params)
def get_all_env_stages(request, env_name):
return deployclient.get("/envs", request.teletraan_user_id.token,
params=[("envName", env_name)])
def get_all_envs_by_group(request, group_name):
params = [('groupName', group_name)]
return deployclient.get("/envs/", request.teletraan_user_id.token, params=params)
def get(request, id):
return deployclient.get("/envs/%s" % id, request.teletraan_user_id.token)
def get_env_by_stage(request, env_name, stage_name):
return deployclient.get("/envs/%s/%s" % (env_name, stage_name), request.teletraan_user_id.token)
def get_env_capacity(request, env_name, stage_name, capacity_type=None):
params = []
if capacity_type:
params.append(("capacityType", capacity_type))
return deployclient.get("/envs/%s/%s/capacity" % (env_name, stage_name),
request.teletraan_user_id.token, params=params)
def update_env_capacity(request, env_name, stage_name, capacity_type=None, data=None):
params = []
if capacity_type:
params.append(("capacityType", capacity_type))
return deployclient.put("/envs/%s/%s/capacity" % (env_name, stage_name),
request.teletraan_user_id.token, params=params, data=data)
def add_env_capacity(request, env_name, stage_name, capacity_type=None, data=None):
params = []
if capacity_type:
params.append(("capacityType", capacity_type))
return deployclient.post("/envs/%s/%s/capacity" % (env_name, stage_name),
request.teletraan_user_id.token, params=params, data=data)
def remove_env_capacity(request, env_name, stage_name, capacity_type=None, data=None):
params = []
if capacity_type:
params.append(("capacityType", capacity_type))
return deployclient.delete("/envs/%s/%s/capacity" % (env_name, stage_name),
request.teletraan_user_id.token, params=params, data=data)
def create_env(request, data):
return deployclient.post("/envs", request.teletraan_user_id.token, data=data)
def update_env_basic_config(request, env_name, stage_name, data):
return deployclient.put("/envs/%s/%s" % (env_name, stage_name), request.teletraan_user_id.token,
data=data)
def get_env_script_config(request, env_name, stage_name):
return deployclient.get("/envs/%s/%s/script_configs" % (env_name, stage_name),
request.teletraan_user_id.token)
def update_env_script_config(request, env_name, stage_name, data):
return deployclient.put("/envs/%s/%s/script_configs" % (env_name, stage_name),
request.teletraan_user_id.token, data=data)
def get_env_agent_config(request, env_name, stage_name):
return deployclient.get("/envs/%s/%s/agent_configs" % (env_name, stage_name),
request.teletraan_user_id.token)
def update_env_agent_config(request, env_name, stage_name, data):
return deployclient.put("/envs/%s/%s/agent_configs" % (env_name, stage_name),
request.teletraan_user_id.token, data=data)
def get_env_alarms_config(request, env_name, stage_name):
return deployclient.get("/envs/%s/%s/alarms" % (env_name, stage_name),
request.teletraan_user_id.token)
def update_env_alarms_config(request, env_name, stage_name, data):
return deployclient.put("/envs/%s/%s/alarms" % (env_name, stage_name),
request.teletraan_user_id.token, data=data)
def get_env_metrics_config(request, env_name, stage_name):
return deployclient.get("/envs/%s/%s/metrics" % (env_name, stage_name),
request.teletraan_user_id.token)
def update_env_metrics_config(request, env_name, stage_name, data):
return deployclient.put("/envs/%s/%s/metrics" % (env_name, stage_name),
request.teletraan_user_id.token, data=data)
def get_env_hooks_config(request, env_name, stage_name):
return deployclient.get("/envs/%s/%s/web_hooks" % (env_name, stage_name),
request.teletraan_user_id.token)
def update_env_hooks_config(request, env_name, stage_name, data):
return deployclient.put("/envs/%s/%s/web_hooks" % (env_name, stage_name),
request.teletraan_user_id.token, data=data)
def get_env_promotes_config(request, env_name, stage_name):
return deployclient.get("/envs/%s/%s/promotes" % (env_name, stage_name),
request.teletraan_user_id.token)
def update_env_promotes_config(request, env_name, stage_name, data):
return deployclient.put("/envs/%s/%s/promotes" % (env_name, stage_name),
request.teletraan_user_id.token, data=data)
def delete_env(request, env_name, stage_name):
return deployclient.delete("/envs/%s/%s" % (env_name, stage_name),
request.teletraan_user_id.token)
def get_config_history(request, env_name, stage_name, index, size):
params = [('pageIndex', index), ('pageSize', size)]
return deployclient.get("/envs/%s/%s/history" % (env_name, stage_name),
request.teletraan_user_id.token, params=params)
def set_active_max_parallel(env):
    max_parallel_percentage = int(env['maxParallelPct'])
    env['showNumber'] = True
    if max_parallel_percentage > 0:
env['showNumber'] = False
def enable_all_env_changes(request, description):
params = [("actionType", "ENABLE"), ("description", description)]
return deployclient.post("/envs/actions", request.teletraan_user_id.token, params=params)
def disable_all_env_changes(request, description):
params = [("actionType", "DISABLE"), ("description", description)]
return deployclient.post("/envs/actions", request.teletraan_user_id.token, params=params)
def enable_env_changes(request, env_name, stage_name, description):
params = [("actionType", "ENABLE"), ("description", description)]
return deployclient.post("/envs/%s/%s/actions" % (env_name, stage_name), request.teletraan_user_id.token,
params=params)
def disable_env_changes(request, env_name, stage_name, description):
params = [("actionType", "DISABLE"), ("description", description)]
return deployclient.post("/envs/%s/%s/actions" % (env_name, stage_name), request.teletraan_user_id.token,
params=params)
def pause_hosts(request, env_name, stage_name, host_ids):
params = [("actionType", "PAUSED_BY_USER")]
return deployclient.put("/envs/%s/%s/deploys/hostactions" % (env_name, stage_name), request.teletraan_user_id.token,
params=params, data=host_ids)
def resume_hosts(request, env_name, stage_name, host_ids):
params = [("actionType", "NORMAL")]
return deployclient.put("/envs/%s/%s/deploys/hostactions" % (env_name, stage_name), request.teletraan_user_id.token,
params=params, data=host_ids)
def reset_hosts(request, env_name, stage_name, host_ids):
params = [("actionType", "RESET")]
return deployclient.put("/envs/%s/%s/deploys/hostactions" % (env_name, stage_name), request.teletraan_user_id.token,
params=params, data=host_ids)
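# Example usage from a Django view (a sketch; env/stage names and host id are hypothetical,
# and `request` must carry a teletraan_user_id token as in the calls above):
#   env = get_env_by_stage(request, 'myservice', 'prod')
#   capacity = get_env_capacity(request, 'myservice', 'prod')
#   pause_hosts(request, 'myservice', 'prod', ['i-0123456789'])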
|
py | b40e9d57c9933fce1913f7e82ed65105e3d57dd4 | #!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_artifacts_container_image_signature_facts
short_description: Fetches details about one or multiple ContainerImageSignature resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple ContainerImageSignature resources in Oracle Cloud Infrastructure
- List container image signatures in an image.
- If I(image_signature_id) is specified, the details of a single ContainerImageSignature will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
image_signature_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the container image signature.
- "Example: `ocid1.containersignature.oc1..exampleuniqueID`"
- Required to get a specific container_image_signature.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
- Required to list multiple container_image_signatures.
type: str
compartment_id_in_subtree:
description:
- When set to true, the hierarchy of compartments is traversed
and all compartments and subcompartments in the tenancy are
              inspected depending on the setting of `accessLevel`.
Default is false. Can only be set to true when calling the API
on the tenancy (root compartment).
type: bool
image_id:
description:
- A filter to return a container image summary only for the specified container image OCID.
type: str
repository_id:
description:
- A filter to return container images only for the specified container repository OCID.
type: str
repository_name:
description:
- A filter to return container images or container image signatures that match the repository name.
- "Example: `foo` or `foo*`"
type: str
image_digest:
description:
- The digest of the container image.
- "Example: `sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa`"
type: str
display_name:
description:
- A filter to return only resources that match the given display name exactly.
type: str
aliases: ["name"]
kms_key_id:
description:
            - The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the kmsKeyId used to sign the container image.
            - "Example: `ocid1.key.oc1..exampleuniqueID`"
type: str
kms_key_version_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the kmsKeyVersionId used to sign the container image.
- "Example: `ocid1.keyversion.oc1..exampleuniqueID`"
type: str
signing_algorithm:
description:
- The algorithm to be used for signing. These are the only supported signing algorithms for container images.
type: str
choices:
- "SHA_224_RSA_PKCS_PSS"
- "SHA_256_RSA_PKCS_PSS"
- "SHA_384_RSA_PKCS_PSS"
- "SHA_512_RSA_PKCS_PSS"
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for
TIMECREATED is descending. Default order for DISPLAYNAME is ascending. The DISPLAYNAME
sort order is case sensitive.
- "**Note:** In general, some \\"List\\" operations (for example, `ListInstances`) let you
optionally filter by availability domain if the scope of the resource type is within a
single availability domain. If you call one of these \\"List\\" operations without specifying
an availability domain, the resources are grouped by availability domain, then sorted."
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`). The DISPLAYNAME sort order
is case sensitive.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific container_image_signature
oci_artifacts_container_image_signature_facts:
# required
image_signature_id: "ocid1.imagesignature.oc1..xxxxxxEXAMPLExxxxxx"
- name: List container_image_signatures
oci_artifacts_container_image_signature_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
compartment_id_in_subtree: true
image_id: "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx"
repository_id: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
repository_name: repository_name_example
image_digest: image_digest_example
display_name: display_name_example
kms_key_id: "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx"
kms_key_version_id: "ocid1.kmskeyversion.oc1..xxxxxxEXAMPLExxxxxx"
signing_algorithm: SHA_224_RSA_PKCS_PSS
sort_by: TIMECREATED
sort_order: ASC
"""
RETURN = """
container_image_signatures:
description:
- List of ContainerImageSignature resources
returned: on success
type: complex
contains:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment in which the container repository
exists.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
created_by:
description:
- The id of the user or principal that created the resource.
- Returned for get operation
returned: on success
type: str
sample: created_by_example
display_name:
description:
- The last 10 characters of the kmsKeyId, the last 10 characters of the kmsKeyVersionId, the signingAlgorithm, and the last 10 characters of the
signatureId.
- "Example: `wrmz22sixa::qdwyc2ptun::SHA_256_RSA_PKCS_PSS::2vwmobasva`"
returned: on success
type: str
sample: display_name_example
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the container image signature.
- "Example: `ocid1.containerimagesignature.oc1..exampleuniqueID`"
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
image_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the container image.
- "Example: `ocid1.containerimage.oc1..exampleuniqueID`"
returned: on success
type: str
sample: "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx"
kms_key_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the kmsKeyId used to sign the container image.
- "Example: `ocid1.key.oc1..exampleuniqueID`"
returned: on success
type: str
sample: "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx"
kms_key_version_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the kmsKeyVersionId used to sign the container image.
- "Example: `ocid1.keyversion.oc1..exampleuniqueID`"
returned: on success
type: str
sample: "ocid1.kmskeyversion.oc1..xxxxxxEXAMPLExxxxxx"
message:
description:
- The base64 encoded signature payload that was signed.
returned: on success
type: str
sample: message_example
signature:
description:
- The signature of the message field using the kmsKeyId, the kmsKeyVersionId, and the signingAlgorithm.
returned: on success
type: str
sample: signature_example
signing_algorithm:
description:
- The algorithm to be used for signing. These are the only supported signing algorithms for container images.
returned: on success
type: str
sample: SHA_224_RSA_PKCS_PSS
time_created:
description:
- An RFC 3339 timestamp indicating when the image was created.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
sample: [{
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"created_by": "created_by_example",
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"image_id": "ocid1.image.oc1..xxxxxxEXAMPLExxxxxx",
"kms_key_id": "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx",
"kms_key_version_id": "ocid1.kmskeyversion.oc1..xxxxxxEXAMPLExxxxxx",
"message": "message_example",
"signature": "signature_example",
"signing_algorithm": "SHA_224_RSA_PKCS_PSS",
"time_created": "2013-10-20T19:20:30+01:00"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.artifacts import ArtifactsClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ContainerImageSignatureFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get, list"""
def get_required_params_for_get(self):
return [
"image_signature_id",
]
def get_required_params_for_list(self):
return [
"compartment_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_container_image_signature,
image_signature_id=self.module.params.get("image_signature_id"),
)
def list_resources(self):
optional_list_method_params = [
"compartment_id_in_subtree",
"image_id",
"repository_id",
"repository_name",
"image_digest",
"display_name",
"kms_key_id",
"kms_key_version_id",
"signing_algorithm",
"sort_by",
"sort_order",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_container_image_signatures,
compartment_id=self.module.params.get("compartment_id"),
**optional_kwargs
)
ContainerImageSignatureFactsHelperCustom = get_custom_class(
"ContainerImageSignatureFactsHelperCustom"
)
class ResourceFactsHelper(
ContainerImageSignatureFactsHelperCustom, ContainerImageSignatureFactsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
image_signature_id=dict(aliases=["id"], type="str"),
compartment_id=dict(type="str"),
compartment_id_in_subtree=dict(type="bool"),
image_id=dict(type="str"),
repository_id=dict(type="str"),
repository_name=dict(type="str"),
image_digest=dict(type="str"),
display_name=dict(aliases=["name"], type="str"),
kms_key_id=dict(type="str"),
kms_key_version_id=dict(type="str"),
signing_algorithm=dict(
type="str",
choices=[
"SHA_224_RSA_PKCS_PSS",
"SHA_256_RSA_PKCS_PSS",
"SHA_384_RSA_PKCS_PSS",
"SHA_512_RSA_PKCS_PSS",
],
),
sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
sort_order=dict(type="str", choices=["ASC", "DESC"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="container_image_signature",
service_client_class=ArtifactsClient,
namespace="artifacts",
)
result = []
if resource_facts_helper.is_get():
result = [resource_facts_helper.get()]
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(container_image_signatures=result)
if __name__ == "__main__":
main()
|
py | b40e9e003d7b58cc98f1f386d7421be9759cfc6e | # importing the libraries
from covid import Covid
import pygame
# initializing
pygame.init()
covid = Covid()
# data for the world
ta = f"Total active cases in the world: {covid.get_total_active_cases()}"
tr = f"Total recovered cases in the world: {covid.get_total_recovered()}"
td = f"Total deaths in the world: {covid.get_total_deaths()}"
# load background
background = pygame.image.load('covid.jpg')
# taking input for country name
# getting data according to country name
# data will be stored as a dictionary
country = input("Enter your country name : ")
cases = covid.get_status_by_country_name(country)
# setting font and font size
covid_font = pygame.font.Font('freesansbold.ttf', 18)
# running status variable
running = True
# creating a 400*400 screen
screen = pygame.display.set_mode((400, 400))
# title
pygame.display.set_caption('Covid info Tracker @ankush_singh_gandhi')
# main loop
while running:
z=10
y=100
# for loop to quit the 400*400 screen
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# filling black colour on screen
screen.fill((0,0,0))
# background
screen.blit(background, (0, 0))
# printing world data on 400*400 screen
covid_text = covid_font.render(ta, True, (255, 255, 255))
screen.blit(covid_text, (z,10))
covid_text = covid_font.render(tr, True, (255, 255, 255))
screen.blit(covid_text, (z,30))
covid_text = covid_font.render(td, True, (255, 255, 255))
screen.blit(covid_text, (z,50))
# printing country's data using for loop
for x in cases:
text = f"{x}: {cases[x]}"
covid_text = covid_font.render(text, True, (255, 255, 255))
screen.blit(covid_text, (z,y))
y+= 20
# updating display
pygame.display.update()
pygame.quit() |
py | b40e9fd7c1279fb4263cd3e307faa2f577f321e4 | #!/usr/bin/python
"""The :class:`FunctionalTester` object provides a higher-level interface to
working with a Trac environment to make test cases more succinct.
"""
from trac.tests.functional import internal_error
from trac.tests.functional.better_twill import tc, b
from trac.tests.contentgen import random_page, random_sentence, random_word, \
random_unique_camel
from trac.util.text import unicode_quote
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class FunctionalTester(object):
"""Provides a library of higher-level operations for interacting with a
test environment.
It makes assumptions such as knowing what ticket number is next, so
avoid doing things manually in :class:`FunctionalTestCase`s when you can.
"""
def __init__(self, url):
"""Create a :class:`FunctionalTester` for the given Trac URL and
Subversion URL"""
self.url = url
self.ticketcount = 0
# Connect, and login so we can run tests.
self.go_to_front()
self.login('admin')
def login(self, username):
"""Login as the given user"""
tc.add_auth("", self.url, username, username)
self.go_to_front()
tc.find("Login")
tc.follow("Login")
# We've provided authentication info earlier, so this should
# redirect back to the base url.
tc.find("logged in as %s" % username)
tc.find("Logout")
tc.url(self.url)
tc.notfind(internal_error)
def logout(self):
"""Logout"""
tc.follow("Logout")
tc.notfind(internal_error)
def create_ticket(self, summary=None, info=None):
"""Create a new (random) ticket in the test environment. Returns
the new ticket number.
:summary:
may optionally be set to the desired summary
:info:
may optionally be set to a dictionary of field value pairs for
populating the ticket. ``info['summary']`` overrides summary.
`summary` and `description` default to randomly-generated values.
"""
self.go_to_front()
tc.follow('New Ticket')
tc.notfind(internal_error)
        if summary is None:
summary = random_sentence(4)
tc.formvalue('propertyform', 'field_summary', summary)
tc.formvalue('propertyform', 'field_description', random_page())
if info:
for field, value in info.items():
tc.formvalue('propertyform', 'field_%s' % field, value)
tc.submit('submit')
# we should be looking at the newly created ticket
tc.url(self.url + '/ticket/%s' % (self.ticketcount + 1))
# Increment self.ticketcount /after/ we've verified that the ticket
# was created so a failure does not trigger spurious later
# failures.
self.ticketcount += 1
# verify the ticket creation event shows up in the timeline
self.go_to_timeline()
tc.formvalue('prefs', 'ticket', True)
tc.submit()
tc.find('Ticket.*#%s.*created' % self.ticketcount)
return self.ticketcount
def quickjump(self, search):
"""Do a quick search to jump to a page."""
tc.formvalue('search', 'q', search)
tc.submit()
tc.notfind(internal_error)
def go_to_front(self):
"""Go to the Trac front page"""
tc.go(self.url)
tc.url(self.url)
tc.notfind(internal_error)
def go_to_ticket(self, ticketid):
"""Surf to the page for the given ticket ID. Assumes ticket
exists."""
ticket_url = self.url + "/ticket/%s" % ticketid
tc.go(ticket_url)
tc.url(ticket_url)
def go_to_wiki(self, name):
"""Surf to the page for the given wiki page."""
# Used to go based on a quickjump, but if the wiki pagename isn't
# camel case, that won't work.
wiki_url = self.url + '/wiki/%s' % name
tc.go(wiki_url)
tc.url(wiki_url)
def go_to_timeline(self):
"""Surf to the timeline page."""
self.go_to_front()
tc.follow('Timeline')
tc.url(self.url + '/timeline')
def go_to_query(self):
"""Surf to the custom query page."""
self.go_to_front()
tc.follow('View Tickets')
tc.follow('Custom Query')
tc.url(self.url + '/query')
def go_to_admin(self):
"""Surf to the webadmin page."""
self.go_to_front()
tc.follow('\\bAdmin\\b')
def go_to_roadmap(self):
"""Surf to the roadmap page."""
self.go_to_front()
tc.follow('\\bRoadmap\\b')
tc.url(self.url + '/roadmap')
def add_comment(self, ticketid, comment=None):
"""Adds a comment to the given ticket ID, assumes ticket exists."""
self.go_to_ticket(ticketid)
if comment is None:
comment = random_sentence()
tc.formvalue('propertyform', 'comment', comment)
tc.submit("submit")
# Verify we're where we're supposed to be.
# The fragment is stripped since Python 2.7.1, see:
# http://trac.edgewall.org/ticket/9990#comment:18
tc.url(self.url + '/ticket/%s(?:#comment:.*)?$' % ticketid)
return comment
def attach_file_to_ticket(self, ticketid, data=None, tempfilename=None,
description=None, replace=False):
"""Attaches a file to the given ticket id, with random data if none is
provided. Assumes the ticket exists.
"""
if data is None:
data = random_page()
if description is None:
description = random_sentence()
if tempfilename is None:
tempfilename = random_word()
self.go_to_ticket(ticketid)
# set the value to what it already is, so that twill will know we
# want this form.
tc.formvalue('attachfile', 'action', 'new')
tc.submit()
tc.url(self.url + "/attachment/ticket/" \
"%s/\\?action=new&attachfilebutton=Attach\\+file" % ticketid)
fp = StringIO(data)
tc.formfile('attachment', 'attachment', tempfilename, fp=fp)
tc.formvalue('attachment', 'description', description)
if replace:
tc.formvalue('attachment', 'replace', True)
tc.submit()
tc.url(self.url + '/attachment/ticket/%s/$' % ticketid)
return tempfilename
def clone_ticket(self, ticketid):
"""Create a clone of the given ticket id using the clone button."""
ticket_url = self.url + '/ticket/%s' % ticketid
tc.go(ticket_url)
tc.url(ticket_url)
tc.formvalue('clone', 'clone', 'Clone')
tc.submit()
# we should be looking at the newly created ticket
self.ticketcount += 1
tc.url(self.url + "/ticket/%s" % self.ticketcount)
return self.ticketcount
def create_wiki_page(self, page, content=None):
"""Creates the specified wiki page, with random content if none is
provided.
"""
        if content is None:
content = random_page()
page_url = self.url + "/wiki/" + page
tc.go(page_url)
tc.url(page_url)
tc.find("The page %s does not exist." % page)
tc.formvalue('modifypage', 'action', 'edit')
tc.submit()
tc.url(page_url + '\\?action=edit')
tc.formvalue('edit', 'text', content)
tc.submit('save')
tc.url(page_url+'$')
# verify the event shows up in the timeline
self.go_to_timeline()
tc.formvalue('prefs', 'wiki', True)
tc.submit()
tc.find(page + ".*created")
def attach_file_to_wiki(self, name, data=None):
"""Attaches a file to the given wiki page, with random content if none
is provided. Assumes the wiki page exists.
"""
        if data is None:
data = random_page()
self.go_to_wiki(name)
# set the value to what it already is, so that twill will know we
# want this form.
tc.formvalue('attachfile', 'action', 'new')
tc.submit()
tc.url(self.url + "/attachment/wiki/" \
"%s/\\?action=new&attachfilebutton=Attach\\+file" % name)
tempfilename = random_word()
fp = StringIO(data)
tc.formfile('attachment', 'attachment', tempfilename, fp=fp)
tc.formvalue('attachment', 'description', random_sentence())
tc.submit()
tc.url(self.url + '/attachment/wiki/%s/$' % name)
return tempfilename
def create_milestone(self, name=None, due=None):
"""Creates the specified milestone, with a random name if none is
provided. Returns the name of the milestone.
"""
        if name is None:
name = random_unique_camel()
milestone_url = self.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.formvalue('addmilestone', 'name', name)
if due:
# TODO: How should we deal with differences in date formats?
tc.formvalue('addmilestone', 'duedate', due)
tc.submit()
tc.notfind(internal_error)
tc.notfind('Milestone .* already exists')
tc.url(milestone_url)
tc.find(name)
# Make sure it's on the roadmap.
tc.follow('Roadmap')
tc.url(self.url + "/roadmap")
tc.find('Milestone:.*%s' % name)
tc.follow(name)
tc.url('%s/milestone/%s' % (self.url, unicode_quote(name)))
if not due:
tc.find('No date set')
return name
def create_component(self, name=None, user=None):
"""Creates the specified component, with a random camel-cased name if
none is provided. Returns the name."""
        if name is None:
name = random_unique_camel()
component_url = self.url + "/admin/ticket/components"
tc.go(component_url)
tc.url(component_url)
tc.formvalue('addcomponent', 'name', name)
        if user is not None:
tc.formvalue('addcomponent', 'owner', user)
tc.submit()
# Verify the component appears in the component list
tc.url(component_url)
tc.find(name)
tc.notfind(internal_error)
# TODO: verify the component shows up in the newticket page
return name
def create_enum(self, kind, name=None):
"""Helper to create the specified enum (used for ``priority``,
``severity``, etc). If no name is given, a unique random word is used.
The name is returned.
"""
        if name is None:
name = random_unique_camel()
priority_url = self.url + "/admin/ticket/" + kind
tc.go(priority_url)
tc.url(priority_url)
tc.formvalue('addenum', 'name', name)
tc.submit()
tc.url(priority_url)
tc.find(name)
tc.notfind(internal_error)
return name
def create_priority(self, name=None):
"""Create a new priority enum"""
return self.create_enum('priority', name)
def create_resolution(self, name=None):
"""Create a new resolution enum"""
return self.create_enum('resolution', name)
def create_severity(self, name=None):
"""Create a new severity enum"""
return self.create_enum('severity', name)
def create_type(self, name=None):
"""Create a new ticket type enum"""
return self.create_enum('type', name)
def create_version(self, name=None, releasetime=None):
"""Create a new version. The name defaults to a random camel-cased
word if not provided."""
version_admin = self.url + "/admin/ticket/versions"
        if name is None:
name = random_unique_camel()
tc.go(version_admin)
tc.url(version_admin)
tc.formvalue('addversion', 'name', name)
        if releasetime is not None:
tc.formvalue('addversion', 'time', releasetime)
tc.submit()
tc.url(version_admin)
tc.find(name)
tc.notfind(internal_error)
# TODO: verify releasetime
def create_report(self, title, query, description):
"""Create a new report with the given title, query, and description"""
self.go_to_front()
tc.follow('View Tickets')
tc.formvalue('create_report', 'action', 'new') # select the right form
tc.submit()
tc.find('New Report')
tc.notfind(internal_error)
tc.formvalue('edit_report', 'title', title)
tc.formvalue('edit_report', 'description', description)
tc.formvalue('edit_report', 'query', query)
tc.submit()
reportnum = b.get_url().split('/')[-1]
# TODO: verify the url is correct
# TODO: verify the report number is correct
# TODO: verify the report does not cause an internal error
# TODO: verify the title appears on the report list
return reportnum
def ticket_set_milestone(self, ticketid, milestone):
"""Set the milestone on a given ticket."""
self.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'milestone', milestone)
tc.submit('submit')
# TODO: verify the change occurred.
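# Example usage against a running test environment (a sketch; the URL is hypothetical):
#   tester = FunctionalTester('http://127.0.0.1:8000/trac/testenv')
#   ticket_id = tester.create_ticket(summary='Example ticket')
#   tester.add_comment(ticket_id)
#   tester.create_milestone('milestone1')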
|
py | b40ea17f536be60cebbfc0d0c02407f86e8f84f3 | # Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: [email protected]
# Update Date: 2019 - 3 - 13
import torch
import torch.nn as nn
import torch.nn.functional as F
from VSR.Util.Utility import to_list
from . import Model
from .Arch import CascadeRdn
from ..Framework.Summary import get_writer
from ..Util import Metrics
class Upsample(nn.Module):
def __init__(self, channels):
super(Upsample, self).__init__()
in_c, out_c = to_list(channels, 2)
self.c1 = nn.Conv2d(in_c, out_c, 3, 1, 1)
self.c2 = nn.Conv2d(in_c, out_c, 3, 1, 1)
def forward(self, inputs, skips, scale=2):
up = F.interpolate(inputs, scale_factor=scale)
up = self.c1(up)
con = torch.cat([up, skips], dim=1)
return self.c2(con)
class Crdn(nn.Module):
def __init__(self, blocks=(4, 4), **kwargs):
super(Crdn, self).__init__()
self.blocks = to_list(blocks, 2)
self.entry = nn.Sequential(
nn.Conv2d(3, 32, 7, 1, 3),
nn.Conv2d(32, 32, 5, 1, 2))
self.exit = nn.Sequential(
nn.Conv2d(32, 32, 3, 1, 1),
nn.Conv2d(32, 3, 3, 1, 1))
self.down1 = nn.Conv2d(32, 64, 3, 2, 1)
self.down2 = nn.Conv2d(64, 128, 3, 2, 1)
self.up1 = Upsample([128, 64])
self.up2 = Upsample([64, 32])
self.cb1 = CascadeRdn(32, 3, True)
self.cb2 = CascadeRdn(64, 3, True)
self.cb3 = CascadeRdn(128, 3, True)
self.cb4 = CascadeRdn(128, 3, True)
self.cb5 = CascadeRdn(64, 3, True)
self.cb6 = CascadeRdn(32, 3, True)
def forward(self, inputs):
entry = self.entry(inputs)
x1 = self.cb1(entry)
x = self.down1(x1)
x2 = self.cb2(x)
x = self.down2(x2)
x = self.cb3(x)
x = self.cb4(x)
x = self.up1(x, x2)
x = self.cb5(x)
x = self.up2(x, x1)
x = self.cb6(x)
x += entry
out = self.exit(x)
return out
class CRDN(Model.SuperResolution):
def __init__(self, **kwargs):
super(CRDN, self).__init__(scale=1, channel=3)
self.rsr = Crdn()
self.opt = torch.optim.Adam(self.trainable_variables(), 1e-4)
def train(self, inputs, labels, learning_rate=None):
sr = self.rsr(inputs[0])
loss = F.l1_loss(sr, labels[0])
if learning_rate:
for param_group in self.opt.param_groups:
param_group["lr"] = learning_rate
self.opt.zero_grad()
loss.backward()
self.opt.step()
return {'l1': loss.detach().cpu().numpy()}
def eval(self, inputs, labels=None, **kwargs):
metrics = {}
sr = self.rsr(inputs[0]).cpu().detach()
if labels is not None:
metrics['psnr'] = Metrics.psnr(sr.numpy(), labels[0].cpu().numpy())
writer = get_writer(self.name)
if writer is not None:
writer.image('clean', sr)
return [sr.numpy()], metrics
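# Shape sanity check for the plain Crdn module (a sketch; assumes CascadeRdn preserves the
# spatial size, and the input height/width must be divisible by 4 because of the two
# stride-2 downsampling convolutions):
#   net = Crdn()
#   x = torch.randn(1, 3, 64, 64)
#   y = net(x)  # -> torch.Size([1, 3, 64, 64]); same resolution, since CRDN restores at scale 1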
|
py | b40ea1ca791a169420822368b37d674b1809937b | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import datetime
import pytz
import numpy as np
from zipline.finance.trading import SimulationParameters, TradingEnvironment
from zipline.algorithm import TradingAlgorithm
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
class BuyAndHoldAlgorithm(TradingAlgorithm):
SID_TO_BUY_AND_HOLD = 1
def initialize(self):
self.holding = False
def handle_data(self, data):
if not self.holding:
self.order(self.sid(self.SID_TO_BUY_AND_HOLD), 100)
self.holding = True
class TestEventsThroughRisk(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
cls.env.write_data(equities_identifiers=[1])
@classmethod
def tearDownClass(cls):
del cls.env
def test_daily_buy_and_hold(self):
start_date = datetime.datetime(
year=2006,
month=1,
day=3,
hour=0,
minute=0,
tzinfo=pytz.utc)
end_date = datetime.datetime(
year=2006,
month=1,
day=5,
hour=0,
minute=0,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_date,
period_end=end_date,
data_frequency='daily',
emission_rate='daily'
)
algo = BuyAndHoldAlgorithm(sim_params=sim_params, env=self.env)
first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)
trade_bar_data = [
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_date,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_date,
'source_id': 'test_list',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_date,
'source_id': 'test_list',
'type': DATASOURCE_TYPE.TRADE
}),
]
benchmark_data = [
Event({
'returns': 0.1,
'dt': first_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.2,
'dt': second_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.4,
'dt': third_date,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
]
algo.benchmark_return_source = benchmark_data
algo.set_sources(list([trade_bar_data]))
gen = algo._create_generator(sim_params)
# TODO: Hand derive these results.
        # The values below are a snapshot of the output at the time of this
        # writing, kept to serve at least as an early warning against changes.
expected_algorithm_returns = {
first_date: 0.0,
second_date: -0.000350,
third_date: -0.050018
}
# TODO: Hand derive these results.
        # The values below are a snapshot of the output at the time of this
        # writing, kept to serve at least as an early warning against changes.
expected_sharpe = {
first_date: np.nan,
second_date: -22.322677,
third_date: -9.353741
}
for bar in gen:
current_dt = algo.datetime
crm = algo.perf_tracker.cumulative_risk_metrics
dt_loc = crm.cont_index.get_loc(current_dt)
np.testing.assert_almost_equal(
crm.algorithm_returns[dt_loc],
expected_algorithm_returns[current_dt],
decimal=6)
np.testing.assert_almost_equal(
crm.sharpe[dt_loc],
expected_sharpe[current_dt],
decimal=6,
err_msg="Mismatch at %s" % (current_dt,))
def test_minute_buy_and_hold(self):
start_date = datetime.datetime(
year=2006,
month=1,
day=3,
hour=0,
minute=0,
tzinfo=pytz.utc)
end_date = datetime.datetime(
year=2006,
month=1,
day=5,
hour=0,
minute=0,
tzinfo=pytz.utc)
sim_params = SimulationParameters(
period_start=start_date,
period_end=end_date,
emission_rate='daily',
data_frequency='minute',
env=self.env)
algo = BuyAndHoldAlgorithm(
sim_params=sim_params,
env=self.env)
first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
first_open, first_close = self.env.get_open_and_close(first_date)
second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
second_open, second_close = self.env.get_open_and_close(second_date)
third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)
third_open, third_close = self.env.get_open_and_close(third_date)
benchmark_data = [
Event({
'returns': 0.1,
'dt': first_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.2,
'dt': second_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
Event({
'returns': 0.4,
'dt': third_close,
'source_id': 'test-benchmark-source',
'type': DATASOURCE_TYPE.BENCHMARK
}),
]
trade_bar_data = [
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 10,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': first_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 15,
'close_price': 20,
'price': 20,
'volume': 2000,
'sid': 1,
'dt': second_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_open,
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
Event({
'open_price': 20,
'close_price': 15,
'price': 15,
'volume': 1000,
'sid': 1,
'dt': third_open + datetime.timedelta(minutes=10),
'source_id': 'test-trade-source',
'type': DATASOURCE_TYPE.TRADE
}),
]
algo.benchmark_return_source = benchmark_data
algo.set_sources(list([trade_bar_data]))
gen = algo._create_generator(sim_params)
crm = algo.perf_tracker.cumulative_risk_metrics
dt_loc = crm.cont_index.get_loc(algo.datetime)
first_msg = next(gen)
self.assertIsNotNone(first_msg,
"There should be a message emitted.")
# Protects against bug where the positions appeared to be
# a day late, because benchmarks were triggering
# calculations before the events for the day were
# processed.
self.assertEqual(1, len(algo.portfolio.positions), "There should "
"be one position after the first day.")
        self.assertEqual(
0,
crm.algorithm_volatility[dt_loc],
"On the first day algorithm volatility does not exist.")
second_msg = next(gen)
self.assertIsNotNone(second_msg, "There should be a message "
"emitted.")
self.assertEqual(1, len(algo.portfolio.positions),
"Number of positions should stay the same.")
# TODO: Hand derive. Current value is just a canary to
# detect changes.
np.testing.assert_almost_equal(
0.050022510129558301,
crm.algorithm_returns[-1],
decimal=6)
third_msg = next(gen)
self.assertEqual(1, len(algo.portfolio.positions),
"Number of positions should stay the same.")
self.assertIsNotNone(third_msg, "There should be a message "
"emitted.")
# TODO: Hand derive. Current value is just a canary to
# detect changes.
np.testing.assert_almost_equal(
-0.047639464532418657,
crm.algorithm_returns[-1],
decimal=6)
|
py | b40ea31310cbb79216ae44a7192ccb93e1656f7b | from kapitan.inputs.kadet import BaseObj
from kapitan.utils import render_jinja2_file
class DeploymentHost(BaseObj):
def new(self):
self.update_root("components/incognito/deployment_host.yml")
def body(self):
self.root.metadata.name = self.kwargs.name
if self.kwargs.hostpath:
self.root.spec.template.spec.volumes[0] = {
"name": "data",
"hostPath": {
"path": self.kwargs.hostpath,
"type": "DirectoryOrCreate"
}
}
else:
self.root.spec.template.spec.volumes[0].persistentVolumeClaim.claimName = self.kwargs.name + "-data"
self.root.spec.template.spec.containers[0].image = self.kwargs.image
self.root.spec.template.spec.containers[0].ports[0].hostPort = self.kwargs.rpc_port
self.root.spec.template.spec.containers[0].ports[1].hostPort = self.kwargs.node_port
self.root.spec.template.spec.containers[0].env[0].value = self.kwargs.validator_key
self.root.spec.template.spec.containers[0].env[1].value = self.kwargs.infura_url
if self.kwargs.node_selector:
self.root.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[
0].matchExpressions[0]["values"][0] = self.kwargs.node_selector
else:
self.root.spec.template.spec.affinity = {}
class DeploymentTunnel(BaseObj):
def new(self):
self.update_root("components/incognito/deployment_tunnel.yml")
def body(self):
self.root.metadata.name = self.kwargs.name
if self.kwargs.hostpath:
self.root.spec.template.spec.volumes[0] = {
"name": "data",
"hostPath": {
"path": self.kwargs.hostpath,
"type": "DirectoryOrCreate"
}
}
else:
self.root.spec.template.spec.volumes[0].persistentVolumeClaim.claimName = self.kwargs.name + "-data"
self.root.spec.template.spec.volumes[1].configMap.name = self.kwargs.name + "-ssh-script"
self.root.spec.template.spec.volumes[2].secret.secretName = self.kwargs.name + "-ssh"
self.root.spec.template.spec.containers[0].image = self.kwargs.image
self.root.spec.template.spec.containers[0].env[0].value = self.kwargs.validator_key
self.root.spec.template.spec.containers[0].env[1].value = self.kwargs.infura_url
self.root.spec.template.spec.containers[0].env[2].value = str(self.kwargs.public_ip)
self.root.spec.template.spec.containers[0].env[3].value = str(self.kwargs.node_port)
self.root.spec.template.spec.containers[1].image = self.kwargs.tunnel_image
if self.kwargs.node_selector:
self.root.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[
0].matchExpressions[0]["values"][0] = self.kwargs.node_selector
else:
self.root.spec.template.spec.affinity = {}
class ConfigMap(BaseObj):
def new(self):
self.update_root("components/incognito/configmap.yml")
def body(self):
self.root.metadata.name = self.kwargs.name
self.root.data["tunnel.sh"] = render_jinja2_file("components/incognito/tunnel.sh.j2", {
"rpc_port": self.kwargs.rpc_port,
"node_port": self.kwargs.node_port,
"public_ip": self.kwargs.public_ip
})
class Secret(BaseObj):
def new(self):
self.update_root("components/incognito/secret.yml")
def body(self):
self.root.metadata.name = self.kwargs.name
self.root.data.key = self.kwargs.key
class PersistentVolumeClaim(BaseObj):
def new(self):
self.update_root("components/incognito/pvc.yml")
def body(self):
self.root.metadata.name = self.kwargs.name
self.root.spec.resources.requests.storage = self.kwargs.storage
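# --- Hedged usage sketch (not part of the original file) ---
# Shows how these kadet components might be instantiated in a compile target.
# All parameter values below are illustrative assumptions, and the component's
# YAML templates must be reachable on the kapitan search path.
def example_node(name="node-0", image="example/incognito-node:latest"):
    host = DeploymentHost(
        name=name,
        image=image,
        rpc_port=9334,
        node_port=9433,
        validator_key="<validator-key>",
        infura_url="<infura-url>",
        hostpath=None,
        node_selector=None,
    )
    pvc = PersistentVolumeClaim(name=name + "-data", storage="60Gi")
    return [host, pvc]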
|
py | b40ea39a91941158ce47a1b30f1a70c815354253 | from __future__ import annotations
import collections.abc
from collections import OrderedDict, namedtuple
from typing import TYPE_CHECKING, Any, Union
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.offsetbox import AnchoredText
from matplotlib.transforms import Bbox
from mpl_toolkits.axes_grid1 import axes_size, make_axes_locatable
from .utils import (
Plottable,
get_histogram_axes_title,
get_plottable_protocol_bins,
hist_object_handler,
isLight,
process_histogram_parts,
)
if TYPE_CHECKING:
from numpy.typing import ArrayLike
StairsArtists = namedtuple("StairsArtists", "stairs errorbar legend_artist")
ErrorBarArtists = namedtuple("ErrorBarArtists", "errorbar")
ColormeshArtists = namedtuple("ColormeshArtists", "pcolormesh cbar text")
Hist1DArtists = Union[StairsArtists, ErrorBarArtists]
Hist2DArtists = ColormeshArtists
def soft_update_kwargs(kwargs, mods, rc=True):
not_default = [k for k, v in mpl.rcParamsDefault.items() if v != mpl.rcParams[k]]
respect = [
"hatch.linewidth",
"lines.linewidth",
"patch.linewidth",
"lines.linestyle",
]
aliases = {"ls": "linestyle", "lw": "linewidth"}
kwargs = {aliases[k] if k in aliases else k: v for k, v in kwargs.items()}
for key, val in mods.items():
rc_modded = (key in not_default) or (
key in [k.split(".")[-1] for k in not_default if k in respect]
)
if key not in kwargs and (rc and not rc_modded):
kwargs[key] = val
return kwargs
########################################
# Histogram plotter
def histplot(
H, # Histogram object, tuple or array
bins=None, # Bins to be supplied when h is a value array or iterable of arrays
*,
yerr: ArrayLike | bool | None = None,
w2=None,
w2method=None,
stack=False,
density=False,
binwnorm=None,
histtype="step",
xerr=False,
label=None,
sort=None,
edges=True,
binticks=False,
ax=None,
**kwargs,
):
"""
Create a 1D histogram plot from `np.histogram`-like inputs.
Parameters
----------
H : object
        Histogram object containing values and optionally bins. Can be:
- `np.histogram` tuple
- PlottableProtocol histogram object
- `boost_histogram` classic (<0.13) histogram object
- raw histogram values, provided `bins` is specified.
Or list thereof.
bins : iterable, optional
Histogram bins, if not part of ``h``.
yerr : iterable or bool, optional
Histogram uncertainties. Following modes are supported:
- True, sqrt(N) errors or poissonian interval when ``w2`` is specified
        - shape(N) array of one-sided errors, or list thereof
        - shape(Nx2) array of two-sided errors, or list thereof
w2 : iterable, optional
Sum of the histogram weights squared for poissonian interval error
calculation
w2method: callable, optional
Function calculating CLs with signature ``low, high = fcn(w, w2)``. Here
``low`` and ``high`` are given in absolute terms, not relative to w.
Default is ``None``. If w2 has integer values (likely to be data) poisson
interval is calculated, otherwise the resulting error is symmetric
        ``sqrt(w2)``. Specifying ``poisson`` or ``sqrt`` will force that behaviour.
stack : bool, optional
Whether to stack or overlay non-axis dimension (if it exists). N.B. in
contrast to ROOT, stacking is performed in a single call aka
``histplot([h1, h2, ...], stack=True)`` as opposed to multiple calls.
density : bool, optional
If true, convert sum weights to probability density (i.e. integrates to 1
over domain of axis) (Note: this option conflicts with ``binwnorm``)
binwnorm : float, optional
If true, convert sum weights to bin-width-normalized, with unit equal to
supplied value (usually you want to specify 1.)
histtype: {'step', 'fill', 'errorbar'}, optional, default: "step"
Type of histogram to plot:
- "step": skyline/step/outline of a histogram using `plt.step <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.step.html#matplotlib.axes.Axes.step>`_
- "fill": filled histogram using `plt.fill_between <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.step.html#matplotlib.axes.Axes.step>`_
- "errorbar": single marker histogram using `plt.errorbar <https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.axes.Axes.step.html#matplotlib.axes.Axes.step>`_
xerr: bool or float, optional
Size of xerr if ``histtype == 'errorbar'``. If ``True``, bin-width will be used.
label : str or list, optional
Label for legend entry.
sort: {'label'/'l', 'yield'/'y'}, optional
Append '_r' for reverse.
edges : bool, default: True, optional
Specifies whether to draw first and last edges of the histogram
binticks : bool, default: False, optional
Attempts to draw x-axis ticks coinciding with bin boundaries if feasible.
ax : matplotlib.axes.Axes, optional
Axes object (if None, last one is fetched or one is created)
**kwargs :
Keyword arguments passed to underlying matplotlib functions -
{'step', 'fill_between', 'errorbar'}.
Returns
-------
List[Hist1DArtists]
"""
# ax check
if ax is None:
ax = plt.gca()
else:
if not isinstance(ax, plt.Axes):
raise ValueError("ax must be a matplotlib Axes object")
# arg check
_allowed_histtype = ["fill", "step", "errorbar"]
_err_message = f"Select 'histtype' from: {_allowed_histtype}"
assert histtype in _allowed_histtype, _err_message
# Convert 1/0 etc to real bools
stack = bool(stack)
density = bool(density)
edges = bool(edges)
binticks = bool(binticks)
# Process input
hists = list(process_histogram_parts(H, bins))
final_bins, xtick_labels = get_plottable_protocol_bins(hists[0].axes[0])
assert final_bins.ndim == 1, "bins need to be 1 dimensional"
_x_axes_label = ax.get_xlabel()
x_axes_label = (
_x_axes_label
if _x_axes_label != ""
else get_histogram_axes_title(hists[0].axes[0])
)
# Cast to plottables
plottables = [
Plottable(h.values(), edges=final_bins, variances=h.variances()) for h in hists
]
if w2 is not None:
for _w2, _plottable in zip(
w2.reshape(len(plottables), len(final_bins) - 1), plottables
):
_plottable.variances = _w2
_plottable.method = w2method
if w2 is not None and yerr is not None:
raise ValueError("Can only supply errors or w2")
_labels: list[str | None]
if label is None:
_labels = [None] * len(plottables)
elif isinstance(label, str):
_labels = [label] * len(plottables)
elif not np.iterable(label):
_labels = [str(label)] * len(plottables)
else:
_labels = [str(lab) for lab in label]
def iterable_not_string(arg):
return isinstance(arg, collections.abc.Iterable) and not isinstance(arg, str)
_chunked_kwargs: list[dict[str, Any]] = []
for _ in range(len(plottables)):
_chunked_kwargs.append({})
for kwarg in kwargs:
# Check if iterable
if iterable_not_string(kwargs[kwarg]):
# Check if tuple (can be used for colors)
if type(kwargs[kwarg]) == tuple:
for i in range(len(_chunked_kwargs)):
_chunked_kwargs[i][kwarg] = kwargs[kwarg]
else:
for i, kw in enumerate(kwargs[kwarg]):
_chunked_kwargs[i][kwarg] = kw
else:
for i in range(len(_chunked_kwargs)):
_chunked_kwargs[i][kwarg] = kwargs[kwarg]
_bin_widths = np.diff(final_bins)
_bin_centers = final_bins[1:] - _bin_widths / float(2)
############################
# # yerr calculation
_yerr: np.ndarray | None
if yerr is not None:
# yerr is array
if hasattr(yerr, "__len__"):
_yerr = np.asarray(yerr)
# yerr is a number
elif isinstance(yerr, (int, float)) and not isinstance(yerr, bool):
_yerr = np.ones((len(plottables), len(final_bins) - 1)) * yerr
# yerr is automatic
else:
_yerr = None
else:
_yerr = None
if _yerr is not None:
assert isinstance(_yerr, np.ndarray)
if _yerr.ndim == 3:
# Already correct format
pass
elif _yerr.ndim == 2 and len(plottables) == 1:
# Broadcast ndim 2 to ndim 3
if _yerr.shape[-2] == 2: # [[1,1], [1,1]]
_yerr = _yerr.reshape(len(plottables), 2, _yerr.shape[-1])
elif _yerr.shape[-2] == 1: # [[1,1]]
_yerr = np.tile(_yerr, 2).reshape(len(plottables), 2, _yerr.shape[-1])
else:
raise ValueError("yerr format is not understood")
elif _yerr.ndim == 2:
# Broadcast yerr (nh, N) to (nh, 2, N)
_yerr = np.tile(_yerr, 2).reshape(len(plottables), 2, _yerr.shape[-1])
elif _yerr.ndim == 1:
# Broadcast yerr (1, N) to (nh, 2, N)
_yerr = np.tile(_yerr, 2 * len(plottables)).reshape(
len(plottables), 2, _yerr.shape[-1]
)
else:
raise ValueError("yerr format is not understood")
assert _yerr is not None
for yrs, _plottable in zip(_yerr, plottables):
_plottable.fixed_errors(*yrs)
# Sorting
if sort is not None:
if isinstance(sort, str):
if sort.split("_")[0] in ["l", "label"] and isinstance(_labels, list):
order = np.argsort(label) # [::-1]
elif sort.split("_")[0] in ["y", "yield"]:
_yields = [np.sum(_h.values) for _h in plottables]
order = np.argsort(_yields)
if len(sort.split("_")) == 2 and sort.split("_")[1] == "r":
order = order[::-1]
elif isinstance(sort, list) or isinstance(sort, np.ndarray):
if len(sort) != len(plottables):
raise ValueError(
f"Sort indexing arrays is of the wrong size - {len(sort)}, {len(plottables)} expected."
)
order = np.asarray(sort)
else:
raise ValueError(f"Sort type: {sort} not understood.")
plottables = [plottables[ix] for ix in order]
_chunked_kwargs = [_chunked_kwargs[ix] for ix in order]
_labels = [_labels[ix] for ix in order]
# ############################
# # Stacking, norming, density
if density is True and binwnorm is not None:
raise ValueError("Can only set density or binwnorm.")
if density is True:
if stack:
_total = np.sum(np.array([plottable.values for plottable in plottables]))
for plottable in plottables:
plottable.flat_scale(1 / _total)
else:
for plottable in plottables:
plottable.density = True
elif binwnorm is not None:
for plottable, norm in zip(
plottables, np.broadcast_to(binwnorm, (len(plottables),))
):
plottable.flat_scale(norm / np.diff(final_bins))
# Stack
if stack and len(plottables) > 1:
from .utils import stack as stack_fun
plottables = stack_fun(*plottables)
##########
# Plotting
return_artists: list[StairsArtists | ErrorBarArtists] = []
if histtype == "step":
for i in range(len(plottables)):
do_errors = yerr is not False and (
(yerr is not None or w2 is not None)
or (plottables[i].variances is not None)
)
_kwargs = _chunked_kwargs[i]
_label = _labels[i] if do_errors else None
_step_label = _labels[i] if not do_errors else None
_kwargs = soft_update_kwargs(_kwargs, {"linewidth": 1.5})
_plot_info = plottables[i].to_stairs()
_plot_info["baseline"] = None if not edges else 0
_s = ax.stairs(
**_plot_info,
label=_step_label,
**_kwargs,
)
if do_errors:
_kwargs = soft_update_kwargs(_kwargs, {"color": _s.get_edgecolor()})
_kwargs["linestyle"] = "none"
_plot_info = plottables[i].to_errorbar()
_e = ax.errorbar(
**_plot_info,
**_kwargs,
)
_e_leg = ax.errorbar(
[], [], yerr=1, xerr=1, color=_s.get_edgecolor(), label=_label
)
return_artists.append(
StairsArtists(
_s,
_e if do_errors else None,
_e_leg if do_errors else None,
)
)
_artist = _s
elif histtype == "fill":
for i in range(len(plottables)):
_kwargs = _chunked_kwargs[i]
_f = ax.stairs(
**plottables[i].to_stairs(), label=_labels[i], fill=True, **_kwargs
)
return_artists.append(StairsArtists(_f, None, None))
_artist = _f
elif histtype == "errorbar":
err_defaults = {
"linestyle": "none",
"marker": ".",
"markersize": 10.0,
"elinewidth": 1,
}
if xerr is True:
_xerr = _bin_widths / 2
elif isinstance(xerr, (int, float)):
_xerr = xerr
for i in range(len(plottables)):
_plot_info = plottables[i].to_errorbar()
_plot_info["xerr"] = _xerr
_e = ax.errorbar(
**_plot_info,
label=_labels[i],
**soft_update_kwargs(_chunked_kwargs[i], err_defaults),
)
return_artists.append(ErrorBarArtists(_e))
_artist = _e[0]
# Add sticky edges for autoscale
_artist.sticky_edges.y.append(0)
if xtick_labels is None:
if binticks:
_slice = int(round(float(len(final_bins)) / len(ax.get_xticks()))) + 1
ax.set_xticks(final_bins[::_slice])
else:
ax.set_xticks(_bin_centers)
ax.set_xticklabels(xtick_labels)
if x_axes_label:
ax.set_xlabel(x_axes_label)
return return_artists
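# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Minimal call of ``histplot`` on a ``np.histogram`` tuple; the toy data below is
# an assumption made purely for demonstration.
def _example_histplot():
    rng = np.random.default_rng(0)
    vals, edges = np.histogram(rng.normal(size=1000), bins=30)
    # Errorbar-style histogram; yerr=True requests automatic (sqrt(N)/Poisson) errors.
    return histplot((vals, edges), yerr=True, histtype="errorbar", label="toy data")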
def hist2dplot(
H,
xbins=None,
ybins=None,
labels=None,
cbar=True,
cbarsize="7%",
cbarpad=0.2,
cbarpos="right",
cbarextend=False,
cmin=None,
cmax=None,
ax=None,
**kwargs,
):
"""
Create a 2D histogram plot from `np.histogram`-like inputs.
Parameters
----------
H : object
        Histogram object containing values and optionally bins. Can be:
- `np.histogram` tuple
- `boost_histogram` histogram object
- raw histogram values as list of list or 2d-array
xbins : 1D array-like, optional, default None
Histogram bins along x axis, if not part of ``H``.
ybins : 1D array-like, optional, default None
Histogram bins along y axis, if not part of ``H``.
labels : 2D array (H-like) or bool, default None, optional
        Array of per-bin labels to display. If ``True``, the numerical bin
        values are displayed.
cbar : bool, optional, default True
        Draw a colorbar. In contrast to the default mpl behaviour the cbar axes is
appended in such a way that it doesn't modify the original axes
width:height ratio.
cbarsize : str or float, optional, default "7%"
Colorbar width.
cbarpad : float, optional, default 0.2
Colorbar distance from main axis.
cbarpos : {'right', 'left', 'bottom', 'top'}, optional, default "right"
Colorbar position w.r.t main axis.
cbarextend : bool, optional, default False
Extends figure size to keep original axes size same as without cbar.
Only safe for 1 axes per fig.
cmin : float, optional
Colorbar minimum.
cmax : float, optional
Colorbar maximum.
ax : matplotlib.axes.Axes, optional
Axes object (if None, last one is fetched or one is created)
**kwargs :
Keyword arguments passed to underlying matplotlib function - pcolormesh.
Returns
-------
Hist2DArtist
"""
# ax check
if ax is None:
ax = plt.gca()
else:
if not isinstance(ax, plt.Axes):
raise ValueError("ax must be a matplotlib Axes object")
hist = hist_object_handler(H, xbins, ybins)
# TODO: use Histogram everywhere
H = hist.values()
xbins, xtick_labels = get_plottable_protocol_bins(hist.axes[0])
ybins, ytick_labels = get_plottable_protocol_bins(hist.axes[1])
xbin_centers = xbins[1:] - np.diff(xbins) / float(2)
ybin_centers = ybins[1:] - np.diff(ybins) / float(2)
_x_axes_label = ax.get_xlabel()
x_axes_label = (
_x_axes_label if _x_axes_label != "" else get_histogram_axes_title(hist.axes[0])
)
_y_axes_label = ax.get_ylabel()
y_axes_label = (
_y_axes_label if _y_axes_label != "" else get_histogram_axes_title(hist.axes[1])
)
H = H.T
if cmin is not None:
H[H < cmin] = None
if cmax is not None:
H[H > cmax] = None
X, Y = np.meshgrid(xbins, ybins)
kwargs.setdefault("shading", "flat")
pc = ax.pcolormesh(X, Y, H, **kwargs)
if x_axes_label:
ax.set_xlabel(x_axes_label)
if y_axes_label:
ax.set_ylabel(y_axes_label)
ax.set_xlim(xbins[0], xbins[-1])
ax.set_ylim(ybins[0], ybins[-1])
if xtick_labels is None: # Ordered axis
if len(ax.get_xticks()) > len(xbins) * 0.7:
ax.set_xticks(xbins)
else: # Categorical axis
ax.set_xticks(xbin_centers)
ax.set_xticklabels(xtick_labels)
if ytick_labels is None:
if len(ax.get_yticks()) > len(ybins) * 0.7:
ax.set_yticks(ybins)
else: # Categorical axis
ax.set_yticks(ybin_centers)
ax.set_yticklabels(ytick_labels)
if cbar:
cax = append_axes(
ax, size=cbarsize, pad=cbarpad, position=cbarpos, extend=cbarextend
)
cb_obj = plt.colorbar(pc, cax=cax)
else:
cb_obj = None
plt.sca(ax)
_labels: np.ndarray | None = None
if isinstance(labels, bool):
_labels = H if labels else None
elif np.iterable(labels):
label_array = np.asarray(labels).T
if H.shape == label_array.shape:
_labels = label_array
else:
raise ValueError(
f"Labels input has incorrect shape (expect: {H.shape}, got: {label_array.shape})"
)
elif labels is not None:
raise ValueError(
"Labels not understood, either specify a bool or a Hist-like array"
)
text_artists = []
if _labels is not None:
for ix, xc in enumerate(xbin_centers):
for iy, yc in enumerate(ybin_centers):
color = (
"black"
if isLight(pc.cmap(pc.norm(H[iy, ix]))[:-1])
else "lightgrey"
)
text_artists.append(
ax.text(
xc, yc, _labels[iy, ix], ha="center", va="center", color=color
)
)
return ColormeshArtists(pc, cb_obj, text_artists)
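# --- Hedged usage sketch (illustrative only, not part of the original module) ---
# Minimal ``hist2dplot`` call on raw 2D histogram values plus bin edges; the random
# data is an assumption made purely for demonstration.
def _example_hist2dplot():
    rng = np.random.default_rng(0)
    h, xedges, yedges = np.histogram2d(rng.normal(size=500), rng.normal(size=500), bins=20)
    # Draw the 2D histogram with a colorbar; pass labels=True to print per-bin values.
    return hist2dplot(h, xbins=xedges, ybins=yedges, cbar=True)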
#############################################
# Utils
def overlap(ax, bbox, get_vertices=False):
"""
    Find the overlap of ``bbox`` with the elements drawn on an axes.
"""
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle
# From
# https://github.com/matplotlib/matplotlib/blob/08008d5cb4d1f27692e9aead9a76396adc8f0b19/lib/matplotlib/legend.py#L845
lines = []
bboxes = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
lines.append(path)
for handle in ax.collections:
for path in handle.get_paths():
lines.append(path.interpolated(20))
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
# TODO Possibly other objects
vertices = np.concatenate([line.vertices for line in lines])
tvertices = [ax.transData.transform(v) for v in vertices]
overlap = bbox.count_contains(tvertices) + bbox.count_overlaps(bboxes)
if get_vertices:
return overlap, vertices
else:
return overlap
def _draw_leg_bbox(ax):
"""
    Draw the legend and fetch its bbox.
"""
fig = ax.figure
leg = ax.get_legend()
fig.canvas.draw()
return leg.get_frame().get_bbox()
def _draw_text_bbox(ax):
"""
    Fetch the bbox of the AnchoredText box(es) drawn on the axes.
"""
fig = ax.figure
textboxes = [k for k in ax.get_children() if type(k) == AnchoredText]
if len(textboxes) > 1:
print("Warning: More than one textbox found")
for box in textboxes:
if box.loc in [1, 2]:
bbox = box.get_tightbbox(fig.canvas.renderer)
else:
bbox = textboxes[0].get_tightbbox(fig.canvas.renderer)
return bbox
def yscale_legend(ax=None):
"""
Automatically scale y-axis up to fit in legend()
"""
if ax is None:
ax = plt.gca()
scale_factor = 10 ** (1.05) if ax.get_yscale() == "log" else 1.05
while overlap(ax, _draw_leg_bbox(ax)) > 0:
ax.set_ylim(ax.get_ylim()[0], ax.get_ylim()[-1] * scale_factor)
ax.figure.canvas.draw()
return ax
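# --- Hedged usage sketch (illustrative only) ---
# Typical post-plot call: draw the legend, then grow the y-range until the legend
# box no longer overlaps any plotted artist. Assumes labelled artists already exist.
def _example_yscale_legend(ax):
    ax.legend()
    return yscale_legend(ax)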
def yscale_text(ax=None):
"""
Automatically scale y-axis up to fit AnchoredText
"""
if ax is None:
ax = plt.gca()
while overlap(ax, _draw_text_bbox(ax)) > 0:
ax.set_ylim(ax.get_ylim()[0], ax.get_ylim()[-1] * 1.1)
ax.figure.canvas.draw()
return ax
def ylow(ax=None, ylow=None):
"""
    Set the lower y limit to 0 if no data/errors go lower, or set it to a
    specific value.
"""
if ax is None:
ax = plt.gca()
if ax.get_yaxis().get_scale() == "log":
return ax
if ylow is None:
# Check full figsize below 0
bbox = Bbox.from_bounds(
0, 0, ax.get_window_extent().width, -ax.get_window_extent().height
)
if overlap(ax, bbox) == 0:
ax.set_ylim(0, None)
else:
ydata = overlap(ax, bbox, get_vertices=True)[1][:, 1]
ax.set_ylim(np.min([np.min(ydata), ax.get_ylim()[0]]), None)
else:
        ax.set_ylim(ylow, ax.get_ylim()[-1])
return ax
def mpl_magic(ax=None, info=True):
"""
    Consolidate all ex-post style adjustments:
        ylow
        yscale_legend
        yscale_text
    """
    if ax is None:
        ax = plt.gca()
    if info:
        print("Running ROOT/CMS style adjustments (hide with info=False):")
ax = ylow(ax)
ax = yscale_legend(ax)
ax = yscale_text(ax)
return ax
########################################
# Figure/axes helpers
def rescale_to_axessize(ax, w, h):
"""
Adjust figure size to axes size in inches
Parameters: w, h: width, height in inches
"""
if not ax:
ax = plt.gca()
left = ax.figure.subplotpars.left
r = ax.figure.subplotpars.right
t = ax.figure.subplotpars.top
b = ax.figure.subplotpars.bottom
figw = float(w) / (r - left)
figh = float(h) / (t - b)
ax.figure.set_size_inches(figw, figh)
def box_aspect(ax, aspect=1):
"""
    Set the physical aspect ratio of the axes box (without changing figure size).
    Parameters: aspect: float, optional aspect ratio
"""
position = ax.get_position()
fig_width, fig_height = ax.get_figure().get_size_inches()
fig_aspect = fig_height / fig_width
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(aspect, pb, fig_aspect)
ax.set_position(pb1)
class RemainderFixed(axes_size.Scaled):
def __init__(self, xsizes, ysizes, divider):
self.xsizes = xsizes
self.ysizes = ysizes
self.div = divider
def get_size(self, renderer):
xrel, xabs = axes_size.AddList(self.xsizes).get_size(renderer)
yrel, yabs = axes_size.AddList(self.ysizes).get_size(renderer)
bb = Bbox.from_bounds(*self.div.get_position()).transformed(
self.div._fig.transFigure
)
w = bb.width / self.div._fig.dpi - xabs
h = bb.height / self.div._fig.dpi - yabs
return 0, min([w, h])
def make_square_add_cbar(ax, size=0.4, pad=0.1):
"""
Make input axes square and return an appended axes to the right for
a colorbar. Both axes resize together to fit figure automatically.
Works with tight_layout().
"""
divider = make_axes_locatable(ax)
margin_size = axes_size.Fixed(size)
pad_size = axes_size.Fixed(pad)
xsizes = [pad_size, margin_size]
ysizes = xsizes
cax = divider.append_axes("right", size=margin_size, pad=pad_size)
divider.set_horizontal([RemainderFixed(xsizes, ysizes, divider)] + xsizes)
divider.set_vertical([RemainderFixed(xsizes, ysizes, divider)] + ysizes)
return cax
def append_axes(ax, size=0.1, pad=0.1, position="right", extend=False):
"""
Append a side ax to the current figure and return it.
Figure is automatically extended along the direction of the added axes to
accommodate it. Unfortunately can not be reliably chained.
"""
fig = ax.figure
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
def convert(fraction, position=position):
if isinstance(fraction, str) and fraction.endswith("%"):
if position in ["right", "left"]:
fraction = width * float(fraction.strip("%")) / 100
elif position in ["top", "bottom"]:
fraction = height * float(fraction.strip("%")) / 100
return fraction
size = convert(size)
pad = convert(pad)
divider = make_axes_locatable(ax)
margin_size = axes_size.Fixed(size)
pad_size = axes_size.Fixed(pad)
xsizes = [pad_size, margin_size]
if position in ["top", "bottom"]:
xsizes = xsizes[::-1]
yhax = divider.append_axes(position, size=margin_size, pad=pad_size)
if extend:
def extend_ratio(ax, yhax):
ax.figure.canvas.draw()
orig_size = ax.get_position().size
new_size = sum(itax.get_position().size for itax in [ax, yhax])
return new_size / orig_size
if position in ["right"]:
divider.set_horizontal([axes_size.Fixed(width)] + xsizes)
fig.set_size_inches(
fig.get_size_inches()[0] * extend_ratio(ax, yhax)[0],
fig.get_size_inches()[1],
)
elif position in ["left"]:
divider.set_horizontal(xsizes[::-1] + [axes_size.Fixed(width)])
fig.set_size_inches(
fig.get_size_inches()[0] * extend_ratio(ax, yhax)[0],
fig.get_size_inches()[1],
)
elif position in ["top"]:
divider.set_vertical([axes_size.Fixed(height)] + xsizes[::-1])
fig.set_size_inches(
fig.get_size_inches()[0],
fig.get_size_inches()[1] * extend_ratio(ax, yhax)[1],
)
ax.get_shared_x_axes().join(ax, yhax)
elif position in ["bottom"]:
divider.set_vertical(xsizes + [axes_size.Fixed(height)])
fig.set_size_inches(
fig.get_size_inches()[0],
fig.get_size_inches()[1] * extend_ratio(ax, yhax)[1],
)
ax.get_shared_x_axes().join(ax, yhax)
return yhax
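# --- Hedged usage sketch (illustrative only) ---
# Append a slim axes to the right of ``ax`` and use it as a colorbar axes for an
# existing mappable (e.g. the pcolormesh returned by ``hist2dplot``).
def _example_append_cax(ax, mappable):
    cax = append_axes(ax, size="7%", pad=0.2, position="right")
    return plt.colorbar(mappable, cax=cax)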
####################
# Legend Helpers
def hist_legend(ax=None, **kwargs):
from matplotlib.lines import Line2D
if ax is None:
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
new_handles = [
Line2D([], [], c=h.get_edgecolor()) if type(h) == mpl.patches.Polygon else h
for h in handles
]
ax.legend(handles=new_handles[::-1], labels=labels[::-1], **kwargs)
return ax
def sort_legend(ax, order=None):
"""
ax : axes with legend labels in it
order : Ordered dict with renames or array with order
"""
handles, labels = ax.get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
if isinstance(order, OrderedDict):
ordered_label_list = list(order.keys())
elif isinstance(order, (list, tuple, np.ndarray)):
ordered_label_list = list(order)
elif order is None:
ordered_label_list = labels
else:
raise TypeError(f"Unexpected values type of order: {type(order)}")
ordered_label_list = [entry for entry in ordered_label_list if entry in labels]
ordered_label_values = [by_label[k] for k in ordered_label_list]
if isinstance(order, OrderedDict):
ordered_label_list = [order[k] for k in ordered_label_list]
return ordered_label_values, ordered_label_list
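# --- Hedged usage sketch (illustrative only) ---
# Reorder an existing legend and re-draw it; the label names are assumptions.
def _example_sort_legend(ax):
    handles, labels = sort_legend(ax, order=["data", "signal", "background"])
    ax.legend(handles, labels)
    return ax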
|
py | b40ea3c9feeb605722e35636b4bd74f6486e44cb | from __future__ import print_function
# Time: O(n * m)
# Space: O(n + m)
class Solution:
# @return an integer
def minDistance(self, word1, word2):
if len(word1) < len(word2):
return self.minDistance(word2, word1)
distance = [i for i in xrange(len(word2) + 1)]
for i in xrange(1, len(word1) + 1):
pre_distance_i_j = distance[0]
distance[0] = i
for j in xrange(1, len(word2) + 1):
insert = distance[j - 1] + 1
delete = distance[j] + 1
replace = pre_distance_i_j
if word1[i - 1] != word2[j - 1]:
replace += 1
pre_distance_i_j = distance[j]
distance[j] = min(insert, delete, replace)
return distance[-1]
class Solution2:
# @return an integer
def minDistance(self, word1, word2):
distance = [[i] for i in xrange(len(word1) + 1)]
distance[0] = [j for j in xrange(len(word2) + 1)]
for i in xrange(1, len(word1) + 1):
for j in xrange(1, len(word2) + 1):
insert = distance[i][j - 1] + 1
delete = distance[i - 1][j] + 1
replace = distance[i - 1][j - 1]
if word1[i - 1] != word2[j - 1]:
replace += 1
distance[i].append(min(insert, delete, replace))
return distance[-1][-1]
if __name__ == "__main__":
print(Solution().minDistance("Rabbit", "Racket"))
print(Solution2().minDistance("Rabbit", "Rabket"))
print(Solution().minDistance("Rabbit", "Rabbitt"))
|
py | b40ea69f2ae42ff17b65fbbc52988e6036f87df5 | # https://leetcode.com/problems/max-points-on-a-line
from collections import defaultdict, Counter
from decimal import *
class Solution:
def maxPoints(self, points):
N = len(points)
if N == 1 or N == 0:
return N
        # (slope, intercept) -> set of points on that line
d = defaultdict(set)
# detect duplicate
c = Counter([tuple(point) for point in points])
keys = []
for key in c:
if c[key] <= 1:
keys.append(key)
for key in keys:
del c[key]
for i in range(N):
for j in range(i + 1, N):
x1, y1 = points[i]
x2, y2 = points[j]
# calc coef and intercept
if x2 - x1 != 0:
m = Decimal(y2 - y1) / Decimal(x2 - x1)
b = m * (-x1) + y1
d[(str(m), str(b))].add((x1, y1))
d[(str(m), str(b))].add((x2, y2))
else:
d[(str(x1))].add((x1, y1))
d[(str(x1))].add((x2, y2))
ans = 1
for key, p_set in d.items():
tmp_val = 0
for c_key in c:
if c_key in p_set:
tmp_val += c[c_key] - 1
ans = max(ans, len(d[key]) + tmp_val)
return ans
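if __name__ == "__main__":
    # Illustrative check with made-up points: three collinear points and one
    # outlier, so the expected answer is 3.
    print(Solution().maxPoints([[1, 1], [2, 2], [3, 3], [0, 4]]))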
|
py | b40ea6cc9fb22d650be01143b1ebb8b91c8ab3fa | import torch
from torch.nn.functional import one_hot
def select_greedy_action(state, model):
state = torch.from_numpy(state).float()
out = model(state)
if len(out) > 1:
out = out[0]
return out.argmax().item()
def indicator_fn(action_e, n_actions, loss_different=0.8):
l_val = torch.full((action_e.shape[0], n_actions), loss_different)
action_e_mask = one_hot(action_e.squeeze(-1), num_classes=n_actions).bool()
l_val[action_e_mask] = 0
    return l_val
|
py | b40ea6e1b4ae2aa99dfa79ea761bc6ec018bc5b4 | # -*- coding: utf-8 -*-
from celery import chain
from celery import group
from .messages import Message
from .messages import ExceptionMessage
from .models import MinkeSession
from .tasks import process_session
from .tasks import cleanup
def process(session_cls, queryset, session_data, user,
fabric_config=None, wait=False, console=False):
"""
Initiate and run celery-tasks.
"""
    # TODO: Add a MinkeSession lock. Locking the host should be optional.
MinkeSession.objects.clear_currents(user, queryset)
hosts = queryset.get_hosts()
lock = hosts.filter(disabled=False).get_lock()
# group sessions by hosts
session_groups = dict()
for minkeobj in queryset.select_related_hosts():
host = minkeobj.get_host()
session = MinkeSession()
session.init(user, minkeobj, session_cls, session_data)
# Skip disabled or locked hosts...
if host.disabled:
msg = '{}: Host is disabled.'.format(minkeobj)
session.messages.add(Message(msg, 'error'), bulk=False)
session.cancel()
if console: session.prnt()
elif host.lock and host.lock != lock:
msg = '{}: Host is locked.'.format(minkeobj)
session.messages.add(Message(msg, 'error'), bulk=False)
session.cancel()
if console: session.prnt()
# otherwise group sessions by hosts...
else:
if host not in session_groups:
session_groups[host] = list()
session_groups[host].append(session)
# Stop here if no valid hosts are left...
if not session_groups: return
# merge fabric-config and invoke-config
config = session_cls.invoke_config.copy()
config.update(fabric_config or dict())
# run celery-tasks...
results = list()
for host, sessions in session_groups.items():
# get process_session_signatures for all sessions
signatures = [process_session.si(host.id, s.id, config) for s in sessions]
        # To support parallel execution per host we wrap the signatures in a group.
        # NOTE: Since we append the cleanup-task the construct is essentially the
        # same as a chord, which is not supported by all result-backends (see celery docs).
if session_cls.parrallel_per_host:
signatures = [group(*signatures)]
# append the cleanup-task
signatures.append(cleanup.si(host.id))
try:
result = chain(*signatures).delay()
# NOTE: celery-4.2.1 fails to raise an exception if rabbitmq is
# down or no celery-worker is running at all... hope for 4.3.x
except process_session.OperationalError:
host.release_lock()
for session in sessions:
session.add_msg(ExceptionMessage())
session.cancel()
if console: session.prnt(session)
else:
results.append((result, (s.id for s in sessions)))
# print sessions in cli-mode as soon as they are ready...
if console:
print_results = results[:]
while print_results:
# try to find a ready result...
try: result, session_ids = next((r for r in print_results if r[0].ready()))
except StopIteration: continue
# reload session-objects
sessions = MinkeSession.objects.filter(id__in=session_ids)
# print and remove list-item
for session in sessions: session.prnt()
print_results.remove((result, session_ids))
    # Optionally wait till all tasks are finished...
elif wait:
for result, sessions in results:
result.wait()
    # At least call forget on every result - in case a result-backend is in use
    # that eats up resources to store result-data...
for result, sessions in results:
try: result.forget()
except NotImplementedError: pass
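# --- Hedged usage sketch (not part of the original module) ---
# How ``process`` might be invoked from a view or management command; the session
# class and queryset are placeholders supplied by the caller, not objects defined here.
def example_run(session_cls, queryset, user):
    process(session_cls, queryset, session_data={}, user=user,
            fabric_config=None, wait=True, console=False)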
|
py | b40ea88fc9ed558dc65386898bd87cb5652fae58 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sparse attention utils."""
import functools
import itertools
import math
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.ops import control_flow_util # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.training import moving_averages # pylint: disable=g-direct-tensorflow-import
def is_xla_compiled():
"""Whether we are building graph that will be compiled by XLA.
This checks whether the code is executing within an XLA context.
If True, model authors should ensure the graph they build is compilable by
XLA. Specifically, they should ensure that all ops have XLA implementations
and that all shapes are statically known.
Returns:
bool, whether the current graph will be compiled for XLA.
"""
ctxt = tf.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
x = control_flow_util.GetContainingXLAContext(ctxt) is not None
return x
def get_channel_embeddings(io_depth,
targets,
hidden_size,
name="channel",
vocab_size=256):
"""Get separate embedding for each of the channels."""
targets_split = tf.split(targets, io_depth, axis=3)
rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name,
[vocab_size * io_depth, hidden_size])
rgb_embedding_var = tf.identity(rgb_embedding_var)
rgb_embedding_var *= float(hidden_size)**0.5
channel_target_embs = []
for i in range(io_depth):
# Adding the channel offsets to get the right embedding since the
# embedding tensor has shape 256 * io_depth, hidden_size
target_ids = tf.squeeze(targets_split[i], axis=3) + i * vocab_size
target_embs = tf.gather(rgb_embedding_var, target_ids)
channel_target_embs.append(target_embs)
return tf.concat(channel_target_embs, axis=-1)
def get_embeddings(targets, vocab_size, hidden_size, name="embeddings"):
"""Get embeddings for symbols in the targets."""
with tf.variable_scope(name_or_scope=name):
var = tf.get_variable("embedding", shape=[vocab_size, hidden_size])
embed = tf.gather(var, targets)
return embed
def right_shift_blockwise_nd(x, block_shape):
"""Right shift once in every block.
Args:
x: a [batch, d1, d2, ..., dn, depth] tensor
block_shape: a tuple (q1, q2, ..., qn) representing the block shape
Returns:
a [batch, d1, d2, ..., dn, depth] tensor, right shifted.
"""
blocked_x = break_into_blocks_nd(x, block_shape)
blocked_x_shape = shape_list(blocked_x)
blocked_x = tf.reshape(blocked_x,
[blocked_x_shape[0], -1, blocked_x_shape[-1]])
padded_x = tf.pad(blocked_x, [[0, 0], [1, 0], [0, 0]])
x = tf.slice(padded_x, [0, 0, 0],
[-1, np.prod(blocked_x_shape[1:-1], dtype=np.int32), -1])
x = tf.reshape(x, blocked_x_shape)
return put_back_blocks_nd(x, block_shape)
def add_positional_embedding_nd(x, max_length, name=None):
"""Adds n-dimensional positional embedding.
The embeddings add to all positional dimensions of the tensor.
Args:
x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
Returns:
Tensor of same shape as x.
"""
with tf.name_scope("add_positional_embedding_nd"):
x_shape = shape_list(x)
num_dims = len(x_shape) - 2
depth = x_shape[-1]
base_shape = [1] * (num_dims + 1) + [depth]
base_start = [0] * (num_dims + 2)
base_size = [-1] + [1] * num_dims + [depth]
for i in range(num_dims):
shape = base_shape[:]
start = base_start[:]
size = base_size[:]
shape[i + 1] = max_length
size[i + 1] = x_shape[i + 1]
var = tf.get_variable(
name + "_%d" % i,
shape,
initializer=tf.random_normal_initializer(0, depth**-0.5))
var = var * depth**0.5
x += tf.slice(var, start, size)
return x
def multihead_attention_nd_partial(hparams):
"""Returns partial multihead_attention_nd to reduce boilerplate."""
multihead_fn = functools.partial(
multihead_attention_nd,
output_depth=hparams.hidden_size,
query_shape=hparams.query_shape,
memory_query_shape=hparams.memory_query_shape,
memory_flange=hparams.memory_flange,
sparsity_cluster_size=hparams.sparsity_cluster_size,
sparsity_cluster_attention_window=hparams
.sparsity_cluster_attention_window,
sparsity_cluster_strided_num_heads=hparams
.sparsity_cluster_strided_num_heads,
sparsity_cluster_strided_relative=hparams
.sparsity_cluster_strided_relative,
sparsity_strided_num_heads=hparams.sparsity_strided_num_heads,
sparsity_strided_relative=hparams.sparsity_strided_relative,
mode=hparams.mode,
cache_padding_bias=hparams.cache_padding_bias,
max_relative_position=hparams.max_relative_position,
dropout_rate=hparams.attention_dropout,
ema=hparams.ema,
beta=hparams.beta,
decay=hparams.decay,
hash_items=hparams.hash_items,
use_tpu=hparams.use_tpu)
return multihead_fn
def transformer_encoder_layers(inputs,
num_layers,
hparams,
losses,
name="transformer_encoder",
token_bias=None,
padding_bias=None):
"""Multi layer transformer encoder with default un-masked attention.
Args:
inputs: Input tensor to the attention layers.
num_layers: Number of attention layers.
hparams: Hparam object containing attention configurations.
losses: Losses dict for training.
name: Name of the layers.
token_bias: Externally provided attention bias for self attention on inputs.
padding_bias: Padding bias for seq2seq models (Shape: [b, s]).
Returns:
Output transformed by self-attention.
"""
x = inputs
if hparams.layer_prepostprocess_dropout:
x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
key_depth = hparams.attention_key_channels or hparams.hidden_size
value_depth = hparams.attention_value_channels or hparams.hidden_size
# A placeholder for attention bias cache tensors to facilitate sharing across
# attention types/layers.
bias_cache = {}
multihead_attention_fn = multihead_attention_nd_partial(hparams)
local_heads, sparsity_cluster_heads = 0, 0
for layer in range(num_layers):
local_heads = hparams.local_num_heads
sparsity_cluster_heads = hparams.sparsity_cluster_num_heads
if layer < hparams.sparsity_skip_first:
local_heads = hparams.local_num_heads + hparams.sparsity_cluster_num_heads
sparsity_cluster_heads = 0
with tf.variable_scope("%s_layer_%d" % (name, layer), reuse=tf.AUTO_REUSE):
with tf.variable_scope("self_attention"):
y = multihead_attention_fn(
query_antecedent=layer_preprocess(x, hparams),
memory_antecedent=None,
total_key_depth=key_depth,
total_value_depth=value_depth,
masked=False,
losses=losses,
name="self_attention",
bias_cache=bias_cache,
local_relative=hparams.local_relative,
local_num_heads=local_heads,
sparsity_cluster_relative=hparams.sparsity_cluster_relative,
sparsity_cluster_num_heads=sparsity_cluster_heads,
is_recomputing=False,
share_qk=False, # No need to share qk for encoder self attn
token_bias=token_bias,
token_bias_wt_trainable=hparams.token_bias_wt_trainable,
padding_bias=padding_bias)
x = layer_postprocess(x, y, hparams)
# feed-fwd layers + skip connections
y = ffn_layer(layer_preprocess(x, hparams), hparams)
x = layer_postprocess(x, y, hparams)
return layer_preprocess(x, hparams)
def transformer_decoder_layers(inputs,
num_layers,
hparams,
losses,
encoder_output=None,
decode_step=None,
cache=None,
name="transformer_decoder",
decoding_stats=None,
token_bias_inputs=None,
token_bias_targets=None,
padding_bias=None):
"""Multi layer transformer encoder or decoder with default masked attention.
Args:
inputs: Input tensor to the attention layers.
num_layers: Number of attention layers.
hparams: Hparam object containing attention configurations.
losses: Losses dict for training.
encoder_output: Optional argument signifying encoder output.
decode_step: Decode step for decoding.
cache: Cache containing layer attention values for faster computation.
name: Name of the layers.
decoding_stats: Dictionary containing decoding stats.
token_bias_inputs: Externally provided attention bias on encoder inputs.
token_bias_targets: Externally provided attention bias on decoder targets.
padding_bias: Padding bias for seq2seq models (Shape: [b, s]).
Returns:
Output transformed by self-attention.
"""
x = inputs
if hparams.layer_prepostprocess_dropout:
x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout)
key_depth = hparams.attention_key_channels or hparams.hidden_size
value_depth = hparams.attention_value_channels or hparams.hidden_size
# A placeholder for attention bias cache tensors to facilitate sharing across
# attention types/layers.
bias_cache = {}
multihead_attention_fn = multihead_attention_nd_partial(hparams)
local_heads, sparsity_cluster_heads = 0, 0
for layer in range(num_layers):
local_heads = hparams.local_num_heads
sparsity_cluster_heads = hparams.sparsity_cluster_num_heads
if layer < hparams.sparsity_skip_first:
local_heads = hparams.local_num_heads + hparams.sparsity_cluster_num_heads
sparsity_cluster_heads = 0
with tf.variable_scope("%s_layer_%d" % (name, layer), reuse=tf.AUTO_REUSE):
layer_cache = None
if decode_step is None and cache is not None:
# Initialize layer cache.
cache[layer] = {}
layer_cache = cache[layer]
if decode_step is not None:
layer_cache = cache[layer]
with tf.variable_scope("self_attention"):
y = multihead_attention_fn(
query_antecedent=layer_preprocess(x, hparams),
memory_antecedent=None,
total_key_depth=key_depth,
total_value_depth=value_depth,
masked=True,
losses=losses,
decode_step=decode_step,
cache=layer_cache,
name="self_attention",
bias_cache=bias_cache,
is_recomputing=False,
local_relative=hparams.local_relative,
local_num_heads=local_heads,
sparsity_cluster_relative=hparams.sparsity_cluster_relative,
sparsity_cluster_num_heads=sparsity_cluster_heads,
decoding_stats=decoding_stats,
share_qk=hparams.share_qk,
token_bias=token_bias_targets,
token_bias_wt_trainable=hparams.token_bias_wt_trainable,
padding_bias=None)
x = layer_postprocess(x, y, hparams)
if encoder_output is not None:
y = multihead_attention_fn(
query_antecedent=layer_preprocess(x, hparams),
memory_antecedent=encoder_output,
total_key_depth=key_depth,
total_value_depth=value_depth,
masked=False,
losses=losses,
decode_step=decode_step,
cache=layer_cache,
name="enc_dec_attention",
bias_cache=bias_cache,
is_recomputing=False,
local_relative=False,
local_num_heads=local_heads,
sparsity_cluster_relative=False,
sparsity_cluster_num_heads=sparsity_cluster_heads,
decoding_stats=decoding_stats,
share_qk=False, # No need to share qk for encoder-decoder attn
token_bias=token_bias_inputs,
token_bias_wt_trainable=hparams.token_bias_wt_trainable,
padding_bias=padding_bias)
x = layer_postprocess(x, y, hparams)
# feed-fwd layers + skip connections
y = ffn_layer(layer_preprocess(x, hparams), hparams)
x = layer_postprocess(x, y, hparams)
if decode_step is not None:
x = get_item_at_decode_step(x, decode_step, hparams.query_shape)
return layer_preprocess(x, hparams)
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
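# Hedged numeric sanity check of the tanh approximation above (illustrative only;
# uses NumPy so it runs without building a TF graph). For x = 1.0 it gives ~0.8412,
# close to the exact GELU value x * Phi(x) ~= 0.8413.
def _gelu_reference(x):
  return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))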
def geglu(inputs,
filter_size,
output_size,
output_activation=None,
dropout=0.0,
dropout_broadcast_dims=None,
name=None):
"""GEGLU activation as in https://arxiv.org/abs/2002.05202."""
# layer_name is appended with "conv1" or "conv2" in this method only for
# historical reasons. These are in fact dense layers.
layer_name = "%s_{}" % name if name else "{}"
h = tf.layers.dense(
inputs,
filter_size,
use_bias=False,
activation=None,
name=layer_name.format("weight1"))
h = gelu(h)
v = tf.layers.dense(
inputs,
filter_size,
use_bias=False,
activation=None,
name=layer_name.format("weight2"))
h *= v
if dropout != 0.0:
h = dropout_with_broadcast_dims(
h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims)
o = tf.layers.dense(
h,
output_size,
activation=output_activation,
use_bias=False,
name=layer_name.format("weight3"))
return o
def dense_relu_dense(inputs,
filter_size,
output_size,
output_activation=None,
dropout=0.0,
dropout_broadcast_dims=None,
name=None):
"""Hidden layer with RELU activation followed by linear projection."""
# layer_name is appended with "conv1" or "conv2" in this method only for
# historical reasons. These are in fact dense layers.
layer_name = "%s_{}" % name if name else "{}"
h = tf.layers.dense(
inputs,
filter_size,
use_bias=True,
activation=tf.nn.relu,
name=layer_name.format("conv1"))
if dropout != 0.0:
h = dropout_with_broadcast_dims(
h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims)
o = tf.layers.dense(
h,
output_size,
activation=output_activation,
use_bias=True,
name=layer_name.format("conv2"))
return o
def layer_norm_vars(filters):
"""Create Variables for layer norm."""
scale = tf.get_variable(
"layer_norm_scale", [filters], initializer=tf.ones_initializer())
bias = tf.get_variable(
"layer_norm_bias", [filters], initializer=tf.zeros_initializer())
return scale, bias
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
"""Layer norm raw computation."""
# Save these before they get converted to tensors by the casting below
params = (scale, bias)
epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(
tf.squared_difference(x, mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
output = norm_x * scale + bias
if layer_collection:
# Note that the first dimension of norm_x must be the batch size
layer_collection.register_scale_and_shift(
params, norm_x, output, approx="full")
return output
def layer_norm(x,
filters=None,
epsilon=1e-6,
name=None,
reuse=None,
layer_collection=None,
scaling=True):
"""Layer normalize the tensor x, averaging over the last dimension."""
if filters is None:
filters = shape_list(x)[-1]
with tf.variable_scope(
name, default_name="layer_norm", values=[x], reuse=reuse):
if scaling:
scale, bias = layer_norm_vars(filters)
else:
scale = tf.constant(1.0)
bias = tf.constant(0.0)
return layer_norm_compute(
x, epsilon, scale, bias, layer_collection=layer_collection)
def ffn_layer(x, hparams):
"""ffn layer transformer."""
with tf.variable_scope("ffn"):
if hparams.ffn_layer == "none":
return x
elif hparams.ffn_layer == "geglu":
return geglu(
x,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout)
else:
return dense_relu_dense(
x,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout)
def layer_prepostprocess(previous_value,
x,
sequence,
dropout_rate,
depth,
epsilon,
default_name,
name=None,
dropout_broadcast_dims=None,
layer_collection=None):
"""Apply a sequence of functions to the input or output of a layer.
The sequence is specified as a string which may contain the following
characters:
a: add previous_value
n: apply normalization
d: apply dropout
For example, if sequence=="dna", then the output is
previous_value + normalize(dropout(x))
Args:
previous_value: A Tensor, to be added as a residual connection ('a')
x: A Tensor to be transformed.
sequence: a string.
dropout_rate: a float
depth: an integer (size of last dimension of x).
epsilon: a float (parameter for normalization)
default_name: a string
name: a string
dropout_broadcast_dims: an optional list of integers less than 3 specifying
in which dimensions to broadcast the dropout decisions. saves memory.
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC
optimizer. Default is None.
Returns:
a Tensor
"""
with tf.variable_scope(name, default_name=default_name):
if sequence == "none":
return x
for c in sequence:
if c == "a":
x += previous_value
elif c == "n":
x = layer_norm(x, depth, epsilon, layer_collection=layer_collection)
elif c == "d":
x = dropout_with_broadcast_dims(
x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
else:
raise ValueError("Unknown processing sequence.")
return x
def comma_separated_string_to_integer_list(s):
return [int(i) for i in s.split(",") if i]
def layer_preprocess(layer_input, hparams, layer_collection=None):
"""Apply layer preprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_preprocess_sequence
layer_prepostprocess_dropout
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
hparams: a hyperparameters object.
layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC
optimizer. Default is None.
Returns:
a Tensor
"""
assert "a" not in hparams.layer_preprocess_sequence, (
"No residual connections allowed in hparams.layer_preprocess_sequence")
assert "z" not in hparams.layer_preprocess_sequence, (
"No residual connections allowed in hparams.layer_preprocess_sequence")
return layer_prepostprocess(
None,
layer_input,
sequence=hparams.layer_preprocess_sequence,
dropout_rate=hparams.layer_prepostprocess_dropout,
depth=None,
epsilon=hparams.norm_epsilon,
dropout_broadcast_dims=comma_separated_string_to_integer_list(
getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
default_name="layer_prepostprocess",
layer_collection=layer_collection)
def layer_postprocess(layer_input, layer_output, hparams):
"""Apply layer postprocessing.
See layer_prepostprocess() for details.
A hyperparameters object is passed for convenience. The hyperparameters
that may be used are:
layer_postprocess_sequence
layer_prepostprocess_dropout
hidden_size
norm_epsilon
Args:
layer_input: a Tensor
layer_output: a Tensor
hparams: a hyperparameters object.
Returns:
a Tensor
"""
return layer_prepostprocess(
layer_input,
layer_output,
sequence=hparams.layer_postprocess_sequence,
dropout_rate=hparams.layer_prepostprocess_dropout,
depth=None,
epsilon=hparams.norm_epsilon,
dropout_broadcast_dims=comma_separated_string_to_integer_list(
getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")),
default_name="layer_postprocess")
def multihead_attention_nd(query_antecedent, # pylint: disable=dangerous-default-value
memory_antecedent,
total_key_depth,
total_value_depth,
output_depth,
query_shape,
memory_query_shape,
memory_flange,
local_num_heads,
local_relative=False,
sparsity_cluster_size=None,
sparsity_cluster_attention_window=None,
sparsity_cluster_num_heads=0,
sparsity_cluster_relative=False,
sparsity_cluster_strided_num_heads=0,
sparsity_cluster_strided_relative=False,
sparsity_strided_num_heads=0,
sparsity_strided_relative=False,
losses=None,
mode=tf.estimator.ModeKeys.EVAL,
masked=False,
cache=None,
decode_step=None,
name=None,
cache_padding_bias=False,
max_relative_position=None,
dropout_rate=0.,
bias_cache={},
ema=False,
beta=1e-4,
decay=0.99,
share_qk=False,
hash_items=False,
is_recomputing=False,
decoding_stats=None,
token_bias=None,
token_bias_wt_trainable=False,
padding_bias=None,
use_tpu=False):
"""n-d Multihead scaled-dot-product attention with in/output transformations.
Args:
query_antecedent: a Tensor with shape [batch, d1, ..., dn, depth_q].
memory_antecedent: a Tensor with shape [batch, d1, ..., dn, depth_m] or None
for self attention.
total_key_depth: an integer
total_value_depth: an integer
output_depth: an integer
query_shape: an tuple indicating the dimensions of each query block.
memory_query_shape: query shape for memory antecedent (enc-dec).
memory_flange: an integer indicating how much to look around a query block
in each dimension
local_num_heads: How many heads to use for local-nd attention. The sum of
all heads should divide total_key_depth and total_value_depth.
local_relative: whether to use relative for local heads,
sparsity_cluster_size: Number of clusters.
sparsity_cluster_attention_window: number of items within a cluster to
attend to.
sparsity_cluster_num_heads: how many heads to use for attention within
cluster.
sparsity_cluster_relative: whether to use relative for clustered attention
sparsity_cluster_strided_num_heads: how many heads to use for attending to
other clusters.
sparsity_cluster_strided_relative: whether to use relative for strided
clustering
sparsity_strided_num_heads: how many heads to use for strided sparsity.
sparsity_strided_relative: whether to use relative for strided heads.
losses: a list of extra losses.
mode: a tf.estimator.ModeKeys.
masked: a boolean to specify whether to do masked or unmasked attention.
cache: a dict like: {
'q': [batch, num_heads, d1, ..., dn, depth_k // num_heads],
'k': [batch, num_heads, d1, ..., dn, depth_k // num_heads],
'v': [batch, num_heads, d1, ..., dn, depth_v // num_heads]} Caller should
initially pass an empty dictionary and this method will update cache and
caller should pass the same cache in consecutive calls. This works for
both GPU and TPU inference. `memory_antecedent` should be None in this
case, since auto-regressive decoding only applies to self attention.
decode_step: integer to pass in decoding mode. `cache` and `decode_step`
should both be set in decoding mode. Caller can also pass an empty `cache`
without `decode_step`, for this method to initialize the cache for future
calls with `decode_step` > 0.
name: an optional string
cache_padding_bias: If sequences are not variable length (e.g. images and
videos) and the only source of padding is to be evenly divisible by blocks
we can cache padding bias as well to save memory.
max_relative_position: how much distance to consider for relative positions.
dropout_rate: Rate of dropout.
bias_cache: a dict containing attention bias cache.
ema: a boolean to do ema updates.
beta: multiplier for clustering loss.
decay: decay factor for learning centroids.
share_qk: Whether to share queries and keys.
hash_items: Whether to hash items instead of clustering.
is_recomputing: a boolean to represent whether this is a backward pass.
decoding_stats: a dict to be used to return tensors to capture additional
stats in decoding mode.
token_bias: Externally provided attention bias over memory sequence (k / v).
token_bias_wt_trainable: Whether or not token_bias_weight is trainable.
padding_bias: Padding bias for seq2seq models (Shape: [b, s]).
use_tpu: Whether to use TPU (default: False).
Returns:
A Tensor of shape [batch, d1, ..., dn, output_depth] or
[batch, 1, ..., 1, output_depth] if decode_step is set.
Raises:
ValueError: if the key depth or value depth are not divisible by the
number of attention heads.
"""
num_heads = (
local_num_heads + sparsity_cluster_num_heads +
sparsity_cluster_strided_num_heads + sparsity_strided_num_heads)
if total_key_depth % num_heads != 0:
raise ValueError("Key depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_key_depth, num_heads))
if total_value_depth % num_heads != 0:
raise ValueError("Value depth (%d) must be divisible by the number of "
"attention heads (%d)." % (total_value_depth, num_heads))
# Validate that if we share keys and queries for clustering, memory is None
if share_qk:
assert memory_antecedent is None
# Validate decoding input params are sensible.
if decode_step is not None:
assert "q" in cache and "k" in cache and "v" in cache
with tf.variable_scope(
name,
default_name="multihead_attention_nd",
values=[query_antecedent, memory_antecedent]):
if decode_step is not None:
latest_antecedent = get_item_at_decode_step(query_antecedent, decode_step,
query_shape)
latest_q, latest_k, latest_v = compute_qkv(latest_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth)
latest_q = split_heads_nd(latest_q, num_heads)
key_depth_per_head = total_key_depth // num_heads
latest_q *= key_depth_per_head**-0.5
latest_k = split_heads_nd(latest_k, num_heads)
latest_v = split_heads_nd(latest_v, num_heads)
# put latest q, k and v into their correct position in cache.
q = cache["q"]
k = cache["k"]
v = cache["v"]
q = put_item_in_decode_step(q, latest_q, decode_step, query_shape)
if memory_antecedent is None:
k = put_item_in_decode_step(k, latest_k, decode_step, query_shape)
v = put_item_in_decode_step(v, latest_v, decode_step, query_shape)
cache["q"] = q
cache["k"] = k
cache["v"] = v
else:
q, k, v = compute_qkv(query_antecedent, memory_antecedent,
total_key_depth, total_value_depth)
q = split_heads_nd(q, num_heads)
key_depth_per_head = total_key_depth // num_heads
q *= key_depth_per_head**-0.5
k = split_heads_nd(k, num_heads)
v = split_heads_nd(v, num_heads)
if cache is not None:
cache["q"] = q
cache["k"] = k
cache["v"] = v
x = attention_nd(
q,
k,
v,
query_shape=query_shape,
memory_query_shape=memory_query_shape,
memory_flange=memory_flange,
memory_antecedent=memory_antecedent,
local_num_heads=local_num_heads,
local_relative=local_relative,
sparsity_cluster_size=sparsity_cluster_size,
sparsity_cluster_attention_window=sparsity_cluster_attention_window,
sparsity_cluster_num_heads=sparsity_cluster_num_heads,
sparsity_cluster_relative=sparsity_cluster_relative,
sparsity_cluster_strided_num_heads=sparsity_cluster_strided_num_heads,
sparsity_cluster_strided_relative=sparsity_cluster_strided_relative,
sparsity_strided_num_heads=sparsity_strided_num_heads,
sparsity_strided_relative=sparsity_strided_relative,
masked=masked,
losses=losses,
mode=mode,
decode_step=decode_step,
cache_padding_bias=cache_padding_bias,
max_relative_position=max_relative_position,
dropout_rate=dropout_rate,
bias_cache=bias_cache,
ema=ema,
beta=beta,
decay=decay,
share_qk=share_qk,
hash_items=hash_items,
is_recomputing=is_recomputing,
decoding_stats=decoding_stats,
token_bias=token_bias,
token_bias_wt_trainable=token_bias_wt_trainable,
padding_bias=padding_bias,
use_tpu=use_tpu)
x = combine_heads_nd(x)
x = tf.layers.dense(
x, output_depth, use_bias=False, name="output_transform")
return x
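# A hedged usage sketch (not from the original module; the omitted keyword
# arguments and their defaults are assumed): for 2-d self attention over a
# [batch, 32, 32, 512] input one might call
#   y = multihead_attention_nd(
#       query_antecedent=x,            # [batch, 32, 32, 512]
#       memory_antecedent=None,        # self attention
#       total_key_depth=512,
#       total_value_depth=512,
#       output_depth=512,
#       query_shape=(8, 8),
#       memory_flange=(8, 8),
#       local_num_heads=4,
#       sparsity_cluster_num_heads=4,
#       sparsity_cluster_size=16,
#       sparsity_cluster_attention_window=64,
#       masked=True,
#       losses=losses,
#       mode=tf.estimator.ModeKeys.TRAIN)
# The 4 + 4 heads divide total_key_depth and total_value_depth (512 / 8 = 64),
# as required by the divisibility checks at the top of the function.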
def decode_step_to_index(decode_step, query_shape, tensor_shape):
"""Maps decode step to n-d index according to blocked raster scan order.
Args:
decode_step: an integer
query_shape: a tuple (q1, q2, ..., qn) representing the query shape
tensor_shape: a tuple (d1, d2, ..., dn) representing the tensor shape, minus
the batch and depth dimensions.
Returns:
a tuple (i1, i2, ..., in) representing the index of the element at
`decode_step` w.r.t. blocked raster scan order.
"""
assert len(query_shape) == len(tensor_shape)
blocks_per_dimension = [t // q for t, q in zip(tensor_shape, query_shape)]
items_in_block = np.prod(query_shape, dtype=np.int32)
step_block = decode_step // items_in_block
step_within_block = decode_step % items_in_block
block_index = []
for q in blocks_per_dimension[::-1]:
block_index.insert(0, step_block % q)
step_block //= q
within_block_index = []
for q in query_shape[::-1]:
within_block_index.insert(0, step_within_block % q)
step_within_block //= q
final_index = [
w + b * q for w, b, q in zip(within_block_index, block_index, query_shape)
]
return tuple(final_index)
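# Illustrative check of the blocked raster scan mapping above (not part of the
# original module): with 2x2 query blocks over a 4x4 tensor, decode steps 0-3
# raster through the top-left block first, so step 5 is the second item of the
# next block, i.e. position (0, 3) in the unblocked tensor.
def _decode_step_to_index_example():
  assert decode_step_to_index(0, (2, 2), (4, 4)) == (0, 0)
  assert decode_step_to_index(3, (2, 2), (4, 4)) == (1, 1)
  assert decode_step_to_index(5, (2, 2), (4, 4)) == (0, 3)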
def get_item_at_decode_step(x, decode_step, query_shape):
"""Extracts a single item from an n-d tensor at `decode_step` position.
Args:
x: a [batch, d1, d2, ..., dn, depth] tensor
decode_step: an integer
query_shape: a tuple (q1, q2, ..., qn) representing the query shape
Returns:
a [batch, 1, 1, ..., 1, depth] tensor that is a single element from `x` at
`decode_step` w.r.t. blocked raster scan order.
"""
x_shape = shape_list(x)
index = decode_step_to_index(decode_step, query_shape, x_shape[1:-1])
# TPU needs size to be non negative for the case when begins are not
# compile-time constants.
return tf.slice(x, [0] + list(index) + [0],
[x_shape[0]] + [1] * len(index) + [x_shape[-1]])
def put_item_in_decode_step(x, item, decode_step, query_shape):
"""Puts a single item into an n-d tensor at `decode_step` position.
Args:
x: a [batch, heads, d1, d2, ..., dn, depth] tensor
item: a [batch, heads, 1, 1, ..., 1, depth] tensor
decode_step: an integer
query_shape: a tuple (q1, q2, ..., qn) representing the query shape
Returns:
a [batch, heads, d1, d2, ..., dn, depth] tensor with value at `decode_step`
w.r.t. blocked raster scan order is updated to be `item`.
"""
x_shape = shape_list(x)
index = decode_step_to_index(decode_step, query_shape, x_shape[2:-1])
# inplace_update only works on the first dimension, we need to flatten and
# move batch to be the second dimension.
flattened_x = tf.reshape(
x, [-1, x_shape[1], np.prod(x_shape[2:-1]), x_shape[-1]])
# transpose to [positions, batch, heads, depth]
flattened_x = tf.transpose(flattened_x, [2, 0, 1, 3])
flattened_index = 0
factor = 1
for d, idx in zip(x_shape[-2:1:-1], index[::-1]):
flattened_index += idx * factor
factor *= d
item_shape = shape_list(item)
item = tf.reshape(item, item_shape[:2] + item_shape[-1:])
updated_x = inplace_ops.alias_inplace_update(
flattened_x,
flattened_index,
item)
# unflatten the results
updated_x = tf.transpose(updated_x, [1, 2, 0, 3])
return tf.reshape(updated_x, [-1, x_shape[1]] + x_shape[2:])
def compute_qkv(query_antecedent,
memory_antecedent,
total_key_depth,
total_value_depth,
q_filter_width=1,
kv_filter_width=1,
q_padding="VALID",
kv_padding="VALID",
vars_3d_num_heads=0):
"""Computes query, key and value.
Args:
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: a Tensor with shape [batch, length_m, channels]
total_key_depth: an integer
total_value_depth: an integer
q_filter_width: An integer specifying how wide you want the query to be.
kv_filter_width: An integer specifying how wide you want the keys and values
to be.
q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
    vars_3d_num_heads: an optional integer (if we want to use 3d variables).
Returns:
q, k, v : [batch, length, depth] tensors
"""
if memory_antecedent is None:
memory_antecedent = query_antecedent
q = compute_attention_component(
query_antecedent,
total_key_depth,
q_filter_width,
q_padding,
"q",
vars_3d_num_heads=vars_3d_num_heads)
k = compute_attention_component(
memory_antecedent,
total_key_depth,
kv_filter_width,
kv_padding,
"k",
vars_3d_num_heads=vars_3d_num_heads)
v = compute_attention_component(
memory_antecedent,
total_value_depth,
kv_filter_width,
kv_padding,
"v",
vars_3d_num_heads=vars_3d_num_heads)
return q, k, v
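# A minimal shape sketch for compute_qkv (illustrative, not part of the
# original module; assumes the TF1-style graph mode used throughout this file):
# with the default filter widths each component is a single dense projection,
# so q and k take depth `total_key_depth` and v takes depth `total_value_depth`.
def _compute_qkv_shape_example():
  x = tf.zeros([2, 16, 512])
  q, k, v = compute_qkv(x, None, total_key_depth=256, total_value_depth=128)
  assert shape_list(q) == [2, 16, 256]
  assert shape_list(k) == [2, 16, 256]
  assert shape_list(v) == [2, 16, 128]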
def compute_attention_component(antecedent,
total_depth,
filter_width=1,
padding="VALID",
name="c",
vars_3d_num_heads=0):
"""Computes attention component (query, key or value).
Args:
antecedent: a Tensor with shape [batch, length, channels]
total_depth: an integer
filter_width: An integer specifying how wide you want the attention
component to be.
padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
name: a string specifying scope name.
vars_3d_num_heads: an optional integer (if we want to use 3d variables)
Returns:
c : [batch, length, depth] tensor
"""
if vars_3d_num_heads > 0:
assert filter_width == 1
input_depth = antecedent.get_shape().as_list()[-1]
depth_per_head = total_depth // vars_3d_num_heads
initializer_stddev = input_depth**-0.5
if "q" in name:
initializer_stddev *= depth_per_head**-0.5
var = tf.get_variable(
name,
[input_depth, vars_3d_num_heads, total_depth // vars_3d_num_heads],
initializer=tf.random_normal_initializer(stddev=initializer_stddev))
var = tf.cast(var, antecedent.dtype)
var = tf.reshape(var, [input_depth, total_depth])
return tf.tensordot(antecedent, var, axes=1)
if filter_width == 1:
return tf.layers.dense(antecedent, total_depth, use_bias=False, name=name)
else:
return tf.layers.conv1d(
antecedent, total_depth, filter_width, padding=padding, name=name)
def shape_list(x):
"""Return list of dims, statically where possible."""
x = tf.convert_to_tensor(x)
# If unknown rank, return dynamic shape
if x.get_shape().dims is None:
return tf.shape(x)
static = x.get_shape().as_list()
shape = tf.shape(x)
ret = []
for i, dim in enumerate(static):
if dim is None:
dim = shape[i]
ret.append(dim)
return ret
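# Small illustration of shape_list (not part of the original module; the
# placeholder assumes TF1 as used throughout this file): static dimensions
# come back as python ints, unknown ones as scalar shape tensors.
def _shape_list_example():
  x = tf.zeros([2, 3, 5])
  assert shape_list(x) == [2, 3, 5]
  y = tf.placeholder(tf.float32, [None, 7])
  assert shape_list(y)[1] == 7  # index 0 would be a dynamic shape tensor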
def split_heads_nd(x, num_heads):
"""Split the depth dimension (last dimension) into multiple heads.
Args:
x: a [batch, d1, ..., dn, depth] tensor
num_heads: an integer
Returns:
a [batch, num_heads, d1, ..., dn, depth // num_heads]
"""
num_dimensions = len(shape_list(x)) - 2
return tf.transpose(
split_last_dimension(x, num_heads), [0, num_dimensions + 1] +
list(range(1, num_dimensions + 1)) + [num_dimensions + 2])
def combine_heads_nd(x):
"""Inverse of split_heads_nd.
Args:
x: a [batch, num_heads, d1, ..., dn, depth // num_heads] tensor
Returns:
a [batch, d1, ...., dn, depth] tensor
"""
num_dimensions = len(shape_list(x)) - 3
return combine_last_two_dimensions(
tf.transpose(x, [0] + list(range(2, num_dimensions + 2)) +
[1, num_dimensions + 2]))
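# Shape illustration for split_heads_nd / combine_heads_nd (not part of the
# original module): splitting carves the depth into a head axis placed next to
# batch, and combining is its exact inverse.
def _split_combine_heads_nd_example():
  x = tf.zeros([2, 8, 8, 64])
  y = split_heads_nd(x, 4)
  assert shape_list(y) == [2, 4, 8, 8, 16]
  z = combine_heads_nd(y)
  assert shape_list(z) == [2, 8, 8, 64]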
def combine_last_two_dimensions(x):
"""Reshape x so that the last two dimension become one.
Args:
x: a Tensor with shape [..., a, b]
Returns:
a Tensor with shape [..., ab]
"""
x_shape = shape_list(x)
a, b = x_shape[-2:] # pylint: disable=unbalanced-tuple-unpacking
return tf.reshape(x, x_shape[:-2] + [a * b])
def split_last_dimension(x, n):
"""Reshape x so that the last dimension becomes two dimensions.
The first of these two dimensions is n.
Args:
x: a Tensor with shape [..., m]
n: an integer.
Returns:
a Tensor with shape [..., n, m/n]
"""
x_shape = shape_list(x)
m = x_shape[-1]
if isinstance(m, int) and isinstance(n, int):
assert m % n == 0
return tf.reshape(x, x_shape[:-1] + [n, m // n])
def pad_to_multiple_nd(x, block_shape):
"""Making sure x is a multiple of shape.
Args:
x: a [batch, d1, d2, ..., dn, depth] tensor
block_shape: a n-d list of integers representing block shape
Returns:
padded x where each dimension is a multiple of corresponding block length.
"""
shape = shape_list(x)
paddings = [-l % b for l, b in zip(shape[1:-1], block_shape)]
return tf.pad(x, [[0, 0]] + [[0, p] for p in paddings] + [[0, 0]])
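# Numeric illustration (not part of the original module): `-l % b` is exactly
# the amount of right padding needed to round length l up to a multiple of b.
def _pad_to_multiple_nd_example():
  assert -10 % 4 == 2  # 10 -> 12
  assert -8 % 4 == 0   # already a multiple, no padding
  x = tf.zeros([2, 10, 7, 3])
  y = pad_to_multiple_nd(x, (4, 4))
  assert shape_list(y) == [2, 12, 8, 3]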
def break_into_blocks_nd(x, block_shape):
"""Break input tensor into blocks of `block_shape`.
Args:
x: a [batch, d1, d2, ..., dn, depth] tensor
block_shape: a n-d list of integers representing block shape
Returns:
a [batch, d1//block1, ..., dn//blockn, block1 *... * blockn, depth] tensor
"""
x_shape = shape_list(x)
assert all([l % b == 0 for l, b in zip(x_shape[1:], block_shape)])
blocks_per_dimension = [l // b for l, b in zip(x_shape[1:], block_shape)]
# reshape to [-1, d1 // block1, block1, ..., dn // blockn, blockn, depth]
reshape_to = list(
itertools.chain.from_iterable(zip(blocks_per_dimension, block_shape)))
x = tf.reshape(x, [-1] + reshape_to + x_shape[-1:])
# transpose dimensions to bring the n-d blocks in consecutive dimensions.
block_dimensions_index = [2 * (i + 1) for i in range(len(block_shape))]
x = tf.transpose(x, [0] + [i - 1 for i in block_dimensions_index] +
block_dimensions_index + [2 * len(block_shape) + 1])
return tf.reshape(x, [-1] + blocks_per_dimension +
[np.prod(block_shape, dtype=np.int32)] + x_shape[-1:])
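# Shape illustration (not part of the original module): an 8x8 feature map cut
# into 4x4 blocks yields a 2x2 grid of blocks with 16 items each.
def _break_into_blocks_nd_example():
  x = tf.zeros([2, 8, 8, 64])
  y = break_into_blocks_nd(x, (4, 4))
  assert shape_list(y) == [2, 2, 2, 16, 64]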
def break_into_memory_blocks_nd(x, query_shape, memory_flange, masked=False):
"""Break a tensor into memory blocks around query blocks.
This requires memory_flange to be divisible by query_shape in every dimension.
Args:
x: a [batch, d1, d2, ..., dn, depth] tensor
query_shape: a n-d list of integers representing query shape
memory_flange: an n-d list of integers representing memory flange.
masked: a boolean for masked vs unmasked attention.
Returns:
    a [batch, blocks_per_d1, ..., blocks_per_dn, b1 * ... * bn, depth] tensor,
    where bi is the memory block size in dimension i, equal to q[i] + 2 * m[i],
    or q[i] + m[i] for the first dimension when using masked attention.
"""
assert all([m % b == 0 for b, m in zip(query_shape, memory_flange)])
original_x_shape = shape_list(x)
# calculate the total number of query blocks in each dimension
blocks_in_memory_flange = [m // b for b, m in zip(query_shape, memory_flange)]
num_query_blocks = [
l // q for l, q in zip(original_x_shape[1:-1], query_shape)
]
# pad x to have enough items on the corners to form the memory blocks.
if masked:
# Only pad the beginning of first dimension in masked mode.
x = tf.pad(x, [[0, 0], [memory_flange[0], 0]] +
[[p, p] for p in memory_flange[1:]] + [[0, 0]])
else:
x = tf.pad(x, [[0, 0]] + [[p, p] for p in memory_flange] + [[0, 0]])
query_blocks = break_into_blocks_nd(x, query_shape)
# stitch query blocks together to form memory blocks of the desired size.
start_indices_per_dimension = []
for dimension, blocks in enumerate(blocks_in_memory_flange):
if masked and dimension == 0:
# num blocks for first dimension in masked mode is blocks + 1
size = blocks + 1
else:
size = 2 * blocks + 1
start_indices_per_dimension.append(range(size))
slices = []
for start_indices in itertools.product(*start_indices_per_dimension):
start = [0] + list(start_indices) + [0, 0]
size = [-1] + num_query_blocks + [-1, -1]
s = tf.slice(query_blocks, start, size)
slices.append(s)
# concat slices in their query block dimension to form the full memory blocks
return tf.concat(slices, axis=-2)
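# Size illustration (not part of the original module): with 4x4 query blocks
# and a 4x4 memory flange, every unmasked memory block covers a 12x12
# neighbourhood (144 items); in masked mode the first dimension only extends
# backwards, giving 8 x 12 = 96 items per block.
def _break_into_memory_blocks_nd_example():
  x = tf.zeros([2, 8, 8, 64])
  unmasked = break_into_memory_blocks_nd(x, (4, 4), (4, 4), masked=False)
  assert shape_list(unmasked)[-2] == 144
  causal = break_into_memory_blocks_nd(x, (4, 4), (4, 4), masked=True)
  assert shape_list(causal)[-2] == 96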
def select_block_for_decode_step(blocked_x, decode_step, query_shape):
"""Selects one block from `x` that contains position `decode_step`.
NOTE: This method only works for blocked inputs. It selects one block around
`decode_step` position in blocked raster scan order.
Args:
blocked_x: a [batch, blocks_per_d1, ..., blocks_per_dn, b1 * ...* bn, depth]
tensor
decode_step: an integer
query_shape: a tuple (q1, q2, ..., qn) representing query shape
Returns:
a [batch, [1] * n, b1 * ... * bn, depth] tensor
"""
blocked_x_shape = shape_list(blocked_x)
# calculate the shape of the normal x
x_shape = [b * q for b, q in zip(blocked_x_shape[1:-2], query_shape)]
# Get the position of `decode_step` element in the unblocked x.
index = decode_step_to_index(decode_step, query_shape, x_shape)
# Convert it to the blocked positions.
blocked_index = [i // q for i, q in zip(index, query_shape)]
# TPU needs size to be non negative for the case when begin is not
# compile-time constants.
return tf.slice(blocked_x, [0] + blocked_index + [0, 0],
[blocked_x_shape[0]] + [1] * len(blocked_index) +
blocked_x_shape[-2:])
def flatten_blocks_nd(x):
"""Flattens blocks of the input tensor.
Args:
x: a [batch, b1, ..., bn, items_in_block, depth] tensor
Returns:
    a flattened tensor of shape [batch, b1 * ... * bn, items_in_block, depth]
"""
x_shape = shape_list(x)
num_blocks = np.prod(x_shape[1:-2], dtype=np.int32)
return tf.reshape(x, [-1, num_blocks] + x_shape[-2:])
def unflatten_blocks_nd(x, blocks_per_dimension):
"""Converts a flattened tensor into a normal blocked tensor.
Args:
x: a [batch, d1 * ... dn, items_in_block, depth] tensor
blocks_per_dimension: a n-d list of integers for number of blocks in each
dimension.
Returns:
a [batch, d1, d2, ..., dn, items_in_block, depth] tensor
"""
x_shape = shape_list(x)
assert x_shape[1] == np.prod(blocks_per_dimension, dtype=np.int32)
return tf.reshape(x, [-1] + list(blocks_per_dimension) + x_shape[-2:])
def causal_attention_bias_nd(query_shape, # pylint: disable=dangerous-default-value
memory_flange,
decode_step=None,
bias_cache={}):
"""Creates causal attention bias for local nd attention.
This assumes memory_flange is divisible by query_shape in every dimension.
Args:
query_shape: a n-d list of integers representing query shape
memory_flange: a n-d list of integers representing memory flange
decode_step: an integer
bias_cache: attention bias cache
Returns:
a [1, 1, query_items, memory_items] tensor for masked attention bias or
a [1, 1, 1, memory_items] tensor if decode_step is not None.
"""
cache_key = "causal_attention_bias_{}_{}".format(query_shape, memory_flange)
if cache_key in bias_cache and decode_step is None:
return bias_cache[cache_key]
assert all([m % q == 0 for q, m in zip(query_shape, memory_flange)])
blocks_per_memory_flange = [
m // q for q, m in zip(query_shape, memory_flange)
]
# previous blocks will be half the number of all blocks if we select blocks
# to the left and right of center block in every dimension.
prev_blocks = np.prod([2 * b + 1 for b in blocks_per_memory_flange],
dtype=np.int32) // 2
all_blocks = np.prod(
[blocks_per_memory_flange[0] + 1] +
[2 * b + 1 for b in blocks_per_memory_flange[1:]],
dtype=np.int32)
future_blocks = all_blocks - prev_blocks - 1
# add unmasked biases for all prev blocks and a lower triangle for the center
# block and all masked for future blocks.
items_in_block = np.prod(query_shape, dtype=np.int32)
items_in_query = items_in_block if decode_step is None else 1
prev_blocks_attn = tf.zeros(
[1, 1, items_in_query, prev_blocks * items_in_block])
# add mask for the center block
if decode_step is None:
center_block_attn = attention_bias_lower_triangle(items_in_block,
bias_cache)
else:
step_in_block = decode_step % items_in_block
cond = tf.reshape(
tf.less_equal(tf.range(items_in_block, dtype=tf.int32), step_in_block),
[1, 1, items_in_query, items_in_block])
center_block_attn = tf.where(
cond, tf.zeros([1, 1, items_in_query, items_in_block]),
-1e9 * tf.ones([1, 1, items_in_query, items_in_block]))
# add mask for all future blocks
future_blocks_attn = -1e9 * tf.ones(
[1, 1, items_in_query, future_blocks * items_in_block])
bias = tf.concat([prev_blocks_attn, center_block_attn, future_blocks_attn],
axis=3)
if decode_step is None:
bias_cache[cache_key] = bias
return bias
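# Numeric illustration (not part of the original module): for a 1-d query block
# of 4 items with a memory flange of 4, each block sees one previous block plus
# itself, so the bias is [1, 1, 4, 8]: the first 4 memory positions are fully
# visible and the last 4 form a lower triangle.
def _causal_attention_bias_nd_example():
  bias = causal_attention_bias_nd((4,), (4,), bias_cache={})
  assert shape_list(bias) == [1, 1, 4, 8]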
def put_back_blocks_nd(x, block_shape):
"""Restructure input tensor from blocks to normal ordering.
Args:
x: a [batch, b1, ..., bn, items_in_block, depth] tensor
block_shape: a n-d list of integers representing block shape.
Returns:
a [batch, d1, ..., dn, depth] where blocks are put back to form the
original tensor.
"""
x_shape = shape_list(x)
if isinstance(x_shape[-2], int):
assert x_shape[-2] == np.prod(block_shape)
x = tf.reshape(x, x_shape[:-2] + list(block_shape) + x_shape[-1:])
block_dimension_index = [i + 1 for i in range(len(block_shape))]
block_shape_index = [b + len(block_shape) for b in block_dimension_index]
interleaved_dimensions = list(
itertools.chain.from_iterable(
zip(block_dimension_index, block_shape_index)))
x = tf.transpose(x, [0] + interleaved_dimensions + [2 * len(block_shape) + 1])
x_shape = shape_list(x)
x = tf.reshape(x, [-1] + [
x_shape[2 * i + 1] * x_shape[2 * i + 2] for i in range(len(block_shape))
] + x_shape[-1:])
return x
def embedding_to_padding(emb):
"""Calculates the padding mask based on which embeddings are all zero.
We have hacked symbol_modality to return all-zero embeddings for padding.
Args:
emb: a Tensor with shape [..., depth].
Returns:
a float Tensor with shape [...]. Each element is 1 if its corresponding
embedding vector is all zero, and is 0 otherwise.
"""
emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1)
return tf.to_float(tf.equal(emb_sum, 0.))
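# Small illustration (not part of the original module): positions whose
# embedding vector is entirely zero are flagged as padding with a 1.0.
def _embedding_to_padding_example():
  emb = tf.constant([[[1.0, 2.0], [0.0, 0.0]]])  # [1, 2, depth]
  pad = embedding_to_padding(emb)  # evaluates to [[0., 1.]]
  assert shape_list(pad) == [1, 2]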
def attention_nd(q, # pylint: disable=dangerous-default-value
k,
v,
query_shape,
memory_query_shape,
memory_flange,
local_num_heads,
local_relative=False,
memory_antecedent=None,
sparsity_cluster_size=None,
sparsity_cluster_attention_window=None,
sparsity_cluster_num_heads=0,
sparsity_cluster_relative=False,
sparsity_cluster_strided_num_heads=0,
sparsity_cluster_strided_relative=False,
sparsity_strided_num_heads=0,
sparsity_strided_relative=False,
masked=True,
losses=None,
mode=tf.estimator.ModeKeys.EVAL,
decode_step=None,
name=None,
max_relative_position=None,
cache_padding_bias=True,
dropout_rate=0.,
bias_cache={},
ema=False,
beta=1e-4,
decay=0.99,
share_qk=False,
hash_items=False,
is_recomputing=False,
decoding_stats=None,
token_bias=None,
token_bias_wt_trainable=False,
padding_bias=None,
use_tpu=False):
"""Attention nd.
Args:
q: a [batch, heads, d1, d2, ..., dn, depth_k] tensor.
k: a [batch, heads, d1, d2, ..., dn, depth_k] tensor.
v: a [batch, heads, d1, d2, ..., dn, depth_v] tensor.
query_shape: a tuple (q1, q2, ..., qn) indicating the shape of query blocks.
memory_query_shape: query shape for memory antecedent (enc-dec).
    memory_flange: a tuple (m1, m2, ..., mn) indicating the number of extra
      positions in the attention memory. memory_shape=[q1 + m1, q2 + 2 * m2,
      ..., qn + 2 * mn]
local_num_heads: How many heads to use for local attention
local_relative: whether to use relative positions for local heads.
memory_antecedent: Memory antecedent for attention.
sparsity_cluster_size: Number of clusters for routing attention.
sparsity_cluster_attention_window: how many positions to attend to within a
cluster.
sparsity_cluster_num_heads: how many heads to use for attention within
cluster.
sparsity_cluster_relative: whether to use relative positions for clustering.
sparsity_cluster_strided_num_heads: how many heads to use for attending to
items outside cluster.
    sparsity_cluster_strided_relative: whether to use relative positions for
      cluster strided attention.
sparsity_strided_num_heads: how many heads to use for strided attention.
sparsity_strided_relative: whether to use relative for strided heads.
masked: a boolean for masked/unmasked attention.
losses: a list of extra losses.
mode: a tf.estimator.ModeKeys.
decode_step: an integer in fast decoding mode.
name: an optional string
max_relative_position: the max distance to consider for relative positions.
cache_padding_bias: boolean to specify whether padding bias should be cached
and reused. This should only be set for problems that do not have variable
length sequences like images and videos.
dropout_rate: Rate of dropout.
bias_cache: attention bias cache.
ema: a boolean to do ema updates.
beta: multiplier for clustering loss.
decay: decay factor for learning centroids.
share_qk: Whether to share queries and keys.
hash_items: Whether to hash items instead of clustering.
is_recomputing: a boolean to represent whether this is a backward pass.
decoding_stats: a dict to be used to return tensors to capture additional
stats in decoding mode.
token_bias: Externally provided attention bias over memory sequence (k / v).
token_bias_wt_trainable: Whether or not token_bias_weight is trainable.
padding_bias: Padding bias for seq2seq models (Shape: [b, s]).
use_tpu: Whether to use TPU (default: False).
Returns:
a [batch, head, d1, d2, ..., dn, depth_v] tensor or
[batch, head, 1, 1, ..., 1, depth_v] if decode_step is not None.
"""
assert sparsity_cluster_strided_num_heads <= sparsity_cluster_num_heads
assert mode is not None
assert all([m % b == 0 for m, b in zip(memory_flange, query_shape)])
num_heads = (
local_num_heads + sparsity_cluster_num_heads +
sparsity_strided_num_heads + sparsity_cluster_strided_num_heads)
with tf.variable_scope(name, default_name="attention_nd", values=[q, k, v]):
if decode_step is not None:
q = tf.reshape(q, [-1] + shape_list(q)[2:])
latest_q = get_item_at_decode_step(q, decode_step, query_shape)
q = tf.reshape(q, [-1, num_heads] + shape_list(q)[1:])
latest_q = tf.reshape(latest_q,
[-1, num_heads] + shape_list(latest_q)[1:])
q_shape = shape_list(latest_q)
else:
q_shape = shape_list(q)
k_shape = shape_list(k)
v_shape = shape_list(v)
remainder_num_heads = num_heads
# split heads for different kinds of attention.
outputs = []
if sparsity_cluster_num_heads:
remainder_num_heads -= sparsity_cluster_num_heads
q_cluster, q = tf.split(
q, [sparsity_cluster_num_heads, remainder_num_heads], axis=1)
k_cluster, k = tf.split(
k, [sparsity_cluster_num_heads, remainder_num_heads], axis=1)
v_cluster, v = tf.split(
v, [sparsity_cluster_num_heads, remainder_num_heads], axis=1)
output_cluster, cluster_loss, cluster_attn_weights = (
clustered_local_attention_helper(
q=q_cluster,
k=k_cluster,
v=v_cluster,
query_shape=query_shape,
memory_antecedent=memory_antecedent,
attention_window=sparsity_cluster_attention_window,
sparsity_cluster_size=sparsity_cluster_size,
masked=masked,
decode_step=decode_step,
name="cluster_attention",
mode=mode,
relative_attention=sparsity_cluster_relative,
cache_padding_bias=cache_padding_bias,
max_relative_position=max_relative_position,
dropout_rate=dropout_rate,
bias_cache=bias_cache,
ema=ema,
beta=beta,
decay=decay,
share_qk=share_qk,
hash_items=hash_items,
is_recomputing=is_recomputing,
token_bias=token_bias,
token_bias_wt_trainable=token_bias_wt_trainable,
padding_bias=padding_bias,
use_tpu=use_tpu))
outputs.append(output_cluster)
if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
losses.append(cluster_loss)
if sparsity_cluster_strided_num_heads:
remainder_num_heads -= sparsity_cluster_strided_num_heads
q_cluster, q = tf.split(
q, [sparsity_cluster_strided_num_heads, remainder_num_heads], axis=1)
k_cluster, k = tf.split(
k, [sparsity_cluster_strided_num_heads, remainder_num_heads], axis=1)
v_cluster, v = tf.split(
v, [sparsity_cluster_strided_num_heads, remainder_num_heads], axis=1)
output_cluster, cluster_loss, cluster_strided_attn_weights = (
clustered_local_attention_helper(
q=q_cluster,
k=k_cluster,
v=v_cluster,
query_shape=query_shape,
attention_window=sparsity_cluster_attention_window,
sparsity_cluster_size=sparsity_cluster_size,
strided_attention=True,
masked=masked,
decode_step=decode_step,
name="cluster_strided_attention",
mode=mode,
relative_attention=sparsity_cluster_strided_relative,
max_relative_position=max_relative_position,
cache_padding_bias=cache_padding_bias,
dropout_rate=dropout_rate,
bias_cache=bias_cache,
ema=ema,
is_recomputing=is_recomputing,
token_bias=token_bias,
token_bias_wt_trainable=token_bias_wt_trainable,
padding_bias=padding_bias,
use_tpu=use_tpu))
outputs.append(output_cluster)
if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
losses.append(cluster_loss)
# Rest of attention types work on latest_q instead of the whole q.
if decode_step is not None:
start_head = (
sparsity_cluster_strided_num_heads + sparsity_cluster_num_heads)
if start_head:
_, q = tf.split(latest_q, [start_head, remainder_num_heads], axis=1)
else:
q = latest_q
if sparsity_strided_num_heads:
remainder_num_heads -= sparsity_strided_num_heads
q_strided, q = tf.split(
q, [sparsity_strided_num_heads, remainder_num_heads], axis=1)
k_strided, k = tf.split(
k, [sparsity_strided_num_heads, remainder_num_heads], axis=1)
v_strided, v = tf.split(
v, [sparsity_strided_num_heads, remainder_num_heads], axis=1)
# TODO(kalpeshk): Implement token_bias here?
output_strided = strided_local_attention_helper(
q=q_strided,
k=k_strided,
v=v_strided,
query_shape=query_shape,
masked=masked,
decode_step=decode_step,
name="strided_attention",
relative_attention=sparsity_strided_relative,
max_relative_position=max_relative_position,
dropout_rate=dropout_rate,
bias_cache=bias_cache)
outputs.append(output_strided)
if local_num_heads:
# move heads to batch dimension. This is needed to reduce number of
# dimensions as much as possible, since some ops support only up to 7
# dimensions.
q = tf.reshape(q, [-1] + q_shape[2:])
k = tf.reshape(k, [-1] + k_shape[2:])
v = tf.reshape(v, [-1] + v_shape[2:])
# Set memory query shape if using local attn for enc-dec
mem_query_shape = query_shape
if memory_antecedent is not None:
mem_query_shape = memory_query_shape
# Pad query, key, value to ensure multiple of corresponding lengths.
if decode_step is None:
# don't pad query in fast decoding mode. We only need to calculate self
# attention for one position.
q = pad_to_multiple_nd(q, query_shape)
k = pad_to_multiple_nd(k, mem_query_shape)
v = pad_to_multiple_nd(v, mem_query_shape)
# extract query and memory blocks
if decode_step is None:
q = break_into_blocks_nd(q, query_shape)
else:
# in fast decoding, q has 1 block with 1 item in it
# q shape will be [batch] + [1] * n + [1, depth] which is equivalent of
# [batch, b1, b2, ..., bn, items_in_block, depth] where there is 1 block
# and 1 item in that block
q = tf.reshape(q, [-1] + [1] * (len(q_shape) - 3) + [q_shape[-1]])
k = break_into_memory_blocks_nd(
k, mem_query_shape, memory_flange, masked=masked)
v = break_into_memory_blocks_nd(
v, mem_query_shape, memory_flange, masked=masked)
blocks_per_dim = shape_list(q)[1:-2]
# extract just one block of k and v in fast decoding mode.
if decode_step is not None:
k = select_block_for_decode_step(k, decode_step, mem_query_shape)
v = select_block_for_decode_step(v, decode_step, mem_query_shape)
# flatten q, k and v to [batch, num_blocks, items_in_block, depth]
q = flatten_blocks_nd(q)
k = flatten_blocks_nd(k)
v = flatten_blocks_nd(v)
# make attention bias for causal or unmasked attention.
attn_bias = local_attention_bias_nd(
query_shape=mem_query_shape,
memory_flange=memory_flange,
blocked_v=v,
masked=masked,
cache_padding_bias=cache_padding_bias,
decode_step=decode_step,
bias_cache=bias_cache)
def break_bias_into_blocks(bias):
# [b, s]
bias = tf.expand_dims(bias, axis=1)
bias = tf.tile(bias, [1, local_num_heads, 1])
bias = tf.expand_dims(bias, axis=-1)
bias = tf.reshape(bias, [-1] + shape_list(bias)[2:])
bias = pad_to_multiple_nd(bias, mem_query_shape)
bias = break_into_memory_blocks_nd(bias,
mem_query_shape,
memory_flange,
masked=masked)
if decode_step is not None:
bias = select_block_for_decode_step(bias, decode_step,
mem_query_shape)
bias = flatten_blocks_nd(bias)
bias = tf.squeeze(bias, axis=-1)
return bias
if padding_bias is not None:
padding_bias = break_bias_into_blocks(padding_bias)
padding_bias = tf.expand_dims(padding_bias * -1e9, axis=-2)
attn_bias = tf.minimum(attn_bias, padding_bias)
if token_bias is not None:
token_bias = break_bias_into_blocks(token_bias)
token_bias = tf.expand_dims(token_bias, axis=-2)
token_bias_weight = tf.get_variable(name="token_bias_weight",
initializer=1.0,
trainable=token_bias_wt_trainable)
attn_bias += token_bias_weight * token_bias
# Calculate dot product attention
output, local_attn_weights = dot_product_attention(
q,
k,
v,
attn_bias,
dropout_rate=dropout_rate,
name=name or "local_nd",
relative_attention=local_relative,
max_relative_position=max_relative_position,
decode_step=decode_step,
query_shape=query_shape)
output = unflatten_blocks_nd(output, blocks_per_dim)
output = tf.reshape(output, [q_shape[0], local_num_heads] +
shape_list(output)[1:])
outputs.append(output)
# Concat all the different types of attention results together
output = tf.concat(outputs, axis=1)
# Allow heads to talk to each other
output_shape = shape_list(output)
output = tf.reshape(output,
[output_shape[0], num_heads, -1, output_shape[-1]])
output = tf.layers.dense(output, output_shape[-1], use_bias=False)
output = tf.reshape(output, output_shape)
scope_name = tf.get_variable_scope().name
# restructure the output from blocks ordering to the original ordering
if decode_step is None:
# In fast decoding, output only contains one element, this is not needed.
output = tf.reshape(output, [-1] + shape_list(output)[2:])
output = put_back_blocks_nd(output, query_shape)
# bring back the heads dimension
output = tf.reshape(output, q_shape[:2] + shape_list(output)[1:])
# No padding is introduced in fast decoding, no need to do this.
output_shape = shape_list(output)
output = tf.slice(output, [0] * len(output_shape),
[-1, -1] + q_shape[2:-1] + [-1])
if decoding_stats is not None:
if local_num_heads:
decoding_stats["%s/local_local_jsd" % scope_name] = tf.constant(0.0)
if local_num_heads and sparsity_cluster_num_heads:
decoding_stats["%s/local_cluster_jsd" % scope_name] = tf.constant(0.0)
if local_num_heads and sparsity_cluster_strided_num_heads:
decoding_stats["%s/local_strided_jsd" % scope_name] = tf.constant(0.0)
if sparsity_cluster_num_heads:
decoding_stats["%s/cluster_cluster_jsd" %
scope_name] = tf.constant(0.0)
if sparsity_cluster_num_heads and sparsity_cluster_strided_num_heads:
decoding_stats["%s/cluster_strided_jsd" %
scope_name] = tf.constant(0.0)
if sparsity_cluster_strided_num_heads:
decoding_stats["%s/strided_strided_jsd" %
scope_name] = tf.constant(0.0)
if decode_step is not None and decoding_stats is not None:
seq_length = np.prod(k_shape[2:-1])
if local_num_heads:
local_attn_weights = tf.reshape(local_attn_weights,
[q_shape[0], local_num_heads, -1])
# scatter the attention weights into [batch, heads, seq_length]
block_len = shape_list(local_attn_weights)[-1]
batch_idx = tf.reshape(tf.range(q_shape[0]), [q_shape[0], 1, 1, 1])
batch_idx = tf.tile(batch_idx, [1, local_num_heads, block_len, 1])
head_idx = tf.reshape(
tf.range(local_num_heads), [1, local_num_heads, 1, 1])
head_idx = tf.tile(head_idx, [q_shape[0], 1, block_len, 1])
block_num = decode_step // seq_length
pos_idx = tf.range(block_len) + (block_num * block_len)
pos_idx = tf.reshape(pos_idx, [1, 1, block_len, 1])
pos_idx = tf.tile(pos_idx, [q_shape[0], local_num_heads, 1, 1])
idx = tf.concat([batch_idx, head_idx, pos_idx], axis=-1)
local_attn_weights = tf.scatter_nd(
idx, local_attn_weights, [q_shape[0], local_num_heads, seq_length])
if sparsity_cluster_num_heads:
cluster_attn_weights = tf.reshape(
cluster_attn_weights,
[q_shape[0], sparsity_cluster_num_heads, seq_length])
if sparsity_cluster_strided_num_heads:
cluster_strided_attn_weights = tf.reshape(
cluster_strided_attn_weights,
[q_shape[0], sparsity_cluster_strided_num_heads, seq_length])
if local_num_heads:
decoding_stats["%s/local_local_jsd" %
scope_name] += jensen_shannon_divergence(
local_attn_weights[:, 0], local_attn_weights[:, 1])
if local_num_heads and sparsity_cluster_num_heads:
decoding_stats["%s/local_cluster_jsd" % scope_name] += (
jensen_shannon_divergence(local_attn_weights[:, 0],
cluster_attn_weights[:, 0]))
if local_num_heads and sparsity_cluster_strided_num_heads:
decoding_stats["%s/local_strided_jsd" % scope_name] += (
jensen_shannon_divergence(local_attn_weights[:, 0],
cluster_strided_attn_weights[:, 0]))
if sparsity_cluster_num_heads:
decoding_stats["%s/cluster_cluster_jsd" % scope_name] += (
jensen_shannon_divergence(cluster_attn_weights[:, 0],
cluster_attn_weights[:, 1]))
if sparsity_cluster_num_heads and sparsity_cluster_strided_num_heads:
decoding_stats["%s/cluster_strided_jsd" % scope_name] += (
jensen_shannon_divergence(cluster_attn_weights[:, 0],
cluster_strided_attn_weights[:, 0]))
if sparsity_cluster_strided_num_heads:
decoding_stats["%s/strided_strided_jsd" % scope_name] += (
jensen_shannon_divergence(cluster_strided_attn_weights[:, 0],
cluster_strided_attn_weights[:, 1]))
return output
def jensen_shannon_divergence(a, b):
"""Calculates JSD.
Args:
a: a [batch, seq_length] tensor representing a density function.
    b: a [batch, seq_length] tensor representing a density function.
  Returns:
    the average JSD over the batch as a scalar tensor.
"""
a /= tf.reduce_sum(a, axis=-1, keepdims=True)
b /= tf.reduce_sum(b, axis=-1, keepdims=True)
m = (a + b) / 2
jsd = kl_divergence(a, m) / 2 + kl_divergence(b, m) / 2
return tf.reduce_mean(jsd)
def kl_divergence(a, b):
eps = 1e-5
return tf.reduce_sum(-a * tf.log(b / (a + eps) + eps), axis=-1)
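# Numeric illustration (not part of the original module): identical
# distributions give a JSD of roughly 0, while disjoint one-hot distributions
# give roughly log(2) ~= 0.693 nats (up to the epsilon used above).
def _jensen_shannon_divergence_example():
  a = tf.constant([[1.0, 0.0]])
  b = tf.constant([[0.0, 1.0]])
  same = jensen_shannon_divergence(a, a)      # ~0
  disjoint = jensen_shannon_divergence(a, b)  # ~log(2)
  assert shape_list(same) == [] and shape_list(disjoint) == []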
def local_attention_bias_nd(query_shape, # pylint: disable=dangerous-default-value
memory_flange,
blocked_v,
masked=False,
cache_padding_bias=True,
decode_step=None,
bias_cache={}):
"""create an attention bias for local n-d attention.
This function creates/picks from cache an attention bias for local n-d
attention type.
Args:
query_shape: a (q1, ..., qn) tuple
memory_flange: a (m1, ..., mn) tuple
blocked_v: a [batch, num_blocks, items_in_blocks, depth] tensor for v.
masked: Whether to create masked/unmasked bias.
    cache_padding_bias: If sequences are not variable length (e.g. images and
      videos) and the only source of padding is rounding up to be evenly
      divisible by blocks, we can cache the padding bias as well to save
      memory.
decode_step: the decode step in fast decoding mode or None.
bias_cache: attention bias cache.
Returns:
the local attention bias tensor of shape
[batch * heads, num_blocks, items_in_query, items_in_memory] or
    [1, num_blocks, items_in_query, items_in_memory] if cache padding bias is
true.
"""
cache_key = "local_attention_bias_{}_{}_{}_{}".format(query_shape,
memory_flange, masked,
cache_padding_bias)
# do not use cache attention bias in fast decoding mode since each mask is
# slightly different depending on decode step.
if cache_key in bias_cache and decode_step is None:
return bias_cache[cache_key]
if cache_padding_bias:
padding_attn_bias = tf.expand_dims(
embedding_to_padding(blocked_v[:1, :, :, :]) * -1e9, axis=-2)
else:
padding_attn_bias = tf.expand_dims(
embedding_to_padding(blocked_v) * -1e9, axis=-2)
if masked:
causal_attn_bias = causal_attention_bias_nd(
query_shape,
memory_flange,
decode_step=decode_step,
bias_cache=bias_cache)
causal_attn_bias, padding_attn_bias = maybe_tile(causal_attn_bias,
padding_attn_bias)
attn_bias = tf.minimum(causal_attn_bias, padding_attn_bias)
else:
attn_bias = padding_attn_bias
if cache_padding_bias and decode_step is None:
bias_cache[cache_key] = attn_bias
return attn_bias
def maybe_tile(x, y):
"""Tile two tensors so they have the same shape except for batch and depth."""
x_shape = shape_list(x)
y_shape = shape_list(y)
assert len(x_shape) == len(y_shape)
x_tile = []
y_tile = []
for x_dim, y_dim in zip(x_shape[1:-1], y_shape[1:-1]):
assert x_dim % y_dim == 0 or y_dim % x_dim == 0
if x_dim == y_dim:
x_tile.append(1)
y_tile.append(1)
elif x_dim > y_dim:
x_tile.append(1)
y_tile.append(x_dim // y_dim)
else:
x_tile.append(y_dim // x_dim)
y_tile.append(1)
return tf.tile(x, [1] + x_tile + [1]), tf.tile(y, [1] + y_tile + [1])
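# Shape illustration (not part of the original module): the dimensions between
# batch and depth are broadcast by tiling whichever tensor is smaller in each
# dimension.
def _maybe_tile_example():
  x = tf.zeros([1, 1, 16, 8])
  y = tf.zeros([1, 4, 1, 8])
  x_t, y_t = maybe_tile(x, y)
  assert shape_list(x_t) == [1, 4, 16, 8]
  assert shape_list(y_t) == [1, 4, 16, 8]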
def strided_local_attention_helper(q, # pylint: disable=dangerous-default-value
k,
v,
query_shape,
masked,
decode_step,
name,
relative_attention=False,
max_relative_position=None,
dropout_rate=0.,
bias_cache={}):
"""Strided local attention helper.
Args:
q: a [batch, heads, d1, ..., dn, depth_k] tensor or [batch, heads, 1, ...,
1, depth_k] tensor in fast decoding mode.
k: a [batch, heads, d1, ..., dn, depth_k] tensor.
v: a [batch, heads, d1, ..., dn, depth_v] tensor.
query_shape: a tuple of (q1, ..., qn) representing the query shape.
    masked: a boolean for masked/unmasked attention.
decode_step: decode step in fast decoding mode.
name: variable scope name.
relative_attention: whether to do relative attention.
max_relative_position: the max distance to consider for relative positions.
dropout_rate: Rate of dropout.
bias_cache: attention bias cache.
Returns:
a [batch, heads, d1//q1, ..., dn//qn, items_in_block, depth_v] tensor where
    each position attends to previous positions that are in the same relative
position within their own query blocks. or [batch, heads, 1, ..., 1,
depth_v] for fast decoding.
"""
# This computation only applies to self attention, so assert q, k, v and
# antecedent have the same dimensions.
if decode_step is None:
q.get_shape().assert_is_compatible_with(k.get_shape())
q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1])
else:
k.get_shape().assert_is_compatible_with(v.get_shape())
with tf.variable_scope(
name, default_name="strided_attention_nd", values=[q, k, v]):
q_shape = shape_list(q)
# rearrange q, k and v to blocked order and flatten them so
# shape becomes [batch * heads, blocks, items_in_block, depth].
k = tf.reshape(k, [-1] + shape_list(k)[2:])
k = pad_to_multiple_nd(k, query_shape)
k = break_into_blocks_nd(k, query_shape)
k = flatten_blocks_nd(k)
v = tf.reshape(v, [-1] + shape_list(v)[2:])
v = pad_to_multiple_nd(v, query_shape)
v = break_into_blocks_nd(v, query_shape)
v = flatten_blocks_nd(v)
# in fast decoding mode q will be [batch * heads, 1, depth]
if decode_step is not None:
q = tf.reshape(q, [-1, 1, q_shape[-1]])
else:
q = tf.reshape(q, [-1] + shape_list(q)[2:])
q = pad_to_multiple_nd(q, query_shape)
q = break_into_blocks_nd(q, query_shape)
blocked_q_shape = shape_list(q)
q = flatten_blocks_nd(q)
# select the correct strides from k and v.
if decode_step is not None:
items_in_block = shape_list(k)[2]
offset = decode_step % items_in_block
block_num = decode_step // items_in_block
# TPU needs size to be non negative for the case when begin is not
# compile-time constants.
k_shape = shape_list(k)
k = tf.slice(k, [0, 0, offset, 0], k_shape[:2] + [1] + k_shape[-1:])
v = tf.slice(v, [0, 0, offset, 0], k_shape[:2] + [1] + k_shape[-1:])
k = tf.reshape(k, [shape_list(k)[0]] + [-1] + [shape_list(k)[-1]])
v = tf.reshape(v, [shape_list(v)[0]] + [-1] + [shape_list(v)[-1]])
cond = tf.less_equal(tf.range(shape_list(k)[1]), block_num)
causal_attn_bias = tf.where(cond, tf.zeros_like(cond, dtype=tf.float32),
tf.ones_like(cond, dtype=tf.float32) * -1e9)
causal_attn_bias = tf.reshape(causal_attn_bias, [1, -1])
padding_attn_bias = embedding_to_padding(v[0, :, :]) * -1e9
if masked:
attn_bias = tf.minimum(causal_attn_bias, padding_attn_bias)
else:
attn_bias = padding_attn_bias
else:
q = tf.transpose(q, [0, 2, 1, 3])
k = tf.transpose(k, [0, 2, 1, 3])
v = tf.transpose(v, [0, 2, 1, 3])
causal_attn_bias = attention_bias_lower_triangle(
shape_list(q)[2], bias_cache=bias_cache)
padding_attn_bias = tf.expand_dims(
embedding_to_padding(v[:1, :1, :, :]) * -1e9, axis=-1)
causal_attn_bias, padding_attn_bias = maybe_tile(causal_attn_bias,
padding_attn_bias)
if masked:
attn_bias = tf.minimum(causal_attn_bias, padding_attn_bias)
else:
attn_bias = padding_attn_bias
# [batch * heads, num_items_in_block, num_blocks, depth] or
# [batch * heads, 1, depth] in fast decoding.
output, _ = dot_product_attention(
q,
k,
v,
attn_bias,
dropout_rate=dropout_rate,
name=name or "strided_dot_product",
relative_attention=relative_attention,
max_relative_position=max_relative_position,
decode_step=decode_step,
query_shape=query_shape)
if decode_step is None:
output = tf.transpose(output, [0, 2, 1, 3])
output = tf.reshape(output, [-1, q_shape[1]] + blocked_q_shape[1:])
else:
output = tf.reshape(output, q_shape)
return output
def clustered_local_attention_helper(q, # pylint: disable=dangerous-default-value
k,
v,
query_shape,
attention_window,
sparsity_cluster_size,
masked,
decode_step,
name,
mode,
memory_antecedent=None,
strided_attention=False,
relative_attention=False,
max_relative_position=None,
cache_padding_bias=False,
dropout_rate=0.,
bias_cache={},
ema=False,
beta=1e-4,
decay=0.99,
share_qk=False,
hash_items=False,
is_recomputing=False,
skip_summaries=True,
token_bias=None,
token_bias_wt_trainable=False,
padding_bias=None,
use_tpu=False):
"""clustered local attention helper.
Args:
q: a [batch, heads, d1, ..., dn, depth_k] tensor.
k: a [batch, heads, d1, ..., dn, depth_k] tensor.
v: a [batch, heads, d1, ..., dn, depth_v] tensor.
query_shape: a tuple of (q1, ..., qn) representing query shape.
attention_window: how many positions to attend to within a cluster.
sparsity_cluster_size: Number of clusters for routing attention.
masked: a boolean for masked/unmasked attention.
decode_step: decode step in fast decoding mode.
name: variable scope name.
mode: tf.estimator.ModeKeys.
memory_antecedent: Memory antecedent for self attention.
strided_attention: Whether to do strided attention in the cluster space.
relative_attention: Whether to do relative attention.
max_relative_position: the max distance to consider for relative positions.
    cache_padding_bias: If sequences are not variable length (e.g. images and
      videos) and the only source of padding is rounding up to be evenly
      divisible by blocks, we can cache the padding bias as well to save
      memory.
dropout_rate: Rate of dropout.
bias_cache: attention bias cache.
ema: a boolean to do ema updates.
beta: multiplier for clustering loss.
decay: decay factor for learning centroids.
share_qk: Whether to share queries and keys.
hash_items: If True then use Locality Sensitive Hashing.
is_recomputing: a boolean to represent whether this is a backward pass.
skip_summaries: a boolean to represent whether to skip `tf.summary` ops.
token_bias: Externally provided attention bias over memory sequence (k / v).
token_bias_wt_trainable: Whether or not token_bias_weight is trainable.
padding_bias: Padding bias for seq2seq models (Shape: [b, s]).
use_tpu: Whether to use TPU (default: False).
Returns:
output: a [batch, heads, d1//q1, ..., dn//qn, items_in_block, depth_v]
tensor with clustered attention. or [batch, heads, 1, ..., 1, depth_v]
for fast decoding.
loss: a scalar tensor of clustering loss.
attention_weights: a [batch, heads, d1//q1, ..., dn//qn] tensor representing
the attention weights for query item at `decode_step` or None if not in
fast decoding mode.
"""
# This computation only applies to self attention, so assert q, k, v and
# antecedent have the same dimensions.
if memory_antecedent is None:
q.get_shape().assert_is_compatible_with(k.get_shape())
q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1])
if share_qk:
k = q
with tf.variable_scope(
name, default_name="clustered_attention_nd", values=[q, k, v]):
q_shape = shape_list(q)
v_shape = shape_list(v)
num_heads = q_shape[1]
batch = q_shape[0]
# rearrange q, k and v to blocked order and flatten them so
# shape becomes [batch, heads, seq_length, depth].
k = tf.reshape(k, [-1] + shape_list(k)[2:])
k = pad_to_multiple_nd(k, query_shape)
seq_length = np.prod(shape_list(k)[1:-1], dtype=np.int32)
k = break_into_blocks_nd(k, query_shape)
k = tf.reshape(k, [-1, num_heads, seq_length, shape_list(k)[-1]])
v = tf.reshape(v, [-1] + shape_list(v)[2:])
v = pad_to_multiple_nd(v, query_shape)
v = break_into_blocks_nd(v, query_shape)
v = tf.reshape(v, [-1, num_heads, seq_length, shape_list(v)[-1]])
q = tf.reshape(q, [-1] + shape_list(q)[2:])
q = pad_to_multiple_nd(q, query_shape)
q = break_into_blocks_nd(q, query_shape)
blocked_q_shape = [batch, num_heads] + shape_list(q)[1:]
seq_q_length = np.prod(shape_list(q)[1:-1], dtype=np.int32)
q = tf.reshape(q, [-1, num_heads, seq_q_length, q_shape[-1]])
# Make sure keys and queries are normalized
q = layer_norm(q, scaling=False)
k = layer_norm(k, scaling=False)
# Route information using queries and keys
if hash_items:
q_idx = hash_items_fn(q, sparsity_cluster_size,
shape_list(q)[-1],
decode_step, name)
if share_qk:
k_idx = q_idx
else:
k_idx = hash_items_fn(k, sparsity_cluster_size,
shape_list(k)[-1],
decode_step, name)
clustering_loss = tf.constant(0.)
else:
# Keys and queries come from different sequences
# Encoder-decoder attention, blank queries only
q_cluster_dists, q_clustering_loss = cluster_items(
q,
sparsity_cluster_size,
shape_list(q)[-1],
mode,
decode_step,
name,
ema,
beta,
decay,
is_recomputing,
skip_summaries,
blank_future=masked,
use_tpu=use_tpu)
if share_qk:
k_cluster_dists, k_clustering_loss = q_cluster_dists, q_clustering_loss
else:
k_cluster_dists, k_clustering_loss = cluster_items(
k,
sparsity_cluster_size,
shape_list(k)[-1],
mode,
decode_step,
name,
ema,
beta,
decay,
is_recomputing,
skip_summaries,
blank_future=masked,
use_tpu=use_tpu)
clustering_loss = q_clustering_loss + k_clustering_loss
if decode_step is None and not hash_items and not skip_summaries:
# Add a summary for cluster loss
tf.summary.scalar("cluster_loss", clustering_loss)
# gather cluster items.
if hash_items:
q, _, _, q_idx, _, _ = gather_hashed_attention_items(
q=q,
k=q,
v=q,
sparsity_cluster_size=sparsity_cluster_size,
attention_window=attention_window,
idx=q_idx,
token_bias=token_bias,
padding_bias=padding_bias if memory_antecedent is not None else None)
_, k, v, k_idx, padding_bias, token_bias = gather_hashed_attention_items(
q=k,
k=k,
v=v,
sparsity_cluster_size=sparsity_cluster_size,
attention_window=attention_window,
idx=k_idx,
token_bias=token_bias,
padding_bias=padding_bias)
else:
q, _, _, q_idx, _, _ = gather_cluster_attention_items(
q=q,
k=q,
v=q,
attention_window=attention_window,
cluster_dists=q_cluster_dists,
strided_attention=strided_attention,
token_bias=token_bias,
padding_bias=padding_bias if memory_antecedent is not None else None)
_, k, v, k_idx, padding_bias, token_bias = gather_cluster_attention_items(
q=k,
k=k,
v=v,
attention_window=attention_window,
cluster_dists=k_cluster_dists,
strided_attention=strided_attention,
token_bias=token_bias,
padding_bias=padding_bias)
attn_bias = clustered_attention_bias_nd(
attention_window=attention_window,
clustered_v=v,
masked=masked,
cache_padding_bias=cache_padding_bias,
bias_cache=bias_cache)
if padding_bias is not None:
padding_bias = tf.expand_dims(padding_bias * -1e9, axis=-2)
attn_bias = tf.minimum(attn_bias, padding_bias)
if token_bias is not None:
token_bias = tf.expand_dims(token_bias, axis=-2)
token_bias_weight = tf.get_variable(name="token_bias_weight",
initializer=1.0,
trainable=token_bias_wt_trainable)
attn_bias += token_bias_weight * token_bias
if relative_attention:
q_shape = shape_list(q)
k_shape = shape_list(k)
v_shape = shape_list(v)
q = tf.reshape(q, [q_shape[0], q_shape[1] * q_shape[2]] + q_shape[3:])
k = tf.reshape(k, [k_shape[0], k_shape[1] * k_shape[2]] + k_shape[3:])
v = tf.reshape(v, [v_shape[0], v_shape[1] * v_shape[2]] + v_shape[3:])
bias_shape = shape_list(attn_bias)
new_bias_shape = [bias_shape[0], bias_shape[1] * bias_shape[2]
] + bias_shape[3:]
attn_bias = tf.reshape(attn_bias, new_bias_shape)
output, weights = dot_product_attention(
q,
k,
v,
attn_bias,
dropout_rate=dropout_rate,
name=name or "clustered_dot_product",
relative_attention=relative_attention,
max_relative_position=max_relative_position,
decode_step=decode_step,
query_shape=query_shape)
if relative_attention:
output = tf.reshape(output, q_shape[:-1] + [-1])
weights = tf.reshape(weights, q_shape[:-1] + [-1])
# scatter the results back into blocked raster scan order.
output = scatter_cluster_items(output, q_idx, seq_q_length)
if decode_step is not None:
output = tf.slice(
output, [0, 0, decode_step, 0],
[batch, num_heads, 1, shape_list(output)[-1]])
output = tf.reshape(output, [batch, num_heads] + [1] * len(query_shape) +
v_shape[-1:])
# [batch, heads, num_clusters, attention_window, 1]
weights = tf.transpose(weights, [0, 1, 2, 4, 3])
# scatter the results to obtain [batch, heads, b1, ..., bn]
weights = scatter_cluster_items(weights, q_idx, seq_length)
weights = tf.slice(weights, [0, 0, 0, decode_step],
[batch, num_heads, seq_length, 1])
else:
output = tf.reshape(output, blocked_q_shape[:-1] + v_shape[-1:])
return output, clustering_loss, weights if decode_step is not None else None
def clustered_attention_bias_nd(attention_window, # pylint: disable=dangerous-default-value
clustered_v,
masked=True,
cache_padding_bias=False,
bias_cache={}):
"""create a cluster attention bias nd.
Args:
attention_window: an integer for the attention window.
clustered_v: a [batch, heads, num_clusters, attention_window, depth] tensor.
    masked: a boolean for masked/unmasked attention.
    cache_padding_bias: If sequences are not variable length (e.g. images and
      videos) and the only source of padding is rounding up to be evenly
      divisible by blocks, we can cache the padding bias as well to save
      memory.
bias_cache: attention bias cache.
Returns:
cluster attention bias of shape
[batch, heads, num_clusters, attention_window, attention_window] or
[1, heads, num_clusters, attention_window, attention_window] if cache
padding bias is true.
"""
cache_key = "clustered_attention_bias_{}_{}_{}".format(
attention_window, masked, cache_padding_bias)
if cache_key in bias_cache:
return bias_cache[cache_key]
if cache_padding_bias:
padding_attn_bias = tf.expand_dims(
embedding_to_padding(clustered_v[:1, :, :, :, :]) * -1e9, axis=-2)
else:
padding_attn_bias = tf.expand_dims(
embedding_to_padding(clustered_v) * -1e9, axis=-2)
if masked:
causal_attn_bias = tf.expand_dims(
attention_bias_lower_triangle(
attention_window, bias_cache=bias_cache),
axis=0)
causal_attn_bias, padding_attn_bias = maybe_tile(causal_attn_bias,
padding_attn_bias)
attn_bias = tf.minimum(causal_attn_bias, padding_attn_bias)
else:
attn_bias = padding_attn_bias
if cache_padding_bias:
bias_cache[cache_key] = attn_bias
return attn_bias
def _generate_relative_positions_matrix(length_q,
length_k,
max_relative_position,
query_shape,
decode_step=None):
"""Generates matrix of relative positions between inputs."""
if decode_step is None:
if length_q == length_k:
range_vec_q = range_vec_k = tf.range(length_q)
else:
range_vec_k = tf.range(length_k)
range_vec_q = range_vec_k[-length_q:]
distance_mat = range_vec_k[None, :] - range_vec_q[:, None]
else:
block_len = np.prod(query_shape)
positive_positions = block_len - decode_step % block_len
distance_mat = tf.expand_dims(tf.range(-length_k, 0, 1),
0) + positive_positions
distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position,
max_relative_position)
# Shift values to be >= 0. Each integer still uniquely identifies a relative
# position difference.
final_mat = distance_mat_clipped + max_relative_position
return final_mat
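# Worked example (not part of the original module): for length 4 with
# max_relative_position=2 and no decoding, entry (i, j) is clip(j - i, -2, 2)
# shifted by 2, e.g. row 0 is [2, 3, 4, 4] and row 3 is [0, 0, 1, 2], so every
# relative offset maps to an id in [0, 2 * max_relative_position].
def _relative_positions_matrix_example():
  mat = _generate_relative_positions_matrix(
      length_q=4, length_k=4, max_relative_position=2, query_shape=(4,))
  assert shape_list(mat) == [4, 4]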
def _generate_relative_positions_embeddings(length_q,
length_k,
depth,
max_relative_position,
name,
query_shape,
decode_step=None):
"""Generates tensor of size [1 if decode else length_q, length_k, depth]."""
with tf.variable_scope(name):
relative_positions_matrix = _generate_relative_positions_matrix(
length_q, length_k, max_relative_position, query_shape, decode_step)
vocab_size = max_relative_position * 2 + 1
# Generates embedding for each relative position of dimension depth.
embeddings_table = tf.get_variable("embeddings", [vocab_size, depth])
embeddings = tf.gather(embeddings_table, relative_positions_matrix)
return embeddings
def _relative_attention_inner(x, y, z, transpose):
"""Relative position-aware dot-product attention inner calculation.
This batches matrix multiply calculations to avoid unnecessary broadcasting.
Args:
x: Tensor with shape [batch_size, heads, length or 1, length or depth].
y: Tensor with shape [batch_size, heads, length or 1, depth].
z: Tensor with shape [length or 1, length, depth].
transpose: Whether to transpose inner matrices of y and z. Should be true if
last dimension of x is depth, not length.
Returns:
A Tensor with shape [batch_size, heads, length, length or depth].
"""
batch_size = tf.shape(x)[0]
heads = x.get_shape().as_list()[1]
length = tf.shape(x)[2]
# xy_matmul is [batch_size, heads, length or 1, length or depth]
xy_matmul = tf.matmul(x, y, transpose_b=transpose)
# x_t is [length or 1, batch_size, heads, length or depth]
x_t = tf.transpose(x, [2, 0, 1, 3])
# x_t_r is [length or 1, batch_size * heads, length or depth]
x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1])
# x_tz_matmul is [length or 1, batch_size * heads, length or depth]
x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose)
# x_tz_matmul_r is [length or 1, batch_size, heads, length or depth]
x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1])
# x_tz_matmul_r_t is [batch_size, heads, length or 1, length or depth]
x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3])
return xy_matmul + x_tz_matmul_r_t
def dot_product_attention_relative(q,
k,
v,
bias,
max_relative_position,
query_shape,
dropout_rate=0.0,
name=None,
decode_step=None):
"""Calculate relative position-aware dot-product self-attention.
The attention calculation is augmented with learned representations for the
relative position between each element in q and each element in k and v.
Args:
q: a Tensor with shape [batch, heads, length, depth] or [batch, heads, 1,
depth] in fast decoding mode.
k: a Tensor with shape [batch, heads, length, depth].
v: a Tensor with shape [batch, heads, length, depth].
bias: bias Tensor.
max_relative_position: an integer specifying the maximum distance between
inputs that unique position embeddings should be learned for.
query_shape: a tuple to represent the query shape.
dropout_rate: a floating point number.
name: an optional string.
decode_step: the decode step in fast decoding mode.
Returns:
A Tensor of shape [batch, heads, length, depth].
    A Tensor of shape [batch, heads, length, length] for attention weights.
Raises:
ValueError: if max_relative_position is not > 0.
"""
if not max_relative_position:
raise ValueError("Max relative position (%s) should be > 0 when using "
"relative self attention." % (max_relative_position))
with tf.variable_scope(
name, default_name="dot_product_attention_relative", values=[q, k, v]):
# Use separate embeddings suitable for keys and values.
depth = k.get_shape().as_list()[3]
length_k = shape_list(k)[2]
length_q = shape_list(q)[2]
relations_keys = _generate_relative_positions_embeddings(
length_q, length_k, depth, max_relative_position,
"relative_positions_keys", query_shape, decode_step)
relations_values = _generate_relative_positions_embeddings(
length_q, length_k, depth, max_relative_position,
"relative_positions_values", query_shape, decode_step)
# Compute self attention considering the relative position embeddings.
logits = _relative_attention_inner(q, k, relations_keys, True)
if bias is not None:
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if dropout_rate:
weights = tf.nn.dropout(weights, 1.0 - dropout_rate)
return _relative_attention_inner(weights, v, relations_values,
False), weights
def dot_product_attention(q,
k,
v,
bias,
query_shape,
dropout_rate=0.0,
name=None,
relative_attention=False,
max_relative_position=None,
decode_step=None):
"""Dot-product attention.
Args:
q: Tensor with shape [..., length_q, depth_k].
k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must
match with q.
v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must match
with q.
bias: bias Tensor (see attention_bias())
query_shape: a tuple to represent the query shape.
dropout_rate: a float.
name: an optional string
relative_attention: whether to do relative attention.
max_relative_position: if relative attention is enabled, how much distance
to use for relative positions.
decode_step: the decode step in fast decoding mode.
Returns:
Tensor with shape [..., length_q, depth_v].
Tensor with shape [..., length_q, length_kv] representing attention weights.
"""
if relative_attention:
assert max_relative_position
return dot_product_attention_relative(
q=q,
k=k,
v=v,
bias=bias,
max_relative_position=max_relative_position,
dropout_rate=dropout_rate,
name=name,
decode_step=decode_step,
query_shape=query_shape)
with tf.variable_scope(
name, default_name="dot_product_attention", values=[q, k, v]):
logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv]
if bias is not None:
bias = cast_like(bias, logits)
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
# Drop out attention links for each head.
if dropout_rate:
weights = tf.nn.dropout(weights, 1.0 - dropout_rate)
return tf.matmul(weights, v), weights
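# Minimal usage sketch (added for illustration; not part of the original
# module). It only builds graph ops -- evaluating them requires a TF1 session.
# The query_shape value here is an arbitrary assumption.
def _example_dot_product_attention():
  q = tf.random.normal([2, 4, 8, 16])  # [batch, heads, length, depth]
  k = tf.random.normal([2, 4, 8, 16])
  v = tf.random.normal([2, 4, 8, 16])
  bias = attention_bias_lower_triangle(8)  # causal mask, defined later in this file
  out, weights = dot_product_attention(
      q, k, v, bias=bias, query_shape=(8,), dropout_rate=0.0)
  return out, weights  # shapes: [2, 4, 8, 16] and [2, 4, 8, 8]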
def cast_like(x, y):
"""Cast x to y's dtype, if necessary."""
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
if x.dtype.base_dtype == y.dtype.base_dtype:
return x
cast_x = tf.cast(x, y.dtype)
if cast_x.device != x.device:
x_name = "(eager Tensor)"
try:
x_name = x.name
except AttributeError:
pass
tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x_name,
x.device, cast_x.device)
return cast_x
def attention_bias_local(length, max_backward, max_forward):
"""Create an bias tensor to be added to attention logits.
A position may attend to positions at most max_distance from it,
forward and backwards.
This does not actually save any computation.
Args:
length: int
max_backward: int, maximum distance backward to attend. Negative values
indicate unlimited.
max_forward: int, maximum distance forward to attend. Negative values
indicate unlimited.
Returns:
a `Tensor` with shape [1, 1, length, length].
"""
band = ones_matrix_band_part(
length,
length,
max_backward,
max_forward,
out_shape=[1, 1, length, length])
return -1e9 * (1.0 - band)
def attention_bias_lower_triangle(length, bias_cache={}): # pylint: disable=dangerous-default-value
"""Create an bias tensor to be added to attention logits.
Allows a query to attend to all positions up to and including its own.
Args:
length: a Scalar.
bias_cache: attention bias cache.
Returns:
a `Tensor` with shape [1, 1, length, length].
"""
cache_key = "attention_bias_lower_triangle_{}".format(length)
if cache_key in bias_cache:
return bias_cache[cache_key]
bias = attention_bias_local(length, -1, 0)
bias_cache[cache_key] = bias
return bias
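# Worked example (added for illustration): for length=3 the returned bias,
# squeezed from [1, 1, 3, 3] to [3, 3], is
#   [[0, -1e9, -1e9],
#    [0,    0, -1e9],
#    [0,    0,    0]]
# so adding it to the logits before the softmax removes attention to future
# positions.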
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
"""Matrix band part of ones.
Args:
rows: int determining number of rows in output
cols: int
num_lower: int, maximum distance backward. Negative values indicate
unlimited.
num_upper: int, maximum distance forward. Negative values indicate
unlimited.
out_shape: shape to reshape output by.
Returns:
Tensor of size rows * cols reshaped into shape out_shape.
"""
if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]):
# Needed info is constant, so we construct in numpy
if num_lower < 0:
num_lower = rows - 1
if num_upper < 0:
num_upper = cols - 1
lower_mask = np.tri(cols, rows, num_lower).T
upper_mask = np.tri(rows, cols, num_upper)
band = np.ones((rows, cols)) * lower_mask * upper_mask
if out_shape:
band = band.reshape(out_shape)
band = tf.constant(band, tf.float32)
else:
band = tf.matrix_band_part(
tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
tf.cast(num_upper, tf.int64))
if out_shape:
band = tf.reshape(band, out_shape)
return band
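# Worked example (added for illustration): ones_matrix_band_part(4, 4, 1, 0)
# evaluates to the float32 tensor
#   [[1, 0, 0, 0],
#    [1, 1, 0, 0],
#    [0, 1, 1, 0],
#    [0, 0, 1, 1]]
# i.e. each row may "see" itself and at most one position to its left.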
def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs):
"""Like tf.nn.dropout but takes broadcast_dims instead of noise_shape.
Instead of specifying noise_shape, this function takes broadcast_dims -
a list of dimension numbers in which noise_shape should be 1. The random
keep/drop tensor has dimensionality 1 along these dimensions.
Args:
x: a floating point tensor.
keep_prob: A scalar Tensor with the same type as x. The probability that
each element is kept.
broadcast_dims: an optional list of integers the dimensions along which to
broadcast the keep/drop flags.
**kwargs: keyword arguments to tf.nn.dropout other than "noise_shape".
Returns:
Tensor of the same shape as x.
"""
assert "noise_shape" not in kwargs
if math.isclose(keep_prob, 1):
return x
if broadcast_dims:
shape = tf.shape(x)
ndims = len(x.get_shape())
# Allow dimensions like "-1" as well.
broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims]
kwargs["noise_shape"] = [
1 if i in broadcast_dims else shape[i] for i in range(ndims)
]
return tf.nn.dropout(x, keep_prob, **kwargs)
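# Usage sketch (added for illustration): for attention weights of shape
# [batch, heads, length_q, length_kv], calling this with broadcast_dims=[0, 1]
# produces noise_shape = [1, 1, length_q, length_kv], so the same keep/drop
# mask is shared across the batch and heads dimensions.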
def scatter_cluster_items(clustered_x, idx, seq_length):
"""Scatters items from clusters into their original positions.
Args:
clustered_x: a [batch, heads, num_clusters, attention_window, depth] tensor
or [batch, heads, num_clusters, 1, depth] in fast decoding mode.
idx: a [batch, heads, num_clusters, attention_window, 3] int tensor in which
items in the last dimension are [batch_index, head_index, seq_len_index]
seq_length: the sequence length.
Returns:
a [batch, heads, seq_length, depth] tensor where items in `clustered_x` are
scattered to their positions or [batch, heads, 1, depth] in fast decoding.
"""
x_shape = shape_list(clustered_x)
batch = x_shape[0]
heads = x_shape[1]
res = tf.scatter_nd(
idx, clustered_x, [batch, heads, seq_length, x_shape[-1]]) / (
tf.scatter_nd(idx, tf.ones_like(clustered_x),
[batch, heads, seq_length, x_shape[-1]]) + 1e-2)
return res
def gather_hashed_attention_items(q,
k,
v,
idx,
sparsity_cluster_size,
attention_window,
token_bias=None,
padding_bias=None):
"""Gathers items that should attend to each other based on input hashing.
Args:
q: a [batch, heads, seq_length, depth_k] tensor or [batch, heads, 1,
depth_k] in fast decoding mode.
k: a [batch, heads, seq_length, depth_k] tensor.
v: a [batch, heads, seq_length, depth_v] tensor.
idx: Hash bucket ids.
sparsity_cluster_size: Number of clusters for hashed attention.
attention_window: How many positions to attend to in each cluster.
token_bias: Externally provided attention bias over memory sequence (k / v).
padding_bias: Padding bias for seq2seq models (Shape: [b, s]).
Returns:
q: a [batch, heads, num_clusters, attention_window, depth_k] or
[batch, heads, num_clusters, 1, depth_k].
k: a [batch, heads, num_clusters, attention_window, depth_k].
v: a [batch, heads, num_clusters, attention_window, depth_v].
idx: a [batch, heads, num_clusters, attention_window, 3] int tensor in
which items in the last dimension is
[batch_index, head_index, seq_length_index]. This is used for scattering
the results back after dot product attention.
padding_bias: Padding bias gathered according to indices.
token_bias: token_bias gathered according to indices.
"""
batch = shape_list(q)[0]
heads = shape_list(q)[1]
# [batch, num_heads, num_clusters, seq_length]
idx = tf.one_hot(idx, depth=sparsity_cluster_size, axis=-1)
idx = tf.transpose(idx, [0, 1, 3, 2])
_, idx = tf.math.top_k(idx, k=attention_window)
  # idx is [batch, num_heads, num_clusters, attention_window]
# ids correspond to decoding order, sort them to prevent peeking into future.
idx = tf.sort(idx, axis=-1)
idx = tf.expand_dims(idx, axis=-1)
# to prepare gather indices we need to add batch index to idx.
batch_idx = tf.reshape(tf.range(0, batch), [batch, 1, 1, 1, 1])
# [batch, heads, num_clusters, attention_window, 1]
batch_idx = tf.tile(batch_idx, [1, heads, sparsity_cluster_size,
attention_window, 1])
# we also need to add head index to idx.
head_idx = tf.reshape(tf.range(0, heads), [1, heads, 1, 1, 1])
head_idx = tf.tile(head_idx, [batch, 1, sparsity_cluster_size,
attention_window, 1])
# [batch, heads, num_clusters, attention_window, 3]
idx = tf.concat([batch_idx, head_idx, idx], axis=-1)
k, v = tf.split(tf.gather_nd(tf.concat([k, v], -1), idx), 2, -1)
def gather_idx_for_bias(bias):
# Padding bias is of shape [batch, seq_length]
bias = tf.expand_dims(bias, axis=1)
bias = tf.tile(bias, [1, heads, 1])
bias = tf.gather_nd(bias, idx)
return bias
if padding_bias is not None:
padding_bias = gather_idx_for_bias(padding_bias)
if token_bias is not None:
token_bias = gather_idx_for_bias(token_bias)
q = tf.gather_nd(q, idx)
return q, k, v, idx, padding_bias, token_bias
def gather_cluster_attention_items(q,
k,
v,
cluster_dists,
attention_window,
strided_attention=False,
token_bias=None,
padding_bias=None):
"""Gathers items that should attend to each other based on input clustering.
Args:
q: a [batch, heads, seq_length, depth_k] tensor or [batch, heads, 1,
depth_k] in fast decoding mode.
k: a [batch, heads, seq_length, depth_k] tensor.
v: a [batch, heads, seq_length, depth_v] tensor.
cluster_dists: a [batch, num_heads, seq_length, num_clusters] tensor
representing the distance of each item from all clusters.
attention_window: How many positions to attend to in each cluster.
strided_attention: Whether to do strided attention in the cluster space.
token_bias: Externally provided attention bias over memory sequence (k / v).
padding_bias: Padding bias for seq2seq models (Shape: [b, s]).
Returns:
q: a [batch, heads, num_clusters, attention_window, depth_k] or
[batch, heads, num_clusters, 1, depth_k].
k: a [batch, heads, num_clusters, attention_window, depth_k].
v: a [batch, heads, num_clusters, attention_window, depth_v].
idx: a [batch, heads, num_clusters, attention_window, 3] int tensor in
which items in the last dimension is
[batch_index, head_index, seq_length_index]. This is used for scattering
the results back after dot product attention.
padding_bias: Padding bias gathered according to indices.
token_bias: token_bias gathered according to indices.
"""
shape = shape_list(cluster_dists)
num_clusters = shape[-1]
batch = shape_list(q)[0]
heads = shape_list(q)[1]
# [batch, num_heads, num_clusters, seq_length]
cluster_dists = tf.transpose(cluster_dists, [0, 1, 3, 2])
if strided_attention:
# Simulate attending to the centroids by strided attention.
seq_len = shape_list(cluster_dists)[-1]
cluster_idx = tf.argsort(cluster_dists, axis=-1)
stride = seq_len // attention_window
idx = cluster_idx[:, :, :, ::stride]
# we may need to trim down idx.
if (seq_len % attention_window) != 0:
idx = idx[:, :, :, :attention_window]
else:
_, idx = tf.math.top_k(-cluster_dists, k=attention_window)
# ids correspond to decoding order, sort them to prevent peeking into future.
idx = tf.sort(idx, axis=-1)
idx = tf.expand_dims(idx, axis=-1)
# to prepare gather indices we need to add batch index to idx.
batch_idx = tf.reshape(tf.range(0, batch), [batch, 1, 1, 1, 1])
# [batch, heads, num_clusters, attention_window, 1]
batch_idx = tf.tile(batch_idx, [1, heads, num_clusters, attention_window, 1])
# we also need to add head index to idx.
head_idx = tf.reshape(tf.range(0, heads), [1, heads, 1, 1, 1])
head_idx = tf.tile(head_idx, [batch, 1, num_clusters, attention_window, 1])
# [batch, heads, num_clusters, attention_window, 3]
idx = tf.concat([batch_idx, head_idx, idx], axis=-1)
k, v = tf.split(tf.gather_nd(tf.concat([k, v], -1), idx), 2, -1)
def gather_idx_for_bias(bias):
# bias is of shape [batch, seq_length]
bias = tf.expand_dims(bias, axis=1)
bias = tf.tile(bias, [1, heads, 1])
bias = tf.gather_nd(bias, idx)
return bias
if padding_bias is not None:
padding_bias = gather_idx_for_bias(padding_bias)
if token_bias is not None:
token_bias = gather_idx_for_bias(token_bias)
q = tf.gather_nd(q, idx)
return q, k, v, idx, padding_bias, token_bias
def hash_items_fn(items, sparsity_cluster_size, codebook_depth,
decode_step, name):
"""Hash input items via random projections (LSH).
Args:
items: a [batch, heads, seq_length, depth] tensor
sparsity_cluster_size: Number of clusters for LSH attention.
codebook_depth: depth of the codebook entries.
decode_step: decode step or None.
name: variable scope name.
Returns:
idx: Membership index of each sequence item in hash bucket.
"""
del decode_step
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
num_heads = shape_list(items)[1]
    # sparsity_cluster_size is a Python int, so the number of hash bits can be
    # computed in plain Python instead of via graph ops.
    num_bits = int(math.log2(sparsity_cluster_size))
projection_tensors = tf.get_variable(
name="projection_tensors",
shape=[num_heads, num_bits, codebook_depth],
trainable=False,
initializer=tf.initializers.orthogonal())
# items have shape [bs, nh, seq_len, d]
# projection_tensors have shape [nh, k, d]
    # inner product between the two has shape [bs, nh, seq_len, k]
inner_product = tf.einsum("bnsd, nkd->bnsk", items, projection_tensors)
signed_inner_product = tf.sign(inner_product)
    # Each element gets one sign bit per projection; together the bits index its hash bucket.
binary_inner_product = (signed_inner_product + 1)//2
idx = bit_to_int(binary_inner_product, num_bits=num_bits)
return idx
def bit_to_int(x_bit, num_bits, base=2):
"""Turn x_bit representing numbers bitwise (lower-endian) to int tensor.
Args:
x_bit: Tensor containing numbers in a particular base to be converted to
int.
num_bits: Number of bits in the representation.
base: Base of the representation.
Returns:
Integer representation of this number.
"""
x_l = tf.to_int64(tf.reshape(x_bit, [-1, num_bits]))
x_labels = [
x_l[:, i] * tf.to_int64(base)**tf.to_int64(i) for i in range(num_bits)]
res = sum(x_labels)
return tf.to_int64(tf.reshape(res, x_bit.get_shape().as_list()[:-1]))
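# Worked example (added for illustration): with num_bits=3, the lower-endian
# bit vector [1., 0., 1.] maps to 1*2**0 + 0*2**1 + 1*2**2 = 5.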
def cluster_items(items, sparsity_cluster_size, codebook_depth, mode,
decode_step, name, ema, beta, decay, is_recomputing,
skip_summaries, blank_future=False, use_tpu=False):
"""Cluster input items via a discrete bottleneck.
Args:
items: a [batch, heads, seq_length, depth] tensor
sparsity_cluster_size: Number of clusters for routing attention.
codebook_depth: depth of the codebook entries.
mode: a tf.estimator.ModeKeys.
decode_step: decode step or None.
name: variable scope name.
ema: a boolean to do ema updates or not.
beta: multiplier for clustering loss.
decay: decay factor for learning centroids.
is_recomputing: a boolean to represent whether this is a backward pass.
skip_summaries: a boolean to represent whether to skip `tf.summary` ops.
blank_future: Whether to set future blank positions to infinity.
use_tpu: Whether to use TPU (default: False).
Returns:
cluster_dist: a [batch, heads, seq_length, num_clusters] float tensor
representing distance from all clusters.
loss: Scalar Tensor. Sum of codebook and commitment losses
"""
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
num_heads = shape_list(items)[1]
seq_length = shape_list(items)[2]
means = tf.get_variable(
name="means",
shape=[num_heads, sparsity_cluster_size, codebook_depth],
trainable=True,
dtype=tf.float32,
initializer=lambda shape, dtype=None, partition_info=None, # pylint: disable=g-long-lambda
verify_shape=None: layer_norm(
tf.random.normal(shape=shape,
mean=0.0,
stddev=1.0,
dtype=dtype), scaling=False))
ema_count, ema_means = None, None
if ema:
ema_count = tf.get_variable(
name="ema_count",
shape=[num_heads, sparsity_cluster_size],
trainable=False,
initializer=tf.constant_initializer(0))
with tf.colocate_with(means):
# In export mode, means becomes a Tensor that does not have
# initialized_value defined.
ema_means = tf.get_variable(
name="ema_means",
shape=None if isinstance(means, tf.Variable) else
[num_heads, sparsity_cluster_size, codebook_depth],
trainable=False,
initializer=means.initialized_value() if isinstance(
means, tf.Variable) else tf.constant_initializer(0))
dist, loss = online_kmeans(
inputs=items,
sparsity_cluster_size=sparsity_cluster_size,
beta=beta,
means=means,
ema=ema,
decay=decay,
ema_count=ema_count,
ema_means=ema_means,
mode=mode,
is_recomputing=is_recomputing,
skip_summaries=skip_summaries,
use_tpu=use_tpu)
# In decoding mode, set distances for blank positions to infinity.
if decode_step is not None and blank_future:
batch_size = shape_list(dist)[0]
idx = tf.tile(
tf.reshape(tf.range(seq_length), [1, 1, -1, 1]),
[batch_size, num_heads, 1, shape_list(dist)[-1]])
dist = tf.where(idx <= decode_step, dist, tf.ones_like(dist) * 1e9)
return dist, loss
def online_kmeans(inputs,
sparsity_cluster_size,
mode=None,
beta=0.25,
ema=True,
means=None,
ema_count=None,
ema_means=None,
epsilon=1e-5,
decay=0.999,
is_recomputing=False,
skip_summaries=True,
use_tpu=False):
"""Clustering via online k-means.
Args:
inputs: Input to the bottleneck, a Tensor of shape [..., hidden_dim].
sparsity_cluster_size: Number of clusters for routing attention.
mode: tf.estimator.ModeKeys.
beta: Scale factor for online k-means.
ema: Whether to update embeddings using exponential moving averages.
means: The embedding table. Used only if ema is True.
ema_count: Table of counts for each embedding corresponding to how many
examples in a batch it was the closest to. Used only if ema is True.
ema_means: Exponentially averaged version of the embeddings. Used only if
ema is True.
epsilon: Small value to avoid dividing by zero in EMA update. Used only if
ema is True.
decay: Decay factor for the exponential moving average. Used only if ema is
True.
is_recomputing: a boolean to represent whether this is a backward pass.
skip_summaries: a boolean to represent whether to skip `tf.summary` ops.
use_tpu: Whether to use TPU (default: False).
Returns:
x_dist: Distance to the centroids for online k-means.
extra_loss: Loss for training online k-means.
"""
with tf.variable_scope("clustering", reuse=tf.AUTO_REUSE):
# inputs [bs, n, s, h], means [n, k, h]
input_shape = shape_list(inputs)
num_heads = input_shape[1]
x = inputs
x_means_hot, x_dist, q_loss, e_loss = embedding_lookup(x, means=means)
# Exclude pads from affecting centroids
x_means_hot_pad_mask = 1 - embedding_to_padding(x)
x_means_hot_pad_mask = tf.expand_dims(x_means_hot_pad_mask, axis=-1)
x_means_hot = tf.multiply(x_means_hot, x_means_hot_pad_mask)
extra_loss = 0
# Update the EMA variables.
if ema and mode == tf.estimator.ModeKeys.TRAIN and not is_recomputing:
tf.logging.info("Using EMA with beta = {}".format(beta))
# [bs, n, s, k], [n, k]
count = tf.reduce_sum(
tf.reshape(
x_means_hot,
shape=[-1, num_heads, sparsity_cluster_size]),
axis=0)
if use_tpu:
count = tf.tpu.cross_replica_sum(count)
updated_ema_count = moving_averages.assign_moving_average(
ema_count,
count,
decay,
zero_debias=False)
# [bs, n, s, k], [bs, n, s, h]
dw = tf.einsum("bnsk, bnsh -> nkh", x_means_hot, x)
if use_tpu:
dw = tf.tpu.cross_replica_sum(dw)
updated_ema_means = moving_averages.assign_moving_average(
ema_means, dw, decay, zero_debias=False)
n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
updated_ema_count = ((updated_ema_count + epsilon) /
(n + sparsity_cluster_size * epsilon) * n)
# pylint: disable=g-no-augmented-assignment
updated_ema_means = updated_ema_means / tf.expand_dims(
updated_ema_count, axis=-1)
# pylint: enable=g-no-augmented-assignment
with tf.control_dependencies([e_loss]):
update_means = tf.assign(means, updated_ema_means)
with tf.control_dependencies([update_means]):
extra_loss += beta * e_loss
elif ema:
extra_loss += beta * e_loss
else:
extra_loss += q_loss + beta * e_loss
# Adjust shape of dist
dist_shape = input_shape[:-1] + [sparsity_cluster_size]
x_dist = tf.reshape(x_dist, dist_shape)
# Add a tf summary for average cluster occupancy
if mode != tf.estimator.ModeKeys.PREDICT and not skip_summaries:
cluster_occupancy = tf.reduce_mean(
tf.reduce_sum(x_means_hot, axis=2), axis=[0, 1])
tf.summary.histogram("cluster_occupancy", cluster_occupancy)
cluster_occupancy_min = tf.reduce_min(cluster_occupancy)
cluster_occupancy_max = tf.reduce_max(cluster_occupancy)
tf.summary.scalar("cluster_occupancy_min", cluster_occupancy_min)
tf.summary.scalar("cluster_occupancy_max", cluster_occupancy_max)
return x_dist, extra_loss
def embedding_lookup(x, means):
"""Compute nearest neighbors and loss for training the embeddings.
Args:
    x: Continuous encodings of shape
      [batch_size, num_heads, sequence_length, hidden_dim].
    means: Embedding table of shape
      [num_heads, sparsity_cluster_size, hidden_dim].
  Returns:
    x_means_hot: The nearest neighbor in one hot form, with shape
      [batch_size, num_heads, sequence_length, sparsity_cluster_size].
    x_dist: Distances to the centroids of shape
      [batch_size, num_heads, sequence_length, sparsity_cluster_size].
q_loss: Scalar Tensor representing codebook loss.
e_loss: Scalar Tensor representing commitment loss.
"""
x_means_hot, x_dist = nearest_neighbor(x, means)
x_means = tf.einsum("bnsk, nkh -> bnsh", x_means_hot, means)
q_loss = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(x), x_means))
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
return x_means_hot, x_dist, q_loss, e_loss
def nearest_neighbor(x, means):
"""Find the nearest element in means to elements in x.
Args:
    x: Continuous encodings of shape
      [batch_size, num_heads, sequence_length, hidden_dim].
    means: Embedding table of shape
      [num_heads, sparsity_cluster_size, hidden_dim].
Returns:
Tensor with nearest element in mean encoded in one-hot notation
and distances.
"""
sparsity_cluster_size = tf.shape(means)[1]
scalar_prod = tf.einsum("bnsh, nkh -> bnsk", x, means)
dist = - scalar_prod
  # pick the nearest centroid (largest scalar product, i.e. smallest distance)
nearest_idx = tf.argmax(-dist, axis=-1)
nearest_hot = tf.one_hot(nearest_idx, sparsity_cluster_size)
dist = tf.reshape(dist, shape_list(nearest_hot))
return nearest_hot, dist
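# Illustrative note (added commentary): the "distance" here is just the
# negative scalar product, so argmax(-dist) selects the centroid with the
# largest dot product with x. For a single head with two centroids m0, m1 and
# one query x, if <x, m0> = 0.9 and <x, m1> = 0.1 then nearest_hot = [1, 0]
# and dist = [-0.9, -0.1].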
def get_timing_signal_1d(length,
channels,
min_timescale=1.0,
max_timescale=1.0e4,
start_index=0):
"""Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can be
expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
Args:
length: scalar, length of timing signal sequence.
channels: scalar, size of timing embeddings to create. The number of
different timescales is equal to channels / 2.
min_timescale: a float
max_timescale: a float
start_index: index of first position
Returns:
a Tensor of timing signals [1, length, channels]
"""
position = tf.to_float(tf.range(length) + start_index)
num_timescales = channels // 2
log_timescale_increment = (
math.log(float(max_timescale) / float(min_timescale)) /
tf.maximum(tf.to_float(num_timescales) - 1, 1))
inv_timescales = min_timescale * tf.exp(
tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
# Please note that this slightly differs from the published paper.
# See a discussion here: https://github.com/tensorflow/tensor2tensor/pull/177
signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]])
signal = tf.reshape(signal, [1, length, channels])
return signal
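# Usage sketch (added for illustration; not part of the original module): the
# timing signal is typically added to the token embeddings before the first
# attention layer.
def _example_add_timing_signal(x):
  # x: [batch, length, channels] embedding Tensor.
  length = shape_list(x)[1]
  channels = shape_list(x)[2]
  return x + get_timing_signal_1d(length, channels)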
|
py | b40eab02fd537bd0cc9a5bb6e761a8b5fb605e46 | import pygame
from game_src.brick import Brick
class Wall(pygame.sprite.Group):
"""
Class extends SpriteGroup in order to make use of the collision methods already implemented in Pygame
"""
bricks = []
brick_size = 10
(game_width, game_height) = (400, 400)
def __init__(self, game_width: int, game_height: int, brick: Brick):
super(Wall, self).__init__()
self.brick_size = brick.size
self.game_width, self.game_height = game_width, game_height
self.bricks = []
self.init_wall()
def init_wall(self):
"""
        Build the wall for the game. Bricks are added one next to the other, starting from (0, 0) to
        (game_width, game_height).
Remember the coordinate system in Pygame starts from the top-left corner (0, 0) and both x and y axis
are positive to the right and down, respectively.
"""
for i in range(0, int(self.game_width / self.brick_size)):
brick = Brick(self.brick_size)
brick.set_position(i * self.brick_size, 0)
self.add(brick)
            self.bricks.append(brick)
for i in range(0, int(self.game_height / self.brick_size)):
brick = Brick(self.brick_size)
brick.set_position(0, i * self.brick_size)
self.add(brick)
            self.bricks.append(brick)
for i in range(0, int(self.game_width / self.brick_size)):
brick = Brick(self.brick_size)
brick.set_position(i * self.brick_size, self.game_height - self.brick_size)
self.add(brick)
            self.bricks.append(brick)
for i in range(0, int(self.game_height / self.brick_size)):
brick = Brick(self.brick_size)
brick.set_position(self.game_width - self.brick_size, i * self.brick_size)
self.add(brick)
            self.bricks.append(brick)
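# Usage sketch (added for illustration, not part of the original file). It
# assumes Brick is a regular pygame sprite exposing `size`, `image` and `rect`:
#
#   pygame.init()
#   screen = pygame.display.set_mode((400, 400))
#   wall = Wall(400, 400, Brick(10))
#   wall.draw(screen)  # draw() is inherited from pygame.sprite.Group
#   pygame.display.flip()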
|
py | b40eab66d0fa603a818b31cdb8e687577831209e | #!/usr/bin/env python3
# coding: utf8
import os, sys, pathlib
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from src.token import CsvTokenReader
import unittest
from unittest.mock import MagicMock, patch, mock_open
import copy
import toml
class TestCsvTokenReader(unittest.TestCase):
def setUp(self):
self.rows = [
['test.com', 'test-user', 'read', 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'],
]
def test_path(self):
self.assertEqual(os.path.basename(CsvTokenReader().Path), 'token.tsv')
@patch('os.path.isfile', return_value=False)
def test_get_not_exist_file(self, mock_lib):
self.assertEqual(CsvTokenReader().get('', ''), None)
@patch('src.token.CsvTokenReader._CsvTokenReader__get_rows')
def test_get_hit_one_of_one(self, mock_lib):
mock_lib.return_value = self.rows
actual = CsvTokenReader().get(self.rows[0][0], self.rows[0][1])
mock_lib.assert_called_once()
self.assertEqual(actual, self.rows[0][3])
@patch('src.token.CsvTokenReader._CsvTokenReader__get_rows')
def test_get_hit_one_of_two(self, mock_lib):
mock_lib.return_value = [
self.rows[0],
[self.rows[0][0]+'2', self.rows[0][1]+'2', self.rows[0][2], self.rows[0][3]+'2'],
]
actual = CsvTokenReader().get(mock_lib.return_value[1][0], mock_lib.return_value[1][1])
mock_lib.assert_called_once()
self.assertEqual(actual, mock_lib.return_value[1][3])
@patch('src.token.CsvTokenReader._CsvTokenReader__get_rows')
def test_get_hit_two_of_two(self, mock_lib):
mock_lib.return_value = [
self.rows[0],
[self.rows[0][0], self.rows[0][1], ['write'], 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'],
]
actual = CsvTokenReader().get(mock_lib.return_value[1][0], mock_lib.return_value[1][1])
mock_lib.assert_called_once()
self.assertEqual(actual, mock_lib.return_value[0][3])
@patch('src.token.CsvTokenReader._CsvTokenReader__get_rows')
def test_get_not_hit_one(self, mock_lib):
mock_lib.return_value = self.rows
for case in [
(([self.rows[0][0]+'2', self.rows[0][1]], None), None),
(([self.rows[0][0], self.rows[0][1]+'2'], None), None),
(([self.rows[0][0]+'2', self.rows[0][1]], ['write']), None),
]:
with self.subTest(args=case[0][0], kwargs=case[0][1], expected=case[1]):
actual = CsvTokenReader().get(*case[0][0], scopes=case[0][1])
self.assertEqual(actual, None)
@patch('src.token.CsvTokenReader._CsvTokenReader__get_rows')
def test_get_not_hit_two(self, mock_lib):
mock_lib.return_value = [
self.rows[0],
[self.rows[0][0], self.rows[0][1], ['write'], 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'],
]
for case in [
(([self.rows[0][0]+'2', self.rows[0][1]], None), None),
(([self.rows[0][0], self.rows[0][1]+'2'], None), None),
(([self.rows[0][0]+'2', self.rows[0][1]], ['follow']), None),
]:
with self.subTest(args=case[0][0], kwargs=case[0][1], expected=case[1]):
actual = CsvTokenReader().get(*case[0][0], scopes=case[0][1])
self.assertEqual(actual, None)
if __name__ == "__main__":
unittest.main()
|
py | b40eac2ac88cf5d94b1025bdef8298ea66114fe5 | from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import csv
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {}
'''
{'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
'''
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def get_historical_rates_safe(self, ccy):
try:
self.print_error("requesting fx history for", ccy)
self.history[ccy] = self.historical_rates(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
def get_historical_rates(self, ccy):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
t.setDaemon(True)
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a) in [3,4]])
class CryptoCompare(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('min-api.cryptocompare.com',
"/data/pricehistorical?fsym=ZEC&tsyms=USD")
return {'USD': Decimal(json['ZEC']['USD'])}
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
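# Worked example (added for illustration), with hypothetical exchange names:
#   dictinvert({'A': ['USD', 'EUR'], 'B': ['USD']})
#   == {'USD': ['A', 'B'], 'EUR': ['A']}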
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
except:
continue
with open(path, 'w') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.set_exchange(self.config_exchange())
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
return fmt_str.format(round(amount, prec))
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
            if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
# Use when dynamic fetching is needed
return self.config.get('currency', 'USD')
def config_exchange(self):
return self.config.get('use_exchange', 'CryptoCompare')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, CryptoCompare)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
def on_quotes(self):
self.network.trigger_callback('on_quotes')
def on_history(self):
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return Decimal(rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No exchange rate available)") if rate is None else " 1 %s=%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def value_str(self, satoshis, rate):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = Decimal(satoshis) / COIN * Decimal(rate)
return "%s" % (self.ccy_amount_str(value, True))
return _("No data")
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return rate
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
|
py | b40eacb85ae8020ec47c44a7eebc3d045c276f37 | import urllib.request
from bs4 import BeautifulSoup
import re
import time
import json
from datetime import date
import os
zodiac = [
'aries',
'taurus',
'gemini',
'cancer',
'leo',
'virgo',
'libra',
'scorpio',
'sagittarius',
'capricorn',
'aquarius',
'pisces'
]
site_instances = [['HoroscopeCom',
'http://www.horoscope.com/us/horoscopes/general/horoscope-general-daily-today.aspx?sign=',
list(i for i in range(1, 13)),
['div', 'horoscope-content', 0]],
['HoroscopeCoUk',
'http://horoscope.co.uk/',
zodiac,
['div', 'daily-reading', 0]],
['HoroscopesCoUk',
'http://www.horoscopes.co.uk/',
list(sign.capitalize() for sign in zodiac),
['p', None, -1]],
['Elle',
'http://www.elle.com/horoscopes/daily/',
list(sign + '-daily-horoscope' for sign in zodiac),
['p', None, 2]],
]
with open('update_log.json', 'r+') as f: update_log = json.load(f)
f.close()
class HoroscopeParser:
'''Opens website, reads data and saves horoscopes in txt files sorted by source
self.name -> reference name
self.url -> full path without the suffix to exact zodiac sign
self.zodiac_list -> a list of suffixes to the 12 zodiac signs
self.id_tags -> list of 3 identifiers [HTML tag name, tag id, position]'''
def __init__(self, name, url, zodiac_list, id_tags):
self.name = name
self.url = url
self.zodiac_list = zodiac_list
self.id_tags = id_tags
def extract_text(self, zodiac):
        '''Opens the URL, makes the soup, finds the horoscope text and
        returns it.'''
print('Extracting ' + zodiac + ' from', self.name)
print(self.url+zodiac)
try:
html = urllib.request.urlopen(self.url+zodiac)
except urllib.error.HTTPError:
print('ERROR! Website currently not available!')
return
soup = BeautifulSoup(html, 'html.parser')
try:
text = soup.findAll(self.id_tags[0], self.id_tags[1])[self.id_tags[2]].get_text()
except IndexError:
print('ERROR! Text not found! Please re-check HTML tags.')
return
html.close()
text = HoroscopeParser.clean_text(self, text)
return text
def clean_text(self, text):
'''removes numbers and zodiac names'''
text = re.sub('[0-9]+', '', text)
for sign in zodiac:
            text = re.sub(sign.capitalize(), '', text)
        return text
def extract_all(self):
'''loop to get all 12 signs'''
try:
if update_log[self.name] == str(date.today()):
print(self.name + ' has already been extracted today.\nSkipping...\n')
return
else:
update_log[self.name] = str(date.today())
except KeyError:
update_log[self.name] = str(date.today())
for sign in self.zodiac_list:
hcope_text = HoroscopeParser.extract_text(self, str(sign))
fname = str(sign)[:2] + '_' + self.name + '_' + str(date.today()) + '.txt'
with open(os.path.join('./training_data', fname), 'w+') as f: f.write(str(hcope_text) + '\n')
f.close()
print("Extraction completed.\nWaiting 5 seconds...")
time.sleep(5)
####################
def scrape():
print('Hello.\nStarting daily scraping...\n\n')
for site_data in site_instances:
crawler = HoroscopeParser(site_data[0], site_data[1], site_data[2], site_data[3])
crawler.extract_all()
print('\nExtraction finished.')
with open('update_log.json', 'w+') as f: json.dump(update_log, f)
f.close()
|
py | b40ead06dfe17bb960cb29ca9867623da90723ae | from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
cfg = config['DEFAULT']
cfg_keys = 'version description keywords author author_email'.split()
expected = (cfg_keys
+ "lib_name user branch license status min_python audience language".split()
)
for o in expected:
assert o in cfg, "missing expected setting: {}".format(o)
setup_cfg = {o:cfg[o] for o in cfg_keys}
licenses = {
'apache2': (
'Apache Software License 2.0',
'OSI Approved :: Apache Software License'),
}
statuses = [
'1 - Planning',
'2 - Pre-Alpha',
'3 - Alpha',
'4 - Beta',
'5 - Production/Stable',
'6 - Mature',
'7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8'.split()
def parse_requirements(name):
return cfg[name].strip("\n").split("\n")
requirements = parse_requirements("requirements")
o_gpu = parse_requirements("onnxgpu")
o_cpu = parse_requirements("onnxcpu")
interp = parse_requirements("interp")
all_req = parse_requirements("all")
extras = {}
extras["onnx-gpu"] = ['onnxruntime-gpu']
extras["onnx-cpu"] = ['onnxruntime-cpu']
extras["interp"] = ['plotly', 'plotnine', 'shap<0.36.0']
extras["all"] = ['fastai', 'onnxruntime-gpu', 'plotly', 'plotnine', 'shap<0.36.0']
lic = licenses[cfg['license']]
min_python = cfg['min_python']
setuptools.setup(
name = cfg['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(cfg['status'])],
'Intended Audience :: ' + cfg['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + cfg['language'].title(),
]
+ [
'Programming Language :: Python :: '+o
for o in py_versions[py_versions.index(min_python):]
],
url = cfg['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
extras_require=extras,
dependency_links = cfg.get('dep_links','').split(),
python_requires = '>=' + cfg['min_python'],
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = { 'console_scripts': cfg.get('console_scripts','').split() },
**setup_cfg)
|
py | b40eae6e6f856876f9ed19ae4666262356c9d5a3 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from PIL import Image
from io import BytesIO
"""
This script was written to generate a plot containing selected genes that are
used as the reference genes in my ICC or RT-qPCR experiments. The source of
the data is Supplementary Table 4 from "Purification and Characterization
of Progenitor and Mature Human Astrocytes Reveals Transcriptional and
Functional Differences with Mouse" by Zhang et al. published in Neuron
2016. DOI: 10.1016/j.neuron.2015.11.013
I do not claim the copyright of the work published as detailed above.
"""
print ("Reading file...")
df = pd.read_excel("Data/TableS4-HumanMouseMasterFPKMList.xlsx", sheet_name=1,
skiprows=1, index_col=0)
df = df.drop(["Gender"])
goi = ['GFAP', 'VIM', 'NES', 'S100B', 'ALDH1A1', 'NFIA', 'HIF1A', 'CSPG4',
'TUBB3', 'SOX10', 'SOX9', 'SLC2A1', 'FABP7', 'CD44', 'SOX1',
'SLC1A2', 'SLC1A3']
names = []
for i in range(1, len(df.T)+1):
if i <= 4:
names.append('Peri-tumor Astrocytes')
elif i > 4 and i <= 8:
names.append('Hippocampi Astrocytes')
elif i > 8 and i <= 14:
names.append('Foetal Astrocytes')
elif i > 14 and i <= 26:
names.append('Mature Astrocytes')
elif i == 27:
names.append('Neurons')
elif i > 27 and i <= 32:
names.append('Oligodendrocytes')
elif i > 32 and i <= 35:
names.append('Microglia')
elif i > 35 and i <= 37:
names.append('Endothelial')
else:
names.append('Whole Cortex')
print ("Dropping unwanted cell types...")
df.columns = names
todrop = ['Whole Cortex', 'Peri-tumor Astrocytes', 'Hippocampi Astrocytes',
'Microglia', 'Endothelial']
df = df.T.drop(todrop).T
dg = df.loc[goi]
dg = dg.astype(np.float64)
dg = dg.sort_index(ascending=False)
print ("Grouping by cell types and calculating the mean and stdev...")
dg = dg.groupby(by=dg.columns, axis=1)
dg_avg = dg.mean()
dg_std = dg.std()
print ("Plotting...")
fig, ax = plt.subplots(figsize=(20/2.54, 18/2.54), tight_layout=True)
plt.xlabel("FPKM")
dg_avg.plot.barh(xerr=dg_std, xlim=0, width=0.9, ax=ax)
# plt.show()
# save figure
# (1) save the image in memory in PNG format
png1 = BytesIO()
fig.savefig(png1, format='png')
# (2) load this image into PIL
png2 = Image.open(png1)
# (3) save as TIFF
print ("Saving plot...")
png2.save('Results/GOI_Zhang.tiff')
png1.close()
print ("DONE!")
|
py | b40eb0d0a335b506cb79b8bdbb26767c82f5d73d | from .prior import MeansGaussianPrior, MeansUniformPrior, CovarsStaticPrior, \
DiagCovarsUniformPrior, WeightsUniformPrior,\
WeightsDirichletPrior, WeightsStaticPrior, GMMPrior,\
DiagCovarsWishartPrior
|
py | b40eb15f7f5a02fc35dfaf91fd584e378c221c41 | class Solution:
def myAtoi(self, s: str) -> int:
length = len(s)
index = 0
symbol = 1
num = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
while index < length and s[index] == ' ':
            # skip leading whitespace
index += 1
if index < length and s[index] == '+':
            # consume an optional '+' or '-' sign
index += 1
elif index < length and s[index] == '-':
index += 1
symbol = -1
tmp = 0
while index < length and s[index] in num:
            # stop at the first non-digit character or the end of the input
tmp *= 10
tmp += int(s[index])
index += 1
        # clamp to the 32-bit signed integer range on overflow
tmp *= symbol
if tmp > 2**31 - 1:
tmp = 2**31 - 1
elif tmp < -2**31:
tmp = -2**31
return tmp
if __name__ == '__main__':
s = Solution()
print(s.myAtoi(' -42')) |
py | b40eb1b45604c8284b660d8fbd89b98e468149bd | import tensorflow as tf
from tensorflow import keras
import numpy as np
import os
class SimpleESN(keras.Model):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.reservoirs = tf.keras.models.Sequential()
self.readout = tf.keras.models.Sequential()
def stack_reservoir(self, reservoir):
self.reservoirs.add(reservoir)
def add_readout(self, readout):
self.readout.add(readout)
def call(self, inputs):
r = self.reservoirs(inputs)
y = self.readout(r)
return y
def fit(self, x, y, **kwargs):
"""
Override of the fit method for the implementation of the pre-calculation of the reservoir states
:param x: training input data
:param y: label for input data
:param kwargs: other fit params
:return: training only on the readout level
"""
X_train = self.reservoirs(x)
x_val, y_val = kwargs['validation_data']
x_val_1 = self.reservoirs(x_val)
kwargs['validation_data'] = (x_val_1, y_val)
return self.readout.fit(X_train, y, **kwargs)
class ReservoirCell(keras.layers.Layer):
# Implementation of a shallow reservoir to be used as cell of a Recurrent Neural Network
# The implementation is parametrized by:
# units - the number of recurrent neurons in the reservoir
# input_scaling - the max abs value of a weight in the input-reservoir connections
# note that this value also scales the unitary input bias
# spectral_radius - the max abs eigenvalue of the recurrent weight matrix
# leaky - the leaking rate constant of the reservoir
# connectivity_input - number of outgoing connections from each input unit to the reservoir
# circular_law - used for initialization of the weight matrix
def __init__(self, units, seed,
                 input_scaling=1., spectral_radius=0.99, leaky=1, connectivity_recurrent=1,
circular_law=False,
**kwargs):
self.units = units
self.state_size = units
self.input_scaling = input_scaling
self.spectral_radius = spectral_radius
self.leaky = leaky
self.connectivity_recurrent = connectivity_recurrent
self.seed = seed
self.circular_law = circular_law
super().__init__(**kwargs)
def build(self, input_shape):
# build the input weight matrix
self.kernel = tf.random.uniform((input_shape[-1], self.units), minval=-1, maxval=1, seed=self.seed) * self.input_scaling
# build the recurrent weight matrix
if self.circular_law:
# uses circular law to determine the values of the recurrent weight matrix
value = (self.spectral_radius / np.sqrt(self.units)) * (6 / np.sqrt(12))
self.recurrent_kernel = tf.random.uniform(shape=(self.units, self.units), minval=-value, maxval=value, seed=self.seed)
else:
W = sparse_recurrent_tensor(self.units, C=self.connectivity_recurrent)
# re-scale the weight matrix to control the effective spectral radius of the linearized system
if (self.leaky == 1):
# if no leakage then rescale the W matrix
# compute the spectral radius of the randomly initialized matrix
e, _ = tf.linalg.eig(tf.sparse.to_dense(W))
rho = max(abs(e))
# rescale the matrix to the desired spectral radius
W = W * (self.spectral_radius / rho)
self.recurrent_kernel = W
else:
I = sparse_eye(self.units)
W2 = tf.sparse.add(I * (1 - self.leaky), W * self.leaky)
e, _ = tf.linalg.eig(tf.sparse.to_dense(W2))
rho = max(abs(e))
W2 = W2 * (self.spectral_radius / rho)
self.recurrent_kernel = tf.sparse.add(W2, I * (self.leaky - 1)) * (1 / self.leaky)
self.bias = tf.random.uniform(shape=(self.units,), minval=-1, maxval=1) * self.input_scaling
self.built = True
def call(self, inputs, states):
# computes the output of the cell given the input and previous state
prev_output = states[0]
input_part = tf.matmul(inputs, self.kernel)
if self.circular_law:
state_part = tf.matmul(prev_output, self.recurrent_kernel)
else:
state_part = tf.sparse.sparse_dense_matmul(prev_output, self.recurrent_kernel)
if self.circular_law:
output = tf.nn.tanh(input_part + self.bias + state_part)
else:
output = prev_output * (1 - self.leaky) + tf.nn.tanh(input_part + self.bias + state_part) * self.leaky
return output, [output]
def get_config(self):
base_config = super().get_config()
return {**base_config,
"units": self.units,
"spectral_radius": self.spectral_radius,
"leaky": self.leaky,
"input_scaling": self.input_scaling,
"state_size": self.state_size,
"circular_law": self.circular_law
}
    @classmethod
    def from_config(cls, config):
return cls(**config)
def sparse_recurrent_tensor(M, C=1):
# Generates an M x M matrix to be used as sparse recurrent kernel
# For each column only C elements are non-zero
# (i.e., each recurrent neuron takes input from C other recurrent neurons).
    # The non-zero elements are drawn uniformly at random; the kernel is later
    # rescaled to the target spectral radius, so their initial range is not critical.
dense_shape = (M, M) # the shape of the dense version of the matrix
indices = np.zeros((M * C, 2)) # indices of non-zero elements initialization
k = 0
for i in range(M):
# the indices of non-zero elements in the i-th column of the matrix
idx = np.random.choice(M, size=C, replace=False)
for j in range(C):
indices[k, :] = [idx[j], i]
k = k + 1
values = 2 * (2 * np.random.rand(M * C).astype('f') - 1)
W = (tf.sparse.reorder(tf.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)))
return W
def sparse_eye(M):
# Generates an M x M matrix to be used as sparse identity matrix for the
# re-scaling of the sparse recurrent kernel in presence of non-zero leakage.
# The neurons are connected according to a ring topology, where each neuron
# receives input only from one neuron and propagates its activation only to one other neuron.
# All the non-zero elements are set to 1
dense_shape = (M, M)
# gives the shape of a ring matrix:
indices = np.zeros((M, 2))
for i in range(M):
indices[i, :] = [i, i]
values = np.ones(shape=(M,)).astype('f')
W = (tf.sparse.reorder(tf.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)))
return W
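def _example_simple_esn():
    """Illustrative sketch (added; not part of the original module): wires a
    ReservoirCell into a SimpleESN with a dense readout. All shapes and
    hyper-parameters below are arbitrary assumptions."""
    cell = ReservoirCell(units=50, seed=42, circular_law=True)
    model = SimpleESN()
    model.stack_reservoir(keras.layers.RNN(cell))  # returns the last reservoir state
    model.add_readout(keras.layers.Dense(1))
    x = tf.random.normal((8, 20, 3))  # [batch, time, features]
    return model(x)  # -> shape (8, 1)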
|
py | b40eb20dd3720a145d86549c58c7ae53aaad219a | """
Ref:https://github.com/Diego999/pyGAT
@author Metro
@date 2022/1/6
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
GAT layers, ref https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
"""
:param in_features:
:param out_features:
:param dropout:
:param alpha:
:param concat:
"""
super(GraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
nn.init.xavier_normal_(self.W.data, gain=1.414) # ref Understanding Glorot, X. & Bengio, Y. (2010)
self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
nn.init.xavier_normal_(self.a, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, h, adj):
"""
:param h:
:param adj:
:return:
"""
Wh = torch.mm(h, self.W) # h.shape: (N, in_features), Wh.shape: (N, out_features)
e = self.attention_mechanism(Wh)
        # Mask out non-edges with a large negative value so the softmax assigns them ~zero weight.
        zero_vec = -9e15 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def attention_mechanism(self, Wh):
"""
:param Wh:
:return:
"""
Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])
Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])
# broadcast add
e = Wh1 + Wh2.T
return self.leakyrelu(e)
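def _example_gat_layer():
    """Illustrative sketch (added; not part of the original module): applies a
    single GraphAttentionLayer to a random 5-node graph with self-loops only."""
    layer = GraphAttentionLayer(in_features=8, out_features=4,
                                dropout=0.1, alpha=0.2, concat=True)
    h = torch.rand(5, 8)  # node features
    adj = torch.eye(5)    # adjacency (self-loops only)
    return layer(h, adj)  # -> shape (5, 4)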
|
py | b40eb230db044c54220a29cf6bbf1257429dd755 | import socket
HOST = '127.0.0.1'
PORT = 2053
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    data = input("send data to server:")
    s.sendall(bytearray(data, 'utf-8'))
    data = s.recv(1024)
    print("From Server Got back")
    print(data.decode())
|
py | b40eb31851c7d807a42ceb12db58f397f6d175ac | from unittest import TestCase
from sport_activities_features.training_loads import BanisterTRIMP
class BanisterTRIMPTestCase(TestCase):
def setUp(self):
self.__banister = BanisterTRIMP(33.44, 165.22)
def test_init_banister_trimp_works_fine(self):
self.assertEqual(33.44, self.__banister.duration)
self.assertEqual(165.22, self.__banister.avg_hr)
def test_calculate_trimp_works_fine(self):
val = self.__banister.calculate_TRIMP()
self.assertEqual(5524.9568, val)
|
py | b40eb516e614d597713922cd1c72a9314f03e2dc | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_volume_facts
short_description: Retrieve facts about the OneView Volumes.
description:
- Retrieve facts about the Volumes from OneView.
version_added: "2.5"
requirements:
- "python >= 2.7.9"
- "hpOneView >= 5.0.0"
author: "Mariana Kreisig (@marikrg)"
options:
name:
description:
- Volume name.
required: false
options:
description:
- "List with options to gather additional facts about Volume and related resources.
Options allowed:
- C(attachableVolumes)
- C(extraManagedVolumePaths)
- C(snapshots). For this option, you may provide a name."
required: false
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Volumes
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
- debug: var=storage_volumes
- name: Gather paginated, filtered and sorted facts about Volumes
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
params:
start: 0
count: 2
sort: 'name:descending'
filter: "provisionType='Thin'"
- debug: var=storage_volumes
- name: "Gather facts about all Volumes, the attachable volumes managed by the appliance and the extra managed
storage volume paths"
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
options:
- attachableVolumes # optional
- extraManagedVolumePaths # optional
- debug: var=storage_volumes
- debug: var=attachable_volumes
- debug: var=extra_managed_volume_paths
- name: Gather facts about a Volume by name with a list of all snapshots taken
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: "{{ volume_name }}"
options:
- snapshots # optional
- debug: var=storage_volumes
- debug: var=snapshots
- name: "Gather facts about a Volume with one specific snapshot taken"
oneview_volume_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: "{{ volume_name }}"
options:
- snapshots: # optional
name: "{{ snapshot_name }}"
- debug: var=storage_volumes
- debug: var=snapshots
'''
RETURN = '''
storage_volumes:
description: Has all the OneView facts about the Volumes.
returned: Always, but can be null.
type: dict
attachable_volumes:
description: Has all the facts about the attachable volumes managed by the appliance.
returned: When requested, but can be null.
type: dict
extra_managed_volume_paths:
description: Has all the facts about the extra managed storage volume paths from the appliance.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class VolumeFactsModule(OneViewModule):
def __init__(self):
argument_spec = dict(name=dict(type='str'), options=dict(type='list'), params=dict(type='dict'))
super(VolumeFactsModule, self).__init__(additional_arg_spec=argument_spec)
self.set_resource_object(self.oneview_client.volumes)
def execute_module(self):
ansible_facts = {}
networks = self.facts_params.pop('networks', None)
if self.module.params.get('name'):
ansible_facts['storage_volumes'] = self.resource_client.get_by('name', self.module.params['name'])
ansible_facts.update(self._gather_facts_about_one_volume(ansible_facts['storage_volumes']))
else:
ansible_facts['storage_volumes'] = self.resource_client.get_all(**self.facts_params)
if networks:
self.facts_params['networks'] = networks
ansible_facts.update(self._gather_facts_from_appliance())
return dict(changed=False, ansible_facts=ansible_facts)
def _gather_facts_from_appliance(self):
facts = {}
if self.options:
if self.options.get('extraManagedVolumePaths'):
extra_managed_volume_paths = self.resource_client.get_extra_managed_storage_volume_paths()
facts['extra_managed_volume_paths'] = extra_managed_volume_paths
if self.options.get('attachableVolumes'):
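                # The 'attachableVolumes' option may carry a dict of query parameters; any other
                # value falls back to an empty query, and 'connections' is passed on as a string.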
query_params = self.options['attachableVolumes']
query_params = {} if type(query_params) is not dict else query_params
if 'connections' in query_params:
query_params['connections'] = str(query_params['connections'])
attachable_volumes = self.resource_client.get_attachable_volumes(**query_params)
facts['attachable_volumes'] = attachable_volumes
return facts
def _gather_facts_about_one_volume(self, volumes):
facts = {}
if self.options.get('snapshots') and len(volumes) > 0:
options_snapshots = self.options['snapshots']
volume_uri = volumes[0]['uri']
if isinstance(options_snapshots, dict) and 'name' in options_snapshots:
facts['snapshots'] = self.resource_client.get_snapshot_by(volume_uri, 'name', options_snapshots['name'])
else:
facts['snapshots'] = self.resource_client.get_snapshots(volume_uri)
return facts
def main():
VolumeFactsModule().run()
if __name__ == '__main__':
main()
|
py | b40eb56770e4c1fc99da354b11ea59e6e79c0f02 | #!/usr/bin/env python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""Simple demonstration of solving the Poisson equation in 2D on a circular sector
domain of radius 1 using an unstructured mesh.
Note that Gmsh (http://geuz.org/gmsh/) is required for meshing.
Usage:
elliptic_unstructured.py ANGLE NUM_POINTS CLSCALE
Arguments:
ANGLE The angle of the circular sector.
NUM_POINTS The number of points that form the arc of the circular sector.
CLSCALE Mesh element size scaling factor.
Options:
-h, --help Show this message.
"""
from docopt import docopt
import numpy as np
from pymor.analyticalproblems.elliptic import StationaryProblem
from pymor.discretizers.cg import discretize_stationary_cg
from pymor.domaindescriptions.polygonal import CircularSectorDomain
from pymor.functions.basic import ConstantFunction, ExpressionFunction
def elliptic_gmsh_demo(args):
args['ANGLE'] = float(args['ANGLE'])
args['NUM_POINTS'] = int(args['NUM_POINTS'])
args['CLSCALE'] = float(args['CLSCALE'])
problem = StationaryProblem(
domain=CircularSectorDomain(args['ANGLE'], radius=1, num_points=args['NUM_POINTS']),
diffusion=ConstantFunction(1, dim_domain=2),
rhs=ConstantFunction(np.array(0.), dim_domain=2, name='rhs'),
dirichlet_data=ExpressionFunction('sin(polar(x)[1] * pi/angle)', 2, (),
{}, {'angle': args['ANGLE']}, name='dirichlet')
)
print('Discretize ...')
m, data = discretize_stationary_cg(analytical_problem=problem, diameter=args['CLSCALE'])
grid = data['grid']
print(grid)
print()
print('Solve ...')
U = m.solve()
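    # Reference: with zero right-hand side, the harmonic function r**(pi/angle) * sin(phi*pi/angle)
    # matches the Dirichlet data on the sector, so it is used below as the analytical solution.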
solution = ExpressionFunction('(lambda r, phi: r**(pi/angle) * sin(phi * pi/angle))(*polar(x))', 2, (),
{}, {'angle': args['ANGLE']})
U_ref = U.space.make_array(solution(grid.centers(2)))
m.visualize((U, U_ref, U-U_ref),
legend=('Solution', 'Analytical solution (circular boundary)', 'Error'),
separate_colorbars=True)
if __name__ == '__main__':
args = docopt(__doc__)
elliptic_gmsh_demo(args)
|
py | b40eb5d19f37a1b274e61aea0ea051e6cca20203 | """
Django settings for Hello project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from django.contrib.messages import constants as messages
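# The messages constants are typically imported to map message levels to CSS classes via
# MESSAGE_TAGS; no such mapping is defined in this settings file.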
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e+e#vq+j1^$m+ipl$m6n$&fk!k!ewtkoqe$4--!#tfvx&j5w#&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'home.apps.HomeConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Hello.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, "templates") ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Hello.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Added manually
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
] |
py | b40eb5ff849d49cefe482ecd129ce8d4e7d646f8 | from .base_dataset import BaseDataset
from .base_generation_dataset import BaseGenerationDataset
from .base_matting_dataset import BaseMattingDataset
from .base_sr_dataset import BaseSRDataset
from .builder import build_dataloader, build_dataset
from .comp1k_dataset import AdobeComp1kDataset
from .dataset_wrappers import RepeatDataset
from .generation_paired_dataset import GenerationPairedDataset
from .generation_unpaired_dataset import GenerationUnpairedDataset
from .img_inpainting_dataset import ImgInpaintingDataset
from .registry import DATASETS, PIPELINES
from .sr_annotation_dataset import SRAnnotationDataset
from .sr_facial_landmark_dataset import SRFacialLandmarkDataset
from .sr_folder_dataset import SRFolderDataset
from .sr_folder_gt_dataset import SRFolderGTDataset
from .sr_folder_multiple_gt_dataset import SRFolderMultipleGTDataset
from .sr_folder_ref_dataset import SRFolderRefDataset
from .sr_folder_video_dataset import SRFolderVideoDataset
from .sr_lmdb_dataset import SRLmdbDataset
from .sr_reds_dataset import SRREDSDataset
from .sr_reds_multiple_gt_dataset import SRREDSMultipleGTDataset
from .sr_test_multiple_gt_dataset import SRTestMultipleGTDataset
from .sr_vid4_dataset import SRVid4Dataset
from .sr_vimeo90k_dataset import SRVimeo90KDataset
from .sr_vimeo90k_multiple_gt_dataset import SRVimeo90KMultipleGTDataset
__all__ = [
'DATASETS', 'PIPELINES', 'build_dataset', 'build_dataloader',
'BaseDataset', 'BaseMattingDataset', 'ImgInpaintingDataset',
'AdobeComp1kDataset', 'SRLmdbDataset', 'SRFolderDataset',
'SRAnnotationDataset', 'BaseSRDataset', 'RepeatDataset', 'SRREDSDataset',
'SRVimeo90KDataset', 'BaseGenerationDataset', 'GenerationPairedDataset',
'GenerationUnpairedDataset', 'SRVid4Dataset', 'SRFolderGTDataset',
'SRREDSMultipleGTDataset', 'SRVimeo90KMultipleGTDataset',
'SRTestMultipleGTDataset', 'SRFolderRefDataset', 'SRFacialLandmarkDataset',
'SRFolderMultipleGTDataset', 'SRFolderVideoDataset'
]
|
py | b40eb62b96d84eea1fb8e6f52615553f4e133924 | from __future__ import absolute_import
from __future__ import print_function
from six.moves import range
__author__ = 'a_medelyan'
from rake_nlp import Rake
import test_data
import sys
# reading a directory with test documents
input_dir = sys.argv[1]
# number of top ranked keywords to evaluate
top = int(sys.argv[2])
test_set = test_data.read_data(input_dir)
# evaluating
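# The Rake constructor arguments are assumed to follow the rake_nlp tutorial signature:
# stop-words file, minimum characters per word, maximum words per phrase, and
# minimum keyword occurrences.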
rake_object = Rake("SmartStoplist.txt", 5, 3, 4)
total_precision = 0
total_recall = 0
for test_doc in test_set.values():
print('document', test_doc.name)
print(len(test_doc.keywords), 'manual keywords: ', test_doc.keywords)
keywords = rake_object.run(test_doc.text)[:top]
print('RAKE keywords:', keywords)
num_manual_keywords = len(test_doc.keywords)
correct = 0
for i in range(0,min(top, len(keywords))):
if keywords[i][0] in set(test_doc.keywords):
correct += 1
total_precision += correct/float(len(keywords))
total_recall += correct/float(num_manual_keywords)
print('correct:', correct, 'out of', num_manual_keywords)
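# Illustrative arithmetic with hypothetical numbers: avg_precision = 40.0 and avg_recall = 25.0
# give an F-measure of 2*40*25/(40+25) = 30.77 (the harmonic mean of precision and recall).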
avg_precision = round(total_precision*100/float(len(test_set)), 2)
avg_recall = round(total_recall*100/float(len(test_set)), 2)
avg_fmeasure = round(2*avg_precision*avg_recall/(avg_precision + avg_recall), 2)
print("Precision", avg_precision, "Recall", avg_recall, "F-Measure", avg_fmeasure) |