id | text | dataset_id
---|---|---
stringlengths 1–265 | stringlengths 6–5.19M | stringclasses 7 values
/BigJob2-0.54.post73.tar.gz/BigJob2-0.54.post73/util/archive.py |
import redis
import os, sys
import pandas as pd
from bigjob import logger
# Archive the following redis urls
REDIS_URLS=["redis://[email protected]:6379", "redis://localhost"]
REDIS_SERVER="localhost"
REDIS_SERVER_PORT=6379
REDIS_URL_SCHEME="redis://"
class RedisDownloader(object):
def __init__(self, redis_url):
'''
Constructor
'''
server_port=6379
self.redis_url=redis_url
self.password=None
start_index = self.redis_url.find(REDIS_URL_SCHEME)+len(REDIS_URL_SCHEME)
server_and_port = self.redis_url[start_index:]
password_end = server_and_port.find("@")
# parse out password
if password_end != -1:
self.password = server_and_port[:password_end]
start_index=password_end
server_and_port= server_and_port[(password_end+1):]
# port and hostname
if server_and_port.find(":")==-1:
server=server_and_port
server_port = REDIS_SERVER_PORT
else:
server = server_and_port.split(":")[0]
server_port = int(server_and_port.split(":")[1])
logger.debug("Connect to Redis: " + server + " Port: " + str(server_port))
        if self.password is None:
self.redis_client = redis.Redis(host=server, port=server_port, db=0)
else:
self.redis_client = redis.Redis(host=server, port=server_port, password=self.password, db=0)
self.pipe = self.redis_client.pipeline()
try:
self.redis_client.ping()
        except Exception:
logger.error("Please start Redis server!")
raise Exception("Please start Redis server!")
def get_pilots(self):
pilots = self.redis_client.keys("bigjob:bj-*")
for i in pilots:
if ":jobs:" not in i and i.count(":")==2:
#print i
self.pipe.hgetall(i)
response = self.pipe.execute()
        return response
def get_cus(self):
cus = self.redis_client.keys("*:jobs:*")
for i in cus:
self.pipe.hgetall(i)
response = self.pipe.execute()
        return response
if __name__ == '__main__':
if len(sys.argv)>1:
print "Get data from " + sys.argv[1]
rd = RedisDownloader(sys.argv[1])
pilots = rd.get_pilots()
cus = rd.get_cus()
print "Loaded Redis data: %d pilots, %d cus"%(len(pilots), len(cus))
else:
for i in REDIS_URLS:
rd = RedisDownloader(i)
pilots = rd.get_pilots()
cus = rd.get_cus() | PypiClean |
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/diofant/domains/expressiondomain.py |
from ..core.sympify import SympifyError, sympify
from .characteristiczero import CharacteristicZero
from .field import Field
from .simpledomain import SimpleDomain
class ExpressionDomain(CharacteristicZero, SimpleDomain, Field):
"""A class for arbitrary expressions."""
is_ExpressionDomain = True
class Expression:
"""A class for elements of :class:`ExpressionDomain`."""
def __init__(self, ex):
if not isinstance(ex, self.__class__):
self.ex = sympify(ex)
else:
self.ex = ex.ex
def __str__(self):
return f'EX({self.ex})'
def __hash__(self):
return hash((self.__class__.__name__, self.ex))
def as_expr(self):
return self.ex
@property
def numerator(self):
return self.__class__(self.ex.as_numer_denom()[0])
@property
def denominator(self):
return self.__class__(self.ex.as_numer_denom()[1])
def simplify(self, ex):
return self.__class__(ex.cancel().expand())
def __abs__(self):
return self.__class__(abs(self.ex))
def __neg__(self):
return self.__class__(-self.ex)
def _to_ex(self, other):
try:
return self.__class__(other)
except SympifyError:
return
def __add__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex + other.ex)
else:
return NotImplemented
def __radd__(self, other):
return self.simplify(self.__class__(other).ex + self.ex)
def __sub__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex - other.ex)
else:
return NotImplemented
def __rsub__(self, other):
return self.simplify(self.__class__(other).ex - self.ex)
def __mul__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex*other.ex)
else:
return NotImplemented
def __rmul__(self, other):
return self.simplify(self.__class__(other).ex*self.ex)
def __pow__(self, n):
n = self._to_ex(n)
if n is not None:
return self.simplify(self.ex**n.ex)
else:
return NotImplemented
def __truediv__(self, other):
other = self._to_ex(other)
if other is not None:
return self.simplify(self.ex/other.ex)
else:
return NotImplemented
def __rtruediv__(self, other):
return self.simplify(self.__class__(other).ex/self.ex)
def __eq__(self, other):
return self.ex == self.__class__(other).ex
def __bool__(self):
return self.ex != 0
def gcd(self, other):
from ..polys import gcd
return self.__class__(gcd(self.ex, self.__class__(other).ex))
def lcm(self, other):
from ..polys import lcm
return self.__class__(lcm(self.ex, self.__class__(other).ex))
dtype = Expression
zero = Expression(0)
one = Expression(1)
rep = 'EX'
def to_expr(self, element):
return element.as_expr()
def from_expr(self, expr):
return self.dtype(expr)
def _from_PythonIntegerRing(self, a, K0):
return self(K0.to_expr(a))
def _from_PythonRationalField(self, a, K0):
return self(K0.to_expr(a))
def _from_GMPYIntegerRing(self, a, K0):
return self(K0.to_expr(a))
def _from_GMPYRationalField(self, a, K0):
return self(K0.to_expr(a))
def _from_RealField(self, a, K0):
return self(K0.to_expr(a))
def _from_PolynomialRing(self, a, K0):
return self(K0.to_expr(a))
def _from_FractionField(self, a, K0):
return self(K0.to_expr(a))
def _from_AlgebraicField(self, a, K0):
return self(K0.to_expr(a))
@property
def ring(self):
return self # XXX: EX is not a ring but we don't have much choice here.
def is_normal(self, a):
return a.ex.as_coeff_mul()[0].is_nonnegative
def gcd(self, a, b):
return a.gcd(b)
def lcm(self, a, b):
return a.lcm(b)
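# Illustrative use of the EX singleton defined below (a sketch, assuming the
# usual diofant symbols are available; not part of the original module):
#
#   from diofant.abc import x
#   a = EX(x**2 - 1)
#   b = EX(x - 1)
#   a / b          # -> EX(x + 1), since __truediv__ runs simplify()
#   a.numerator    # -> EX(x**2 - 1)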
EX = ExpressionDomain() | PypiClean |
/Firefly%20III%20API%20Python%20Client-1.5.6.post2.tar.gz/Firefly III API Python Client-1.5.6.post2/firefly_iii_client/model/attachment_store.py | import re # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
def lazy_import():
from firefly_iii_client.model.attachable_type import AttachableType
globals()['AttachableType'] = AttachableType
class AttachmentStore(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute;
          for a top-level attribute var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute;
          for a top-level attribute var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'attachable_id': (str,), # noqa: E501
'attachable_type': (AttachableType,), # noqa: E501
'filename': (str,), # noqa: E501
'notes': (str, none_type,), # noqa: E501
'title': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'attachable_id': 'attachable_id', # noqa: E501
'attachable_type': 'attachable_type', # noqa: E501
'filename': 'filename', # noqa: E501
'notes': 'notes', # noqa: E501
'title': 'title', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, attachable_id, attachable_type, filename, *args, **kwargs): # noqa: E501
"""AttachmentStore - a model defined in OpenAPI
Args:
attachable_id (str): ID of the model this attachment is linked to.
attachable_type (AttachableType):
filename (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
notes (str, none_type): [optional] # noqa: E501
title (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.attachable_id = attachable_id
self.attachable_type = attachable_type
self.filename = filename
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, attachable_id, attachable_type, filename, *args, **kwargs): # noqa: E501
"""AttachmentStore - a model defined in OpenAPI
Args:
attachable_id (str): ID of the model this attachment is linked to.
attachable_type (AttachableType):
filename (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
notes (str, none_type): [optional] # noqa: E501
title (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.attachable_id = attachable_id
self.attachable_type = attachable_type
self.filename = filename
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/yaml/yaml/constructor.py | __all__ = [
'BaseConstructor',
'SafeConstructor',
'FullConstructor',
'UnsafeConstructor',
'Constructor',
'ConstructorError'
]
from .error import *
from .nodes import *
import collections.abc, datetime, base64, binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
pass
class BaseConstructor:
yaml_constructors = {}
yaml_multi_constructors = {}
def __init__(self):
self.constructed_objects = {}
self.recursive_objects = {}
self.state_generators = []
self.deep_construct = False
def check_data(self):
        # Check whether more documents are available.
return self.check_node()
def check_state_key(self, key):
"""Block special attributes/methods from being set in a newly created
object, to prevent user-controlled methods from being called during
deserialization"""
if self.get_state_keys_blacklist_regexp().match(key):
raise ConstructorError(None, None,
"blacklisted key '%s' in instance state found" % (key,), None)
def get_data(self):
# Construct and return the next document.
if self.check_node():
return self.construct_document(self.get_node())
def get_single_data(self):
# Ensure that the stream contains a single document and construct it.
node = self.get_single_node()
if node is not None:
return self.construct_document(node)
return None
def construct_document(self, node):
data = self.construct_object(node)
while self.state_generators:
state_generators = self.state_generators
self.state_generators = []
for generator in state_generators:
for dummy in generator:
pass
self.constructed_objects = {}
self.recursive_objects = {}
self.deep_construct = False
return data
def construct_object(self, node, deep=False):
if node in self.constructed_objects:
return self.constructed_objects[node]
if deep:
old_deep = self.deep_construct
self.deep_construct = True
if node in self.recursive_objects:
raise ConstructorError(None, None,
"found unconstructable recursive node", node.start_mark)
self.recursive_objects[node] = None
constructor = None
tag_suffix = None
if node.tag in self.yaml_constructors:
constructor = self.yaml_constructors[node.tag]
else:
for tag_prefix in self.yaml_multi_constructors:
if tag_prefix is not None and node.tag.startswith(tag_prefix):
tag_suffix = node.tag[len(tag_prefix):]
constructor = self.yaml_multi_constructors[tag_prefix]
break
else:
if None in self.yaml_multi_constructors:
tag_suffix = node.tag
constructor = self.yaml_multi_constructors[None]
elif None in self.yaml_constructors:
constructor = self.yaml_constructors[None]
elif isinstance(node, ScalarNode):
constructor = self.__class__.construct_scalar
elif isinstance(node, SequenceNode):
constructor = self.__class__.construct_sequence
elif isinstance(node, MappingNode):
constructor = self.__class__.construct_mapping
if tag_suffix is None:
data = constructor(self, node)
else:
data = constructor(self, tag_suffix, node)
if isinstance(data, types.GeneratorType):
generator = data
data = next(generator)
if self.deep_construct:
for dummy in generator:
pass
else:
self.state_generators.append(generator)
self.constructed_objects[node] = data
del self.recursive_objects[node]
if deep:
self.deep_construct = old_deep
return data
def construct_scalar(self, node):
if not isinstance(node, ScalarNode):
raise ConstructorError(None, None,
"expected a scalar node, but found %s" % node.id,
node.start_mark)
return node.value
def construct_sequence(self, node, deep=False):
if not isinstance(node, SequenceNode):
raise ConstructorError(None, None,
"expected a sequence node, but found %s" % node.id,
node.start_mark)
return [self.construct_object(child, deep=deep)
for child in node.value]
def construct_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = {}
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
if not isinstance(key, collections.abc.Hashable):
raise ConstructorError("while constructing a mapping", node.start_mark,
"found unhashable key", key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def construct_pairs(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
pairs = []
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
value = self.construct_object(value_node, deep=deep)
pairs.append((key, value))
return pairs
@classmethod
def add_constructor(cls, tag, constructor):
        if 'yaml_constructors' not in cls.__dict__:
cls.yaml_constructors = cls.yaml_constructors.copy()
cls.yaml_constructors[tag] = constructor
@classmethod
def add_multi_constructor(cls, tag_prefix, multi_constructor):
        if 'yaml_multi_constructors' not in cls.__dict__:
cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
cls.yaml_multi_constructors[tag_prefix] = multi_constructor
class SafeConstructor(BaseConstructor):
def construct_scalar(self, node):
if isinstance(node, MappingNode):
for key_node, value_node in node.value:
if key_node.tag == 'tag:yaml.org,2002:value':
return self.construct_scalar(value_node)
return super().construct_scalar(node)
def flatten_mapping(self, node):
merge = []
index = 0
while index < len(node.value):
key_node, value_node = node.value[index]
if key_node.tag == 'tag:yaml.org,2002:merge':
del node.value[index]
if isinstance(value_node, MappingNode):
self.flatten_mapping(value_node)
merge.extend(value_node.value)
elif isinstance(value_node, SequenceNode):
submerge = []
for subnode in value_node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing a mapping",
node.start_mark,
"expected a mapping for merging, but found %s"
% subnode.id, subnode.start_mark)
self.flatten_mapping(subnode)
submerge.append(subnode.value)
submerge.reverse()
for value in submerge:
merge.extend(value)
else:
raise ConstructorError("while constructing a mapping", node.start_mark,
"expected a mapping or list of mappings for merging, but found %s"
% value_node.id, value_node.start_mark)
elif key_node.tag == 'tag:yaml.org,2002:value':
key_node.tag = 'tag:yaml.org,2002:str'
index += 1
else:
index += 1
if merge:
node.value = merge + node.value
def construct_mapping(self, node, deep=False):
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return super().construct_mapping(node, deep=deep)
def construct_yaml_null(self, node):
self.construct_scalar(node)
return None
bool_values = {
'yes': True,
'no': False,
'true': True,
'false': False,
'on': True,
'off': False,
}
def construct_yaml_bool(self, node):
value = self.construct_scalar(node)
return self.bool_values[value.lower()]
def construct_yaml_int(self, node):
value = self.construct_scalar(node)
value = value.replace('_', '')
sign = +1
if value[0] == '-':
sign = -1
if value[0] in '+-':
value = value[1:]
if value == '0':
return 0
elif value.startswith('0b'):
return sign*int(value[2:], 2)
elif value.startswith('0x'):
return sign*int(value[2:], 16)
elif value[0] == '0':
return sign*int(value, 8)
elif ':' in value:
digits = [int(part) for part in value.split(':')]
digits.reverse()
base = 1
value = 0
for digit in digits:
value += digit*base
base *= 60
return sign*value
else:
return sign*int(value)
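    # Worked example for the sexagesimal branch above: the scalar "1:30:00"
    # yields digits [0, 30, 1] after the reverse, so the result is
    # 0*1 + 30*60 + 1*3600 = 5400.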
inf_value = 1e300
while inf_value != inf_value*inf_value:
inf_value *= inf_value
nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
def construct_yaml_float(self, node):
value = self.construct_scalar(node)
value = value.replace('_', '').lower()
sign = +1
if value[0] == '-':
sign = -1
if value[0] in '+-':
value = value[1:]
if value == '.inf':
return sign*self.inf_value
elif value == '.nan':
return self.nan_value
elif ':' in value:
digits = [float(part) for part in value.split(':')]
digits.reverse()
base = 1
value = 0.0
for digit in digits:
value += digit*base
base *= 60
return sign*value
else:
return sign*float(value)
def construct_yaml_binary(self, node):
try:
value = self.construct_scalar(node).encode('ascii')
except UnicodeEncodeError as exc:
raise ConstructorError(None, None,
"failed to convert base64 data into ascii: %s" % exc,
node.start_mark)
try:
if hasattr(base64, 'decodebytes'):
return base64.decodebytes(value)
else:
return base64.decodestring(value)
except binascii.Error as exc:
raise ConstructorError(None, None,
"failed to decode base64 data: %s" % exc, node.start_mark)
timestamp_regexp = re.compile(
r'''^(?P<year>[0-9][0-9][0-9][0-9])
-(?P<month>[0-9][0-9]?)
-(?P<day>[0-9][0-9]?)
(?:(?:[Tt]|[ \t]+)
(?P<hour>[0-9][0-9]?)
:(?P<minute>[0-9][0-9])
:(?P<second>[0-9][0-9])
(?:\.(?P<fraction>[0-9]*))?
(?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
(?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
def construct_yaml_timestamp(self, node):
value = self.construct_scalar(node)
match = self.timestamp_regexp.match(node.value)
values = match.groupdict()
year = int(values['year'])
month = int(values['month'])
day = int(values['day'])
if not values['hour']:
return datetime.date(year, month, day)
hour = int(values['hour'])
minute = int(values['minute'])
second = int(values['second'])
fraction = 0
tzinfo = None
if values['fraction']:
fraction = values['fraction'][:6]
while len(fraction) < 6:
fraction += '0'
fraction = int(fraction)
if values['tz_sign']:
tz_hour = int(values['tz_hour'])
tz_minute = int(values['tz_minute'] or 0)
delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
if values['tz_sign'] == '-':
delta = -delta
tzinfo = datetime.timezone(delta)
elif values['tz']:
tzinfo = datetime.timezone.utc
return datetime.datetime(year, month, day, hour, minute, second, fraction,
tzinfo=tzinfo)
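    # Example: the scalar "2001-12-14 21:59:43.10 -5" matches the pattern above
    # and constructs datetime.datetime(2001, 12, 14, 21, 59, 43, 100000,
    # tzinfo=datetime.timezone(datetime.timedelta(hours=-5))).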
def construct_yaml_omap(self, node):
# Note: we do not check for duplicate keys, because it's too
# CPU-expensive.
omap = []
yield omap
if not isinstance(node, SequenceNode):
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark)
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a mapping of length 1, but found %s" % subnode.id,
subnode.start_mark)
if len(subnode.value) != 1:
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
value = self.construct_object(value_node)
omap.append((key, value))
def construct_yaml_pairs(self, node):
# Note: the same code as `construct_yaml_omap`.
pairs = []
yield pairs
if not isinstance(node, SequenceNode):
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark)
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a mapping of length 1, but found %s" % subnode.id,
subnode.start_mark)
if len(subnode.value) != 1:
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
value = self.construct_object(value_node)
pairs.append((key, value))
def construct_yaml_set(self, node):
data = set()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_yaml_str(self, node):
return self.construct_scalar(node)
def construct_yaml_seq(self, node):
data = []
yield data
data.extend(self.construct_sequence(node))
def construct_yaml_map(self, node):
data = {}
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_yaml_object(self, node, cls):
data = cls.__new__(cls)
yield data
if hasattr(data, '__setstate__'):
state = self.construct_mapping(node, deep=True)
data.__setstate__(state)
else:
state = self.construct_mapping(node)
data.__dict__.update(state)
def construct_undefined(self, node):
raise ConstructorError(None, None,
"could not determine a constructor for the tag %r" % node.tag,
node.start_mark)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:null',
SafeConstructor.construct_yaml_null)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:bool',
SafeConstructor.construct_yaml_bool)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:int',
SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:float',
SafeConstructor.construct_yaml_float)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:binary',
SafeConstructor.construct_yaml_binary)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:timestamp',
SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:omap',
SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:pairs',
SafeConstructor.construct_yaml_pairs)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:set',
SafeConstructor.construct_yaml_set)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:str',
SafeConstructor.construct_yaml_str)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:seq',
SafeConstructor.construct_yaml_seq)
SafeConstructor.add_constructor(
'tag:yaml.org,2002:map',
SafeConstructor.construct_yaml_map)
SafeConstructor.add_constructor(None,
SafeConstructor.construct_undefined)
class FullConstructor(SafeConstructor):
    # 'extend' is blacklisted because it is used by
    # construct_python_object_apply to add `listitems` to a newly generated
    # Python instance
def get_state_keys_blacklist(self):
return ['^extend$', '^__.*__$']
def get_state_keys_blacklist_regexp(self):
if not hasattr(self, 'state_keys_blacklist_regexp'):
self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
return self.state_keys_blacklist_regexp
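    # Illustrative effect of the blacklist above: state keys matching
    # '^extend$' or '^__.*__$' (e.g. a hypothetical '__reduce__' entry in a
    # !!python/object state mapping) make check_state_key() raise a
    # ConstructorError instead of being written onto the new instance.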
def construct_python_str(self, node):
return self.construct_scalar(node)
def construct_python_unicode(self, node):
return self.construct_scalar(node)
def construct_python_bytes(self, node):
try:
value = self.construct_scalar(node).encode('ascii')
except UnicodeEncodeError as exc:
raise ConstructorError(None, None,
"failed to convert base64 data into ascii: %s" % exc,
node.start_mark)
try:
if hasattr(base64, 'decodebytes'):
return base64.decodebytes(value)
else:
return base64.decodestring(value)
except binascii.Error as exc:
raise ConstructorError(None, None,
"failed to decode base64 data: %s" % exc, node.start_mark)
def construct_python_long(self, node):
return self.construct_yaml_int(node)
def construct_python_complex(self, node):
return complex(self.construct_scalar(node))
def construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def find_python_module(self, name, mark, unsafe=False):
if not name:
raise ConstructorError("while constructing a Python module", mark,
"expected non-empty name appended to the tag", mark)
if unsafe:
try:
__import__(name)
except ImportError as exc:
raise ConstructorError("while constructing a Python module", mark,
"cannot find module %r (%s)" % (name, exc), mark)
if name not in sys.modules:
raise ConstructorError("while constructing a Python module", mark,
"module %r is not imported" % name, mark)
return sys.modules[name]
def find_python_name(self, name, mark, unsafe=False):
if not name:
raise ConstructorError("while constructing a Python object", mark,
"expected non-empty name appended to the tag", mark)
if '.' in name:
module_name, object_name = name.rsplit('.', 1)
else:
module_name = 'builtins'
object_name = name
if unsafe:
try:
__import__(module_name)
except ImportError as exc:
raise ConstructorError("while constructing a Python object", mark,
"cannot find module %r (%s)" % (module_name, exc), mark)
if module_name not in sys.modules:
raise ConstructorError("while constructing a Python object", mark,
"module %r is not imported" % module_name, mark)
module = sys.modules[module_name]
if not hasattr(module, object_name):
raise ConstructorError("while constructing a Python object", mark,
"cannot find %r in the module %r"
% (object_name, module.__name__), mark)
return getattr(module, object_name)
def construct_python_name(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python name", node.start_mark,
"expected the empty value, but found %r" % value, node.start_mark)
return self.find_python_name(suffix, node.start_mark)
def construct_python_module(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python module", node.start_mark,
"expected the empty value, but found %r" % value, node.start_mark)
return self.find_python_module(suffix, node.start_mark)
def make_python_instance(self, suffix, node,
args=None, kwds=None, newobj=False, unsafe=False):
if not args:
args = []
if not kwds:
kwds = {}
cls = self.find_python_name(suffix, node.start_mark)
if not (unsafe or isinstance(cls, type)):
raise ConstructorError("while constructing a Python instance", node.start_mark,
"expected a class, but found %r" % type(cls),
node.start_mark)
if newobj and isinstance(cls, type):
return cls.__new__(cls, *args, **kwds)
else:
return cls(*args, **kwds)
def set_python_instance_state(self, instance, state, unsafe=False):
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
else:
slotstate = {}
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if hasattr(instance, '__dict__'):
if not unsafe and state:
for key in state.keys():
self.check_state_key(key)
instance.__dict__.update(state)
elif state:
slotstate.update(state)
for key, value in slotstate.items():
if not unsafe:
self.check_state_key(key)
setattr(instance, key, value)
def construct_python_object(self, suffix, node):
# Format:
# !!python/object:module.name { ... state ... }
instance = self.make_python_instance(suffix, node, newobj=True)
yield instance
deep = hasattr(instance, '__setstate__')
state = self.construct_mapping(node, deep=deep)
self.set_python_instance_state(instance, state)
def construct_python_object_apply(self, suffix, node, newobj=False):
# Format:
# !!python/object/apply # (or !!python/object/new)
# args: [ ... arguments ... ]
# kwds: { ... keywords ... }
# state: ... state ...
# listitems: [ ... listitems ... ]
# dictitems: { ... dictitems ... }
# or short format:
# !!python/object/apply [ ... arguments ... ]
# The difference between !!python/object/apply and !!python/object/new
# is how an object is created, check make_python_instance for details.
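        # Illustrative document for the short format (hypothetical values):
        #
        #   !!python/object/apply:collections.OrderedDict
        #   - [[spam, 1], [eggs, 2]]
        #
        # which constructs OrderedDict([['spam', 1], ['eggs', 2]]).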
if isinstance(node, SequenceNode):
args = self.construct_sequence(node, deep=True)
kwds = {}
state = {}
listitems = []
dictitems = {}
else:
value = self.construct_mapping(node, deep=True)
args = value.get('args', [])
kwds = value.get('kwds', {})
state = value.get('state', {})
listitems = value.get('listitems', [])
dictitems = value.get('dictitems', {})
instance = self.make_python_instance(suffix, node, args, kwds, newobj)
if state:
self.set_python_instance_state(instance, state)
if listitems:
instance.extend(listitems)
if dictitems:
for key in dictitems:
instance[key] = dictitems[key]
return instance
def construct_python_object_new(self, suffix, node):
return self.construct_python_object_apply(suffix, node, newobj=True)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/none',
FullConstructor.construct_yaml_null)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/bool',
FullConstructor.construct_yaml_bool)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/str',
FullConstructor.construct_python_str)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/unicode',
FullConstructor.construct_python_unicode)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/bytes',
FullConstructor.construct_python_bytes)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/int',
FullConstructor.construct_yaml_int)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/long',
FullConstructor.construct_python_long)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/float',
FullConstructor.construct_yaml_float)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/complex',
FullConstructor.construct_python_complex)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/list',
FullConstructor.construct_yaml_seq)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/tuple',
FullConstructor.construct_python_tuple)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/dict',
FullConstructor.construct_yaml_map)
FullConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/name:',
FullConstructor.construct_python_name)
class UnsafeConstructor(FullConstructor):
def find_python_module(self, name, mark):
return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True)
def find_python_name(self, name, mark):
return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True)
def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
return super(UnsafeConstructor, self).make_python_instance(
suffix, node, args, kwds, newobj, unsafe=True)
def set_python_instance_state(self, instance, state):
return super(UnsafeConstructor, self).set_python_instance_state(
instance, state, unsafe=True)
UnsafeConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/module:',
UnsafeConstructor.construct_python_module)
UnsafeConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/object:',
UnsafeConstructor.construct_python_object)
UnsafeConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/object/new:',
UnsafeConstructor.construct_python_object_new)
UnsafeConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/object/apply:',
UnsafeConstructor.construct_python_object_apply)
# Constructor is the same as UnsafeConstructor. Need to leave this in place in case
# people have extended it directly.
class Constructor(UnsafeConstructor):
pass | PypiClean |
/HolmesIV-2021.9.8a1.tar.gz/HolmesIV-2021.9.8a1/mycroft/client/enclosure/base.py | import asyncio
from collections import namedtuple
from threading import Lock
from mycroft.configuration import Configuration
from mycroft.messagebus.client import MessageBusClient
from mycroft.util import create_daemon, start_message_bus_client
from mycroft.util.log import LOG
import json
import tornado.web as web
from tornado import ioloop
from tornado.websocket import WebSocketHandler
from mycroft.messagebus.message import Message
Namespace = namedtuple('Namespace', ['name', 'pages'])
write_lock = Lock()
namespace_lock = Lock()
RESERVED_KEYS = ['__from', '__idle']
def _get_page_data(message):
""" Extract page related data from a message.
Args:
message: messagebus message object
Returns:
tuple (page, namespace, index)
Raises:
ValueError if value is missing.
"""
data = message.data
# Note: 'page' can be either a string or a list of strings
if 'page' not in data:
raise ValueError("Page missing in data")
if 'index' in data:
index = data['index']
else:
index = 0
page = data.get("page", "")
namespace = data.get("__from", "")
return page, namespace, index
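# Example: a message whose data is
#   {"page": ["current.qml"], "__from": "weather.skill"}
# yields (["current.qml"], "weather.skill", 0) (names are made up).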
class Enclosure:
def __init__(self):
# Load full config
config = Configuration.get()
self.lang = config['lang']
self.config = config.get("enclosure")
self.global_config = config
# Create Message Bus Client
self.bus = MessageBusClient()
self.gui = create_gui_service(self, config['gui_websocket'])
# This datastore holds the data associated with the GUI provider. Data
# is stored in Namespaces, so you can have:
# self.datastore["namespace"]["name"] = value
# Typically the namespace is a meaningless identifier, but there is a
# special "SYSTEM" namespace.
self.datastore = {}
# self.loaded is a list, each element consists of a namespace named
# tuple.
# The namespace namedtuple has the properties "name" and "pages"
# The name contains the namespace name as a string and pages is a
# mutable list of loaded pages.
#
# [Namespace name, [List of loaded qml pages]]
# [
# ["SKILL_NAME", ["page1.qml, "page2.qml", ... , "pageN.qml"]
# [...]
# ]
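        # Illustrative state after two skills have shown pages (names and
        # values are made up):
        #
        #   self.loaded = [
        #       Namespace("weather.skill", ["current.qml", "forecast.qml"]),
        #       Namespace("timer.skill", ["timer.qml"]),
        #   ]
        #   self.datastore = {"weather.skill": {"temperature": 21}}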
self.loaded = [] # list of lists in order.
self.explicit_move = True # Set to true to send reorder commands
# Listen for new GUI clients to announce themselves on the main bus
self.active_namespaces = []
self.bus.on("mycroft.gui.connected", self.on_gui_client_connected)
self.register_gui_handlers()
# First send any data:
self.bus.on("gui.value.set", self.on_gui_set_value)
self.bus.on("gui.page.show", self.on_gui_show_page)
self.bus.on("gui.page.delete", self.on_gui_delete_page)
self.bus.on("gui.clear.namespace", self.on_gui_delete_namespace)
self.bus.on("gui.event.send", self.on_gui_send_event)
self.bus.on("gui.status.request", self.handle_gui_status_request)
def run(self):
"""Start the Enclosure after it has been constructed."""
# Allow exceptions to be raised to the Enclosure Service
# if they may cause the Service to fail.
start_message_bus_client("ENCLOSURE", self.bus)
def stop(self):
"""Perform any enclosure shutdown processes."""
pass
######################################################################
# GUI client API
@property
def gui_connected(self):
"""Returns True if at least 1 gui is connected, else False"""
return len(GUIWebsocketHandler.clients) > 0
def handle_gui_status_request(self, message):
"""Reply to gui status request, allows querying if a gui is
connected using the message bus"""
self.bus.emit(message.reply("gui.status.request.response",
{"connected": self.gui_connected}))
def send(self, msg_dict):
""" Send to all registered GUIs. """
for connection in GUIWebsocketHandler.clients:
try:
connection.send(msg_dict)
except Exception as e:
LOG.exception(repr(e))
def on_gui_send_event(self, message):
""" Send an event to the GUIs. """
try:
data = {'type': 'mycroft.events.triggered',
'namespace': message.data.get('__from'),
'event_name': message.data.get('event_name'),
'params': message.data.get('params')}
self.send(data)
except Exception as e:
LOG.error('Could not send event ({})'.format(repr(e)))
def on_gui_set_value(self, message):
data = message.data
namespace = data.get("__from", "")
# Pass these values on to the GUI renderers
for key in data:
if key not in RESERVED_KEYS:
try:
self.set(namespace, key, data[key])
except Exception as e:
LOG.exception(repr(e))
def set(self, namespace, name, value):
""" Perform the send of the values to the connected GUIs. """
if namespace not in self.datastore:
self.datastore[namespace] = {}
if self.datastore[namespace].get(name) != value:
self.datastore[namespace][name] = value
# If the namespace is loaded send data to GUI
if namespace in [l.name for l in self.loaded]:
msg = {"type": "mycroft.session.set",
"namespace": namespace,
"data": {name: value}}
self.send(msg)
def on_gui_delete_page(self, message):
""" Bus handler for removing pages. """
page, namespace, _ = _get_page_data(message)
try:
with namespace_lock:
self.remove_pages(namespace, page)
except Exception as e:
LOG.exception(repr(e))
def on_gui_delete_namespace(self, message):
""" Bus handler for removing namespace. """
try:
namespace = message.data['__from']
with namespace_lock:
self.remove_namespace(namespace)
except Exception as e:
LOG.exception(repr(e))
def on_gui_show_page(self, message):
try:
page, namespace, index = _get_page_data(message)
# Pass the request to the GUI(s) to pull up a page template
with namespace_lock:
self.show(namespace, page, index)
except Exception as e:
LOG.exception(repr(e))
def __find_namespace(self, namespace):
for i, skill in enumerate(self.loaded):
if skill[0] == namespace:
return i
return None
def __insert_pages(self, namespace, pages):
""" Insert pages into the namespace
Args:
namespace (str): Namespace to add to
pages (list): Pages (str) to insert
"""
LOG.debug("Inserting new pages")
if not isinstance(pages, list):
raise ValueError('Argument must be list of pages')
self.send({"type": "mycroft.gui.list.insert",
"namespace": namespace,
"position": len(self.loaded[0].pages),
"data": [{"url": p} for p in pages]
})
        # Insert the pages into the local representation as well.
updated = Namespace(self.loaded[0].name, self.loaded[0].pages + pages)
self.loaded[0] = updated
def __remove_page(self, namespace, pos):
""" Delete page.
Args:
namespace (str): Namespace to remove from
pos (int): Page position to remove
"""
LOG.debug("Deleting {} from {}".format(pos, namespace))
self.send({"type": "mycroft.gui.list.remove",
"namespace": namespace,
"position": pos,
"items_number": 1
})
        # Remove the page from the local representation as well.
self.loaded[0].pages.pop(pos)
        # If the active namespace has no pages left, return the display to idle
if (pos == 0 and len(self.loaded[0].pages) == 0):
self.bus.emit(Message("mycroft.device.show.idle"))
def __insert_new_namespace(self, namespace, pages):
""" Insert new namespace and pages.
This first sends a message adding a new namespace at the
highest priority (position 0 in the namespace stack)
Args:
namespace (str): The skill namespace to create
pages (str): Pages to insert (name matches QML)
"""
LOG.debug("Inserting new namespace")
self.send({"type": "mycroft.session.list.insert",
"namespace": "mycroft.system.active_skills",
"position": 0,
"data": [{"skill_id": namespace}]
})
# Load any already stored Data
data = self.datastore.get(namespace, {})
for key in data:
msg = {"type": "mycroft.session.set",
"namespace": namespace,
"data": {key: data[key]}}
self.send(msg)
LOG.debug("Inserting new page")
self.send({"type": "mycroft.gui.list.insert",
"namespace": namespace,
"position": 0,
"data": [{"url": p} for p in pages]
})
# Make sure the local copy is updated
self.loaded.insert(0, Namespace(namespace, pages))
def __move_namespace(self, from_pos, to_pos):
""" Move an existing namespace to a new position in the stack.
Args:
from_pos (int): Position in the stack to move from
to_pos (int): Position to move to
"""
LOG.debug("Activating existing namespace")
# Seems like the namespace is moved to the top automatically when
# a page change is done. Deactivating this for now.
if self.explicit_move:
LOG.debug("move {} to {}".format(from_pos, to_pos))
self.send({"type": "mycroft.session.list.move",
"namespace": "mycroft.system.active_skills",
"from": from_pos, "to": to_pos,
"items_number": 1})
# Move the local representation of the skill from current
# position to position 0.
self.loaded.insert(to_pos, self.loaded.pop(from_pos))
def __switch_page(self, namespace, pages):
""" Switch page to an already loaded page.
Args:
pages (list): pages (str) to switch to
namespace (str): skill namespace
"""
try:
num = self.loaded[0].pages.index(pages[0])
except Exception as e:
LOG.exception(repr(e))
num = 0
LOG.debug('Switching to already loaded page at '
'index {} in namespace {}'.format(num, namespace))
self.send({"type": "mycroft.events.triggered",
"namespace": namespace,
"event_name": "page_gained_focus",
"data": {"number": num}})
def show(self, namespace, page, index):
""" Show a page and load it as needed.
Args:
page (str or list): page(s) to show
namespace (str): skill namespace
index (int): ??? TODO: Unused in code ???
TODO: - Update sync to match.
- Separate into multiple functions/methods
"""
LOG.debug("GUIConnection activating: " + namespace)
pages = page if isinstance(page, list) else [page]
# find namespace among loaded namespaces
try:
index = self.__find_namespace(namespace)
if index is None:
# This namespace doesn't exist, insert them first so they're
# shown.
self.__insert_new_namespace(namespace, pages)
return
else: # Namespace exists
if index > 0:
# Namespace is inactive, activate it by moving it to
# position 0
self.__move_namespace(index, 0)
# Find if any new pages needs to be inserted
new_pages = [p for p in pages if p not in self.loaded[0].pages]
if new_pages:
self.__insert_pages(namespace, new_pages)
else:
# No new pages, just switch
self.__switch_page(namespace, pages)
except Exception as e:
LOG.exception(repr(e))
def remove_namespace(self, namespace):
""" Remove namespace.
Args:
namespace (str): namespace to remove
"""
index = self.__find_namespace(namespace)
if index is None:
return
else:
LOG.debug("Removing namespace {} at {}".format(namespace, index))
self.send({"type": "mycroft.session.list.remove",
"namespace": "mycroft.system.active_skills",
"position": index,
"items_number": 1
})
# Remove namespace from loaded namespaces
self.loaded.pop(index)
def remove_pages(self, namespace, pages):
""" Remove the listed pages from the provided namespace.
Args:
namespace (str): The namespace to modify
pages (list): List of page names (str) to delete
"""
try:
index = self.__find_namespace(namespace)
if index is None:
return
else:
                # Remove any pages that don't exist in the namespace
pages = [p for p in pages if p in self.loaded[index].pages]
# Make sure to remove pages from the back
indexes = [self.loaded[index].pages.index(p) for p in pages]
indexes = sorted(indexes)
indexes.reverse()
for page_index in indexes:
self.__remove_page(namespace, page_index)
except Exception as e:
LOG.exception(repr(e))
######################################################################
# GUI client socket
#
# The basic mechanism is:
# 1) GUI client announces itself on the main messagebus
# 2) Mycroft prepares a port for a socket connection to this GUI
# 3) The port is announced over the messagebus
# 4) The GUI connects on the socket
# 5) Connection persists for graphical interaction indefinitely
#
# If the connection is lost, it must be renegotiated and restarted.
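    # Illustrative exchange (gui_id and port values are made up):
    #   GUI -> bus:  Message("mycroft.gui.connected", {"gui_id": "abc123"})
    #   bus -> GUI:  Message("mycroft.gui.port", {"port": 18181,
    #                                             "gui_id": "abc123"})
    #   The GUI then opens a websocket on that port and is handled by
    #   GUIWebsocketHandler below.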
def on_gui_client_connected(self, message):
# GUI has announced presence
LOG.info('GUI HAS ANNOUNCED!')
port = self.global_config["gui_websocket"]["base_port"]
LOG.debug("on_gui_client_connected")
gui_id = message.data.get("gui_id")
LOG.debug("Heard announcement from gui_id: {}".format(gui_id))
# Announce connection, the GUI should connect on it soon
self.bus.emit(Message("mycroft.gui.port",
{"port": port,
"gui_id": gui_id}))
def register_gui_handlers(self):
# TODO: Register handlers for standard (Mark 1) events
# self.bus.on('enclosure.eyes.on', self.on)
# self.bus.on('enclosure.eyes.off', self.off)
# self.bus.on('enclosure.eyes.blink', self.blink)
# self.bus.on('enclosure.eyes.narrow', self.narrow)
# self.bus.on('enclosure.eyes.look', self.look)
# self.bus.on('enclosure.eyes.color', self.color)
# self.bus.on('enclosure.eyes.level', self.brightness)
# self.bus.on('enclosure.eyes.volume', self.volume)
# self.bus.on('enclosure.eyes.spin', self.spin)
# self.bus.on('enclosure.eyes.timedspin', self.timed_spin)
# self.bus.on('enclosure.eyes.reset', self.reset)
# self.bus.on('enclosure.eyes.setpixel', self.set_pixel)
# self.bus.on('enclosure.eyes.fill', self.fill)
# self.bus.on('enclosure.mouth.reset', self.reset)
# self.bus.on('enclosure.mouth.talk', self.talk)
# self.bus.on('enclosure.mouth.think', self.think)
# self.bus.on('enclosure.mouth.listen', self.listen)
# self.bus.on('enclosure.mouth.smile', self.smile)
# self.bus.on('enclosure.mouth.viseme', self.viseme)
# self.bus.on('enclosure.mouth.text', self.text)
# self.bus.on('enclosure.mouth.display', self.display)
# self.bus.on('enclosure.mouth.display_image', self.display_image)
# self.bus.on('enclosure.weather.display', self.display_weather)
# self.bus.on('recognizer_loop:record_begin', self.mouth.listen)
# self.bus.on('recognizer_loop:record_end', self.mouth.reset)
# self.bus.on('recognizer_loop:audio_output_start', self.mouth.talk)
# self.bus.on('recognizer_loop:audio_output_end', self.mouth.reset)
pass
##########################################################################
# GUIConnection
##########################################################################
gui_app_settings = {
'debug': True
}
def create_gui_service(enclosure, config):
import tornado.options
LOG.info('Starting message bus for GUI...')
# Disable all tornado logging so mycroft loglevel isn't overridden
tornado.options.parse_command_line(['--logging=None'])
routes = [(config['route'], GUIWebsocketHandler)]
application = web.Application(routes, debug=True)
application.enclosure = enclosure
application.listen(config['base_port'], config['host'])
create_daemon(ioloop.IOLoop.instance().start)
LOG.info('GUI Message bus started!')
return application
class GUIWebsocketHandler(WebSocketHandler):
"""The socket pipeline between the GUI and Mycroft."""
clients = []
def open(self):
GUIWebsocketHandler.clients.append(self)
LOG.info('New Connection opened!')
self.synchronize()
def on_close(self, *args):
LOG.info('Closing {}'.format(id(self)))
GUIWebsocketHandler.clients.remove(self)
def synchronize(self):
""" Upload namespaces, pages and data to the last connected. """
namespace_pos = 0
enclosure = self.application.enclosure
for namespace, pages in enclosure.loaded:
LOG.info('Sync {}'.format(namespace))
# Insert namespace
self.send({"type": "mycroft.session.list.insert",
"namespace": "mycroft.system.active_skills",
"position": namespace_pos,
"data": [{"skill_id": namespace}]
})
# Insert pages
self.send({"type": "mycroft.gui.list.insert",
"namespace": namespace,
"position": 0,
"data": [{"url": p} for p in pages]
})
# Insert data
data = enclosure.datastore.get(namespace, {})
for key in data:
self.send({"type": "mycroft.session.set",
"namespace": namespace,
"data": {key: data[key]}
})
namespace_pos += 1
def on_message(self, *args):
if len(args) == 1:
message = args[0]
else:
message = args[1]
LOG.info("Received: {}".format(message))
msg = json.loads(message)
if (msg.get('type') == "mycroft.events.triggered" and
(msg.get('event_name') == 'page_gained_focus' or
msg.get('event_name') == 'system.gui.user.interaction')):
# System event, a page was changed
msg_type = 'gui.page_interaction'
msg_data = {'namespace': msg['namespace'],
'page_number': msg['parameters'].get('number'),
'skill_id': msg['parameters'].get('skillId')}
elif msg.get('type') == "mycroft.events.triggered":
# A normal event was triggered
msg_type = '{}.{}'.format(msg['namespace'], msg['event_name'])
msg_data = msg['parameters']
elif msg.get('type') == 'mycroft.session.set':
# A value was changed send it back to the skill
msg_type = '{}.{}'.format(msg['namespace'], 'set')
msg_data = msg['data']
message = Message(msg_type, msg_data)
LOG.info('Forwarding to bus...')
self.application.enclosure.bus.emit(message)
LOG.info('Done!')
def write_message(self, *arg, **kwarg):
"""Wraps WebSocketHandler.write_message() with a lock. """
try:
asyncio.get_event_loop()
except RuntimeError:
asyncio.set_event_loop(asyncio.new_event_loop())
with write_lock:
super().write_message(*arg, **kwarg)
def send(self, data):
"""Send the given data across the socket as JSON
Args:
data (dict): Data to transmit
"""
s = json.dumps(data)
LOG.info('Sending {}'.format(s))
self.write_message(s)
def check_origin(self, origin):
"""Disable origin check to make js connections work."""
return True | PypiClean |
/GitHubSyncPlugin-0.1.4.tar.gz/GitHubSyncPlugin-0.1.4/githubsync/api.py | import fcntl, json, os
from trac.admin import AdminCommandManager
from trac.core import *
from trac.config import ListOption
from trac.web import IRequestHandler, IRequestFilter, RequestDone, HTTPNotFound
from trac.versioncontrol import RepositoryManager
import iptools
class GitHubSync(Component):
"""This component syncs GitHub repository with local repository used by Trac."""
post_request_ips = ListOption('git', 'post_request_ips', ['204.232.175.64/27', '192.30.252.0/22'],
"""List of IPs (in CIDR format) POST request is accepted from.""")
implements(IRequestHandler, IRequestFilter)
# IRequestFilter methods
def pre_process_request(self, req, handler):
"""Called after initial handler selection, and can be used to change
the selected handler or redirect request."""
if self.match_request(req):
# We disable CSRF protection here and force ourselves as a handler
req.form_token = None
return self
return handler
    def post_process_request(self, req, template, data, content_type):
"""Do any post-processing the request might need; typically adding
values to the template `data` dictionary, or changing template or
mime type."""
return (template, data, content_type)
# IRequestHandler methods
def match_request(self, req):
"""Return whether the handler wants to process the given request."""
if req.method != 'POST' or req.path_info != '/githubsync':
return False
self.env.log.debug("GitHubSync: Request from '%s'", req.remote_addr)
for allowed_ips in self.post_request_ips:
if req.remote_addr in iptools.IpRangeList(allowed_ips):
self.env.log.debug("GitHubSync: Request from '%s' allowed, in '%s'", req.remote_addr, allowed_ips)
return True
self.env.log.debug("GitHubSync: Request from '%s' denied", req.remote_addr)
return False
def process_request(self, req):
"""Process the request."""
payload = json.loads(req.args.get('payload'))
repository_name = payload.get('repository', {}).get('name')
self.env.log.debug("GitHubSync: Got POST request for repository '%s'", repository_name)
self._process_repository(repository_name)
req.send_response(200)
req.send_header('Content-Type', 'text/plain')
req.send_header('Content-Length', 0)
req.end_headers()
raise RequestDone
def _process_repository(self, name):
if not name:
return
rm = RepositoryManager(self.env)
trac_repo = rm.get_repository(name)
if not trac_repo or not hasattr(trac_repo, 'gitrepo'):
return
self.env.log.debug("GitHubSync: Processing repository at '%s'", trac_repo.gitrepo)
lock_file = os.path.join(trac_repo.gitrepo, 'githubsync.lock')
lock = open(lock_file, 'w')
fcntl.lockf(lock, fcntl.LOCK_EX)
try:
self.env.log.debug("GitHubSync: Lock acquired")
before_revisions = set(trac_repo.git.repo.rev_list('--branches', '--tags').splitlines())
# Pulling from default source (as configured in repo configuration)
output = trac_repo.git.repo.fetch('--all', '--prune', '--tags')
self.env.log.debug("GitHubSync: git output: %s", output)
after_revisions = set(trac_repo.git.repo.rev_list('--branches', '--tags').splitlines())
finally:
fcntl.lockf(lock, fcntl.LOCK_UN)
lock.close()
os.unlink(lock_file)
self.env.log.debug("GitHubSync: Lock released")
new_revisions = after_revisions - before_revisions
if len(new_revisions) > 0:
self.env.log.debug("GitHubSync: New revisions: %s", new_revisions)
cmd_mgr = AdminCommandManager(self.env)
cmd_mgr.execute_command('changeset', 'added', name, *new_revisions) | PypiClean |
/CustomPipeline-0.0.3-py3-none-any.whl/rpcore/loader.py | from __future__ import print_function
import time
from panda3d.core import PNMImage, VirtualFileSystem, VirtualFileMountRamdisk
from panda3d.core import Shader
from rpcore.globals import Globals
from rpcore.rpobject import RPObject
__all__ = ("RPLoader",)
class timed_loading_operation(object): # noqa # pylint: disable=invalid-name,too-few-public-methods
""" Context manager for a synchronous loading operation, keeping track
of how much time elapsed during the loading process, and warning about
long loading times. """
WARNING_COUNT = 0
def __init__(self, resource):
self.resource = resource
if isinstance(self.resource, (list, tuple)):
self.resource = ', '.join(self.resource)
def __enter__(self):
self.start_time = time.process_time()
def __exit__(self, *args):
duration = (time.process_time() - self.start_time) * 1000.0
if duration > 80.0 and timed_loading_operation.WARNING_COUNT < 5:
RPObject.global_warn(
"RPLoader", "Loading '" + self.resource + "' took", round(duration, 2), "ms")
timed_loading_operation.WARNING_COUNT += 1
if timed_loading_operation.WARNING_COUNT == 5:
RPObject.global_warn(
"RPLoader", "Skipping further loading warnings (max warning count reached)")
class RPLoader(RPObject):
""" Generic loader class used by the pipeline. All loading of assets happens
here, which enables us to keep track of used resources """
@classmethod
def load_texture(cls, filename):
""" Loads a 2D-texture from disk """
with timed_loading_operation(filename):
return Globals.base.loader.load_texture(filename)
@classmethod
def load_cube_map(cls, filename, read_mipmaps=False):
""" Loads a cube map from disk """
with timed_loading_operation(filename):
return Globals.base.loader.load_cube_map(filename, readMipmaps=read_mipmaps)
@classmethod
def load_3d_texture(cls, filename):
""" Loads a 3D-texture from disk """
with timed_loading_operation(filename):
return Globals.base.loader.load_3d_texture(filename)
@classmethod
def load_font(cls, filename):
""" Loads a font from disk """
with timed_loading_operation(filename):
return Globals.base.loader.load_font(filename)
@classmethod
def load_shader(cls, *args):
""" Loads a shader from disk """
with timed_loading_operation(args):
if len(args) == 1:
return Shader.load_compute(Shader.SL_GLSL, args[0])
return Shader.load(Shader.SL_GLSL, *args)
@classmethod
def load_model(cls, filename):
""" Loads a model from disk """
with timed_loading_operation(filename):
return Globals.base.loader.load_model(filename)
@classmethod
def load_sliced_3d_texture(cls, fname, tile_size_x, tile_size_y=None, num_tiles=None):
""" Loads a texture from the given filename and dimensions. If only
one dimensions is specified, the other dimensions are assumed to be
equal. This internally loads the texture into ram, splits it into smaller
sub-images, and then calls the load_3d_texture from the Panda loader """
tempfile_name = "/$$slice_loader_temp-" + str(time.time()) + "/"
tile_size_y = tile_size_x if tile_size_y is None else tile_size_y
num_tiles = tile_size_x if num_tiles is None else num_tiles
# Load sliced image from disk
tex_handle = cls.load_texture(fname)
source = PNMImage()
tex_handle.store(source)
width = source.get_x_size()
# Find slice properties
num_cols = width // tile_size_x
temp_img = PNMImage(
tile_size_x, tile_size_y, source.get_num_channels(), source.get_maxval())
# Construct a ramdisk to write the files to
vfs = VirtualFileSystem.get_global_ptr()
ramdisk = VirtualFileMountRamdisk()
vfs.mount(ramdisk, tempfile_name, 0)
# Extract all slices and write them to the virtual disk
for z_slice in range(num_tiles):
slice_x = (z_slice % num_cols) * tile_size_x
slice_y = (z_slice // num_cols) * tile_size_y
temp_img.copy_sub_image(source, 0, 0, slice_x, slice_y, tile_size_x, tile_size_y)
temp_img.write(tempfile_name + str(z_slice) + ".png")
# Load the de-sliced texture from the ramdisk
texture_handle = cls.load_3d_texture(tempfile_name + "/#.png")
vfs.unmount(ramdisk)
return texture_handle | PypiClean |
/Nevow-0.14.5.tar.gz/Nevow-0.14.5/nevow/js/Divmod/UnitTest.js | // import Divmod
// import Divmod.Inspect
// import Divmod.Runtime
/**
* Return a suite which contains every test defined in C{testClass}. Assumes
* that if a method name starts with C{test_}, then it is a test.
*/
Divmod.UnitTest.loadFromClass = function loadFromClass(testClass) {
var prefix = 'test_';
var suite = Divmod.UnitTest.TestSuite();
var methods = Divmod.Inspect.methods(testClass);
for (var i = 0; i < methods.length; ++i) {
var name = methods[i];
// XXX - abstract startsWith
if (name.substr(0, prefix.length) == prefix) {
suite.addTest(testClass(name));
}
}
return suite;
};
/**
* Return C{true} if the given value is a subclass of L{Divmod.UnitTest.TestCase},
* C{false} otherwise.
*/
Divmod.UnitTest.isTestCaseClass = function isTestCaseClass(klass) {
if (klass.subclassOf === undefined) {
return false;
}
return klass.subclassOf(Divmod.UnitTest.TestCase);
};
/**
* Return a suite which contains every test defined in C{testModule}.
*/
Divmod.UnitTest.loadFromModule = function loadFromModule(testModule) {
var suite = Divmod.UnitTest.TestSuite();
for (var name in testModule) {
if (Divmod.UnitTest.isTestCaseClass(testModule[name])) {
suite.addTest(Divmod.UnitTest.loadFromClass(testModule[name]));
}
}
return suite;
};
/**
* Raised to indicate that a test has failed.
*/
Divmod.UnitTest.AssertionError = Divmod.Error.subclass('Divmod.UnitTest.AssertionError');
Divmod.UnitTest.AssertionError.methods(
function toString(self) {
return 'AssertionError: ' + self.message;
});
/**
* Represents the results of a run of unit tests.
*
* @type testsRun: integer
* @ivar testsRun: The number of tests that have been run using this as the
* result.
*
* @type failures: Array of [L{TestCase}, L{Divmod.Error}] pairs
* @ivar failures: The assertion failures that have occurred in this test run,
* paired with the tests that generated them.
*
* @type successes: Array of L{TestCase}
* @ivar successes: A list of tests that succeeded.
*
* @type errors: Array of [L{TestCase}, L{Divmod.Error}] pairs
* @ivar errors: The errors that were raised by tests in this test run, paired
* with the tests that generated them.
*/
Divmod.UnitTest.TestResult = Divmod.Class.subclass('Divmod.UnitTest.TestResult');
Divmod.UnitTest.TestResult.methods(
function __init__(self) {
self.testsRun = 0;
self.failures = [];
self.successes = [];
self.errors = [];
},
/**
* Called by C{TestCase.run} at the start of the test.
*
* @param test: The test that just started.
* @type test: L{Divmod.UnitTest.TestCase}
*/
function startTest(self, test) {
self.testsRun++;
},
/**
* Called by C{TestCase.run} at the end of the test run.
*
* @param test: The test that just finished.
* @type test: L{Divmod.UnitTest.TestCase}
*/
function stopTest(self, test) {
},
/**
* Report an error that occurred while running the given test.
*
* @param test: The test that had an error.
* @type test: L{Divmod.UnitTest.TestCase}
*
* @param error: The error that occurred.
* @type error: Generally a L{Divmod.Error} instance.
*/
function addError(self, test, error) {
self.errors.push([test, error]);
},
/**
* Report a failed assertion that occurred while running the given test.
*
* @param test: The test with the failed assertion.
* @type test: L{Divmod.UnitTest.TestCase}
*
* @param failure: The failure that occurred.
* @type failure: A L{Divmod.UnitTest.AssertionError} instance.
*/
function addFailure(self, test, failure) {
self.failures.push([test, failure]);
},
/**
* Report that the given test succeeded.
*
* @param test: The test that succeeded.
* @type test: L{Divmod.UnitTest.TestCase}
*/
function addSuccess(self, test) {
self.successes.push(test);
},
/**
* Return a triple of (tests run, number of failures, number of errors)
*/
function getSummary(self) {
return [self.testsRun, self.failures.length, self.errors.length];
},
/**
* Return C{true} if there have been no failures or errors. Return C{false}
* if there have been.
*/
function wasSuccessful(self) {
return self.failures.length == 0 && self.errors.length == 0;
});
Divmod.UnitTest.SubunitTestClient = Divmod.UnitTest.TestResult.subclass('Divmod.UnitTest.SubunitTestClient');
Divmod.UnitTest.SubunitTestClient.methods(
function _write(self, string) {
print(string);
},
function _sendException(self, error) {
var f = Divmod.Defer.Failure(error);
self._write(f.toPrettyText(f.filteredParseStack()));
},
function addError(self, test, error) {
self._write("error: " + test.id() + " [");
self._sendException(error);
self._write(']');
},
function addFailure(self, test, error) {
self._write("failure: " + test.id() + " [");
self._sendException(error);
self._write(']');
},
function addSuccess(self, test) {
self._write('successful: ' + test.id());
},
function startTest(self, test) {
self._write('test: ' + test.id());
});
/**
* Represents a collection of tests. Implements the Composite pattern.
*/
Divmod.UnitTest.TestSuite = Divmod.Class.subclass('Divmod.UnitTest.TestSuite');
Divmod.UnitTest.TestSuite.methods(
function __init__(self, /* optional */ tests) {
self.tests = [];
if (tests != undefined) {
self.addTests(tests);
}
},
/**
* Add the given test to the suite.
*
* @param test: The test to add.
* @type test: L{Divmod.UnitTest.TestCase} or L{Divmod.UnitTest.TestSuite}
*/
function addTest(self, test) {
self.tests.push(test);
},
/**
* Add the given tests to the suite.
*
* @param tests: An array of tests to add.
* @type tests: [L{Divmod.UnitTest.TestCase} or L{Divmod.UnitTest.TestSuite}]
*/
function addTests(self, tests) {
for (var i = 0; i < tests.length; ++i) {
self.addTest(tests[i]);
}
},
/**
* Return the number of actual tests contained in this suite.
*/
function countTestCases(self) {
var total = 0;
self.visit(function (test) { total += test.countTestCases(); });
return total;
},
/**
* Visit each test case in this suite with the given visitor function.
*/
function visit(self, visitor) {
for (var i = 0; i < self.tests.length; ++i) {
self.tests[i].visit(visitor);
}
},
/**
* Run all of the tests in the suite.
*/
function run(self, result) {
self.visit(function (test) { test.run(result); });
});
/**
* I represent a single unit test.
*/
Divmod.UnitTest.TestCase = Divmod.Class.subclass('Divmod.UnitTest.TestCase');
Divmod.UnitTest.TestCase.methods(
/**
* Construct a test.
*
* @type methodName: string
* @param methodName: The name of a method on this object that contains
* the unit test.
*/
function __init__(self, methodName) {
self._methodName = methodName;
},
/**
* Return a string which identifies this test.
*/
function id(self) {
return self.__class__.__name__ + '.' + self._methodName;
},
/**
* Count the number of test cases in this test. Always 1, because an
* instance represents a single test.
*/
function countTestCases(self) {
return 1;
},
/**
* Visit this test case.
*
* @param visitor: A callable which takes one argument (a test case).
*/
function visit(self, visitor) {
visitor(self);
},
/**
* Fail the test. Equivalent to an invalid assertion.
*
* @type reason: text
* @param reason: Why the test is being failed.
* @throw: Divmod.UnitTest.AssertionError
*/
function fail(self, reason) {
throw Divmod.UnitTest.AssertionError(reason);
},
/**
* Assert that the given expression evaluates to true.
*
* @type expression: boolean
* @param expression: The thing we are asserting.
*
* @type message: text
* @param message: An optional parameter, explaining what the assertion
* means.
*/
function assert(self, expression, /* optional */ message) {
if (!expression) {
self.fail(message);
}
},
/**
* Compare C{a} and C{b} using the provided predicate.
*
* @type predicate: A callable that accepts two parameters.
* @param predicate: Returns either C{true} or C{false}.
*
* @type description: text
* @param description: Describes the inverse of the comparison. This is
* used in the L{AssertionError} if the comparison
* fails.
*
* @type a: any
* @param a: The thing to be compared with C{b}. Passed as the first
* parameter to C{predicate}.
*
* @type b: any
* @param b: The thing to be compared with C{a}. Passed as the second
* parameter to C{predicate}.
*
* @type message: text
* @param message: An optional message to be included in the raised
* L{AssertionError}.
*
* @raises L{Divmod.UnitTest.AssertionError} if C{predicate} returns
* C{false}.
*/
function compare(self, predicate, description, a, b,
/* optional */ message) {
var repr = Divmod.UnitTest.repr;
if (!predicate(a, b)) {
var msg = repr(a) + " " + description + " " + repr(b);
if (message != null) {
msg += ': ' + message;
}
self.fail(msg);
}
},
/**
* Assert that C{a} and C{b} are equal. Recurses into arrays and dicts.
*/
function assertArraysEqual(self, a, b, /* optional */ message) {
self.compare(Divmod.arraysEqual, '!=', a, b, message);
},
/**
* Assert that C{a} and C{b} are identical.
*/
function assertIdentical(self, a, b, /* optional */ message) {
self.compare(function (x, y) { return x === y; },
'!==', a, b, message);
},
/**
* Assert that C{callable} throws C{expectedError}
*
* @param expectedError: The error type (class or prototype) which is
* expected to be thrown.
*
* @param callable: A callable which is expected to throw C{expectedError}.
*
* @param ...: Optional positional arguments passed to C{callable}.
*
* @throw AssertionError: Thrown if the callable doesn't throw
* C{expectedError}. This could be because it threw a different error or
* because it didn't throw any errors.
*
* @return: The exception that was raised by callable.
*/
function assertThrows(self, expectedError, callable /*... */) {
var threw = null;
var args = Array.prototype.slice.call(arguments, 3);
try {
callable.apply(null, args);
} catch (e) {
threw = e;
self.assert(e instanceof expectedError,
"Wrong error type thrown: " + e);
}
self.assert(threw != null, "Callable threw no error");
return threw;
},
/**
* Override me to provide code to set up a unit test. This method is called
* before the test method.
*
* L{setUp} is most useful when a subclass contains many test methods which
* require a common base configuration. L{tearDown} is the complement of
* L{setUp}.
*/
function setUp(self) {
},
/**
* Override me to provide code to clean up a unit test. This method is called
* after the test method.
*
* L{tearDown} is at its most useful when used to clean up resources that are
* initialized/modified by L{setUp} or by the test method.
*/
function tearDown(self) {
},
/**
* Actually run this test.
*/
function run(self, result) {
var success = true;
result.startTest(self);
// XXX: This probably isn't the best place to put this, but it's the
// only place for the time being; see #2806 for the proper way to deal
// with this.
Divmod.Runtime.initRuntime();
try {
self.setUp();
} catch (e) {
result.addError(self, e);
return result;
}
try {
self[self._methodName]();
} catch (e) {
if (e instanceof Divmod.UnitTest.AssertionError) {
result.addFailure(self, e);
} else {
result.addError(self, e);
}
success = false;
}
try {
self.tearDown();
} catch (e) {
result.addError(self, e);
success = false;
}
if (success) {
result.addSuccess(self);
}
result.stopTest(self);
});
/**
* Return a nicely formatted summary from the given L{TestResult}.
*/
Divmod.UnitTest.formatSummary = function formatSummary(result) {
var summary;
if (result.wasSuccessful()) {
summary = "PASSED "
} else {
summary = "FAILED "
}
summary += "(tests=" + result.testsRun;
if (result.errors.length > 0) {
summary += ", errors=" + result.errors.length
}
if (result.failures.length > 0) {
summary += ", failures=" + result.failures.length;
}
summary += ')';
return summary;
};
/**
* Return a formatted string containing all the errors and failures in a result
*
* @param result: A test result.
* @type result: L{Divmod.UnitTest.TestResult}
*/
Divmod.UnitTest.formatErrors = function formatErrors(result) {
var format = '';
for (var i = 0; i < result.errors.length; ++i) {
format += Divmod.UnitTest.formatError('ERROR',
result.errors[i][0],
result.errors[i][1]);
}
for (var i = 0; i < result.failures.length; ++i) {
format += Divmod.UnitTest.formatError('FAILURE',
result.failures[i][0],
result.failures[i][1]);
}
return format;
};
/**
* Return a formatting string showing the failure/error that occurred in a test.
*
* @param test: A test which had a failure or error.
* @type test: L{Divmod.UnitTest.TestCase}
*
* @param error: An error or failure which occurred in the test.
* @type error: L{Divmod.Error}
*/
Divmod.UnitTest.formatError = function formatError(kind, test, error) {
var f = Divmod.Defer.Failure(error);
var ret = '[' + kind + '] ' + test.id() + ': ' + error.message + '\n';
ret += f.toPrettyText(f.filteredParseStack()) + '\n';
return ret;
};
/**
* Run the given test, printing the summary of results and any errors. If run
* inside a web browser, it will try to print these things to the printer, so
* don't use this in a web browser.
*
* @param test: The test to run.
* @type test: L{Divmod.UnitTest.TestCase} or L{Divmod.UnitTest.TestSuite}
*/
Divmod.UnitTest.run = function run(test) {
var result = Divmod.UnitTest.TestResult();
test.run(result);
print(Divmod.UnitTest.formatErrors(result));
print(Divmod.UnitTest.formatSummary(result));
};
Divmod.UnitTest.runRemote = function runRemote(test) {
var result = Divmod.UnitTest.SubunitTestClient();
test.run(result);
};
/**
* Return a string representation of an arbitrary value, similar to
* Python's builtin repr() function.
*/
Divmod.UnitTest.repr = function repr(value) {
// We can't call methods on undefined or null.
if (value === undefined) {
return 'undefined';
} else if (value === null) {
return 'null';
} else if (typeof value === 'string') {
return '"' + value + '"';
} else if (typeof value === 'number') {
return '' + value;
} else if (value.toSource !== undefined) {
return value.toSource();
} else if (value.toString !== undefined) {
return value.toString();
} else {
return '' + value;
}
}; | PypiClean |
/Gbtestapi-0.1a10-py3-none-any.whl/gailbot/services/converter/payload/conversationDirectoryPayload.py | import os
from typing import List, Union
from .payloadObject import PayLoadObject, PayLoadStatus
from ...organizer.source import SourceObject
from src.gailbot.core.utils.general import paths_in_dir, is_directory, copy
from src.gailbot.core.utils.logger import makelogger
from src.gailbot.workspace.manager import WorkspaceManager
from .audioPayload import AudioPayload
from src.gailbot.core.utils.media import AudioHandler
MERGED_FILE_NAME = "merged"
logger = makelogger("conversation_payload")
def load_conversation_dir_payload(
source: SourceObject, ws_manager: WorkspaceManager
) -> Union[bool, List[PayLoadObject]]:
"""Given a source object, convert it into an conversation directory payload
if the source stores a conversation directory
Args:
source (SourceObject): an instance of SourceObject that stores the
datafile and setting of the transcription
Returns:
Union[bool, List[PayLoadObject]]: return the converted payload if the
conversion is successful, return False otherwise
"""
original_source = source.source_path()
if not is_directory(original_source) or not source.setting:
return False
if ConversationDirectoryPayload.is_supported(original_source):
return [ConversationDirectoryPayload(source, ws_manager)]
# NOTE: currently not support loading directory inside directory
return False
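# Illustrative call sequence (a sketch; the `source` and `ws_manager` objects are
# assumed to come from the surrounding gailbot services and are not constructed here):
#
#   payloads = load_conversation_dir_payload(source, ws_manager)
#   if payloads:
#       conversation_payload = payloads[0]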
class ConversationDirectoryPayload(PayLoadObject):
"""
Stores a conversation directory with only audio files
"""
def __init__(self, source: SourceObject, workspace: WorkspaceManager) -> None:
super().__init__(source, workspace)
@staticmethod
def supported_format() -> str:
"""
Contains and accesses a list of the supported formats
"""
return "directory"
@staticmethod
def is_supported(file_path: str) -> bool:
"""
Determines if a given file path has a supported file extension
Args:
file_path: str: file path to check
Returns:
bool: True if it contains a supported file extension, false if not
"""
logger.info(file_path)
if not is_directory(file_path):
return False
sub_paths = paths_in_dir(
file_path, AudioPayload.supported_format(), recursive=False
)
if len(sub_paths) == 0:
return False
return True
def _copy_file(self) -> None:
"""
Copies file to workspace
"""
try:
tgt_path = os.path.join(self.workspace.data_copy, f"{self.name}")
copy(self.original_source, tgt_path)
self.data_files = []
sub_paths = paths_in_dir(
tgt_path, AudioPayload.supported_format(), recursive=False
)
for path in sub_paths:
self.data_files.append(path)
except Exception as e:
logger.error(e, exc_info=e)
def _merge_audio(self):
try:
handler = AudioHandler()
merged_path = handler.overlay_audios(
self.data_files, self.out_dir.media_file, MERGED_FILE_NAME
)
self.merged_audio = merged_path
assert merged_path
except Exception as e:
logger.error(e, exc_info=e)
def _set_initial_status(self) -> None:
"""
Sets the initial status of the payload object to initialized
"""
self.status = PayLoadStatus.INITIALIZED
def __repr__(self) -> str:
return "Conversation directory payload" | PypiClean |
/DigLabTools-0.0.2-py3-none-any.whl/redcap_bridge/project_validation.py | import pathlib
import pandas as pd
import warnings
import redcap_bridge
from redcap_bridge.utils import map_header_json_to_csv
index_column_header = 'Variable / Field Name'
template_dir = pathlib.Path(redcap_bridge.__file__).parent / 'template_parts'
def validate_project_against_template_parts(project, *templates):
"""
Validate a built project csv
Parameters
----------
project: str, buffer
Filepath of the csv file or csv buffer of the built project
templates: str, list
List of file paths of the template part csvs.
Returns
----------
bool
True if the validation was successful
"""
df_project = pd.read_csv(project)
# unify column names to conform to csv style
df_project = df_project.rename(columns=map_header_json_to_csv)
df_project.index = df_project[index_column_header]
dfs_templates = []
if not templates:
warnings.warn('No templates selected, template list is empty')
for template in templates:
df_template = pd.read_csv((template_dir / template).with_suffix('.csv'))
df_template.index = df_template[index_column_header]
dfs_templates.append(df_template)
# compare content of template_parts and project
for template_df in dfs_templates:
if not all(template_df.columns == df_project.columns):
raise ValueError(f'Incompatible columns in project '
f'({df_project.columns}) and template '
f'({template_df.columns})')
for i in template_df.index:
if i not in df_project.index:
raise ValueError(f'Row {i} is missing in project csv')
# compare entries of the row and exclude `na` entries
na_values = template_df.loc[i].isna()
equal_entries = df_project.loc[i] == template_df.loc[i]
if not (equal_entries | na_values).all():
raise ValueError(f'Row {i} differs between project csv and '
f'template')
print('Validation successful')
return True
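# Example usage (a sketch -- the project csv path and template part names below
# are placeholders, not files shipped with this module):
#
#   validate_project_against_template_parts('build/project.csv',
#                                           'general_information', 'sessions')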
def validate_record_against_template(record_csv, template_csv):
"""
Validate a RedCap record against a template instrument
Parameters
----------
record_csv: path
path to the record csv of that instrument
template_csv: path
path to the template csv of an instrument
Returns
-------
True
Raises
------
ValueError in case of failing validation
"""
template = pd.read_csv(template_csv)
record = pd.read_csv(record_csv)
assert 'Variable / Field Name' in template
# remove 'record_id' as it is unique to template
template = template.loc[template['Variable / Field Name'] != 'record_id']
# Step 1: Assert all fields are preserved
type_groups = template.groupby(template['Field Type'])
for group_type in type_groups.groups:
# check if all options are present for checkbox fields
if group_type == 'checkbox':
df_checkboxes = type_groups.get_group(group_type)
# reduce to only relevant columns
df_checkboxes = df_checkboxes[['Variable / Field Name',
'Choices, Calculations, OR Slider Labels']]
for field_name, choices in df_checkboxes.values:
choice_ids = [c.split(',')[0] for c in choices.split('| ')]
for cid in choice_ids:
if f'{field_name}___{cid}' not in record.columns.values:
raise ValueError(f'"{field_name}___{cid}" column '
f'header is missing in record')
# check that all non-descriptive fields are preserved
elif group_type != 'descriptive':
# check if editable field is present
group_df = type_groups.get_group(group_type)
for key in group_df['Variable / Field Name'].values:
if key not in record.columns.values:
raise ValueError(
f'"{key}" column header is missing in record')
# Step 2: Check that required fields contain data
type_groups = template.groupby(template['Required Field?'])
required_fields_df = type_groups.get_group('y')
# ignore required 'checkbox' fields
required_fields_df = required_fields_df.loc[
required_fields_df['Field Type'] != 'checkbox']
required_fields = required_fields_df['Variable / Field Name'].values
for required_field in required_fields:
empty_record_mask = record[required_field].isnull()
if empty_record_mask.values.any():
empty_record = record.loc[empty_record_mask]
raise ValueError(
f'records with {empty_record.index.name}='
f'{empty_record.index.tolist()} do not contain data in '
f'required field "{required_field}"')
return True
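# Example usage (a sketch with placeholder paths):
#
#   validate_record_against_template('records/instrument_records.csv',
#                                    'templates/instrument_template.csv')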
if __name__ == '__main__':
pass | PypiClean |
/Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/pylot/component/static/pylot/vendor/mdeditor/bower_components/codemirror/addon/hint/html-hint.js | (function () {
var langs = "ab aa af ak sq am ar an hy as av ae ay az bm ba eu be bn bh bi bs br bg my ca ch ce ny zh cv kw co cr hr cs da dv nl dz en eo et ee fo fj fi fr ff gl ka de el gn gu ht ha he hz hi ho hu ia id ie ga ig ik io is it iu ja jv kl kn kr ks kk km ki rw ky kv kg ko ku kj la lb lg li ln lo lt lu lv gv mk mg ms ml mt mi mr mh mn na nv nb nd ne ng nn no ii nr oc oj cu om or os pa pi fa pl ps pt qu rm rn ro ru sa sc sd se sm sg sr gd sn si sk sl so st es su sw ss sv ta te tg th ti bo tk tl tn to tr ts tt tw ty ug uk ur uz ve vi vo wa cy wo fy xh yi yo za zu".split(" ");
var targets = ["_blank", "_self", "_top", "_parent"];
var charsets = ["ascii", "utf-8", "utf-16", "latin1", "latin1"];
var methods = ["get", "post", "put", "delete"];
var encs = ["application/x-www-form-urlencoded", "multipart/form-data", "text/plain"];
var media = ["all", "screen", "print", "embossed", "braille", "handheld", "print", "projection", "screen", "tty", "tv", "speech",
"3d-glasses", "resolution [>][<][=] [X]", "device-aspect-ratio: X/Y", "orientation:portrait",
"orientation:landscape", "device-height: [X]", "device-width: [X]"];
var s = { attrs: {} }; // Simple tag, reused for a whole lot of tags
var data = {
a: {
attrs: {
href: null, ping: null, type: null,
media: media,
target: targets,
hreflang: langs
}
},
abbr: s,
acronym: s,
address: s,
applet: s,
area: {
attrs: {
alt: null, coords: null, href: null, target: null, ping: null,
media: media, hreflang: langs, type: null,
shape: ["default", "rect", "circle", "poly"]
}
},
article: s,
aside: s,
audio: {
attrs: {
src: null, mediagroup: null,
crossorigin: ["anonymous", "use-credentials"],
preload: ["none", "metadata", "auto"],
autoplay: ["", "autoplay"],
loop: ["", "loop"],
controls: ["", "controls"]
}
},
b: s,
base: { attrs: { href: null, target: targets } },
basefont: s,
bdi: s,
bdo: s,
big: s,
blockquote: { attrs: { cite: null } },
body: s,
br: s,
button: {
attrs: {
form: null, formaction: null, name: null, value: null,
autofocus: ["", "autofocus"],
disabled: ["", "autofocus"],
formenctype: encs,
formmethod: methods,
formnovalidate: ["", "novalidate"],
formtarget: targets,
type: ["submit", "reset", "button"]
}
},
canvas: { attrs: { width: null, height: null } },
caption: s,
center: s,
cite: s,
code: s,
col: { attrs: { span: null } },
colgroup: { attrs: { span: null } },
command: {
attrs: {
type: ["command", "checkbox", "radio"],
label: null, icon: null, radiogroup: null, command: null, title: null,
disabled: ["", "disabled"],
checked: ["", "checked"]
}
},
data: { attrs: { value: null } },
datagrid: { attrs: { disabled: ["", "disabled"], multiple: ["", "multiple"] } },
datalist: { attrs: { data: null } },
dd: s,
del: { attrs: { cite: null, datetime: null } },
details: { attrs: { open: ["", "open"] } },
dfn: s,
dir: s,
div: s,
dl: s,
dt: s,
em: s,
embed: { attrs: { src: null, type: null, width: null, height: null } },
eventsource: { attrs: { src: null } },
fieldset: { attrs: { disabled: ["", "disabled"], form: null, name: null } },
figcaption: s,
figure: s,
font: s,
footer: s,
form: {
attrs: {
action: null, name: null,
"accept-charset": charsets,
autocomplete: ["on", "off"],
enctype: encs,
method: methods,
novalidate: ["", "novalidate"],
target: targets
}
},
frame: s,
frameset: s,
h1: s, h2: s, h3: s, h4: s, h5: s, h6: s,
head: {
attrs: {},
children: ["title", "base", "link", "style", "meta", "script", "noscript", "command"]
},
header: s,
hgroup: s,
hr: s,
html: {
attrs: { manifest: null },
children: ["head", "body"]
},
i: s,
iframe: {
attrs: {
src: null, srcdoc: null, name: null, width: null, height: null,
sandbox: ["allow-top-navigation", "allow-same-origin", "allow-forms", "allow-scripts"],
seamless: ["", "seamless"]
}
},
img: {
attrs: {
alt: null, src: null, ismap: null, usemap: null, width: null, height: null,
crossorigin: ["anonymous", "use-credentials"]
}
},
input: {
attrs: {
alt: null, dirname: null, form: null, formaction: null,
height: null, list: null, max: null, maxlength: null, min: null,
name: null, pattern: null, placeholder: null, size: null, src: null,
step: null, value: null, width: null,
accept: ["audio/*", "video/*", "image/*"],
autocomplete: ["on", "off"],
autofocus: ["", "autofocus"],
checked: ["", "checked"],
disabled: ["", "disabled"],
formenctype: encs,
formmethod: methods,
formnovalidate: ["", "novalidate"],
formtarget: targets,
multiple: ["", "multiple"],
readonly: ["", "readonly"],
required: ["", "required"],
type: ["hidden", "text", "search", "tel", "url", "email", "password", "datetime", "date", "month",
"week", "time", "datetime-local", "number", "range", "color", "checkbox", "radio",
"file", "submit", "image", "reset", "button"]
}
},
ins: { attrs: { cite: null, datetime: null } },
kbd: s,
keygen: {
attrs: {
challenge: null, form: null, name: null,
autofocus: ["", "autofocus"],
disabled: ["", "disabled"],
keytype: ["RSA"]
}
},
label: { attrs: { "for": null, form: null } },
legend: s,
li: { attrs: { value: null } },
link: {
attrs: {
href: null, type: null,
hreflang: langs,
media: media,
sizes: ["all", "16x16", "16x16 32x32", "16x16 32x32 64x64"]
}
},
map: { attrs: { name: null } },
mark: s,
menu: { attrs: { label: null, type: ["list", "context", "toolbar"] } },
meta: {
attrs: {
content: null,
charset: charsets,
name: ["viewport", "application-name", "author", "description", "generator", "keywords"],
"http-equiv": ["content-language", "content-type", "default-style", "refresh"]
}
},
meter: { attrs: { value: null, min: null, low: null, high: null, max: null, optimum: null } },
nav: s,
noframes: s,
noscript: s,
object: {
attrs: {
data: null, type: null, name: null, usemap: null, form: null, width: null, height: null,
typemustmatch: ["", "typemustmatch"]
}
},
ol: { attrs: { reversed: ["", "reversed"], start: null, type: ["1", "a", "A", "i", "I"] } },
optgroup: { attrs: { disabled: ["", "disabled"], label: null } },
option: { attrs: { disabled: ["", "disabled"], label: null, selected: ["", "selected"], value: null } },
output: { attrs: { "for": null, form: null, name: null } },
p: s,
param: { attrs: { name: null, value: null } },
pre: s,
progress: { attrs: { value: null, max: null } },
q: { attrs: { cite: null } },
rp: s,
rt: s,
ruby: s,
s: s,
samp: s,
script: {
attrs: {
type: ["text/javascript"],
src: null,
async: ["", "async"],
defer: ["", "defer"],
charset: charsets
}
},
section: s,
select: {
attrs: {
form: null, name: null, size: null,
autofocus: ["", "autofocus"],
disabled: ["", "disabled"],
multiple: ["", "multiple"]
}
},
small: s,
source: { attrs: { src: null, type: null, media: null } },
span: s,
strike: s,
strong: s,
style: {
attrs: {
type: ["text/css"],
media: media,
scoped: null
}
},
sub: s,
summary: s,
sup: s,
table: s,
tbody: s,
td: { attrs: { colspan: null, rowspan: null, headers: null } },
textarea: {
attrs: {
dirname: null, form: null, maxlength: null, name: null, placeholder: null,
rows: null, cols: null,
autofocus: ["", "autofocus"],
disabled: ["", "disabled"],
readonly: ["", "readonly"],
required: ["", "required"],
wrap: ["soft", "hard"]
}
},
tfoot: s,
th: { attrs: { colspan: null, rowspan: null, headers: null, scope: ["row", "col", "rowgroup", "colgroup"] } },
thead: s,
time: { attrs: { datetime: null } },
title: s,
tr: s,
track: {
attrs: {
src: null, label: null, "default": null,
kind: ["subtitles", "captions", "descriptions", "chapters", "metadata"],
srclang: langs
}
},
tt: s,
u: s,
ul: s,
"var": s,
video: {
attrs: {
src: null, poster: null, width: null, height: null,
crossorigin: ["anonymous", "use-credentials"],
preload: ["auto", "metadata", "none"],
autoplay: ["", "autoplay"],
mediagroup: ["movie"],
muted: ["", "muted"],
controls: ["", "controls"]
}
},
wbr: s
};
var globalAttrs = {
accesskey: ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9"],
"class": null,
contenteditable: ["true", "false"],
contextmenu: null,
dir: ["ltr", "rtl", "auto"],
draggable: ["true", "false", "auto"],
dropzone: ["copy", "move", "link", "string:", "file:"],
hidden: ["hidden"],
id: null,
inert: ["inert"],
itemid: null,
itemprop: null,
itemref: null,
itemscope: ["itemscope"],
itemtype: null,
lang: ["en", "es"],
spellcheck: ["true", "false"],
style: null,
tabindex: ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
title: null,
translate: ["yes", "no"],
onclick: null,
rel: ["stylesheet", "alternate", "author", "bookmark", "help", "license", "next", "nofollow", "noreferrer", "prefetch", "prev", "search", "tag"]
};
function populate(obj) {
for (var attr in globalAttrs) if (globalAttrs.hasOwnProperty(attr))
obj.attrs[attr] = globalAttrs[attr];
}
populate(s);
for (var tag in data) if (data.hasOwnProperty(tag) && data[tag] != s)
populate(data[tag]);
CodeMirror.htmlSchema = data;
function htmlHint(cm, options) {
var local = {schemaInfo: data};
if (options) for (var opt in options) local[opt] = options[opt];
return CodeMirror.hint.xml(cm, local);
}
CodeMirror.htmlHint = htmlHint; // deprecated
CodeMirror.registerHelper("hint", "html", htmlHint);
})(); | PypiClean |
/HarlRing-1.0.0-py3-none-any.whl/harlring/confluence/confluence_util.py | import logging
from atlassian import Confluence, utils
import jieba.posseg as pseg
class ConfluenceUtil(object):
"""
Confluence Util
Author: chenying
"""
def __init__(self, url, username, password):
# Initialize the Confluence client instance from the atlassian-python-api library
self.confluence = Confluence(
url=url,
username=username,
password=password
)
logging.basicConfig(level=logging.CRITICAL)
def _get_space_key_by_space_name(self, space_name):
"""
Get space key for space name
Author: chenying
:param space_name: SPACE NAME
:return: Space Key
"""
# Get the list of all spaces
spaces_list = self.confluence.get_all_spaces()
try:
# Find the SPACE KEY whose space display name matches the given space name
space_key = spaces_list[next(index for (index, d) in enumerate(spaces_list) if d['name'] == space_name)][
'key']
return space_key
except Exception as ex:
print(ex)
return 'FAIL', "Can't find {space_name} page in the {url}".format(space_name=space_name,
url=self.confluence.url)
def _get_page_version(self, space_name, title):
space = self._get_space_key_by_space_name(space_name)
page_id = self.confluence.get_page_id(space, title)
page_body_value = self.confluence.get_page_by_id(page_id).get("version").get("number")
return page_body_value
@staticmethod
def _create_list(data):
"""
Create the Confluence page list body
Author: chenying
:param data: datas
:return:
"""
value = "<p>"
for item in data:
value += "{}<br />".format(item)
return value + "</p>"
@staticmethod
def _create_jira_filet_no_column():
"""
Create JIRA Filter no column
Author: chenying
:return: XHTML
"""
value = """<p>
<ac:structured-macro ac:name="jira">
<ac:parameter ac:name="server">JIRA</ac:parameter>
<ac:parameter ac:name="serverId">e78bf60f-8e47-4183-9f26-9e9661fc2ce8</ac:parameter>
<ac:parameter ac:name="key">{}</ac:parameter>
</ac:structured-macro>
</p>"""
return value
@staticmethod
def _create_jira_filet_jql_no_columns():
"""
Create JIRA Filter JQL no column
Author: chenying
:return: XHTML
"""
value = """<p>
<ac:structured-macro ac:name="jira">
<ac:parameter ac:name="server">JIRA</ac:parameter>
<ac:parameter ac:name="serverId">e78bf60f-8e47-4183-9f26-9e9661fc2ce8</ac:parameter>
<ac:parameter ac:name="jqlQuery">{}</ac:parameter>
</ac:structured-macro>
</p>"""
return value
@staticmethod
def _jql_is_name(data):
"""
Judgment JQLQuery is a name
Author: chenying
:param data: JQLQuery
:return: True:is name|False:isn`t a name
"""
data_list = pseg.lcut(data)
for eve_word, isxing in data_list:
if isxing == "nr":
return True
return False
@staticmethod
def _create_jira_filet_jql_is_people_name():
"""
Create JIRA Filter JQL no column
Author: chenying
:return: XHTML
"""
value = """<p>
<ac:structured-macro ac:name="jira">
<ac:parameter ac:name="server">JIRA</ac:parameter>
<ac:parameter ac:name="serverId">e78bf60f-8e47-4183-9f26-9e9661fc2ce8</ac:parameter>
<ac:parameter ac:name="jqlQuery">assignee = {} OR reporter = {}</ac:parameter>
</ac:structured-macro>
</p>"""
return value
@staticmethod
def _create_jira_filet_jql_is_people_name_has_columns():
"""
Create JIRA Filter JQL has column
Author: chenying
:return: XHTML
"""
value = """<p>
<ac:structured-macro ac:name="jira">
<ac:parameter ac:name="server">JIRA</ac:parameter>
<ac:parameter ac:name="serverId">e78bf60f-8e47-4183-9f26-9e9661fc2ce8</ac:parameter>
<ac:parameter ac:name="jqlQuery">assignee = {} OR reporter = {}</ac:parameter>
<ac:parameter ac:name="columns">{}</ac:parameter>
</ac:structured-macro>
</p>"""
return value
@staticmethod
def _create_jira_filet_jql_is_str_no_columns():
"""
Create JIRA Filter JQL no column
Author: chenying
:return: XHTML
"""
value = """<p>
<ac:structured-macro ac:name="jira">
<ac:parameter ac:name="server">JIRA</ac:parameter>
<ac:parameter ac:name="serverId">e78bf60f-8e47-4183-9f26-9e9661fc2ce8</ac:parameter>
<ac:parameter ac:name="jqlQuery">summary ~ {} OR description ~ {}</ac:parameter>
</ac:structured-macro>
</p>"""
return value
@staticmethod
def _create_jira_filet_jql_is_str_has_columns():
"""
Create JIRA Filter JQL has column
Author: chenying
:return: XHTML
"""
value = """<p>
<ac:structured-macro ac:name="jira">
<ac:parameter ac:name="server">JIRA</ac:parameter>
<ac:parameter ac:name="serverId">e78bf60f-8e47-4183-9f26-9e9661fc2ce8</ac:parameter>
<ac:parameter ac:name="jqlQuery">summary ~ {} OR description ~ {}</ac:parameter>
<ac:parameter ac:name="columns">{}</ac:parameter>
</ac:structured-macro>
</p>"""
return value
@staticmethod
def _create_jira_filet_jql_has_columns():
"""
Create JIRA Filter JQL with column
Author: chenying
:return: XHTML
"""
value = """<p>
<ac:structured-macro ac:name="jira">
<ac:parameter ac:name="server">JIRA</ac:parameter>
<ac:parameter ac:name="serverId">e78bf60f-8e47-4183-9f26-9e9661fc2ce8</ac:parameter>
<ac:parameter ac:name="jqlQuery">{}</ac:parameter>
<ac:parameter ac:name="columns">{}</ac:parameter>
</ac:structured-macro>
</p>"""
return value
@staticmethod
def _create_table(ordering, datas):
"""
Create table by ordering and datas
Author: chenying
:param ordering: table headers
:param datas: table data
:return: table
"""
data = []
for value in datas:
dict_list = (dict(zip(ordering, value)))
data.append(dict_list)
result = utils.html_table_from_dict(data, ordering)
return result
@staticmethod
def _get_page_id_by_url(url):
"""
Get Page ID
Author: chenying
:param url: URL
:return: page_id
"""
return ''.join(url).split('=')[-1]
def create_confluence_page(self, space_name, parent_title, title, body):
"""
1. Update the page body if the page exists.
2. Create the page if it does not exist.
Author: chenying
:param space_name: SPACE NAME
:param parent_title: parent page title
:param title: title
:param body: body
:return: SUCCESS,URL|FAIL,MESSAGE
"""
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
if self.confluence.get_page_by_title(space_key, parent_title) is None:
return 'FAIL', "Can't find '{parent_title}' page in the '{space_name}'".format(parent_title=parent_title,
space_name=space_name)
parent_id = self.confluence.get_page_id(space_key, title=parent_title)
update_or_create_page_dict_info = self.confluence.update_or_create(parent_id, title, body,
representation="wiki")
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url,
url=((update_or_create_page_dict_info or {}).get('_links') or {}).get(
'webui'))
def clean_confluence_page(self, space_name, title):
"""
Clean the page body if the page already exists
Author: chenying
:param space_name: SPACE NAME
:param title: title
:return: SUCCESS,URL|FAIL,MESSAGE
"""
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
if self.confluence.page_exists(space_key, title) is True:
clean_page_dict_info = self.confluence.update_existing_page(self.confluence.get_page_id(space_key, title),
title, body="")
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url,
url=((clean_page_dict_info or {}).get('_links') or {}).get(
'webui'))
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title, space_name=space_name)
def append_confluence_list(self, space_name, title, data):
"""
Append confluence page list
Author: chenying
:param space_name: SPACE NAME
:param title: title
:param data: list data
:return: SUCCESS,URL|FAIL,MESSAGE
"""
data_list = []
for value in data:
data_list.append("".join(value).replace(",", ", "))
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
# Check whether the Confluence page exists
if self.confluence.page_exists(space_key, title) is True:
page_id = self.confluence.get_page_id(space_key, title)
# Append the list to the page
append_page_dict_info = self.confluence.append_page(page_id, title, self._create_list(data_list))
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url,
url=((append_page_dict_info or {}).get('_links') or {}).get('webui'))
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title, space_name=space_name)
def append_confluence_dict(self, space_name, title, data):
"""
Convert dicts to a list and append it to the Confluence page
Author: chenying
:param space_name: SPACE NAME
:param title: title
:param data: dict data
:return: SUCCESS,URL|FAIL,MESSAGE
"""
dicts = []
str_list = []
for lists in data:
dicts_list = []
for key, value in lists.items():
dicts_list.append(key + "=" + value)
dicts.append(dicts_list)
for value in dicts:
str_list.append(", ".join(value))
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
# Check whether the Confluence page exists
if self.confluence.page_exists(space_key, title) is True:
page_id = self.confluence.get_page_id(space_key, title)
append_page_dict_info = self.confluence.append_page(page_id, title, self._create_list(str_list))
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url,
url=((append_page_dict_info or {}).get('_links') or {}).get(
'webui'))
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title, space_name=space_name)
def append_confluence_table(self, space_name, title, ordering, data):
"""
Append confluence page table
Author: chenying
:param space_name: SPACE NAME
:param title: title
:param ordering: table headers
:param data: table data
:return: SUCCESS,URL|FAIL,MESSAGE
"""
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
# Check whether the Confluence page exists
if self.confluence.page_exists(space_key, title) is True:
page_id = self.confluence.get_page_id(space_key, title)
# Append the table to the page
append_page_dict_info = self.confluence.append_page(page_id, title, self._create_table(ordering, data))
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url,
url=((append_page_dict_info or {}).get('_links') or {}).get(
'webui'))
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title, space_name=space_name)
def append_confluence_image(self, space_name, title, image_file):
"""
Append confluence page image
Author: chenying
:param space_name: SPACE NAME
:param title: title
:param image_file: image file
:return: SUCCESS,URL|FAIL,MESSAGE
"""
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
# Check whether the Confluence page exists
if self.confluence.page_exists(space_key, title) is True:
page_id = self.confluence.get_page_id(space_key, title)
# Upload the image to the Confluence page as an attachment
attach_file_dict_info = self.confluence.attach_file(image_file, page_id)
attach_file_url = '{host}{url}'.format(host=self.confluence.url,
url=((attach_file_dict_info or {}).get('_links') or {}).get(
'thumbnail'))
value = """<p>
<ac:image>
<ri:url ri:value="{}"/>
</ac:image>
</p>""".format(attach_file_url)
# Append the image macro to the page
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url,
url=((append_page_dict_info or {}).get('_links') or {}).get(
'webui'))
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title, space_name=space_name)
def append_confluence_jira_filter(self, space_name, title, body, columns=None):
"""
Append JIRA Filter
Author: chenying
:param space_name: SPACE NAME
:param title: title
:param body: Type: JQLQuery, person's name, JIRA_ID, or JIRA_URL. A JIRA_URL is converted to a JIRA_ID
:param columns: JIRA ISSUE Column's name
Note: Columns parameter doesn't work for JIRA_URL and JIRA_ID
:return: SUCCESS,URL|FAIL,MESSAGE
"""
keywords = ['key', 'summary', 'status', 'project', 'type', 'status', 'priority', 'resolution',
'affects version', 'fix version', 'component', 'labels', 'environment', 'description', 'links',
'assignee', 'reporter', 'due', 'created', 'updated', 'resolved', 'estimate', 'remaining',
'logged', 'development', 'agile', 'votes', 'watchers']
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
# The Confluence page exists
if self.confluence.page_exists(space_key, title) is True:
page_id = self.confluence.get_page_id(space_key, title)
# No display columns specified
if columns is None:
# The input is a JIRA_URL
if "".join(body).split(":")[0] == "http" and "".join(body).split("/")[-1].split("-")[0] == "JIRA":
jql_url_jira_id = "".join(body).split("/")[-1]
value = self._create_jira_filet_no_column().format(jql_url_jira_id)
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(append_page_dict_info or {}).get('_links') or {}).get('webui'))
# The input is a JIRA_ID
if "".join(body).split(":")[0] != "http" and "".join(body).split("-")[0] == "JIRA":
jira_id = "".join(body)
value = self._create_jira_filet_no_column().format(jira_id)
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(append_page_dict_info or {}).get('_links') or {}).get('webui'))
# The input is a JQL query
for keyword in keywords:
if keyword in body.split("=")[0]:
value = self._create_jira_filet_jql_no_columns().format(body)
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(append_page_dict_info or {}).get('_links') or {}).get('webui'))
# The input is a person's name
if self._jql_is_name("".join(body)) is True:
value = self._create_jira_filet_jql_is_people_name().format(body, body)
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(append_page_dict_info or {}).get('_links') or {}).get('webui'))
# The input is a plain string
value = self._create_jira_filet_jql_is_str_no_columns().format(body, body)
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(append_page_dict_info or {}).get('_links') or {}).get('webui'))
# Display columns specified
else:
# Check whether the columns parameter values are valid JIRA issue column names
for value in columns.split(','):
# Abort immediately on an invalid column
if value not in keywords:
return 'Incorrect column parameter: {}'.format(
value), 'Please check the columns parameters, JIRA ISSUE allow use {}'.format(keywords)
# The columns parameter has no effect for a JIRA_URL
if "".join(body).split(":")[0] == "http" and "".join(body).split("/")[-1].split("-")[0] == "JIRA":
return "FAIL", "Please remove the columns parameter."
# The columns parameter has no effect for a JIRA_ID
if "".join(body).split(":")[0] != "http" and "".join(body).split("-")[0] == "JIRA":
return "FAIL", "Please remove the columns parameter."
# The input is a JQL query
for keyword in keywords:
if keyword in body.split("=")[0]:
value = self._create_jira_filet_jql_has_columns().format(body, columns)
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(append_page_dict_info or {}).get('_links') or {}).get('webui'))
# The input is a person's name
if self._jql_is_name("".join(body)) is True:
value = self._create_jira_filet_jql_is_people_name_has_columns().format(body, body, columns)
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(append_page_dict_info or {}).get('_links') or {}).get('webui'))
# The input is a plain string
value = self._create_jira_filet_jql_is_str_has_columns().format(body, body, columns)
append_page_dict_info = self.confluence.append_page(page_id, title, value)
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(append_page_dict_info or {}).get('_links') or {}).get('webui'))
# The Confluence page does not exist
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title, space_name=space_name)
def delete_confluence_page_by_title(self, space_name, title):
"""
This method removes a page by the space key and the page title
Author: chenying
:param space_name: SPACE NAME
:param title: title
:return: SUCCESS|FAIL,MESSAGE
"""
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
if self.confluence.page_exists(space_key, title) is True:
page_id = self.confluence.get_page_by_title(space_key, title)['id']
self.confluence.remove_page(page_id)
return 'SUCCESS'
else:
return 'FAIL', "Can't find '{title}' page in the '{url}'".format(title=title, url=self.confluence.url)
def get_confluence_page_url_by_title(self, space_name, title):
"""
Return the URL of the page with the given title
Author: chenying
:param space_name: SPACE NAME
:param title: title
:return: SUCCESS,URL|FAIL,MESSAGE
"""
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
if self.confluence.page_exists(space_key, title) is True:
return 'SUCCESS', '{host}{url}'.format(host=self.confluence.url, url=(
(self.confluence.get_page_by_title(space_key, title) or {}).get("_links") or {}).get("webui"))
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title,
space_name=space_name)
def export_confluence_page_as_pdf(self, space_name, title, export_name):
"""
Export page as standard pdf exporter
Author: chenying
:param space_name: Space name
:param title: title
:param export_name: export file name
:return: SUCCESS|FAIL,MESSAGE
"""
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
page_id = self.confluence.get_page_id(space_key, title)
if page_id is not None:
content = self.confluence.export_page(page_id)
with open(export_name, "wb") as pdf:
pdf.write(content)
pdf.close()
return "SUCCESS"
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title,
space_name=space_name)
def export_confluence_page_as_word(self, space_name, title, export_name):
"""
Export page as standard word exporter
Author: chenying
:param space_name: Space name
:param title: title
:param export_name: export file name
:return: SUCCESS|FAIL,MESSAGE
"""
space_key = self._get_space_key_by_space_name(space_name)
if "FAIL" in space_key:
return space_key
page_id = self.confluence.get_page_id(space_key, title)
if page_id is not None:
content = self.confluence.get_page_as_word(page_id)
with open(export_name, "wb") as pdf:
pdf.write(content)
pdf.close()
return "SUCCESS"
else:
return 'FAIL', "Can't find '{title}' page in the '{space_name}'".format(title=title,
space_name=space_name)
def delete_confluence_page_by_url(self, url):
"""
This method removes a page by its page id
Author: chenying
:param url: URL
:return: SUCCESS|FAIL,MESSAGE
"""
# Get page id
page_id = self._get_page_id_by_url(url)
try:
self.confluence.remove_page(page_id)
return 'SUCCESS'
except Exception as ex:
print(ex)
return 'FAIL', 'Page: "{URL}", is not exists'.format(URL=url)
def get_confluence_page_title_by_url(self, url):
"""
Get page title by the URL
Author: chenying
:param url: URL
:return: SUCCESS,title|FAIL,MESSAGE
"""
page_id = self._get_page_id_by_url(url)
try:
get_page_by_id_dict_info = self.confluence.get_page_by_id(page_id)
return 'SUCCESS', '{title}'.format(title=(get_page_by_id_dict_info or {}).get('title'))
except Exception as ex:
print(ex)
return 'FAIL', 'Page: "{url}" is not exists'.format(url=url)
def get_confluence_all_groups(self):
"""
Get all confluence groups
Author: chenying
:return: list(group_name)
"""
_groups = []
for lists in self.confluence.get_all_groups():
group_name = lists.get("name")
_groups.append(group_name)
return _groups
def get_confluence_group_members(self, group_name):
"""
Get confluence group members
Author: chenying
:param group_name: group_name
:return: group members username, Type: [user_key, display_name, user_name]
"""
_members = []
if group_name not in self.get_confluence_all_groups():
return "FAIL"
for lists in self.confluence.get_group_members(group_name):
users = lists.get("username")
users_info = self.get_confluence_user_details_by_username(users)
_members.append(users_info)
return _members
# def get_confluence_group_members_name(self, user_list):
# """
# Get group members info, member's info type: [user_key, display_name, user_name]
# Author: chenying
# :param user_list: group members
# :return: list([user_key, display_name, user_name])
# """
# user_info_list = []
# for user in user_list:
# user_name_list = self.get_confluence_user_details_by_username(user)
#
# user_info_list.append(user_name_list)
#
# return user_info_list
def get_confluence_all_members(self):
"""
Get all user info, info type: list([user_key, display_name, user_name])
Author: chenying
:return: list([user_key, display_name, user_name])
"""
user_info = []
groups = self.get_confluence_all_groups()
for group in groups:
for member in self.get_confluence_group_members(group):
user_info.append(member)
return user_info
def get_confluence_user_details_by_username(self, username):
"""
Get user displayName and username
Author: chenying
:param username: user_name
:return: [user_key, display_name, user_name]|error_message
"""
try:
details_dict_info = self.confluence.get_user_details_by_username(username)
user_username = details_dict_info.get("username")
user_display_name = details_dict_info.get("displayName")
user_user_key = details_dict_info.get("userKey")
user_dict_info = [user_user_key, user_display_name, user_username]
return user_dict_info
except Exception as ex:
error_message = str(ex)
return error_message | PypiClean |
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/library/airfoils.py | from aerosandbox.geometry.airfoil import Airfoil
from aerosandbox.library.aerodynamics.viscous import *
from aerosandbox.geometry.airfoil.airfoil_families import get_NACA_coordinates, \
get_UIUC_coordinates
def diamond_airfoil(
t_over_c: float,
n_points_per_panel=2,
) -> Airfoil:
x_nondim = [1, 0.5, 0, 0.5, 1]
y_nondim = [0, 1, 0, -1, 0]
x = np.concatenate(
[
list(np.cosspace(a, b, n_points_per_panel))[:-1]
for a, b in zip(x_nondim[:-1], x_nondim[1:])
] + [[x_nondim[-1]]]
)
y = np.concatenate(
[
list(np.cosspace(a, b, n_points_per_panel))[:-1]
for a, b in zip(y_nondim[:-1], y_nondim[1:])
] + [[y_nondim[-1]]]
)
y = y * (t_over_c / 2)
coordinates = np.array([x, y]).T
return Airfoil(
name="Diamond",
coordinates=coordinates,
)
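# Example (illustrative): a 10%-thick diamond airfoil with the default paneling.
#
#   diamond = diamond_airfoil(t_over_c=0.10)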
generic_cambered_airfoil = Airfoil(
name="Generic Cambered Airfoil",
CL_function=lambda alpha, Re, mach: ( # Lift coefficient function
(alpha * np.pi / 180) * (2 * np.pi) + 0.4550
),
CD_function=lambda alpha, Re, mach: ( # Profile drag coefficient function
(1 + (alpha / 5) ** 2) * 2 * Cf_flat_plate(Re_L=Re)
),
CM_function=lambda alpha, Re, mach: ( # Moment coefficient function about quarter-chord
-0.1
),
coordinates=get_UIUC_coordinates(name="clarky")
)
generic_airfoil = Airfoil(
name="Generic Airfoil",
CL_function=lambda alpha, Re, mach: ( # Lift coefficient function
(alpha * np.pi / 180) * (2 * np.pi)
),
CD_function=lambda alpha, Re, mach: ( # Profile drag coefficient function
(1 + (alpha / 5) ** 2) * 2 * Cf_flat_plate(Re_L=Re)
),
CM_function=lambda alpha, Re, mach: ( # Moment coefficient function about quarter-chord
0
), # TODO make this an actual curve!
coordinates=get_NACA_coordinates(name="naca0012")
)
e216 = Airfoil(
name="e216",
CL_function=lambda alpha, Re, mach: ( # Lift coefficient function
Cl_e216(alpha=alpha, Re_c=Re)
),
CD_function=lambda alpha, Re, mach: ( # Profile drag coefficient function
Cd_profile_e216(alpha=alpha, Re_c=Re) +
Cd_wave_e216(Cl=Cl_e216(alpha=alpha, Re_c=Re), mach=mach)
),
CM_function=lambda alpha, Re, mach: ( # Moment coefficient function about quarter-chord
-0.15
), # TODO make this an actual curve!
)
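# For example, the fitted polar of the e216 section can be queried directly
# (illustrative call; the fit functions are imported from
# aerosandbox.library.aerodynamics.viscous above):
#
#     e216.CL_function(alpha=4.0, Re=1e6, mach=0.0)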
rae2822 = Airfoil(
name="rae2822",
CL_function=lambda alpha, Re, mach: ( # Lift coefficient function
Cl_rae2822(alpha=alpha, Re_c=Re)
),
CD_function=lambda alpha, Re, mach: ( # Profile drag coefficient function
Cd_profile_rae2822(alpha=alpha, Re_c=Re) +
Cd_wave_rae2822(Cl=Cl_rae2822(alpha=alpha, Re_c=Re), mach=mach)
),
CM_function=lambda alpha, Re, mach: ( # Moment coefficient function about quarter-chord
-0.05
), # TODO make this an actual curve!
)
naca0008 = Airfoil(
name="naca0008",
CL_function=lambda alpha, Re, mach: ( # Lift coefficient function
Cl_flat_plate(alpha=alpha) # TODO fit this to actual data
),
CD_function=lambda alpha, Re, mach: ( # Profile drag coefficient function
(1 + (alpha / 5) ** 2) * 2 * Cf_flat_plate(Re_L=Re) + # TODO fit this to actual data
Cd_wave_Korn(Cl=Cl_flat_plate(alpha=alpha), t_over_c=0.08, mach=mach, kappa_A=0.87)
),
CM_function=lambda alpha, Re, mach: ( # Moment coefficient function about quarter-chord
0
), # TODO make this an actual curve!
)
flat_plate = Airfoil(
name="Flat Plate",
CL_function=lambda alpha, Re, mach: ( # Lift coefficient function
Cl_flat_plate(alpha=alpha)
),
CD_function=lambda alpha, Re, mach: ( # Profile drag coefficient function
Cf_flat_plate(Re_L=Re) * 2
),
CM_function=lambda alpha, Re, mach: ( # Moment coefficient function
0
),
coordinates=np.array([
[1, 0],
[1, 1e-6],
[0, 1e-6],
[0, -1e-6],
[1, -1e-6],
[1, 0],
])
)
# /Editra-0.7.20.tar.gz/Editra-0.7.20/src/extern/pygments/styles/tango.py
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class TangoStyle(Style):
"""
    The Crunchy default style, inspired by the color palette from
    the Tango Icon Theme Guidelines.
"""
# work in progress...
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Multiline: "italic #8f5902", # class: 'cm'
Comment.Preproc: "italic #8f5902", # class: 'cp'
Comment.Single: "italic #8f5902", # class: 'c1'
Comment.Special: "italic #8f5902", # class: 'cs'
Keyword: "bold #204a87", # class: 'k'
Keyword.Constant: "bold #204a87", # class: 'kc'
Keyword.Declaration: "bold #204a87", # class: 'kd'
Keyword.Namespace: "bold #204a87", # class: 'kn'
Keyword.Pseudo: "bold #204a87", # class: 'kp'
Keyword.Reserved: "bold #204a87", # class: 'kr'
Keyword.Type: "bold #204a87", # class: 'kt'
Operator: "bold #ce5c00", # class: 'o'
Operator.Word: "bold #204a87", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#204a87", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "bold #5c35cc", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #204a87", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
# since the tango light blue does not show up well in text, we choose
# a pure blue instead.
Number: "bold #0000cf", # class: 'm'
Number.Float: "bold #0000cf", # class: 'mf'
Number.Hex: "bold #0000cf", # class: 'mh'
Number.Integer: "bold #0000cf", # class: 'mi'
Number.Integer.Long: "bold #0000cf", # class: 'il'
Number.Oct: "bold #0000cf", # class: 'mo'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "italic #000000", # class: 'go'
Generic.Prompt: "#8f5902", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
    }
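# Illustrative usage with the standard Pygments API (assumes Pygments is
# installed; a Style subclass such as TangoStyle can be passed as the
# formatter's style):
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     from pygments.formatters import HtmlFormatter
#     html = highlight("print('hi')", PythonLexer(), HtmlFormatter(style=TangoStyle))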
# /3DeeCellTracker-0.5.2a0-py3-none-any.whl/CellTracker/synthesize.py
from typing import Tuple, Optional, List, Union, Generator, Callable
import numpy as np
from numpy import ndarray
from sklearn.neighbors import KernelDensity, NearestNeighbors
RATIO_SEG_ERROR = 0.15
K_NEIGHBORS = 20 # number of neighbors for calculating relative coordinates
def points_to_features(x_2nxk: ndarray, y_2nx1: ndarray, points_raw_nx3: ndarray, points_wi_errors_nx3: ndarray,
replaced_indexes: ndarray, method_features: Callable, num_features: int, k_ptrs: int = K_NEIGHBORS):
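    """Fill x_2nxk and y_2nx1 in place with paired point features and match labels.

    Rows 0..n-1 pair each raw point with its (possibly error-corrupted) counterpart and are
    labelled True, except for the replaced points, which are labelled False. Rows n..2n-1 pair
    each raw point with a shuffled, non-matching point and are labelled False. Which half of
    each feature vector holds the raw-frame features is chosen at random for the whole batch.
    """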
knn_model_raw = NearestNeighbors(n_neighbors=k_ptrs + 1).fit(points_raw_nx3)
knn_model_generated = NearestNeighbors(n_neighbors=k_ptrs + 1).fit(points_wi_errors_nx3)
n = points_raw_nx3.shape[0]
points_no_match_nx3 = no_match_points(n, points_wi_errors_nx3)
x_a_nxf = method_features(points_raw_nx3, points_raw_nx3, k_ptrs, num_features, knn_model_raw)
x_b_match_nxf = method_features(points_wi_errors_nx3, points_wi_errors_nx3, k_ptrs, num_features,
knn_model_generated)
x_b_no_match_nxf = method_features(points_wi_errors_nx3, points_no_match_nx3, k_ptrs, num_features,
knn_model_generated)
features_a = np.vstack((x_a_nxf, x_a_nxf))
features_b = np.vstack((x_b_match_nxf, x_b_no_match_nxf))
if np.random.rand() > 0.5:
features_a, features_b = features_b, features_a
x_2nxk[:, :num_features] = features_a
x_2nxk[:, num_features:] = features_b
y_2nx1[:n] = True
y_2nx1[:n][replaced_indexes] = False
y_2nx1[n:] = False
def no_match_points(n, points_wi_errors_nx3):
random_indexes = np.arange(n)
np.random.shuffle(random_indexes)
points_no_match_nx3 = np.zeros_like(points_wi_errors_nx3)
for i in range(n):
if random_indexes[i] == i:
no_match_index = random_indexes[i - 1]
else:
no_match_index = random_indexes[i]
points_no_match_nx3[i, :] = points_wi_errors_nx3[no_match_index, :]
return points_no_match_nx3
def add_seg_errors(points_normalized_nx3: ndarray, ratio: float = RATIO_SEG_ERROR, bandwidth: float = 0.1) -> Tuple[
ndarray, ndarray]:
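    """Simulate segmentation errors by replacing a fraction `ratio` of the points
    with samples drawn from a kernel-density estimate fitted to the original point cloud.

    Returns the perturbed copy of the points and the indexes of the replaced rows.
    """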
if ratio <= 0 or ratio >= 1:
raise ValueError(f"ratio should be set between 0 and 1 but = {ratio}")
new_points_nx3 = points_normalized_nx3.copy()
kde_model = KernelDensity(bandwidth=bandwidth)
kde_model.fit(points_normalized_nx3)
num_points = points_normalized_nx3.shape[0]
num_replaced_points = int(np.ceil(num_points * ratio))
points_indexes = np.arange(num_points)
np.random.shuffle(points_indexes)
replaced_indexes = points_indexes[:num_replaced_points]
new_points_nx3[replaced_indexes, :] = kde_model.sample(num_replaced_points)
return new_points_nx3, replaced_indexes
def affine_transform(points: ndarray, affine_level: float, rand_move_level: float) -> ndarray:
"""generate affine transformed points
Notes
-----
points should have been normalized to have average of 0
"""
random_transform = (np.random.rand(3, 3) - 0.5) * affine_level
random_movements = (np.random.rand(*points.shape) - 0.5) * 4 * rand_move_level
ptrs_affine = np.dot(points, np.eye(3) + random_transform) + random_movements
    return ptrs_affine
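# Illustrative usage (assumes `points` is an (n, 3) array of normalized coordinates):
#
#     noisy_points, replaced = add_seg_errors(points, ratio=0.15)
#     moved_points = affine_transform(points, affine_level=0.2, rand_move_level=0.01)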
# /FLAML-2.0.2-py3-none-any.whl/flaml/automl/model.py
from contextlib import contextmanager
from functools import partial
import signal
import os
from typing import Callable, List, Union
import numpy as np
import time
import logging
import shutil
import sys
import math
from flaml import tune
from flaml.automl.data import (
group_counts,
)
from flaml.automl.task.task import (
Task,
SEQCLASSIFICATION,
SEQREGRESSION,
TOKENCLASSIFICATION,
SUMMARIZATION,
NLG_TASKS,
)
from flaml.automl.task.factory import task_factory
try:
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier, DummyRegressor
except ImportError:
pass
try:
from scipy.sparse import issparse
except ImportError:
pass
from flaml.automl.spark import psDataFrame, sparkDataFrame, psSeries, ERROR as SPARK_ERROR, DataFrame, Series
from flaml.automl.spark.utils import len_labels, to_pandas_on_spark
from flaml.automl.spark.configs import (
ParamList_LightGBM_Classifier,
ParamList_LightGBM_Regressor,
ParamList_LightGBM_Ranker,
)
if DataFrame is not None:
from pandas import to_datetime
try:
import psutil
except ImportError:
psutil = None
try:
import resource
except ImportError:
resource = None
try:
from lightgbm import LGBMClassifier, LGBMRegressor, LGBMRanker
except ImportError:
LGBMClassifier = LGBMRegressor = LGBMRanker = None
logger = logging.getLogger("flaml.automl")
# FREE_MEM_RATIO = 0.2
def TimeoutHandler(sig, frame):
raise TimeoutError(sig, frame)
@contextmanager
def limit_resource(memory_limit, time_limit):
if memory_limit > 0:
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
if soft < 0 and (hard < 0 or memory_limit <= hard) or memory_limit < soft:
try:
resource.setrlimit(resource.RLIMIT_AS, (int(memory_limit), hard))
except ValueError:
# According to https://bugs.python.org/issue40518, it's a mac-specific error.
pass
main_thread = False
if time_limit is not None:
try:
signal.signal(signal.SIGALRM, TimeoutHandler)
signal.alarm(int(time_limit) or 1)
main_thread = True
except ValueError:
pass
try:
yield
finally:
if main_thread:
signal.alarm(0)
if memory_limit > 0:
resource.setrlimit(resource.RLIMIT_AS, (soft, hard))
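# Illustrative use of the guard above (hypothetical values): cap a training call
# at roughly 2 GB of address space and 60 s of wall time; exceeding either limit
# raises MemoryError / TimeoutError inside the block.
#
#     with limit_resource(memory_limit=2 * 1024 ** 3, time_limit=60):
#         model.fit(X_train, y_train)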
class BaseEstimator:
"""The abstract class for all learners.
Typical examples:
* XGBoostEstimator: for regression.
* XGBoostSklearnEstimator: for classification.
* LGBMEstimator, RandomForestEstimator, LRL1Classifier, LRL2Classifier:
for both regression and classification.
"""
def __init__(self, task="binary", **config):
"""Constructor.
Args:
task: A string of the task type, one of
'binary', 'multiclass', 'regression', 'rank', 'seq-classification',
'seq-regression', 'token-classification', 'multichoice-classification',
'summarization', 'ts_forecast', 'ts_forecast_classification'.
config: A dictionary containing the hyperparameter names, 'n_jobs' as keys.
n_jobs is the number of parallel threads.
"""
self._task = task if isinstance(task, Task) else task_factory(task, None, None)
self.params = self.config2params(config)
self.estimator_class = self._model = None
if "_estimator_type" in config:
self._estimator_type = self.params.pop("_estimator_type")
else:
self._estimator_type = "classifier" if self._task.is_classification() else "regressor"
def get_params(self, deep=False):
params = self.params.copy()
params["task"] = self._task
if hasattr(self, "_estimator_type"):
params["_estimator_type"] = self._estimator_type
return params
@property
def classes_(self):
return self._model.classes_
@property
def n_features_in_(self):
return self._model.n_features_in_
@property
def model(self):
"""Trained model after fit() is called, or None before fit() is called."""
return self._model
@property
def estimator(self):
"""Trained model after fit() is called, or None before fit() is called."""
return self._model
@property
def feature_names_in_(self):
"""
if self._model has attribute feature_names_in_, return it.
otherwise, if self._model has attribute feature_name_, return it.
otherwise, if self._model has attribute feature_names, return it.
otherwise, if self._model has method get_booster, return the feature names.
otherwise, return None.
"""
if hasattr(self._model, "feature_names_in_"): # for sklearn, xgboost>=1.6
return self._model.feature_names_in_
if hasattr(self._model, "feature_name_"): # for lightgbm
return self._model.feature_name_
if hasattr(self._model, "feature_names"): # for XGBoostEstimator
return self._model.feature_names
if hasattr(self._model, "get_booster"):
# get feature names for xgboost<1.6
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.feature_names
booster = self._model.get_booster()
return booster.feature_names
return None
@property
def feature_importances_(self):
"""
if self._model has attribute feature_importances_, return it.
otherwise, if self._model has attribute coef_, return it.
otherwise, return None.
"""
if hasattr(self._model, "feature_importances_"):
# for sklearn, lightgbm, catboost, xgboost
return self._model.feature_importances_
elif hasattr(self._model, "coef_"): # for linear models
return self._model.coef_
else:
return None
def _preprocess(self, X):
return X
def _fit(self, X_train, y_train, **kwargs):
current_time = time.time()
if "groups" in kwargs:
kwargs = kwargs.copy()
groups = kwargs.pop("groups")
if self._task == "rank":
kwargs["group"] = group_counts(groups)
# groups_val = kwargs.get('groups_val')
# if groups_val is not None:
# kwargs['eval_group'] = [group_counts(groups_val)]
# kwargs['eval_set'] = [
# (kwargs['X_val'], kwargs['y_val'])]
# kwargs['verbose'] = False
# del kwargs['groups_val'], kwargs['X_val'], kwargs['y_val']
X_train = self._preprocess(X_train)
model = self.estimator_class(**self.params)
if logger.level == logging.DEBUG:
# xgboost 1.6 doesn't display all the params in the model str
logger.debug(f"flaml.model - {model} fit started with params {self.params}")
model.fit(X_train, y_train, **kwargs)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {model} fit finished")
train_time = time.time() - current_time
self._model = model
return train_time
def fit(self, X_train, y_train, budget=None, free_mem_ratio=0, **kwargs):
"""Train the model from given training data.
Args:
X_train: A numpy array or a dataframe of training data in shape n*m.
y_train: A numpy array or a series of labels in shape n*1.
budget: A float of the time budget in seconds.
free_mem_ratio: A float between 0 and 1 for the free memory ratio to keep during training.
Returns:
train_time: A float of the training time in seconds.
"""
if (
getattr(self, "limit_resource", None)
and resource is not None
and (budget is not None or psutil is not None)
):
start_time = time.time()
mem = psutil.virtual_memory() if psutil is not None else None
try:
with limit_resource(
mem.available * (1 - free_mem_ratio) + psutil.Process(os.getpid()).memory_info().rss
if mem is not None
else -1,
budget,
):
train_time = self._fit(X_train, y_train, **kwargs)
except (MemoryError, TimeoutError) as e:
logger.warning(f"{e.__class__} {e}")
if self._task.is_classification():
model = DummyClassifier()
else:
model = DummyRegressor()
X_train = self._preprocess(X_train)
model.fit(X_train, y_train)
self._model = model
train_time = time.time() - start_time
else:
train_time = self._fit(X_train, y_train, **kwargs)
return train_time
def predict(self, X, **kwargs):
"""Predict label from features.
Args:
X: A numpy array or a dataframe of featurized instances, shape n*m.
Returns:
A numpy array of shape n*1.
            Each element is the label for an instance.
"""
if self._model is not None:
X = self._preprocess(X)
return self._model.predict(X, **kwargs)
else:
logger.warning("Estimator is not fit yet. Please run fit() before predict().")
return np.ones(X.shape[0])
def predict_proba(self, X, **kwargs):
"""Predict the probability of each class from features.
Only works for classification problems
Args:
X: A numpy array of featurized instances, shape n*m.
Returns:
A numpy array of shape n*c. c is the # classes.
Each element at (i,j) is the probability for instance i to be in
class j.
"""
assert self._task.is_classification(), "predict_proba() only for classification."
X = self._preprocess(X)
return self._model.predict_proba(X, **kwargs)
def score(self, X_val: DataFrame, y_val: Series, **kwargs):
"""Report the evaluation score of a trained estimator.
Args:
X_val: A pandas dataframe of the validation input data.
y_val: A pandas series of the validation label.
kwargs: keyword argument of the evaluation function, for example:
- metric: A string of the metric name or a function
e.g., 'accuracy', 'roc_auc', 'roc_auc_ovr', 'roc_auc_ovo',
'f1', 'micro_f1', 'macro_f1', 'log_loss', 'mae', 'mse', 'r2',
'mape'. Default is 'auto'.
If metric is given, the score will report the user specified metric.
If metric is not given, the metric is set to accuracy for classification and r2
for regression.
You can also pass a customized metric function, for examples on how to pass a
customized metric function, please check
[test/nlp/test_autohf_custom_metric.py](https://github.com/microsoft/FLAML/blob/main/test/nlp/test_autohf_custom_metric.py) and
[test/automl/test_multiclass.py](https://github.com/microsoft/FLAML/blob/main/test/automl/test_multiclass.py).
Returns:
The evaluation score on the validation dataset.
"""
from .ml import metric_loss_score
from .ml import is_min_metric
if self._model is not None:
if self._task == "rank":
raise NotImplementedError("AutoML.score() is not implemented for ranking")
else:
X_val = self._preprocess(X_val)
metric = kwargs.pop("metric", None)
if metric:
y_pred = self.predict(X_val, **kwargs)
if is_min_metric(metric):
return metric_loss_score(metric, y_pred, y_val)
else:
return 1.0 - metric_loss_score(metric, y_pred, y_val)
else:
return self._model.score(X_val, y_val, **kwargs)
else:
logger.warning("Estimator is not fit yet. Please run fit() before predict().")
return 0.0
def cleanup(self):
del self._model
self._model = None
@classmethod
def search_space(cls, data_size, task, **params):
"""[required method] search space.
Args:
data_size: A tuple of two integers, number of rows and columns.
task: A str of the task type, e.g., "binary", "multiclass", "regression".
Returns:
A dictionary of the search space.
Each key is the name of a hyperparameter, and value is a dict with
its domain (required) and low_cost_init_value, init_value,
cat_hp_cost (if applicable).
e.g., ```{'domain': tune.randint(lower=1, upper=10), 'init_value': 1}```.
"""
return {}
@classmethod
def size(cls, config: dict) -> float:
"""[optional method] memory size of the estimator in bytes.
Args:
config: A dict of the hyperparameter config.
Returns:
A float of the memory size required by the estimator to train the
given config.
"""
return 1.0
@classmethod
def cost_relative2lgbm(cls) -> float:
"""[optional method] relative cost compared to lightgbm."""
return 1.0
@classmethod
def init(cls):
"""[optional method] initialize the class."""
pass
def config2params(self, config: dict) -> dict:
"""[optional method] config dict to params dict
Args:
config: A dict of the hyperparameter config.
Returns:
A dict that will be passed to self.estimator_class's constructor.
"""
params = config.copy()
if "FLAML_sample_size" in params:
params.pop("FLAML_sample_size")
return params
class SparkEstimator(BaseEstimator):
"""The base class for fine-tuning spark models, using pyspark.ml and SynapseML API."""
def __init__(self, task="binary", **config):
if SPARK_ERROR:
raise SPARK_ERROR
super().__init__(task, **config)
self.df_train = None
def _preprocess(
self,
X_train: Union[psDataFrame, sparkDataFrame],
y_train: psSeries = None,
index_col: str = "tmp_index_col",
return_label: bool = False,
):
# TODO: optimize this, support pyspark.sql.DataFrame
if y_train is not None:
self.df_train = X_train.join(y_train)
else:
self.df_train = X_train
if isinstance(self.df_train, psDataFrame):
self.df_train = self.df_train.to_spark(index_col=index_col)
if return_label:
return self.df_train, y_train.name
else:
return self.df_train
def fit(
self,
X_train: psDataFrame,
y_train: psSeries = None,
budget=None,
free_mem_ratio=0,
index_col: str = "tmp_index_col",
**kwargs,
):
"""Train the model from given training data.
Args:
X_train: A pyspark.pandas DataFrame of training data in shape n*m.
            y_train: A pyspark.pandas Series in shape n*1. None if X_train is a pyspark.pandas
                DataFrame that already contains y_train.
budget: A float of the time budget in seconds.
free_mem_ratio: A float between 0 and 1 for the free memory ratio to keep during training.
Returns:
train_time: A float of the training time in seconds.
"""
df_train, label_col = self._preprocess(X_train, y_train, index_col=index_col, return_label=True)
kwargs["labelCol"] = label_col
train_time = self._fit(df_train, **kwargs)
return train_time
def _fit(self, df_train: sparkDataFrame, **kwargs):
current_time = time.time()
pipeline_model = self.estimator_class(**self.params, **kwargs)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {pipeline_model} fit started with params {self.params}")
pipeline_model.fit(df_train)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {pipeline_model} fit finished")
train_time = time.time() - current_time
self._model = pipeline_model
return train_time
def predict(self, X, index_col="tmp_index_col", return_all=False, **kwargs):
"""Predict label from features.
Args:
X: A pyspark or pyspark.pandas dataframe of featurized instances, shape n*m.
index_col: A str of the index column name. Default to "tmp_index_col".
return_all: A bool of whether to return all the prediction results. Default to False.
Returns:
A pyspark.pandas series of shape n*1 if return_all is False. Otherwise, a pyspark.pandas dataframe.
"""
if self._model is not None:
X = self._preprocess(X, index_col=index_col)
predictions = to_pandas_on_spark(self._model.transform(X), index_col=index_col)
predictions.index.name = None
pred_y = predictions["prediction"]
if return_all:
return predictions
else:
return pred_y
else:
logger.warning("Estimator is not fit yet. Please run fit() before predict().")
return np.ones(X.shape[0])
def predict_proba(self, X, index_col="tmp_index_col", return_all=False, **kwargs):
"""Predict the probability of each class from features.
Only works for classification problems
Args:
X: A pyspark or pyspark.pandas dataframe of featurized instances, shape n*m.
index_col: A str of the index column name. Default to "tmp_index_col".
return_all: A bool of whether to return all the prediction results. Default to False.
Returns:
A pyspark.pandas dataframe of shape n*c. c is the # classes.
Each element at (i,j) is the probability for instance i to be in
class j.
"""
assert self._task.is_classification(), "predict_proba() only for classification."
if self._model is not None:
X = self._preprocess(X, index_col=index_col)
predictions = to_pandas_on_spark(self._model.transform(X), index_col=index_col)
predictions.index.name = None
pred_y = predictions["probability"]
if return_all:
return predictions
else:
return pred_y
else:
logger.warning("Estimator is not fit yet. Please run fit() before predict().")
return np.ones(X.shape[0])
class SparkLGBMEstimator(SparkEstimator):
"""The class for fine-tuning spark version lightgbm models, using SynapseML API."""
ITER_HP = "numIterations"
DEFAULT_ITER = 100
@classmethod
def search_space(cls, data_size, **params):
upper = max(5, min(32768, int(data_size[0]))) # upper must be larger than lower
# https://github.com/microsoft/SynapseML/blob/master/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/LightGBMBase.scala
return {
"numIterations": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"numLeaves": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"minDataInLeaf": {
"domain": tune.lograndint(lower=2, upper=2**7 + 1),
"init_value": 20,
},
"learningRate": {
"domain": tune.loguniform(lower=1 / 1024, upper=1.0),
"init_value": 0.1,
},
"log_max_bin": { # log transformed with base 2
"domain": tune.lograndint(lower=3, upper=11),
"init_value": 8,
},
"featureFraction": {
"domain": tune.uniform(lower=0.01, upper=1.0),
"init_value": 1.0,
},
"lambdaL1": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1 / 1024,
},
"lambdaL2": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1.0,
},
}
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
if "n_jobs" in params:
params.pop("n_jobs")
if "log_max_bin" in params:
params["maxBin"] = (1 << params.pop("log_max_bin")) - 1
return params
@classmethod
def size(cls, config):
num_leaves = int(round(config.get("numLeaves") or 1 << config.get("maxDepth", 16)))
n_estimators = int(round(config["numIterations"]))
return (num_leaves * 3 + (num_leaves - 1) * 4 + 1.0) * n_estimators * 8
def __init__(self, task="binary", **config):
super().__init__(task, **config)
err_msg = (
"SynapseML is not installed. Please refer to [SynapseML]"
+ "(https://github.com/microsoft/SynapseML) for installation instructions."
)
if "regression" == task:
try:
from synapse.ml.lightgbm import LightGBMRegressor
except ImportError:
raise ImportError(err_msg)
self.estimator_class = LightGBMRegressor
self.estimator_params = ParamList_LightGBM_Regressor
elif "rank" == task:
try:
from synapse.ml.lightgbm import LightGBMRanker
except ImportError:
raise ImportError(err_msg)
self.estimator_class = LightGBMRanker
self.estimator_params = ParamList_LightGBM_Ranker
else:
try:
from synapse.ml.lightgbm import LightGBMClassifier
except ImportError:
raise ImportError(err_msg)
self.estimator_class = LightGBMClassifier
self.estimator_params = ParamList_LightGBM_Classifier
self._time_per_iter = None
self._train_size = 0
self._mem_per_iter = -1
self.model_classes_ = None
self.model_n_classes_ = None
def fit(
self,
X_train,
y_train=None,
budget=None,
free_mem_ratio=0,
index_col="tmp_index_col",
**kwargs,
):
start_time = time.time()
if self.model_n_classes_ is None and self._task not in ["regression", "rank"]:
self.model_n_classes_, self.model_classes_ = len_labels(y_train, return_labels=True)
df_train, label_col = self._preprocess(X_train, y_train, index_col=index_col, return_label=True)
# n_iter = self.params.get(self.ITER_HP, self.DEFAULT_ITER)
# trained = False
# mem0 = psutil.virtual_memory().available if psutil is not None else 1
_kwargs = kwargs.copy()
if self._task not in ["regression", "rank"] and "objective" not in _kwargs:
_kwargs["objective"] = "binary" if self.model_n_classes_ == 2 else "multiclass"
for k in list(_kwargs.keys()):
if k not in self.estimator_params:
logger.warning(f"[SparkLGBMEstimator] [Warning] Ignored unknown parameter: {k}")
_kwargs.pop(k)
# TODO: find a better estimation of early stopping
# if (
# (not self._time_per_iter or abs(self._train_size - df_train.count()) > 4)
# and budget is not None
# or self._mem_per_iter < 0
# and psutil is not None
# ) and n_iter > 1:
# self.params[self.ITER_HP] = 1
# self._t1 = self._fit(df_train, **_kwargs)
# if budget is not None and self._t1 >= budget or n_iter == 1:
# return self._t1
# mem1 = psutil.virtual_memory().available if psutil is not None else 1
# self._mem1 = mem0 - mem1
# self.params[self.ITER_HP] = min(n_iter, 4)
# self._t2 = self._fit(df_train, **_kwargs)
# mem2 = psutil.virtual_memory().available if psutil is not None else 1
# self._mem2 = max(mem0 - mem2, self._mem1)
# self._mem_per_iter = min(self._mem1, self._mem2 / self.params[self.ITER_HP])
# self._time_per_iter = (
# (self._t2 - self._t1) / (self.params[self.ITER_HP] - 1)
# if self._t2 > self._t1
# else self._t1
# if self._t1
# else 0.001
# )
# self._train_size = df_train.count()
# if (
# budget is not None
# and self._t1 + self._t2 >= budget
# or n_iter == self.params[self.ITER_HP]
# ):
# # self.params[self.ITER_HP] = n_iter
# return time.time() - start_time
# trained = True
# if n_iter > 1:
# max_iter = min(
# n_iter,
# int(
# (budget - time.time() + start_time - self._t1) / self._time_per_iter
# + 1
# )
# if budget is not None
# else n_iter,
# )
# if trained and max_iter <= self.params[self.ITER_HP]:
# return time.time() - start_time
# # when not trained, train at least one iter
# self.params[self.ITER_HP] = max(max_iter, 1)
_kwargs["labelCol"] = label_col
self._fit(df_train, **_kwargs)
train_time = time.time() - start_time
return train_time
def _fit(self, df_train: sparkDataFrame, **kwargs):
current_time = time.time()
model = self.estimator_class(**self.params, **kwargs)
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {model} fit started with params {self.params}")
self._model = model.fit(df_train)
self._model.classes_ = self.model_classes_
self._model.n_classes_ = self.model_n_classes_
if logger.level == logging.DEBUG:
logger.debug(f"flaml.model - {model} fit finished")
train_time = time.time() - current_time
return train_time
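# Illustrative direct usage (normally driven by flaml.AutoML; psdf_X is an assumed
# pyspark.pandas DataFrame of features and pss_y a named pyspark.pandas Series of labels):
#
#     est = SparkLGBMEstimator(task="regression", numIterations=50)
#     est.fit(psdf_X, pss_y)
#     preds = est.predict(psdf_X)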
class TransformersEstimator(BaseEstimator):
"""The class for fine-tuning language models, using huggingface transformers API."""
ITER_HP = "global_max_steps"
def __init__(self, task="seq-classification", **config):
super().__init__(task, **config)
import uuid
self.trial_id = str(uuid.uuid1().hex)[:8]
if task not in NLG_TASKS: # TODO: not in NLG_TASKS
from .nlp.huggingface.training_args import (
TrainingArgumentsForAuto as TrainingArguments,
)
else:
from .nlp.huggingface.training_args import (
Seq2SeqTrainingArgumentsForAuto as TrainingArguments,
)
self._TrainingArguments = TrainingArguments
@classmethod
def search_space(cls, data_size, task, **params):
search_space_dict = {
"learning_rate": {
"domain": tune.loguniform(1e-6, 1e-4),
"init_value": 1e-5,
},
"num_train_epochs": {
"domain": tune.choice([1, 2, 3, 4, 5]),
"init_value": 3, # to be consistent with roberta
"low_cost_init_value": 1,
},
"per_device_train_batch_size": {
"domain": tune.choice([4, 8, 16, 32, 64]),
"init_value": 32,
"low_cost_init_value": 64,
},
"seed": {
"domain": tune.choice(range(1, 40)),
"init_value": 20,
},
"global_max_steps": {
"domain": sys.maxsize,
"init_value": sys.maxsize,
},
}
return search_space_dict
@property
def fp16(self):
return self._kwargs.get("gpu_per_trial") and self._training_args.fp16
@property
def no_cuda(self):
return not self._kwargs.get("gpu_per_trial")
def _set_training_args(self, **kwargs):
from .nlp.utils import date_str, Counter
for key, val in kwargs.items():
assert key not in self.params, (
"Since {} is in the search space, it cannot exist in 'custom_fit_kwargs' at the same time."
"If you need to fix the value of {} to {}, the only way is to add a single-value domain in the search "
"space by adding:\n '{}': {{ 'domain': {} }} to 'custom_hp'. For example:"
'automl_settings["custom_hp"] = {{ "transformer": {{ "model_path": {{ "domain" : '
'"google/electra-small-discriminator" }} }} }}'.format(key, key, val, key, val)
)
"""
        If the user has specified any custom args for TrainingArguments, update these arguments
"""
self._training_args = self._TrainingArguments(**kwargs)
"""
Update the attributes in TrainingArguments with self.params values
"""
for key, val in self.params.items():
if hasattr(self._training_args, key):
setattr(self._training_args, key, val)
"""
Update the attributes in TrainingArguments that depends on the values of self.params
"""
local_dir = os.path.join(self._training_args.output_dir, "train_{}".format(date_str()))
if self._use_ray is True:
import ray
self._training_args.output_dir = ray.tune.get_trial_dir()
else:
self._training_args.output_dir = Counter.get_trial_fold_name(local_dir, self.params, self.trial_id)
self._training_args.fp16 = self.fp16
self._training_args.no_cuda = self.no_cuda
if self._task == TOKENCLASSIFICATION and self._training_args.max_seq_length is not None:
logger.warning(
"For token classification task, FLAML currently does not support customizing the max_seq_length, max_seq_length will be reset to None."
)
setattr(self._training_args, "max_seq_length", None)
def _tokenize_text(self, X, y=None, **kwargs):
from .nlp.huggingface.utils import tokenize_text
from .nlp.utils import is_a_list_of_str
is_str = str(X.dtypes[0]) in ("string", "str")
is_list_of_str = is_a_list_of_str(X[list(X.keys())[0]].to_list()[0])
if is_str or is_list_of_str:
return tokenize_text(
X=X,
Y=y,
task=self._task,
hf_args=self._training_args,
tokenizer=self.tokenizer,
)
else:
return X, y
def _model_init(self):
from .nlp.huggingface.utils import load_model
this_model = load_model(
checkpoint_path=self._training_args.model_path,
task=self._task,
num_labels=self.num_labels,
)
return this_model
def _preprocess_data(self, X, y):
from datasets import Dataset
processed_X, processed_y_df = self._tokenize_text(X=X, y=y, **self._kwargs)
# convert y from pd.DataFrame back to pd.Series
processed_y = processed_y_df.iloc[:, 0]
processed_dataset = Dataset.from_pandas(processed_X.join(processed_y_df))
return processed_dataset, processed_X, processed_y
@property
def num_labels(self):
if self._task == SEQREGRESSION:
return 1
elif self._task == SEQCLASSIFICATION:
return len(set(self._y_train))
elif self._task == TOKENCLASSIFICATION:
return len(self._training_args.label_list)
else:
return None
@property
def tokenizer(self):
from transformers import AutoTokenizer
if self._task == SUMMARIZATION:
return AutoTokenizer.from_pretrained(
pretrained_model_name_or_path=self._training_args.model_path,
cache_dir=None,
use_fast=True,
revision="main",
use_auth_token=None,
)
else:
return AutoTokenizer.from_pretrained(
self._training_args.model_path,
use_fast=True,
add_prefix_space=self._add_prefix_space,
)
@property
def data_collator(self):
from flaml.automl.task.task import Task
from flaml.automl.nlp.huggingface.data_collator import (
task_to_datacollator_class,
)
data_collator_class = task_to_datacollator_class.get(
self._task.name if isinstance(self._task, Task) else self._task
)
if data_collator_class:
kwargs = {
"model": self._model_init(),
# need to set model, or there's ValueError: Expected input batch_size (..) to match target batch_size (..)
"label_pad_token_id": -100, # pad with token id -100
"pad_to_multiple_of": 8,
# pad to multiple of 8 because quote Transformers: "This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta)"
"tokenizer": self.tokenizer,
}
for key in list(kwargs.keys()):
if key not in data_collator_class.__dict__.keys() and key != "tokenizer":
del kwargs[key]
return data_collator_class(**kwargs)
else:
return None
def fit(
self,
X_train: DataFrame,
y_train: Series,
budget=None,
free_mem_ratio=0,
X_val=None,
y_val=None,
gpu_per_trial=None,
metric=None,
**kwargs,
):
import transformers
transformers.logging.set_verbosity_error()
from transformers import TrainerCallback
from transformers.trainer_utils import set_seed
from .nlp.huggingface.trainer import TrainerForAuto
try:
from ray.tune import is_session_enabled
self._use_ray = is_session_enabled()
except ImportError:
self._use_ray = False
this_params = self.params
self._kwargs = kwargs
self._X_train, self._y_train = X_train, y_train
self._set_training_args(**kwargs)
self._add_prefix_space = (
"roberta" in self._training_args.model_path
) # If using roberta model, must set add_prefix_space to True to avoid the assertion error at
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/roberta/tokenization_roberta_fast.py#L249
train_dataset, self._X_train, self._y_train = self._preprocess_data(X_train, y_train)
if X_val is not None:
eval_dataset, self._X_val, self._y_val = self._preprocess_data(X_val, y_val)
else:
eval_dataset, self._X_val, self._y_val = None, None, None
set_seed(self.params.get("seed", self._training_args.seed))
self._metric = metric
class EarlyStoppingCallbackForAuto(TrainerCallback):
def on_train_begin(self, args, state, control, **callback_kwargs):
self.train_begin_time = time.time()
def on_step_begin(self, args, state, control, **callback_kwargs):
self.step_begin_time = time.time()
def on_step_end(self, args, state, control, **callback_kwargs):
if state.global_step == 1:
self.time_per_iter = time.time() - self.step_begin_time
if (
budget
and (time.time() + self.time_per_iter > self.train_begin_time + budget)
or state.global_step >= this_params[TransformersEstimator.ITER_HP]
):
control.should_training_stop = True
control.should_save = True
control.should_evaluate = True
return control
def on_epoch_end(self, args, state, control, **callback_kwargs):
if control.should_training_stop or state.epoch + 1 >= args.num_train_epochs:
control.should_save = True
control.should_evaluate = True
self._trainer = TrainerForAuto(
args=self._training_args,
model_init=self._model_init,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=self.tokenizer,
data_collator=self.data_collator,
compute_metrics=self._compute_metrics_by_dataset_name,
callbacks=[EarlyStoppingCallbackForAuto],
)
if self._task in NLG_TASKS:
setattr(self._trainer, "_is_seq2seq", True)
"""
When not using ray for tuning, set the limit of CUDA_VISIBLE_DEVICES to math.ceil(gpu_per_trial),
so each estimator does not see all the GPUs
"""
if gpu_per_trial is not None:
tmp_cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
self._trainer.args._n_gpu = gpu_per_trial
# if gpu_per_trial == 0:
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
if tmp_cuda_visible_devices.count(",") != math.ceil(gpu_per_trial) - 1:
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(x) for x in range(math.ceil(gpu_per_trial))])
import time
start_time = time.time()
self._trainer.train()
if gpu_per_trial is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = tmp_cuda_visible_devices
self.params[self.ITER_HP] = self._trainer.state.global_step
self._checkpoint_path = self._select_checkpoint(self._trainer)
self._ckpt_remains = list(self._trainer.ckpt_to_metric.keys())
if hasattr(self._trainer, "intermediate_results"):
self.intermediate_results = [
x[1] for x in sorted(self._trainer.intermediate_results.items(), key=lambda x: x[0])
]
self._trainer = None
return time.time() - start_time
def _delete_one_ckpt(self, ckpt_location):
if self._use_ray is False:
if os.path.exists(ckpt_location):
shutil.rmtree(ckpt_location)
def cleanup(self):
super().cleanup()
if hasattr(self, "_ckpt_remains"):
for each_ckpt in self._ckpt_remains:
self._delete_one_ckpt(each_ckpt)
def _select_checkpoint(self, trainer):
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
if trainer.ckpt_to_metric:
best_ckpt, _ = min(trainer.ckpt_to_metric.items(), key=lambda x: x[1]["eval_automl_metric"])
best_ckpt_global_step = trainer.ckpt_to_global_step[best_ckpt]
for each_ckpt in list(trainer.ckpt_to_metric):
if each_ckpt != best_ckpt:
del trainer.ckpt_to_metric[each_ckpt]
del trainer.ckpt_to_global_step[each_ckpt]
self._delete_one_ckpt(each_ckpt)
else:
best_ckpt_global_step = trainer.state.global_step
best_ckpt = os.path.join(
trainer.args.output_dir,
f"{PREFIX_CHECKPOINT_DIR}-{best_ckpt_global_step}",
)
self.params[self.ITER_HP] = best_ckpt_global_step
logger.debug(trainer.state.global_step)
logger.debug(trainer.ckpt_to_global_step)
return best_ckpt
def _compute_metrics_by_dataset_name(self, eval_pred):
# TODO: call self._metric(eval_pred, self)
if isinstance(self._metric, str):
from .ml import metric_loss_score
from .nlp.huggingface.utils import postprocess_prediction_and_true
predictions, y_true = eval_pred
# postprocess the matrix prediction and ground truth into user readable format, e.g., for summarization, decode into text
processed_predictions, processed_y_true = postprocess_prediction_and_true(
task=self._task,
y_pred=predictions,
tokenizer=self.tokenizer,
hf_args=self._training_args,
y_true=y_true,
)
metric_dict = {
"automl_metric": metric_loss_score(
metric_name=self._metric,
y_processed_predict=processed_predictions,
y_processed_true=processed_y_true,
labels=self._training_args.label_list,
)
}
else:
# TODO: debug to see how custom metric can take both tokenized (here) and untokenized input (ml.py)
loss, metric_dict = self._metric(
X_test=self._X_val,
y_test=self._y_val,
estimator=self,
labels=None,
X_train=self._X_train,
y_train=self._y_train,
)
metric_dict["automl_metric"] = loss
return metric_dict
def _init_model_for_predict(self):
from .nlp.huggingface.trainer import TrainerForAuto
"""
Need to reinit training_args because of a bug in deepspeed: if not reinit, the deepspeed config will be inconsistent
with HF config https://github.com/huggingface/transformers/blob/main/src/transformers/training_args.py#L947
"""
training_args = self._TrainingArguments(local_rank=-1, model_path=self._checkpoint_path, fp16=self.fp16)
for key, val in self._training_args.__dict__.items():
if key not in ("local_rank", "model_path", "fp16"):
setattr(training_args, key, val)
self._training_args = training_args
new_trainer = TrainerForAuto(
model=self._model_init(),
args=self._training_args,
data_collator=self.data_collator,
compute_metrics=self._compute_metrics_by_dataset_name,
)
if self._task in NLG_TASKS:
setattr(new_trainer, "_is_seq2seq", True)
return new_trainer
def predict_proba(self, X, **pred_kwargs):
from datasets import Dataset
if pred_kwargs:
for key, val in pred_kwargs.items():
setattr(self._training_args, key, val)
assert self._task.is_classification(), "predict_proba() only for classification tasks."
X_test, _ = self._tokenize_text(X, **self._kwargs)
test_dataset = Dataset.from_pandas(X_test)
new_trainer = self._init_model_for_predict()
try:
predictions = new_trainer.predict(test_dataset).predictions
except ZeroDivisionError:
logger.warning("Zero division error appeared in HuggingFace Transformers.")
predictions = None
return predictions
def score(self, X_val: DataFrame, y_val: Series, **kwargs):
import transformers
transformers.logging.set_verbosity_error()
self._metric = kwargs["metric"]
eval_dataset, X_val, y_val = self._preprocess_data(X_val, y_val)
new_trainer = self._init_model_for_predict()
return new_trainer.evaluate(eval_dataset)
def predict(self, X, **pred_kwargs):
import transformers
from datasets import Dataset
from .nlp.huggingface.utils import postprocess_prediction_and_true
transformers.logging.set_verbosity_error()
if pred_kwargs:
for key, val in pred_kwargs.items():
setattr(self._training_args, key, val)
X_test, _ = self._tokenize_text(X, **self._kwargs)
test_dataset = Dataset.from_pandas(X_test)
new_trainer = self._init_model_for_predict()
kwargs = {} if self._task not in NLG_TASKS else {"metric_key_prefix": "predict"}
try:
predictions = new_trainer.predict(test_dataset, **kwargs).predictions
except ZeroDivisionError:
logger.warning("Zero division error appeared in HuggingFace Transformers.")
predictions = None
post_y_pred, _ = postprocess_prediction_and_true(
task=self._task,
y_pred=predictions,
tokenizer=self.tokenizer,
hf_args=self._training_args,
X=X,
)
return post_y_pred
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
params[TransformersEstimator.ITER_HP] = params.get(TransformersEstimator.ITER_HP, sys.maxsize)
return params
class TransformersEstimatorModelSelection(TransformersEstimator):
def __init__(self, task="seq-classification", **config):
super().__init__(task, **config)
@classmethod
def search_space(cls, data_size, task, **params):
search_space_dict = TransformersEstimator.search_space(data_size, task, **params)
"""
For model selection, use the same search space regardless of memory constraint
If OOM, user should change the search space themselves
"""
search_space_dict["model_path"] = {
"domain": tune.choice(
[
"google/electra-base-discriminator",
"bert-base-uncased",
"roberta-base",
"facebook/muppet-roberta-base",
"google/electra-small-discriminator",
]
),
"init_value": "facebook/muppet-roberta-base",
}
return search_space_dict
class SKLearnEstimator(BaseEstimator):
"""
The base class for tuning scikit-learn estimators.
Subclasses can modify the function signature of ``__init__`` to
ignore the values in ``config`` that are not relevant to the constructor
of their underlying estimator. For example, some regressors in ``scikit-learn``
don't accept the ``n_jobs`` parameter contained in ``config``. For these,
one can add ``n_jobs=None,`` before ``**config`` to make sure ``config`` doesn't
contain an ``n_jobs`` key.
"""
def __init__(self, task="binary", **config):
super().__init__(task, **config)
def _preprocess(self, X):
if isinstance(X, DataFrame):
cat_columns = X.select_dtypes(include=["category"]).columns
if not cat_columns.empty:
X = X.copy()
X[cat_columns] = X[cat_columns].apply(lambda x: x.cat.codes)
elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
# numpy array is not of numeric dtype
X = DataFrame(X)
for col in X.columns:
if isinstance(X[col][0], str):
X[col] = X[col].astype("category").cat.codes
X = X.to_numpy()
return X
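# A minimal sketch of a custom learner built on the base class above (illustrative;
# assumes scikit-learn's Ridge is importable). Dropping `n_jobs` from the signature
# follows the advice in the SKLearnEstimator docstring:
#
#     class RidgeEstimator(SKLearnEstimator):
#         def __init__(self, task="regression", n_jobs=None, **config):
#             super().__init__(task, **config)
#             from sklearn.linear_model import Ridge
#             self.estimator_class = Ridge
#
#         @classmethod
#         def search_space(cls, data_size, task, **params):
#             return {"alpha": {"domain": tune.loguniform(1e-3, 1e3), "init_value": 1.0}}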
class LGBMEstimator(BaseEstimator):
"""The class for tuning LGBM, using sklearn API."""
ITER_HP = "n_estimators"
HAS_CALLBACK = True
DEFAULT_ITER = 100
@classmethod
def search_space(cls, data_size, **params):
upper = max(5, min(32768, int(data_size[0]))) # upper must be larger than lower
return {
"n_estimators": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"num_leaves": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"min_child_samples": {
"domain": tune.lograndint(lower=2, upper=2**7 + 1),
"init_value": 20,
},
"learning_rate": {
"domain": tune.loguniform(lower=1 / 1024, upper=1.0),
"init_value": 0.1,
},
"log_max_bin": { # log transformed with base 2
"domain": tune.lograndint(lower=3, upper=11),
"init_value": 8,
},
"colsample_bytree": {
"domain": tune.uniform(lower=0.01, upper=1.0),
"init_value": 1.0,
},
"reg_alpha": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1 / 1024,
},
"reg_lambda": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1.0,
},
}
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
if "log_max_bin" in params:
params["max_bin"] = (1 << params.pop("log_max_bin")) - 1
return params
@classmethod
def size(cls, config):
num_leaves = int(
round(config.get("num_leaves") or config.get("max_leaves") or 1 << config.get("max_depth", 16))
)
n_estimators = int(round(config["n_estimators"]))
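        # e.g. num_leaves=32, n_estimators=100 -> (96 + 124 + 1) * 100 * 8 = 176,800 bytes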
return (num_leaves * 3 + (num_leaves - 1) * 4 + 1.0) * n_estimators * 8
def __init__(self, task="binary", **config):
super().__init__(task, **config)
if "verbose" not in self.params:
self.params["verbose"] = -1
if self._task.is_classification():
self.estimator_class = LGBMClassifier
elif task == "rank":
self.estimator_class = LGBMRanker
else:
self.estimator_class = LGBMRegressor
self._time_per_iter = None
self._train_size = 0
self._mem_per_iter = -1
self.HAS_CALLBACK = self.HAS_CALLBACK and self._callbacks(0, 0, 0) is not None
def _preprocess(self, X):
if not isinstance(X, DataFrame) and issparse(X) and np.issubdtype(X.dtype, np.integer):
X = X.astype(float)
elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
# numpy array is not of numeric dtype
X = DataFrame(X)
for col in X.columns:
if isinstance(X[col][0], str):
X[col] = X[col].astype("category").cat.codes
X = X.to_numpy()
return X
def fit(self, X_train, y_train, budget=None, free_mem_ratio=0, **kwargs):
start_time = time.time()
deadline = start_time + budget if budget else np.inf
n_iter = self.params.get(self.ITER_HP, self.DEFAULT_ITER)
trained = False
if not self.HAS_CALLBACK:
mem0 = psutil.virtual_memory().available if psutil is not None else 1
if (
(not self._time_per_iter or abs(self._train_size - X_train.shape[0]) > 4)
and budget is not None
or self._mem_per_iter < 0
and psutil is not None
) and n_iter > 1:
self.params[self.ITER_HP] = 1
self._t1 = self._fit(X_train, y_train, **kwargs)
if budget is not None and self._t1 >= budget or n_iter == 1:
return self._t1
mem1 = psutil.virtual_memory().available if psutil is not None else 1
self._mem1 = mem0 - mem1
self.params[self.ITER_HP] = min(n_iter, 4)
self._t2 = self._fit(X_train, y_train, **kwargs)
mem2 = psutil.virtual_memory().available if psutil is not None else 1
self._mem2 = max(mem0 - mem2, self._mem1)
# if self._mem1 <= 0:
# self._mem_per_iter = self._mem2 / (self.params[self.ITER_HP] + 1)
# elif self._mem2 <= 0:
# self._mem_per_iter = self._mem1
# else:
self._mem_per_iter = min(self._mem1, self._mem2 / self.params[self.ITER_HP])
# if self._mem_per_iter <= 1 and psutil is not None:
# n_iter = self.params[self.ITER_HP]
self._time_per_iter = (
(self._t2 - self._t1) / (self.params[self.ITER_HP] - 1)
if self._t2 > self._t1
else self._t1
if self._t1
else 0.001
)
self._train_size = X_train.shape[0]
if budget is not None and self._t1 + self._t2 >= budget or n_iter == self.params[self.ITER_HP]:
# self.params[self.ITER_HP] = n_iter
return time.time() - start_time
trained = True
# logger.debug(mem0)
# logger.debug(self._mem_per_iter)
if n_iter > 1:
max_iter = min(
n_iter,
int((budget - time.time() + start_time - self._t1) / self._time_per_iter + 1)
if budget is not None
else n_iter,
int((1 - free_mem_ratio) * mem0 / self._mem_per_iter)
if psutil is not None and self._mem_per_iter > 0
else n_iter,
)
if trained and max_iter <= self.params[self.ITER_HP]:
return time.time() - start_time
# when not trained, train at least one iter
self.params[self.ITER_HP] = max(max_iter, 1)
if self.HAS_CALLBACK:
kwargs_callbacks = kwargs.get("callbacks")
if kwargs_callbacks:
callbacks = kwargs_callbacks + self._callbacks(start_time, deadline, free_mem_ratio)
kwargs.pop("callbacks")
else:
callbacks = self._callbacks(start_time, deadline, free_mem_ratio)
if isinstance(self, XGBoostSklearnEstimator):
from xgboost import __version__
if __version__ >= "1.6.0":
# since xgboost>=1.6.0, callbacks can't be passed in fit()
self.params["callbacks"] = callbacks
callbacks = None
self._fit(
X_train,
y_train,
callbacks=callbacks,
**kwargs,
)
if callbacks is None:
# for xgboost>=1.6.0, pop callbacks to enable pickle
callbacks = self.params.pop("callbacks")
self._model.set_params(callbacks=callbacks[:-1])
best_iteration = (
self._model.get_booster().best_iteration
if isinstance(self, XGBoostSklearnEstimator)
else self._model.best_iteration_
)
if best_iteration is not None:
self._model.set_params(n_estimators=best_iteration + 1)
else:
self._fit(X_train, y_train, **kwargs)
train_time = time.time() - start_time
return train_time
def _callbacks(self, start_time, deadline, free_mem_ratio) -> List[Callable]:
return [partial(self._callback, start_time, deadline, free_mem_ratio)]
def _callback(self, start_time, deadline, free_mem_ratio, env) -> None:
from lightgbm.callback import EarlyStopException
now = time.time()
if env.iteration == 0:
self._time_per_iter = now - start_time
if now + self._time_per_iter > deadline:
raise EarlyStopException(env.iteration, env.evaluation_result_list)
if psutil is not None:
mem = psutil.virtual_memory()
if mem.available / mem.total < free_mem_ratio:
raise EarlyStopException(env.iteration, env.evaluation_result_list)
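# Illustrative direct usage (normally these estimators are driven by flaml.AutoML;
# X_train / y_train / X_test are assumed numpy arrays or DataFrames):
#
#     est = LGBMEstimator(task="regression", n_estimators=100, learning_rate=0.1)
#     est.fit(X_train, y_train, budget=10)   # stops early if the 10 s budget is hit
#     y_pred = est.predict(X_test)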
class XGBoostEstimator(SKLearnEstimator):
"""The class for tuning XGBoost regressor, not using sklearn API."""
DEFAULT_ITER = 10
@classmethod
def search_space(cls, data_size, **params):
upper = max(5, min(32768, int(data_size[0]))) # upper must be larger than lower
return {
"n_estimators": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"max_leaves": {
"domain": tune.lograndint(lower=4, upper=upper),
"init_value": 4,
"low_cost_init_value": 4,
},
"max_depth": {
"domain": tune.choice([0, 6, 12]),
"init_value": 0,
},
"min_child_weight": {
"domain": tune.loguniform(lower=0.001, upper=128),
"init_value": 1.0,
},
"learning_rate": {
"domain": tune.loguniform(lower=1 / 1024, upper=1.0),
"init_value": 0.1,
},
"subsample": {
"domain": tune.uniform(lower=0.1, upper=1.0),
"init_value": 1.0,
},
"colsample_bylevel": {
"domain": tune.uniform(lower=0.01, upper=1.0),
"init_value": 1.0,
},
"colsample_bytree": {
"domain": tune.uniform(lower=0.01, upper=1.0),
"init_value": 1.0,
},
"reg_alpha": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1 / 1024,
},
"reg_lambda": {
"domain": tune.loguniform(lower=1 / 1024, upper=1024),
"init_value": 1.0,
},
}
@classmethod
def size(cls, config):
return LGBMEstimator.size(config)
@classmethod
def cost_relative2lgbm(cls):
return 1.6
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
max_depth = params["max_depth"] = params.get("max_depth", 0)
if max_depth == 0:
params["grow_policy"] = params.get("grow_policy", "lossguide")
params["tree_method"] = params.get("tree_method", "hist")
# params["booster"] = params.get("booster", "gbtree")
# use_label_encoder is deprecated in 1.7.
from xgboost import __version__ as xgboost_version
if xgboost_version < "1.7.0":
params["use_label_encoder"] = params.get("use_label_encoder", False)
if "n_jobs" in config:
params["nthread"] = params.pop("n_jobs")
return params
def __init__(
self,
task="regression",
**config,
):
super().__init__(task, **config)
self.params["verbosity"] = 0
def fit(self, X_train, y_train, budget=None, free_mem_ratio=0, **kwargs):
import xgboost as xgb
start_time = time.time()
deadline = start_time + budget if budget else np.inf
if issparse(X_train):
if xgb.__version__ < "1.6.0":
# "auto" fails for sparse input since xgboost 1.6.0
self.params["tree_method"] = "auto"
else:
X_train = self._preprocess(X_train)
if "sample_weight" in kwargs:
dtrain = xgb.DMatrix(X_train, label=y_train, weight=kwargs["sample_weight"])
else:
dtrain = xgb.DMatrix(X_train, label=y_train)
objective = self.params.get("objective")
if isinstance(objective, str):
obj = None
else:
obj = objective
if "objective" in self.params:
del self.params["objective"]
_n_estimators = self.params.pop("n_estimators")
callbacks = XGBoostEstimator._callbacks(start_time, deadline, free_mem_ratio)
if callbacks:
self._model = xgb.train(
self.params,
dtrain,
_n_estimators,
obj=obj,
callbacks=callbacks,
)
self.params["n_estimators"] = self._model.best_iteration + 1
else:
self._model = xgb.train(self.params, dtrain, _n_estimators, obj=obj)
self.params["n_estimators"] = _n_estimators
self.params["objective"] = objective
del dtrain
train_time = time.time() - start_time
return train_time
def predict(self, X, **kwargs):
import xgboost as xgb
if not issparse(X):
X = self._preprocess(X)
dtest = xgb.DMatrix(X)
return super().predict(dtest, **kwargs)
@classmethod
def _callbacks(cls, start_time, deadline, free_mem_ratio):
try:
from xgboost.callback import TrainingCallback
except ImportError: # for xgboost<1.3
return None
class ResourceLimit(TrainingCallback):
def after_iteration(self, model, epoch, evals_log) -> bool:
now = time.time()
if epoch == 0:
self._time_per_iter = now - start_time
if now + self._time_per_iter > deadline:
return True
if psutil is not None:
mem = psutil.virtual_memory()
if mem.available / mem.total < free_mem_ratio:
return True
return False
return [ResourceLimit()]
class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
"""The class for tuning XGBoost with unlimited depth, using sklearn API."""
DEFAULT_ITER = 10
@classmethod
def search_space(cls, data_size, **params):
space = XGBoostEstimator.search_space(data_size)
space.pop("max_depth")
return space
@classmethod
def cost_relative2lgbm(cls):
return XGBoostEstimator.cost_relative2lgbm()
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
max_depth = params["max_depth"] = params.get("max_depth", 0)
if max_depth == 0:
params["grow_policy"] = params.get("grow_policy", "lossguide")
params["tree_method"] = params.get("tree_method", "hist")
params["use_label_encoder"] = params.get("use_label_encoder", False)
return params
def __init__(
self,
task="binary",
**config,
):
super().__init__(task, **config)
del self.params["verbose"]
self.params["verbosity"] = 0
import xgboost as xgb
if "rank" == task:
self.estimator_class = xgb.XGBRanker
elif self._task.is_classification():
self.estimator_class = xgb.XGBClassifier
else:
self.estimator_class = xgb.XGBRegressor
self._xgb_version = xgb.__version__
def fit(self, X_train, y_train, budget=None, free_mem_ratio=0, **kwargs):
if issparse(X_train) and self._xgb_version < "1.6.0":
# "auto" fails for sparse input since xgboost 1.6.0
self.params["tree_method"] = "auto"
if kwargs.get("gpu_per_trial"):
self.params["tree_method"] = "gpu_hist"
kwargs.pop("gpu_per_trial")
return super().fit(X_train, y_train, budget, free_mem_ratio, **kwargs)
def _callbacks(self, start_time, deadline, free_mem_ratio) -> List[Callable]:
return XGBoostEstimator._callbacks(start_time, deadline, free_mem_ratio)
class XGBoostLimitDepthEstimator(XGBoostSklearnEstimator):
"""The class for tuning XGBoost with limited depth, using sklearn API."""
@classmethod
def search_space(cls, data_size, **params):
space = XGBoostEstimator.search_space(data_size)
space.pop("max_leaves")
upper = max(6, int(np.log2(data_size[0])))
space["max_depth"] = {
"domain": tune.randint(lower=1, upper=min(upper, 16)),
"init_value": 6,
"low_cost_init_value": 1,
}
space["learning_rate"]["init_value"] = 0.3
space["n_estimators"]["init_value"] = 10
return space
@classmethod
def cost_relative2lgbm(cls):
return 64
class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
"""The class for tuning Random Forest."""
HAS_CALLBACK = False
nrows = 101
@classmethod
def search_space(cls, data_size, task, **params):
RandomForestEstimator.nrows = int(data_size[0])
upper = min(2048, RandomForestEstimator.nrows)
init = 1 / np.sqrt(data_size[1]) if task.is_classification() else 1
lower = min(0.1, init)
space = {
"n_estimators": {
"domain": tune.lograndint(lower=4, upper=max(5, upper)),
"init_value": 4,
"low_cost_init_value": 4,
},
"max_features": {
"domain": tune.loguniform(lower=lower, upper=1.0),
"init_value": init,
},
"max_leaves": {
"domain": tune.lograndint(
lower=4,
upper=max(5, min(32768, RandomForestEstimator.nrows >> 1)), #
),
"init_value": 4,
"low_cost_init_value": 4,
},
}
if task.is_classification():
space["criterion"] = {
"domain": tune.choice(["gini", "entropy"]),
# "init_value": "gini",
}
return space
@classmethod
def cost_relative2lgbm(cls):
return 2
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
if "max_leaves" in params:
params["max_leaf_nodes"] = params.get("max_leaf_nodes", params.pop("max_leaves"))
if not self._task.is_classification() and "criterion" in config:
params.pop("criterion")
if "random_state" not in params:
params["random_state"] = 12032022
return params
def __init__(
self,
task: Task,
**params,
):
super().__init__(task, **params)
self.params["verbose"] = 0
if self._task.is_classification():
self.estimator_class = RandomForestClassifier
else:
self.estimator_class = RandomForestRegressor
class ExtraTreesEstimator(RandomForestEstimator):
"""The class for tuning Extra Trees."""
@classmethod
def cost_relative2lgbm(cls):
return 1.9
def __init__(self, task="binary", **params):
if isinstance(task, str):
from flaml.automl.task.factory import task_factory
task = task_factory(task)
super().__init__(task, **params)
if task.is_regression():
self.estimator_class = ExtraTreesRegressor
else:
self.estimator_class = ExtraTreesClassifier
class LRL1Classifier(SKLearnEstimator):
"""The class for tuning Logistic Regression with L1 regularization."""
@classmethod
def search_space(cls, **params):
return {
"C": {
"domain": tune.loguniform(lower=0.03125, upper=32768.0),
"init_value": 1.0,
},
}
@classmethod
def cost_relative2lgbm(cls):
return 160
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
params["tol"] = params.get("tol", 0.0001)
params["solver"] = params.get("solver", "saga")
params["penalty"] = params.get("penalty", "l1")
return params
def __init__(self, task="binary", **config):
super().__init__(task, **config)
assert self._task.is_classification(), "LogisticRegression for classification task only"
self.estimator_class = LogisticRegression
class LRL2Classifier(SKLearnEstimator):
"""The class for tuning Logistic Regression with L2 regularization."""
limit_resource = True
@classmethod
def search_space(cls, **params):
return LRL1Classifier.search_space(**params)
@classmethod
def cost_relative2lgbm(cls):
return 25
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
params["tol"] = params.get("tol", 0.0001)
params["solver"] = params.get("solver", "lbfgs")
params["penalty"] = params.get("penalty", "l2")
return params
def __init__(self, task="binary", **config):
super().__init__(task, **config)
assert self._task.is_classification(), "LogisticRegression for classification task only"
self.estimator_class = LogisticRegression
class CatBoostEstimator(BaseEstimator):
"""The class for tuning CatBoost."""
ITER_HP = "n_estimators"
DEFAULT_ITER = 1000
@classmethod
def search_space(cls, data_size, **params):
upper = max(min(round(1500000 / data_size[0]), 150), 12)
return {
"early_stopping_rounds": {
"domain": tune.lograndint(lower=10, upper=upper),
"init_value": 10,
"low_cost_init_value": 10,
},
"learning_rate": {
"domain": tune.loguniform(lower=0.005, upper=0.2),
"init_value": 0.1,
},
"n_estimators": {
"domain": 8192,
"init_value": 8192,
},
}
@classmethod
def size(cls, config):
n_estimators = config.get("n_estimators", 8192)
max_leaves = 64
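        # Rough size estimate in bytes: assumes ~64 leaves per tree, with leaf values and
        # split records stored as 8-byte fields (a heuristic, not an exact CatBoost figure).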
return (max_leaves * 3 + (max_leaves - 1) * 4 + 1.0) * n_estimators * 8
@classmethod
def cost_relative2lgbm(cls):
return 15
def _preprocess(self, X):
if isinstance(X, DataFrame):
cat_columns = X.select_dtypes(include=["category"]).columns
if not cat_columns.empty:
X = X.copy()
X[cat_columns] = X[cat_columns].apply(
lambda x: x.cat.rename_categories([str(c) if isinstance(c, float) else c for c in x.cat.categories])
)
elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
# numpy array is not of numeric dtype
X = DataFrame(X)
for col in X.columns:
if isinstance(X[col][0], str):
X[col] = X[col].astype("category").cat.codes
X = X.to_numpy()
return X
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
params["n_estimators"] = params.get("n_estimators", 8192)
if "n_jobs" in params:
params["thread_count"] = params.pop("n_jobs")
return params
def __init__(
self,
task="binary",
**config,
):
super().__init__(task, **config)
self.params.update(
{
"verbose": config.get("verbose", False),
"random_seed": config.get("random_seed", 10242048),
}
)
if self._task.is_classification():
from catboost import CatBoostClassifier
self.estimator_class = CatBoostClassifier
else:
from catboost import CatBoostRegressor
self.estimator_class = CatBoostRegressor
def fit(self, X_train, y_train, budget=None, free_mem_ratio=0, **kwargs):
start_time = time.time()
deadline = start_time + budget if budget else np.inf
train_dir = f"catboost_{str(start_time)}"
X_train = self._preprocess(X_train)
if isinstance(X_train, DataFrame):
cat_features = list(X_train.select_dtypes(include="category").columns)
else:
cat_features = []
use_best_model = kwargs.get("use_best_model", True)
n = max(int(len(y_train) * 0.9), len(y_train) - 1000) if use_best_model else len(y_train)
X_tr, y_tr = X_train[:n], y_train[:n]
from catboost import Pool, __version__
eval_set = Pool(data=X_train[n:], label=y_train[n:], cat_features=cat_features) if use_best_model else None
if "sample_weight" in kwargs:
weight = kwargs["sample_weight"]
if weight is not None:
kwargs["sample_weight"] = weight[:n]
else:
weight = None
model = self.estimator_class(train_dir=train_dir, **self.params)
if __version__ >= "0.26":
model.fit(
X_tr,
y_tr,
cat_features=cat_features,
eval_set=eval_set,
callbacks=CatBoostEstimator._callbacks(
start_time, deadline, free_mem_ratio if use_best_model else None
),
**kwargs,
)
else:
model.fit(
X_tr,
y_tr,
cat_features=cat_features,
eval_set=eval_set,
**kwargs,
)
shutil.rmtree(train_dir, ignore_errors=True)
if weight is not None:
kwargs["sample_weight"] = weight
self._model = model
self.params[self.ITER_HP] = self._model.tree_count_
train_time = time.time() - start_time
return train_time
@classmethod
def _callbacks(cls, start_time, deadline, free_mem_ratio):
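        # Note: CatBoost callbacks signal "stop training" by returning False from
        # after_iteration (True means continue), the opposite of the XGBoost callback above.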
class ResourceLimit:
def after_iteration(self, info) -> bool:
now = time.time()
if info.iteration == 1:
self._time_per_iter = now - start_time
if now + self._time_per_iter > deadline:
return False
if psutil is not None and free_mem_ratio is not None:
mem = psutil.virtual_memory()
if mem.available / mem.total < free_mem_ratio:
return False
return True # can continue
return [ResourceLimit()]
class KNeighborsEstimator(BaseEstimator):
@classmethod
def search_space(cls, data_size, **params):
upper = min(512, int(data_size[0] / 2))
return {
"n_neighbors": {
"domain": tune.lograndint(lower=1, upper=max(2, upper)),
"init_value": 5,
"low_cost_init_value": 1,
},
}
@classmethod
def cost_relative2lgbm(cls):
return 30
def config2params(self, config: dict) -> dict:
params = super().config2params(config)
params["weights"] = params.get("weights", "distance")
return params
def __init__(self, task="binary", **config):
super().__init__(task, **config)
if self._task.is_classification():
from sklearn.neighbors import KNeighborsClassifier
self.estimator_class = KNeighborsClassifier
else:
from sklearn.neighbors import KNeighborsRegressor
self.estimator_class = KNeighborsRegressor
def _preprocess(self, X):
if isinstance(X, DataFrame):
cat_columns = X.select_dtypes(["category"]).columns
if X.shape[1] == len(cat_columns):
raise ValueError("kneighbor requires at least one numeric feature")
X = X.drop(cat_columns, axis=1)
elif isinstance(X, np.ndarray) and X.dtype.kind not in "buif":
            # drop categorical columns, if any
X = DataFrame(X)
cat_columns = []
for col in X.columns:
if isinstance(X[col][0], str):
cat_columns.append(col)
X = X.drop(cat_columns, axis=1)
X = X.to_numpy()
return X
class suppress_stdout_stderr(object):
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
os.close(self.null_fds[0])
        os.close(self.null_fds[1])
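# Illustrative use of the context manager above (model and data are hypothetical):
#   with suppress_stdout_stderr():
#       noisy_model.fit(X, y)   # stray C-level stdout/stderr output is silenced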
/Divisi2-2.2.5.tar.gz/Divisi2-2.2.5/divisi2/fileIO.py |
from __future__ import with_statement
from pkg_resources import resource_filename
import cPickle as pickle
import codecs
import gzip
"""
Easy functions for loading and saving Divisi matrices and semantic networks.
New in Divisi2.
"""
# Note: gzip.GzipFile is super-slow for things that read a few bytes at a time,
# but quite fast to read the whole file at once. So we do the latter.
#
# If you run out of memory reading a gzip'ed file, un-gzip it first.
def data_filename(filename):
if filename.startswith('data:'):
filename = resource_filename(__name__, 'data/'+filename[5:])
return filename
def _meta_read(filename, encoding=None):
filename = data_filename(filename)
opener = gzip.open if filename.endswith('.gz') else open
f = opener(filename, 'rb')
data = f.read()
f.close()
if encoding is not None:
data = data.decode(encoding)
return data
def _meta_write(filename, data, encoding=None):
filename = data_filename(filename)
if encoding is not None:
data = data.encode(encoding)
opener = gzip.open if filename.endswith('.gz') else open
f = opener(filename, 'wb')
f.write(data)
f.close()
# def _meta_open(filename, mode='rb', encoding=None):
# if filename.endswith('.gz'):
# raise RuntimeError('Opening gzip files is no longer supported.')
# if encoding is None:
# return open(filename, mode)
# else:
# return codecs.open(filename, mode, encoding=encoding)
def load(filename):
"""
Load an object (most likely a Divisi matrix or a semantic network) from a
.pickle or .graph file. If the filename ends in .gz, it will be
uncompressed.
"""
if filename.endswith('.graph.gz') or filename.endswith('.graph'):
return load_graph(filename)
else:
return load_pickle(filename)
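# Illustrative calls (hypothetical file names):
#   matrix = load('my_matrix.pickle.gz')   # gunzips, then unpickles
#   graph = load('my_network.graph')       # reads a tab-separated NetworkX edgelist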
def load_pickle(filename):
file = _meta_read(filename)
return pickle.loads(file)
def load_graph(filename, encoding='utf-8'):
import networkx as nx
return nx.read_edgelist(data_filename(filename), encoding=encoding,
data=True, delimiter='\t',
create_using=nx.MultiDiGraph())
def save(obj, filename):
"""
Save an object to the given filename.
    If the filename ends in .gz, the file will be compressed. If the filename
    (ignoring any .gz suffix) ends in .graph, the object is assumed to be a
    semantic network and is saved in the NetworkX edgelist format.
"""
if isinstance(obj, basestring) and not isinstance(filename, basestring):
# correct for reversed arguments.
filename, obj = obj, filename
if filename.endswith('.graph') or filename.endswith('.graph.gz'):
save_graph(obj, filename)
else:
save_pickle(obj, filename)
def save_graph(network, filename, encoding='utf-8'):
import networkx as nx
return nx.write_edgelist(network, data_filename(filename), data=True, delimiter='\t')
def save_pickle(matrix, filename):
if isinstance(matrix, basestring) and not isinstance(filename, basestring):
# Catch accidentally reversing argument order.
return save_pickle(filename, matrix)
    _meta_write(filename, pickle.dumps(matrix, -1))
/FanFicFare-4.27.0.tar.gz/FanFicFare-4.27.0/fanficfare/exceptions.py |
# Copyright 2011 Fanficdownloader team, 2018 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
## A few exceptions for different things for adapters
class FailedToDownload(Exception):
def __init__(self,error):
self.error=error
def __str__(self):
return self.error
class AccessDenied(Exception):
def __init__(self,error):
self.error=error
def __str__(self):
return self.error
class RejectImage(Exception):
def __init__(self,error):
self.error=error
def __str__(self):
return self.error
class InvalidStoryURL(Exception):
def __init__(self,url,domain,example):
self.url=url
self.domain=domain
self.example=example
def __str__(self):
return "Bad Story URL: (%s) for site: (%s) Example: (%s)" % (self.url, self.domain, self.example)
class FailedToLogin(Exception):
def __init__(self,url, username, passwdonly=False):
self.url=url
self.username=username
self.passwdonly=passwdonly
def __str__(self):
if self.passwdonly:
return "URL Failed, password required: (%s) " % (self.url)
else:
return "Failed to Login for URL: (%s) with username: (%s)" % (self.url, self.username)
class AdultCheckRequired(Exception):
def __init__(self,url):
self.url=url
def __str__(self):
return "Story requires confirmation of adult status: (%s)" % self.url
class StoryDoesNotExist(Exception):
def __init__(self,url):
self.url=url
def __str__(self):
return "Story does not exist: (%s)" % self.url
class UnknownSite(Exception):
def __init__(self,url,supported_sites_list):
self.url=url
self.supported_sites_list=supported_sites_list
self.supported_sites_list.sort()
def __str__(self):
return "Unknown Site(%s). Supported sites: (%s)" % (self.url, ", ".join(self.supported_sites_list))
class FailedToWriteOutput(Exception):
def __init__(self,error):
self.error=error
def __str__(self):
return self.error
class PersonalIniFailed(Exception):
def __init__(self,error,part,line):
self.error=error
self.part=part
self.line=line
def __str__(self):
return "personal.ini Error '%s' in '%s' in line '%s'"%(self.error,self.part,self.line)
class RegularExpresssionFailed(PersonalIniFailed):
def __init__(self,error,part,line):
PersonalIniFailed.__init__(self,error,part,line)
def __str__(self):
return "Regular Expression Error '%s' in part '%s' in line '%s'"%(self.error,self.part,self.line)
class FetchEmailFailed(Exception):
def __init__(self,error):
self.error=error
def __str__(self):
return self.error
class CacheCleared(Exception):
def __init__(self,error):
self.error=error
def __str__(self):
return self.error
class HTTPErrorFFF(Exception):
def __init__(self,
url,
status_code,
error_msg,
data=None):
self.url = url
self.status_code = status_code
self.error_msg = error_msg
self.data = data
def __str__(self):
if self.url in self.error_msg:
return "HTTP Error in FFF '%s'(%s)"%(self.error_msg,self.status_code)
else:
return "HTTP Error in FFF '%s'(%s) URL:'%s'"%(self.error_msg,self.status_code,self.url)
class BrowserCacheException(Exception):
    pass
/Firefly_III_API_Client-2.0.5.0-py3-none-any.whl/firefly_iii_client/paths/v1_insight_transfer_no_category/get.py |
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from firefly_iii_client import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from firefly_iii_client import schemas # noqa: F401
from firefly_iii_client.model.unauthenticated import Unauthenticated
from firefly_iii_client.model.insight_total import InsightTotal
from firefly_iii_client.model.bad_request import BadRequest
from firefly_iii_client.model.internal_exception import InternalException
from firefly_iii_client.model.not_found import NotFound
from . import path
# Query params
StartSchema = schemas.DateSchema
EndSchema = schemas.DateSchema
class AccountsSchema(
schemas.ListSchema
):
class MetaOapg:
items = schemas.Int64Schema
def __new__(
cls,
_arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, decimal.Decimal, int, ]], typing.List[typing.Union[MetaOapg.items, decimal.Decimal, int, ]]],
_configuration: typing.Optional[schemas.Configuration] = None,
) -> 'AccountsSchema':
return super().__new__(
cls,
_arg,
_configuration=_configuration,
)
def __getitem__(self, i: int) -> MetaOapg.items:
return super().__getitem__(i)
RequestRequiredQueryParams = typing_extensions.TypedDict(
'RequestRequiredQueryParams',
{
'start': typing.Union[StartSchema, str, date, ],
'end': typing.Union[EndSchema, str, date, ],
}
)
RequestOptionalQueryParams = typing_extensions.TypedDict(
'RequestOptionalQueryParams',
{
'accounts[]': typing.Union[AccountsSchema, list, tuple, ],
},
total=False
)
class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams):
pass
request_query_start = api_client.QueryParameter(
name="start",
style=api_client.ParameterStyle.FORM,
schema=StartSchema,
required=True,
explode=True,
)
request_query_end = api_client.QueryParameter(
name="end",
style=api_client.ParameterStyle.FORM,
schema=EndSchema,
required=True,
explode=True,
)
request_query_accounts = api_client.QueryParameter(
name="accounts[]",
style=api_client.ParameterStyle.FORM,
schema=AccountsSchema,
explode=True,
)
# Header params
XTraceIdSchema = schemas.UUIDSchema
RequestRequiredHeaderParams = typing_extensions.TypedDict(
'RequestRequiredHeaderParams',
{
}
)
RequestOptionalHeaderParams = typing_extensions.TypedDict(
'RequestOptionalHeaderParams',
{
'X-Trace-Id': typing.Union[XTraceIdSchema, str, uuid.UUID, ],
},
total=False
)
class RequestHeaderParams(RequestRequiredHeaderParams, RequestOptionalHeaderParams):
pass
request_header_x_trace_id = api_client.HeaderParameter(
name="X-Trace-Id",
style=api_client.ParameterStyle.SIMPLE,
schema=XTraceIdSchema,
)
_auth = [
'firefly_iii_auth',
]
SchemaFor200ResponseBodyApplicationJson = InsightTotal
@dataclass
class ApiResponseFor200(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor200ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_200 = api_client.OpenApiResponse(
response_cls=ApiResponseFor200,
content={
'application/json': api_client.MediaType(
schema=SchemaFor200ResponseBodyApplicationJson),
},
)
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor400ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
content={
'application/json': api_client.MediaType(
schema=SchemaFor400ResponseBodyApplicationJson),
},
)
SchemaFor401ResponseBodyApplicationJson = Unauthenticated
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor401ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
response_cls=ApiResponseFor401,
content={
'application/json': api_client.MediaType(
schema=SchemaFor401ResponseBodyApplicationJson),
},
)
SchemaFor404ResponseBodyApplicationJson = NotFound
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor404ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_404 = api_client.OpenApiResponse(
response_cls=ApiResponseFor404,
content={
'application/json': api_client.MediaType(
schema=SchemaFor404ResponseBodyApplicationJson),
},
)
SchemaFor500ResponseBodyApplicationJson = InternalException
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: typing.Union[
SchemaFor500ResponseBodyApplicationJson,
]
headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
response_cls=ApiResponseFor500,
content={
'application/json': api_client.MediaType(
schema=SchemaFor500ResponseBodyApplicationJson),
},
)
_status_code_to_response = {
'200': _response_for_200,
'400': _response_for_400,
'401': _response_for_401,
'404': _response_for_404,
'500': _response_for_500,
}
_all_accept_content_types = (
'application/json',
)
class BaseApi(api_client.Api):
@typing.overload
def _insight_transfer_no_category_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def _insight_transfer_no_category_oapg(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def _insight_transfer_no_category_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def _insight_transfer_no_category_oapg(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
"""
Insight into transfers, without category.
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params)
self._verify_typed_dict_inputs_oapg(RequestHeaderParams, header_params)
used_path = path.value
prefix_separator_iterator = None
for parameter in (
request_query_start,
request_query_end,
request_query_accounts,
):
parameter_data = query_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
if prefix_separator_iterator is None:
prefix_separator_iterator = parameter.get_prefix_separator_iterator()
serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator)
for serialized_value in serialized_data.values():
used_path += serialized_value
_headers = HTTPHeaderDict()
for parameter in (
request_header_x_trace_id,
):
parameter_data = header_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_headers.extend(serialized_data)
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
response = self.api_client.call_api(
resource_path=used_path,
method='get'.upper(),
headers=_headers,
auth_settings=_auth,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(
status=response.status,
reason=response.reason,
api_response=api_response
)
return api_response
class InsightTransferNoCategory(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
@typing.overload
def insight_transfer_no_category(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def insight_transfer_no_category(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def insight_transfer_no_category(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def insight_transfer_no_category(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._insight_transfer_no_category_oapg(
query_params=query_params,
header_params=header_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: typing_extensions.Literal[False] = ...,
) -> typing.Union[
ApiResponseFor200,
]: ...
@typing.overload
def get(
self,
skip_deserialization: typing_extensions.Literal[True],
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
) -> api_client.ApiResponseWithoutDeserialization: ...
@typing.overload
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = ...,
) -> typing.Union[
ApiResponseFor200,
api_client.ApiResponseWithoutDeserialization,
]: ...
def get(
self,
query_params: RequestQueryParams = frozendict.frozendict(),
header_params: RequestHeaderParams = frozendict.frozendict(),
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
):
return self._insight_transfer_no_category_oapg(
query_params=query_params,
header_params=header_params,
accept_content_types=accept_content_types,
stream=stream,
timeout=timeout,
skip_deserialization=skip_deserialization
        )
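# Illustrative call sketch (client construction is an assumption; it is not shown in this module):
#   api = ApiForget(configured_api_client)
#   response = api.get(query_params={'start': '2023-01-01', 'end': '2023-12-31'})
#   response.body   # an InsightTotal instance when the server answers HTTP 200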
/MusicOnPolytopes-0.1.0-py3-none-any.whl/polytopes/pattern_manip.py |
import math
import copy
import polytopes.model.errors as err
import polytopes.data_manipulation as dm
import polytopes.pattern_factory as pf
# %% Element indexation
def get_index_from_element(element, pattern):
"""
Return the index of a specific element 'element' in an indexed pattern.
This index is a boolean code, indicating how to navigate inside the pattern and the nesting of lists, by dichotomy.
A zero indicates to search in the "left" part of the pattern, and a one indicates to search on the right part.
In that sense, an element index will be of the size of the dimension of the pattern.
Added elements will result in a tuple in the index (similar to tuples in the pattern).
Parameters
----------
element : integer
The element to find in the pattern.
pattern : nest list of integers (and tuples)
The indexed pattern to search the element in.
Returns
-------
list of binary numbers
The index of this element, in the pattern.
"""
return pf.flatten_nested_list(recursively_index_element(element, pattern))
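# Illustrative example (hypothetical pattern): in the indexed pattern [[0, 1], [2, 3]],
# get_index_from_element(3, [[0, 1], [2, 3]]) returns [1, 1] (right half, then right element).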
def recursively_index_element(element, pattern):
"""
Recursively computes the index of a specific element 'element' in an indexed pattern.
This index is a boolean code, indicating how to navigate inside the pattern and the nesting of lists, by dichotomy.
A zero indicates to search in the "left" part of the pattern, and a one indicates to search on the right part.
In that sense, an element index will be of the size of the dimension of the pattern.
Added elements will result in a tuple in the index (similar to tuples in the pattern).
Parameters
----------
element : integer
The element to find in the pattern.
pattern : nest list of integers (and tuples)
The indexed pattern to search the element in.
Raises
------
ElementNotFound
Error indicating that this element couldn't be found in this pattern.
Returns
-------
list of binary numbers
The index of this element, in the pattern.
"""
if pf.get_pattern_dimension(pattern) == 1:
if pattern[0] == element:
return 0
if type(pattern[0]) is tuple:
if pattern[0][0] == element:
return (0,0)
elif pattern[0][1] == element:
return (0,1)
if len(pattern) != 1:
if pattern[1] == element:
return 1
if type(pattern[1]) is tuple:
if pattern[1][0] == element:
return (1,0)
if pattern[1][1] == element:
return (1,1)
raise err.ElementNotFound("Element {} not found in the pattern {}.".format(element, pattern))
else:
if element in pf.flatten_pattern(pattern[0]):
tab = [0]
tab.append(recursively_index_element(element, pattern[0]))
return tab
elif len(pattern) != 1:
if element in pf.flatten_pattern(pattern[1]):
tab = [1]
tab.append(recursively_index_element(element, pattern[1]))
return tab
else:
raise err.ElementNotFound("Element {} not found in the pattern {}.".format(element, pattern))
else:
raise err.ElementNotFound("Element {} not found in the pattern {}.".format(element, pattern))
def get_element_from_index(index_element, pattern, with_tuples = False):
"""
Return the element in the pattern, from its index.
Parameters
----------
index_element : list of binary numbers
The index of the element to be found.
pattern : list of nested integers
The pattern, in which we will search for the element.
with_tuples : boolean, optional
A boolean, indicating if the element should be returned in a tuple or not.
It only applies to elements which are in a tuple in a pattern,
i.e. elements which are added or on which an element is added.
If True, the entire tuple is returned and, if False, only the element corresponding to the index is returned.
This argument is mainly used for PPPs.
The default is False.
Raises
------
InvalidIndexSizeException
Error indicating that the index is incoherent with the pattern dimension.
Returns
-------
integer or None
Returns an integer (the element) if the index corresponds to an element in the pattern, or None otherwise.
"""
if len(index_element) != pf.get_pattern_dimension(pattern):
raise err.InvalidIndexSizeException("Wrong index {}, of different dimension than the pattern {}.".format(index_element, pattern))
return recursively_find_element(index_element, pattern, with_tuples = with_tuples)
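# Illustrative example (hypothetical pattern): get_element_from_index([1, 0], [[0, 1], [2, 3]])
# returns 2, i.e. the inverse operation of get_index_from_element above.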
def recursively_find_element(idx_elt, pattern, with_tuples = False):
"""
Recursively computes the element in the pattern, from its index.
Parameters
----------
idx_elt : list of binary numbers
The index of the element to be found.
pattern : list of nested integers
The pattern, in which we will search for the element.
with_tuples : boolean, optional
A boolean, indicating if the element should be returned in a tuple or not.
It only applies to elements which are in a tuple in a pattern,
i.e. elements which are added or on which an element is added.
If True, the entire tuple is returned and, if False, only the element corresponding to the index is returned.
This argument is mainly used for PPPs.
The default is False.
Raises
------
InvalidIndexSizeException
Error indicating that the index is incoherent with the pattern dimension.
Returns
-------
integer or None
Returns an integer (the element) if the index corresponds to an element in the pattern, or None otherwise.
"""
if len(idx_elt) != pf.get_pattern_dimension(pattern):
raise err.InvalidIndexSizeException("Wrong index {}, of different dimension than the pattern {}.".format(idx_elt, pattern))
if pf.get_pattern_dimension(pattern) == 1:
idx_elt = idx_elt[0]
if type(idx_elt) is tuple:
try:
if type(pattern[idx_elt[0]]) is tuple:
return pattern[idx_elt[0]][idx_elt[1]]
elif idx_elt[1] == 0:
return pattern[idx_elt[0]]
else:
return None
#raise NotImplementedError("Index is a tuple {}, but the polytope doesn't have an addition {}.".format(idx_elt, pattern))
except IndexError:
return None
else:
try:
if type(pattern[idx_elt]) is tuple:
if with_tuples:
return pattern[idx_elt]
else:
return pattern[idx_elt][0]
return pattern[idx_elt]
except IndexError:
return None
else:
try:
return recursively_find_element(idx_elt[1:], pattern[idx_elt[0]], with_tuples = with_tuples)
except IndexError:
return None
def delete_tuples(idx):
"""
Replace the tuple in the index by its first element.
Convenient when you're focusing on the first element of the tuple (the one originally present in the pattern), and working with its index.
Parameters
----------
idx_elt : list of binary numbers
The index to modify.
Returns
-------
idx_cpy : list of binary numbers
The index, without tuples (and with the first element of the tuple instead).
"""
idx_cpy = copy.deepcopy(idx)
if type(idx_cpy[-1]) is tuple:
idx_cpy[-1] = idx_cpy[-1][0]
return idx_cpy
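# Illustrative example: delete_tuples([1, (0, 1)]) returns [1, 0].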
def add_indexes(a, b):
"""
Add two set of indexes, and raises errors accordingly (an index is binary, and cannot be composed of a "2" for instance).
In that sense, it only works if both indexes never share a 1 at a same place.
Parameters
----------
a : list of binary numbers
The index of the first element.
b : list of binary numbers
The index of the second element.
Raises
------
InvalidIndexSizeException
An error, either if both indexed are of different sizes of if the sum results in a '2'.
Returns
-------
to_return : list of binary numbers
The index of the sum of both elements.
"""
if len(a) != len(b):
raise err.InvalidIndexSizeException("Both indexes ({} and {}) are of different lengths".format(a, b)) from None
to_return = []
if type(a[-1]) is tuple:
for i in range(len(a) - 1):
to_return.append(a[i] + b[i])
if type(b[-1]) is tuple:
tup = (a[-1][0] + b[-1][0], a[-1][1] + b[-1][1])
if 2 in tup:
raise err.InvalidIndexException("Summing indexes {} and {} resulted in a 2, should not happen.".format(a, b)) from None
to_return.append(tup)
else:
to_return.append((a[-1][0] + b[-1], a[-1][1]))
if a[-1][0] + b[-1] == 2:
raise err.InvalidIndexException("Summing indexes {} and {} resulted in a 2, should not happen.".format(a, b)) from None
elif type(b[-1]) is tuple:
for i in range(len(a) - 1):
to_return.append(a[i] + b[i])
to_return.append((b[-1][0] + a[-1], b[-1][1]))
if b[-1][0] + a[-1] == 2:
raise err.InvalidIndexException("Summing indexes {} and {} resulted in a 2, should not happen.".format(a, b)) from None
else:
to_return = [a[i] + b[i] for i in range(len(a))]
if 2 in to_return:
raise err.InvalidIndexException("Summing indexes {} and {} resulted in a 2, should not happen.".format(a, b)) from None
return to_return
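# Illustrative examples: add_indexes([0, 1], [1, 0]) returns [1, 1], while
# add_indexes([0, 1], [0, 1]) raises InvalidIndexException because the sum would contain a 2.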
# %% Antecedents and successors from elements
######## Antecedents
def get_antecedents_from_element(elt, pattern):
"""
Return the antecedents (as elements) of this element (as element) in this indexed pattern.
Parameters
----------
elt : integer
The element, whose antecedents are to be returned.
pattern : list of nested integers
The pattern, in which we will search for antecedents.
Returns
-------
list of integers
List of the antecedents, as integers.
"""
idx_elt = get_index_from_element(elt, pattern)
return get_antecedents_from_index(idx_elt, pattern)
def get_antecedents_from_index(idx_elt, pattern):
"""
Return the antecedents (as elements) of this element (as index) in this indexed pattern.
Parameters
----------
idx_elt : list of binary numbers
The index of the element, whose antecedents are to be returned.
pattern : list of nested integers
The pattern, in which we will search for antecedents.
Returns
-------
list of integers
List of the antecedents, as integers.
"""
antecedents = get_antecedents_index_from_index(idx_elt)
if antecedents == []:
return []
to_return = []
for i in antecedents:
ant = get_element_from_index(i, pattern, with_tuples = False)
if ant != None:
to_return.append(ant)
return to_return
def get_antecedents_index_from_index(idx_elt):
"""
Return the antecedents (as indexes) of this element (as index).
This function does not take a pattern as argument, as the indexes of the antecedents does not depend on any.
Parameters
----------
idx_elt : list of binary numbers
The index of the element, whose antecedents are to be returned.
Returns
-------
list of list of binary numbers
List of the antecedents, as element indexes.
"""
if idx_elt == None:
return []
antecedents = []
idx = copy.deepcopy(idx_elt)
if type(idx[-1]) is tuple:
if idx[-1][1] == 0:
idx[-1] = idx[-1][0]
return get_antecedents_index_from_index(idx)
else:
            # # Without other addition as homologous
# idx[-1] = idx[-1][0]
# return [idx]
            # With other addition as homologous (no return, so it goes on to the for loop)
tab = idx[:-1]
tab.append(idx[-1][0])
antecedents.append(tab)
if idx[-1][0] == 1:
tab = idx[:-1]
tab.append((0,1))
antecedents.append(tab)
for i in range(len(idx)):
if idx[i] == 1:
new_idx = copy.deepcopy(idx)
new_idx[i] = 0
antecedents.append(new_idx)
return antecedents
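# Illustrative example: get_antecedents_index_from_index([1, 1]) returns [[0, 1], [1, 0]],
# i.e. the vertices obtained by flipping a single 1 back to 0.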
######## Pivot related to antecedents
def get_pivot_from_index(elt_idx, ant_idx, pattern):
"""
Returns the pivot (as element) of this element (elt_idx, as index)
in relation with this antecedent (ant_idx, as index) in the pattern.
Parameters
----------
idx_elt : list of binary numbers
The index of the element, for which we compute antecedent and pivot.
ant_idx : list of binary numbers
The index of the antecdent.
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
integer
The pivot, as element (integer).
"""
pivot_idx = get_pivot_index_from_index(elt_idx, ant_idx)
return get_element_from_index(pivot_idx, pattern, with_tuples = False)
def get_pivot_index_from_index(elt_idx, ant_idx):
"""
Returns the pivot (as index) of this element (elt_idx, as index) in relation with this antecedent (ant_idx, as index).
Parameters
----------
idx_elt : list of binary numbers
The index of the element, for which we compute antecedent and pivot.
ant_idx : list of binary numbers
The index of the antecdent.
Returns
-------
list of binary numbers
The pivot, as index.
"""
if type(elt_idx[-1]) == tuple:
if elt_idx[-1][1] == 0:
if type(ant_idx[-1]) != tuple:
pivot_idx = [delete_tuples(elt_idx)[i] - ant_idx[i] for i in range(len(ant_idx))]
elif elt_idx[-1][1] == ant_idx[-1][1]:
pivot_idx = [delete_tuples(elt_idx)[i] - delete_tuples(ant_idx)[i] for i in range(len(ant_idx))]
else: # Only difference is in last index, so its a warp, so return 0
pivot_idx = [0 for i in range(len(elt_idx))]
else:
pivot_idx = [elt_idx[i] - ant_idx[i] for i in range(len(ant_idx) - 1)]
if type(ant_idx[-1]) != tuple:
if delete_tuples(elt_idx) != ant_idx:
raise NotImplementedError("That's an error, right? Is {} the antecedent of {}?".format(ant_idx, elt_idx))
else:
pivot_idx.append(0)
else:
pivot_idx.append(elt_idx[-1][0] - ant_idx[-1][0])
else:
pivot_idx = [elt_idx[i] - ant_idx[i] for i in range(len(ant_idx))]
return pivot_idx
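# Illustrative example: for element [1, 1] and antecedent [0, 1],
# get_pivot_index_from_index([1, 1], [0, 1]) returns [1, 0] (the elementwise difference).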
def get_antecedents_with_pivots_from_index(elt_idx, pattern):
"""
Return a list of tuples (of integers), each tuple corresponding to a couple antecedents/pivot in relation with this antecedent (as elements).
Parameters
----------
idx_elt : list of binary numbers
The index of the element, for which we compute antecedent and pivot.
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
list of tuples of integers
Couples (antecedents, pivot) for this element.
"""
antecedents_idx = get_antecedents_index_from_index(elt_idx)
if antecedents_idx == []:
return []
else:
this_elt_ant = []
for ant_idx in antecedents_idx:
ant = get_element_from_index(ant_idx, pattern, with_tuples = False)
if ant != None:
if ant == 0:
this_elt_ant.append((0,0))
else:
pivot = get_pivot_from_index(elt_idx, ant_idx, pattern)
this_elt_ant.append((ant, pivot))
return this_elt_ant
def get_global_antecedents_with_pivots_from_index(elt_idx, pattern):
"""
Return a list of tuples (of integers), each tuple corresponding to a couple GLOBAL antecedents/pivot in relation with this antecedent (as elements).
This function corresponds to a case in C. Guichaoua's framework, and is (for now) not used in my model.
The principle is to consider antecedents not as the direct antecedents (elements linked by an arrow in the polytope),
but as any element which can be linked to this element.
TODO: Link to some of my reference.
Parameters
----------
idx_elt : list of binary numbers
The index of the element, for which we compute antecedent and pivot.
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
list of tuples of integers
Couples (global antecedents, pivot) for this element.
"""
antecedents_idx = get_antecedents_index_from_index(elt_idx)
if antecedents_idx == []:
return []
elif [0 for i in range(len(elt_idx))] in antecedents_idx:
return [(0,0)]
    for idx in antecedents_idx: # Antecedents of the antecedents, etc. (the ``append'' is taken into account by the for loop, so it also searches the antecedents of the added antecedents)
if idx != [0 for i in range(len(elt_idx))]:
for ant_ant in get_antecedents_index_from_index(idx):
if ant_ant not in antecedents_idx:
antecedents_idx.append(ant_ant)
else:
this_elt_ant = []
for ant_idx in antecedents_idx:
ant = get_element_from_index(ant_idx, pattern, with_tuples = False)
if ant != None and ant != 0:
try:
pivot = get_pivot_from_index(elt_idx, ant_idx, pattern)
if (pivot, ant) not in this_elt_ant:
this_elt_ant.append((ant, pivot))
except NotImplementedError:
pass
return this_elt_ant
##### Successors of this element
def get_successors_from_element(elt, pattern):
"""
Return the successors (as elements) of this element (as element), subject to this pattern (indexed).
Parameters
----------
elt : integer
The element, for which we compute the successors.
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
list of integers
The successors of this element (as elements).
"""
idx_elt = get_index_from_element(elt, pattern)
return get_successors_from_index(idx_elt, pattern)
def get_successors_from_index(idx_elt, pattern):
"""
Return the successors (as elements) of this element (as index), subject to this pattern (indexed).
Parameters
----------
idx_elt : list of binary numbers
The index of the element, for which we compute successors.
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
list of integers
The successors of this element (as elements).
"""
successors = get_successors_index_from_index(idx_elt)
if successors == []:
return []
to_return = []
for i in successors:
suc = get_element_from_index(i, pattern, with_tuples = False)
if suc != None:
to_return.append(suc)
return to_return
def get_successors_index_from_index(idx_elt):
"""
Return the successors (as indexes) of this element (as index).
The returned successors are all the possible ones, and won't be present in all patterns.
Parameters
----------
idx_elt : list of binary numbers
The index of the element, for which we compute successors.
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
list of list of binary numbers
The successors of this element (as indexes).
"""
if idx_elt == None:
return []
successors = []
idx = copy.deepcopy(idx_elt)
if type(idx[-1]) is tuple:
if idx[-1][1] == 0:
idx[-1] = idx[-1][0]
successors = get_successors_index_from_index(idx)
idx[-1] = (idx[-1], 1)
successors.append(idx)
return successors
else:
            # # Without other addition as homologous
# return []
            # With other addition as homologous (no return, so it goes on to the for loop)
if idx_elt[-1][0] == 0:
tab = idx[:-1]
tab.append((1,1))
successors.append(tab)
for i in range(len(idx)):
if idx[i] == 0:
new_idx = copy.deepcopy(idx)
new_idx[i] = 1
successors.append(new_idx)
return successors
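# Illustrative example: get_successors_index_from_index([0, 0]) returns [[1, 0], [0, 1]];
# successors that do not exist in a given pattern are filtered out by the callers above.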
def get_global_successors_from_index(idx_elt, pattern):
"""
Return the global successors (as elements) of this element (as index).
The returned successors are all the possible ones, and won't be present in all patterns.
This function corresponds to a case in C. Guichaoua's framework, and is (for now) not used in my model.
The principle is to consider successors not as the direct successors (elements linked by an arrow in the polytope),
but as any element which can be linked to this element.
TODO: Link to some of my reference.
Parameters
----------
idx_elt : list of binary numbers
The index of the element, for which we compute successors.
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
list of integers
The successors of this element (as element).
"""
successors = get_successors_index_from_index(idx_elt) # Direct successors
if successors == []:
return []
    for idx in successors: # Successors of the successors, etc. (the ``append'' is taken into account by the for loop, so it also searches the successors of the added successors)
for suc_suc in get_successors_index_from_index(idx):
if suc_suc not in successors:
if type(suc_suc[-1]) is not tuple or suc_suc[-1][-1] != 1:
successors.append(suc_suc)
elif type(idx_elt[-1]) is tuple and idx_elt[-1][-1] == 1:
successors.append(suc_suc)
to_return = []
for i in successors:
suc = get_element_from_index(i, pattern, with_tuples = False)
if suc != None:
to_return.append(suc)
return to_return
# %% Primers and under primers, for PPPs
def find_primers_of_low_level_systems(indexed_pattern):
"""
Recursively find all primers (as elements) of low-level systems.
This function is adapted for PPP primers retrieval.
Parameters
----------
indexed_pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Raises
------
PatternToDebugError
If the pattern is totally weird (of size different than 2 or 1, should never happen).
Returns
-------
list of integers
list of the primers of all low-level systems.
"""
if pf.get_pattern_dimension(indexed_pattern) <= 2:
return pf.flatten_pattern(indexed_pattern)[0]
else:
first_nested_pattern = indexed_pattern[0]
size_dim_inf = pf.get_pattern_size(first_nested_pattern)
primer_1 = find_primers_of_low_level_systems(first_nested_pattern)
if len(indexed_pattern) == 2:
primer_2 = find_primers_of_low_level_systems(indexed_pattern[1])
elif len(indexed_pattern) != 1:
raise err.PatternToDebugError("Pattern of invalid size: {}".format(indexed_pattern))
else:
return [find_primers_of_low_level_systems(indexed_pattern[0])]
return [primer_1, primer_2]
def get_under_primers(pattern):
"""
Return the elements (as indexed) which are successors of the primer (first element of the polytope).
These are the elements which lead the permutations in the PPP model.
Note: isn't that killing a fly with a bazooka? Under primers should be all elements containing a single one.
(Could be tricky for polytope of dim n with n-1 dim irregularity, or for irregularity on under-primers though)
Parameters
----------
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
    under_primers : list of list of binary numbers
        The under primers, as indexes.
"""
under_primers = []
for i in range(1, pf.get_pattern_size(pattern)):
idx_elt = get_index_from_element(i, pattern)
if 0 in get_antecedents_from_index(idx_elt, pattern):
under_primers.append(idx_elt)
return under_primers
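# For regular patterns without additions, the under primers are exactly the elements whose
# index contains a single 1, i.e. the direct successors of the primer, as the note above suggests.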
# Generate PPPs
def generate_ppp(pattern):
"""
Generate all PPPs (Primer Preserving Permutation) of this indexed pattern.
Parameters
----------
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
    all_ppps : list of nested lists of integers
        The list of all PPPs of this pattern, one per pair of under primers.
"""
all_ppps = []
under_primers = get_under_primers(pattern)
for fst_primer_list_idx, fst_primer in enumerate(under_primers):
for snd_primer in under_primers[fst_primer_list_idx + 1:]:
other_under_primers = [a for a in under_primers if a != fst_primer and a != snd_primer]
fst_elt = [0 for i in range(len(fst_primer))]
all_ppps.append(recursive_ppp(pattern, fst_primer, snd_primer, other_under_primers, fst_elt))
return all_ppps
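# Note: with d under primers, the nested loop above yields one PPP per unordered pair,
# i.e. d * (d - 1) / 2 permutations of the pattern.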
# Recursively generating PPPs
def recursive_ppp(pattern, fst_under_primer, snd_under_primer, other_under_primers, fst_elt):
"""
Recursively computes a PPP, with 'fst_under_primer' and 'snd_under_primer' as the first two under primers (successors of the primer).
Parameters
----------
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
fst_under_primer : list of binary numbers
The first under primer defining the PPP.
snd_under_primer : list of binary numbers
The second under primer defining the PPP.
other_under_primers : list of list of binary numbers
The other under primers, for upper-level systems.
fst_elt : integer
Element useful for the recursion, determining the first element for half a part of the pattern.
Returns
-------
nested list of integers
A PPP for that pattern, determined by 'fst_under_primer' and 'snd_under_primer'.
"""
if len(other_under_primers) == 0:
left_ppp = ppp_dim_1_pattern(fst_elt, fst_under_primer, pattern)
right_ppp = ppp_dim_1_pattern(add_indexes(fst_elt,snd_under_primer), fst_under_primer, pattern)
while None in left_ppp:
left_ppp.remove(None)
while None in right_ppp:
right_ppp.remove(None)
if left_ppp == []:
if right_ppp == []:
return None
else:
return [right_ppp]
elif right_ppp == []:
return [left_ppp]
else:
return [left_ppp, right_ppp]
else:
left_ppp = recursive_ppp(pattern, fst_under_primer, snd_under_primer, other_under_primers[:-1], fst_elt)
right_ppp = recursive_ppp(pattern, fst_under_primer, snd_under_primer, other_under_primers[:-1],add_indexes(other_under_primers[-1], fst_elt))
if left_ppp == None:
if right_ppp == None:
return None
else:
return [right_ppp]
elif right_ppp == None:
return [left_ppp]
else:
return [left_ppp, right_ppp]
def ppp_dim_1_pattern(first_elt_idx, up_idx, pattern):
"""
Compute the pattern composed of 'first_elt_idx' and the successor of 'first_elt_idx' which has up_idx as pivot.
    In that sense, this pattern is homologous to the relation between the primer and the under primer up_idx, and starts at first_elt_idx.
(This function is actually the reason for the argument 'with_tuples', as we want to keep addition here)
Parameters
----------
first_elt_idx : list of binary numbers
The first element for this pattern.
up_idx : list of binary numbers
The under primer which relation with the primer we want to apply on 'first_elt_idx'.
pattern : list of nested integers
The pattern, in which we will search for the pivot, as element.
Returns
-------
list (couple) of integers or tuples
A PPP of 1 dimension, composed of 'first_elt_idx' and its successor, determined by the relation between the primer and 'up_idx'.
"""
first_elt = get_element_from_index(first_elt_idx, pattern, with_tuples = True)
snd_elt = get_element_from_index(add_indexes(first_elt_idx, up_idx), pattern, with_tuples = True)
return [first_elt, snd_elt]
def swap_chord_sequence(chord_sequence, permuted_elements_list):
"""
Swap the chord sequence according to the current PPP.
In that sense, the permuted chord sequence will follow the permuted order of the current PPP.
Parameters
----------
chord_sequence : list
List of Chords, of any type.
permuted_elements_list : list of integers
The elements of the permuted indexed pattern.
Returns
-------
new_chord_seq : list
Permuted list of Chords.
"""
new_chord_seq = []
for i in permuted_elements_list:
new_chord_seq.append(chord_sequence[i])
return new_chord_seq
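# Illustrative example (hypothetical chords): swap_chord_sequence(['C', 'F', 'G', 'Am'], [0, 2, 1, 3])
# returns ['C', 'G', 'F', 'Am'].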
def compute_direct_antecedents(indexed_pattern):
"""
Compute the direct antecedent of every element of the pattern as described by Louboutin's paradigm.
    Indeed, each element in Louboutin's paradigm has only one antecedent, which is conditioned on the current PPP, and can be a fictive element.
Parameters
----------
indexed_pattern : list of nested integers
The pattern, which can be permuted in a PPP framework.
Returns
-------
direct_antecedents : list of integers or tuples of three integers
The list of direct antecedents.
When the antecedent is a tuple, it means that its antecedent will be the fictive element constructed by this system.
"""
pattern_size = pf.get_pattern_size(indexed_pattern)
direct_antecedents = [None for i in range(pattern_size)]
for i in range(1, pattern_size):
direct_antecedents[i] = recursively_find_direct_antecedent(i, indexed_pattern)
return direct_antecedents
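# Note: the primer (element 0) keeps None as its antecedent; a tuple entry (a_1, a_2, a_3) means the
# actual antecedent is the fictive element completing the system formed by those three elements.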
def recursively_find_direct_antecedent(i, indexed_pattern):
"""
Find the direct antecedent of element 'i' in the current indexed pattern.
This direct antecedent is the one defined by C. Louboutin's framework, and can be a fictive element.
When it is a fictive element, we return a tuple with the 3 elements defining the system.
Parameters
----------
i : element
The element, whose antecedent is to compute.
indexed_pattern : list of nested integers
The pattern, which can be permuted in a PPP framework.
Raises
------
InvalidIndexException
In cases of invalid indexes.
Returns
-------
integer or tuple of three integers
The antecedent, or the three elements to construct a system with (as the antecedent will be the fictive element determined by this system).
"""
elt_idx = get_index_from_element(i, indexed_pattern)
if type(elt_idx[-1]) is tuple:
if elt_idx[-1][1] == 1:
return i-1 # Previous element, on which it is attached
else:
elt_idx[-1] = elt_idx[-1][0]
pattern_dim = pf.get_pattern_dimension(indexed_pattern)
if pattern_dim < 2:
if len(elt_idx) != 1:
raise err.InvalidIndexException("Index of invalid size: {} (should be of size 1).".format(elt_idx))
else:
if elt_idx[0] == 0:
raise err.InvalidIndexException("Cannot compute an antecedent as it is a primer (index of 0): {}.".format(elt_idx))
elif type(indexed_pattern[0]) is tuple:
return indexed_pattern[0][0]
else:
return indexed_pattern[0]
    # By construction, whatever the PPP is, the index always refers to the same POSITION and not the same ELEMENT.
    # In that sense, both under-primers used for low-level system definition will always be [0*]01 and [0*]10.
up_one = [0 for i in range(pattern_dim)]
up_one[-1] = 1
up_two = [0 for i in range(pattern_dim)]
up_two[-2] = 1
if elt_idx.count(1) == 1: # It's an under-primer
return 0
else:
elt_minus_up_one_idx = [elt_idx[i] - up_one[i] for i in range(pattern_dim)]
elt_minus_up_two_idx = [elt_idx[i] - up_two[i] for i in range(pattern_dim)]
if -1 in elt_minus_up_one_idx:
if -1 in elt_minus_up_two_idx: # Is an element of high-level systems
primers_pattern = find_primers_of_low_level_systems(indexed_pattern)
return recursively_find_direct_antecedent(i, primers_pattern)
else:
return get_element_from_index(elt_minus_up_two_idx, indexed_pattern)
else:
if -1 in elt_minus_up_two_idx:
return get_element_from_index(elt_minus_up_one_idx, indexed_pattern)
else:
local_primer_idx = [elt_idx[i] - up_one[i] - up_two[i] for i in range(pattern_dim)]
local_primer = get_element_from_index(local_primer_idx, indexed_pattern) # a_1
elt_minus_up_one = get_element_from_index(elt_minus_up_one_idx, indexed_pattern) # a_2
elt_minus_up_two = get_element_from_index(elt_minus_up_two_idx, indexed_pattern) # a_3
                return (local_primer, elt_minus_up_one, elt_minus_up_two)
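# Worked illustration (derived from the index arithmetic above, for a 2-D pattern):
# for the element at index [1, 1], up_one = [0, 1] and up_two = [1, 0], so its antecedent
# is the system (element at [0, 0], element at [1, 0], element at [0, 1]).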
/storages/mysql.py
import aiomysql
import jsonpickle as jsonpickle
from asyncio import AbstractEventLoop
from aiogram.fsm.storage.base import BaseStorage, StorageKey, StateType
from typing import Dict, Optional, Any
class PoolManager:
__slots__ = ("_db_auth_data", "_tables_is_created", "pool", "cursor")
def __init__(self, **kwargs) -> None:
self._db_auth_data = kwargs
self._tables_is_created = False
async def __aenter__(self) -> aiomysql.Cursor:
        self.pool: aiomysql.Connection = await aiomysql.connect(**self._db_auth_data, autocommit=True)
        self.cursor: aiomysql.Cursor = await self.pool.cursor()
if not self._tables_is_created:
            # MySQL uses backticks (not double quotes) for identifiers, and a TEXT column
            # cannot be a primary key without a prefix length, so VARCHAR is used instead.
            await self.cursor.execute("""CREATE TABLE IF NOT EXISTS `aiogram_state`(
                `key` VARCHAR(255) NOT NULL PRIMARY KEY,
                `state` TEXT NOT NULL)""")
            await self.cursor.execute("""CREATE TABLE IF NOT EXISTS `aiogram_data`(
                `key` VARCHAR(255) NOT NULL PRIMARY KEY,
                `data` TEXT)""")
self._tables_is_created = True
return self.cursor
async def __aexit__(self, *args):
await self.cursor.close()
await self.pool.close()
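# Usage sketch (illustrative credentials): the manager opens a fresh connection per
# "async with" block and hands back a cursor, e.g.
#   async with PoolManager(host="localhost", port=3306, user="bot",
#                          password="secret", db="botdb") as cur:
#       await cur.execute("SELECT 1")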
class MySQLStorage(BaseStorage):
__slots__ = ("host", "port", "username", "password", "db", "loop")
def __init__(
self, username: str, password: str, database: str,
host: str = "localhost", port: int = 5432,
loop: AbstractEventLoop = None
) -> None:
        self.__auth_data = {
            "host": host,
            "port": port,
            "user": username,  # aiomysql expects "user", not "username"
            "password": password,
            "db": database  # aiomysql expects "db", not "database"
        }
if loop is not None:
self.__auth_data.update({"loop": loop})
self.__db = PoolManager(**self.__auth_data)
    @staticmethod
    def _build_key(key: StorageKey) -> str:
        # Assumption: the (bot, chat, user) triple is enough to identify an FSM context
        # here; extend this if thread- or destiny-scoped storage is needed.
        return f"{key.bot_id}:{key.chat_id}:{key.user_id}"
    async def set_state(self, key: StorageKey, state: StateType = None) -> None:
        async with self.__db as db:
            resolved = getattr(state, "state", state)
            if resolved is None:
                await db.execute("DELETE FROM `aiogram_state` WHERE `key`=%s", (self._build_key(key),))
            else:
                await db.execute(
                    "INSERT INTO `aiogram_state` VALUES(%s, %s) "
                    "ON DUPLICATE KEY UPDATE `state`=VALUES(`state`)",
                    (self._build_key(key), resolved))
    async def get_state(self, key: StorageKey) -> Optional[str]:
        async with self.__db as db:
            await db.execute("SELECT `state` FROM `aiogram_state` WHERE `key`=%s", (self._build_key(key),))
            response = await db.fetchone()
            return response[0] if response else None
    async def set_data(self, key: StorageKey, data: Dict[str, Any]) -> None:
        async with self.__db as db:
            await db.execute(
                "INSERT INTO `aiogram_data` VALUES(%s, %s) "
                "ON DUPLICATE KEY UPDATE `data`=VALUES(`data`)",
                (self._build_key(key), jsonpickle.dumps(data)))
    async def get_data(self, key: StorageKey) -> Dict[str, Any]:
        async with self.__db as db:
            await db.execute("SELECT `data` FROM `aiogram_data` WHERE `key`=%s", (self._build_key(key),))
            response = await db.fetchone()
            return jsonpickle.loads(response[0]) if response and response[0] else {}
    async def update_data(self, key: StorageKey, data: Dict[str, Any]) -> Dict[str, Any]:
        # BaseStorage expects update_data to merge into the stored data, not overwrite it.
        current = await self.get_data(key)
        current.update(data)
        await self.set_data(key, current)
        return current.copy()
async def close(self) -> None:
        pass
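# Minimal usage sketch (not from the original source; assumes aiogram 3.x and a
# reachable MySQL server — adjust the credentials to your setup):
#   from aiogram import Dispatcher
#   storage = MySQLStorage(username="bot", password="secret", database="botdb")
#   dp = Dispatcher(storage=storage)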
/CollectionBatchTool-0.1.6-py3-none-any.whl/specifymodels.py
from peewee import *
from sys import byteorder
class BitField(Field):
"""Define a custom field type for MySQL's BIT data type"""
    db_field = 'bit'  # peewee 2.x custom field type name, mapped via register_fields below
def db_value(self, value):
return (
None if value is None
else (int(value)).to_bytes(1, byteorder=byteorder))
def python_value(self, value):
return (
None if value is None
else int.from_bytes(value, byteorder=byteorder))
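# Round-trip sketch for the custom field (illustrative only):
#   BitField().db_value(True)        -> b'\x01'  (stored in the BIT column)
#   BitField().python_value(b'\x01') -> 1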
MySQLDatabase.register_fields({'bit': 'BIT'})
database = MySQLDatabase(None) # Un-initialized database
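# The database is initialized lazily; a typical setup (connection details are
# placeholders) would look something like:
#   database.init('specify', host='localhost', user='myuser', passwd='mypasswd')
#   database.connect()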
class BaseModel(Model):
class Meta:
database = database
DivisionProxy = Proxy()
TaxontreedefProxy = Proxy()
TaxonProxy = Proxy()
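# The proxies above stand in for models that are referenced before they are defined
# (circular foreign keys); they are bound to the real model classes by the
# initialize() calls at the bottom of this module.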
class Geographytreedef(BaseModel):
geographytreedefid = PrimaryKeyField(db_column='GeographyTreeDefID')
class Meta:
db_table = 'geographytreedef'
class Specifyuser(BaseModel):
name = CharField(db_column='Name', unique=True)
specifyuserid = PrimaryKeyField(db_column='SpecifyUserID')
class Meta:
db_table = 'specifyuser'
class Agent(BaseModel):
abbreviation = CharField(db_column='Abbreviation', index=True, null=True)
agentid = PrimaryKeyField(db_column='AgentID')
agenttype = IntegerField(db_column='AgentType', index=True)
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True,
rel_model='self', to_field='agentid')
dateofbirth = DateField(db_column='DateOfBirth', null=True)
dateofbirthprecision = IntegerField(
db_column='DateOfBirthPrecision', null=True)
dateofdeath = DateField(db_column='DateOfDeath', null=True)
dateofdeathprecision = IntegerField(
db_column='DateOfDeathPrecision', null=True)
datetype = IntegerField(db_column='DateType', null=True)
divisionid = ForeignKeyField(
db_column='DivisionID', null=True, rel_model=DivisionProxy,
to_field='usergroupscopeid')
email = CharField(db_column='Email', null=True)
firstname = CharField(db_column='FirstName', index=True, null=True)
guid = CharField(db_column='GUID', index=True, null=True)
initials = CharField(db_column='Initials', null=True)
interests = CharField(db_column='Interests', null=True)
jobtitle = CharField(db_column='JobTitle', null=True)
lastname = CharField(db_column='LastName', index=True, null=True)
middleinitial = CharField(db_column='MiddleInitial', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model='self',
related_name='agent_modifiedbyagentid_set', to_field='agentid')
parentorganizationid = ForeignKeyField(
db_column='ParentOrganizationID', null=True, rel_model='self',
related_name='agent_parentorganizationid_set', to_field='agentid')
remarks = TextField(db_column='Remarks', null=True)
specifyuserid = ForeignKeyField(
db_column='SpecifyUserID', null=True,
rel_model=Specifyuser, to_field='specifyuserid')
suffix = CharField(db_column='Suffix', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
title = CharField(db_column='Title', null=True)
url = CharField(db_column='URL', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'agent'
class Storagetreedef(BaseModel):
storagetreedefid = PrimaryKeyField(db_column='StorageTreeDefID')
class Meta:
db_table = 'storagetreedef'
class Institution(BaseModel):
institutionid = IntegerField(db_column='institutionId', null=True)
institution_name = CharField(db_column='Name', index=True, null=True)
storagetreedefid = ForeignKeyField(
db_column='StorageTreeDefID', null=True, rel_model=Storagetreedef,
to_field='storagetreedefid')
usergroupscopeid = PrimaryKeyField(db_column='UserGroupScopeId')
class Meta:
db_table = 'institution'
class Division(BaseModel):
institutionid = ForeignKeyField(
db_column='InstitutionID', rel_model=Institution,
to_field='usergroupscopeid')
division_name = CharField(db_column='Name', index=True, null=True)
usergroupscopeid = PrimaryKeyField(db_column='UserGroupScopeId')
divisionid = IntegerField(db_column='divisionId', null=True)
class Meta:
db_table = 'division'
class Discipline(BaseModel):
divisionid = ForeignKeyField(
db_column='DivisionID', rel_model=Division,
to_field='usergroupscopeid')
geographytreedefid = ForeignKeyField(
db_column='GeographyTreeDefID', rel_model=Geographytreedef,
to_field='geographytreedefid')
discipline_name = CharField(db_column='Name', index=True, null=True)
taxontreedefid = ForeignKeyField(
db_column='TaxonTreeDefID', null=True, rel_model=TaxontreedefProxy,
to_field='taxontreedefid')
usergroupscopeid = PrimaryKeyField(db_column='UserGroupScopeId')
disciplineid = IntegerField(db_column='disciplineId', null=True)
class Meta:
db_table = 'discipline'
class Addressofrecord(BaseModel):
address = CharField(db_column='Address', null=True)
address2 = CharField(db_column='Address2', null=True)
addressofrecordid = PrimaryKeyField(db_column='AddressOfRecordID')
agentid = ForeignKeyField(
db_column='AgentID', null=True, rel_model=Agent, to_field='agentid')
city = CharField(db_column='City', null=True)
country = CharField(db_column='Country', null=True)
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='addressofrecord_createdbyagentid_set',
to_field='agentid')
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='addressofrecord_modifiedbyagentid_set',
to_field='agentid')
postalcode = CharField(db_column='PostalCode', null=True)
remarks = TextField(db_column='Remarks', null=True)
state = CharField(db_column='State', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'addressofrecord'
class Repositoryagreement(BaseModel):
addressofrecordid = ForeignKeyField(
db_column='AddressOfRecordID', null=True, rel_model=Addressofrecord,
to_field='addressofrecordid')
agentid = ForeignKeyField(
db_column='AgentID', rel_model=Agent, to_field='agentid')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='repositoryagreement_createdbyagentid_set',
to_field='agentid')
datereceived = DateField(db_column='DateReceived', null=True)
divisionid = ForeignKeyField(
db_column='DivisionID', rel_model=Division,
to_field='usergroupscopeid')
enddate = DateField(db_column='EndDate', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='repositoryagreement_modifiedbyagentid_set',
to_field='agentid')
number1 = FloatField(db_column='Number1', null=True)
number2 = FloatField(db_column='Number2', null=True)
remarks = TextField(db_column='Remarks', null=True)
repositoryagreementid = PrimaryKeyField(db_column='RepositoryAgreementID')
repositoryagreementnumber = CharField(
db_column='RepositoryAgreementNumber', index=True)
startdate = DateField(db_column='StartDate', index=True, null=True)
status = CharField(db_column='Status', null=True)
text1 = CharField(db_column='Text1', null=True)
text2 = CharField(db_column='Text2', null=True)
text3 = CharField(db_column='Text3', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
version = IntegerField(db_column='Version', null=True)
yesno1 = BitField(db_column='YesNo1', null=True) # bit
yesno2 = BitField(db_column='YesNo2', null=True) # bit
class Meta:
db_table = 'repositoryagreement'
class Accession(BaseModel):
accessioncondition = CharField(db_column='AccessionCondition', null=True)
accessionid = PrimaryKeyField(db_column='AccessionID')
accessionnumber = CharField(db_column='AccessionNumber', index=True)
addressofrecordid = ForeignKeyField(
db_column='AddressOfRecordID', null=True, rel_model=Addressofrecord,
to_field='addressofrecordid')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
to_field='agentid')
dateaccessioned = DateField(
db_column='DateAccessioned', index=True, null=True)
dateacknowledged = DateField(db_column='DateAcknowledged', null=True)
datereceived = DateField(db_column='DateReceived', null=True)
divisionid = ForeignKeyField(
db_column='DivisionID', rel_model=Division,
to_field='usergroupscopeid')
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='accession_modifiedbyagentid_set', to_field='agentid')
number1 = FloatField(db_column='Number1', null=True)
number2 = FloatField(db_column='Number2', null=True)
remarks = TextField(db_column='Remarks', null=True)
repositoryagreementid = ForeignKeyField(
db_column='RepositoryAgreementID', null=True,
rel_model=Repositoryagreement, to_field='repositoryagreementid')
status = CharField(db_column='Status', null=True)
text1 = TextField(db_column='Text1', null=True)
text2 = TextField(db_column='Text2', null=True)
text3 = TextField(db_column='Text3', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
totalvalue = DecimalField(db_column='TotalValue', null=True)
type = CharField(db_column='Type', null=True)
verbatimdate = CharField(db_column='VerbatimDate', null=True)
version = IntegerField(db_column='Version', null=True)
yesno1 = BitField(db_column='YesNo1', null=True) # bit
yesno2 = BitField(db_column='YesNo2', null=True) # bit
class Meta:
db_table = 'accession'
class Accessionagent(BaseModel):
accessionagentid = PrimaryKeyField(db_column='AccessionAgentID')
accessionid = ForeignKeyField(
db_column='AccessionID', null=True, rel_model=Accession,
to_field='accessionid')
agentid = ForeignKeyField(
db_column='AgentID', rel_model=Agent, to_field='agentid')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='accessionagent_createdbyagentid_set',
to_field='agentid')
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='accessionagent_modifiedbyagentid_set',
to_field='agentid')
remarks = TextField(db_column='Remarks', null=True)
repositoryagreementid = ForeignKeyField(
db_column='RepositoryAgreementID', null=True,
rel_model=Repositoryagreement, to_field='repositoryagreementid')
role = CharField(db_column='Role')
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'accessionagent'
class Collection(BaseModel):
collectionname = CharField(
db_column='CollectionName', index=True, null=True)
disciplineid = ForeignKeyField(
db_column='DisciplineID', rel_model=Discipline,
to_field='usergroupscopeid')
usergroupscopeid = PrimaryKeyField(db_column='UserGroupScopeId')
collectionid = IntegerField(db_column='collectionId', null=True)
class Meta:
db_table = 'collection'
class Geographytreedefitem(BaseModel):
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='geographytreedefitem_createdbyagentid_set',
to_field='agentid')
fullnameseparator = CharField(db_column='FullNameSeparator', null=True)
geographytreedefid = ForeignKeyField(
db_column='GeographyTreeDefID', rel_model=Geographytreedef,
to_field='geographytreedefid')
geographytreedefitemid = PrimaryKeyField(
db_column='GeographyTreeDefItemID')
isenforced = BitField(db_column='IsEnforced', null=True) # bit
isinfullname = BitField(db_column='IsInFullName', null=True) # bit
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='geographytreedefitem_modifiedbyagentid_set',
to_field='agentid')
name = CharField(db_column='Name')
parentitemid = ForeignKeyField(
db_column='ParentItemID', null=True, rel_model='self',
to_field='geographytreedefitemid')
rankid = IntegerField(db_column='RankID')
remarks = TextField(db_column='Remarks', null=True)
textafter = CharField(db_column='TextAfter', null=True)
textbefore = CharField(db_column='TextBefore', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
title = CharField(db_column='Title', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'geographytreedefitem'
class Geography(BaseModel):
abbrev = CharField(db_column='Abbrev', null=True)
acceptedid = ForeignKeyField(
db_column='AcceptedID', null=True, rel_model='self',
to_field='geographyid')
centroidlat = DecimalField(db_column='CentroidLat', null=True)
centroidlon = DecimalField(db_column='CentroidLon', null=True)
commonname = CharField(db_column='CommonName', null=True)
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='geography_createdbyagentid_set', to_field='agentid')
fullname = CharField(db_column='FullName', index=True, null=True)
gml = TextField(db_column='GML', null=True)
guid = CharField(db_column='GUID', null=True)
geographycode = CharField(db_column='GeographyCode', null=True)
geographyid = PrimaryKeyField(db_column='GeographyID')
geographytreedefid = ForeignKeyField(
db_column='GeographyTreeDefID', rel_model=Geographytreedef,
to_field='geographytreedefid')
geographytreedefitemid = ForeignKeyField(
db_column='GeographyTreeDefItemID', rel_model=Geographytreedefitem,
to_field='geographytreedefitemid')
highestchildnodenumber = IntegerField(
db_column='HighestChildNodeNumber', null=True)
isaccepted = BitField(db_column='IsAccepted') # bit
iscurrent = BitField(db_column='IsCurrent', null=True) # bit
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='geography_modifiedbyagentid_set', to_field='agentid')
name = CharField(db_column='Name', index=True)
nodenumber = IntegerField(db_column='NodeNumber', null=True)
number1 = IntegerField(db_column='Number1', null=True)
number2 = IntegerField(db_column='Number2', null=True)
parentid = ForeignKeyField(
db_column='ParentID', null=True, rel_model='self',
related_name='geography_parentid_set', to_field='geographyid')
rankid = IntegerField(db_column='RankID')
remarks = TextField(db_column='Remarks', null=True)
text1 = CharField(db_column='Text1', null=True)
text2 = CharField(db_column='Text2', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
timestampversion = DateTimeField(db_column='TimestampVersion', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'geography'
class Locality(BaseModel):
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='locality_createdbyagentid_set', to_field='agentid')
datum = CharField(db_column='Datum', null=True)
disciplineid = ForeignKeyField(
db_column='DisciplineID', rel_model=Discipline,
to_field='usergroupscopeid')
elevationaccuracy = FloatField(db_column='ElevationAccuracy', null=True)
elevationmethod = CharField(db_column='ElevationMethod', null=True)
gml = TextField(db_column='GML', null=True)
guid = CharField(db_column='GUID', null=True)
geographyid = ForeignKeyField(
db_column='GeographyID', null=True,
rel_model=Geography, to_field='geographyid')
lat1text = CharField(db_column='Lat1Text', null=True)
lat2text = CharField(db_column='Lat2Text', null=True)
latlongaccuracy = FloatField(db_column='LatLongAccuracy', null=True)
latlongmethod = CharField(db_column='LatLongMethod', null=True)
latlongtype = CharField(db_column='LatLongType', null=True)
latitude1 = DecimalField(db_column='Latitude1', null=True)
latitude2 = DecimalField(db_column='Latitude2', null=True)
localityid = PrimaryKeyField(db_column='LocalityID')
localityname = CharField(db_column='LocalityName', index=True)
long1text = CharField(db_column='Long1Text', null=True)
long2text = CharField(db_column='Long2Text', null=True)
longitude1 = DecimalField(db_column='Longitude1', null=True)
longitude2 = DecimalField(db_column='Longitude2', null=True)
maxelevation = FloatField(db_column='MaxElevation', null=True)
minelevation = FloatField(db_column='MinElevation', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='locality_modifiedbyagentid_set', to_field='agentid')
namedplace = CharField(db_column='NamedPlace', index=True, null=True)
originalelevationunit = CharField(
db_column='OriginalElevationUnit', null=True)
originallatlongunit = IntegerField(
db_column='OriginalLatLongUnit', null=True)
relationtonamedplace = CharField(
db_column='RelationToNamedPlace', index=True, null=True)
remarks = TextField(db_column='Remarks', null=True)
shortname = CharField(db_column='ShortName', null=True)
srclatlongunit = IntegerField(db_column='SrcLatLongUnit')
text1 = TextField(db_column='Text1', null=True)
text2 = TextField(db_column='Text2', null=True)
text3 = TextField(db_column='Text3', null=True)
text4 = TextField(db_column='Text4', null=True)
text5 = TextField(db_column='Text5', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
verbatimelevation = CharField(db_column='VerbatimElevation', null=True)
verbatimlatitude = CharField(db_column='VerbatimLatitude', null=True)
verbatimlongitude = CharField(db_column='VerbatimLongitude', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'locality'
class Collectingeventattribute(BaseModel):
collectingeventattributeid = PrimaryKeyField(
db_column='CollectingEventAttributeID')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='collectingeventattribute_createdbyagentid_set',
to_field='agentid')
disciplineid = ForeignKeyField(
db_column='DisciplineID', rel_model=Discipline,
to_field='usergroupscopeid')
hosttaxonid = ForeignKeyField(
db_column='HostTaxonID', null=True, rel_model=TaxonProxy,
to_field='taxonid')
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='collectingeventattribute_modifiedbyagentid_set',
to_field='agentid')
number1 = FloatField(db_column='Number1', null=True)
number10 = FloatField(db_column='Number10', null=True)
number11 = FloatField(db_column='Number11', null=True)
number12 = FloatField(db_column='Number12', null=True)
number13 = FloatField(db_column='Number13', null=True)
number2 = FloatField(db_column='Number2', null=True)
number3 = FloatField(db_column='Number3', null=True)
number4 = FloatField(db_column='Number4', null=True)
number5 = FloatField(db_column='Number5', null=True)
number6 = FloatField(db_column='Number6', null=True)
number7 = FloatField(db_column='Number7', null=True)
number8 = FloatField(db_column='Number8', null=True)
number9 = FloatField(db_column='Number9', null=True)
remarks = TextField(db_column='Remarks', null=True)
text1 = TextField(db_column='Text1', null=True)
text10 = CharField(db_column='Text10', null=True)
text11 = CharField(db_column='Text11', null=True)
text12 = CharField(db_column='Text12', null=True)
text13 = CharField(db_column='Text13', null=True)
text14 = CharField(db_column='Text14', null=True)
text15 = CharField(db_column='Text15', null=True)
text16 = CharField(db_column='Text16', null=True)
text17 = CharField(db_column='Text17', null=True)
text2 = TextField(db_column='Text2', null=True)
text3 = TextField(db_column='Text3', null=True)
text4 = CharField(db_column='Text4', null=True)
text5 = CharField(db_column='Text5', null=True)
text6 = CharField(db_column='Text6', null=True)
text7 = CharField(db_column='Text7', null=True)
text8 = CharField(db_column='Text8', null=True)
text9 = CharField(db_column='Text9', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
version = IntegerField(db_column='Version', null=True)
yesno1 = BitField(db_column='YesNo1', null=True) # bit
yesno2 = BitField(db_column='YesNo2', null=True) # bit
yesno3 = BitField(db_column='YesNo3', null=True) # bit
yesno4 = BitField(db_column='YesNo4', null=True) # bit
yesno5 = BitField(db_column='YesNo5', null=True) # bit
class Meta:
db_table = 'collectingeventattribute'
class Collectingevent(BaseModel):
collectingeventattributeid = ForeignKeyField(
db_column='CollectingEventAttributeID', null=True,
rel_model=Collectingeventattribute,
to_field='collectingeventattributeid')
collectingeventid = PrimaryKeyField(db_column='CollectingEventID')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
to_field='agentid')
disciplineid = ForeignKeyField(
db_column='DisciplineID', rel_model=Discipline,
to_field='usergroupscopeid')
enddate = DateField(db_column='EndDate', index=True, null=True)
enddateprecision = IntegerField(db_column='EndDatePrecision', null=True)
enddateverbatim = CharField(db_column='EndDateVerbatim', null=True)
endtime = IntegerField(db_column='EndTime', null=True)
guid = CharField(db_column='GUID', index=True, null=True)
integer1 = IntegerField(db_column='Integer1', null=True)
integer2 = IntegerField(db_column='Integer2', null=True)
localityid = ForeignKeyField(
db_column='LocalityID', null=True, rel_model=Locality,
to_field='localityid')
method = CharField(db_column='Method', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='collectingevent_modifiedbyagentid_set',
to_field='agentid')
remarks = TextField(db_column='Remarks', null=True)
startdate = DateField(db_column='StartDate', index=True, null=True)
startdateprecision = IntegerField(
db_column='StartDatePrecision', null=True)
startdateverbatim = CharField(db_column='StartDateVerbatim', null=True)
starttime = IntegerField(db_column='StartTime', null=True)
stationfieldnumber = CharField(
db_column='StationFieldNumber', index=True, null=True)
text1 = TextField(db_column='Text1', null=True)
text2 = TextField(db_column='Text2', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
verbatimdate = CharField(db_column='VerbatimDate', null=True)
verbatimlocality = TextField(db_column='VerbatimLocality', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'collectingevent'
class Collector(BaseModel):
agentid = ForeignKeyField(
db_column='AgentID', rel_model=Agent, to_field='agentid')
collectingeventid = ForeignKeyField(
db_column='CollectingEventID', rel_model=Collectingevent,
to_field='collectingeventid')
collectorid = PrimaryKeyField(db_column='CollectorID')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True,
rel_model=Agent, related_name='collector_createdbyagentid_set',
to_field='agentid')
divisionid = ForeignKeyField(
db_column='DivisionID', null=True, rel_model=Division,
to_field='usergroupscopeid')
isprimary = BitField(db_column='IsPrimary') # bit
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='collector_modifiedbyagentid_set',
to_field='agentid')
ordernumber = IntegerField(db_column='OrderNumber')
remarks = TextField(db_column='Remarks', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'collector'
class Collectionobjectattribute(BaseModel):
bottomdistance = FloatField(db_column='BottomDistance', null=True)
collectionmemberid = IntegerField(
db_column='CollectionMemberID', index=True)
collectionobjectattributeid = PrimaryKeyField(
db_column='CollectionObjectAttributeID')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='collectionobjectattribute_createdbyagentid',
to_field='agentid')
direction = CharField(db_column='Direction', null=True)
distanceunits = CharField(db_column='DistanceUnits', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='collectionobjectattribute_modifiedbyagentid_set',
to_field='agentid')
number1 = FloatField(db_column='Number1', null=True)
number10 = FloatField(db_column='Number10', null=True)
number11 = FloatField(db_column='Number11', null=True)
number12 = FloatField(db_column='Number12', null=True)
number13 = FloatField(db_column='Number13', null=True)
number14 = FloatField(db_column='Number14', null=True)
number15 = FloatField(db_column='Number15', null=True)
number16 = FloatField(db_column='Number16', null=True)
number17 = FloatField(db_column='Number17', null=True)
number18 = FloatField(db_column='Number18', null=True)
number19 = FloatField(db_column='Number19', null=True)
number2 = FloatField(db_column='Number2', null=True)
number20 = FloatField(db_column='Number20', null=True)
number21 = FloatField(db_column='Number21', null=True)
number22 = FloatField(db_column='Number22', null=True)
number23 = FloatField(db_column='Number23', null=True)
number24 = FloatField(db_column='Number24', null=True)
number25 = FloatField(db_column='Number25', null=True)
number26 = FloatField(db_column='Number26', null=True)
number27 = FloatField(db_column='Number27', null=True)
number28 = FloatField(db_column='Number28', null=True)
number29 = FloatField(db_column='Number29', null=True)
number3 = FloatField(db_column='Number3', null=True)
number30 = IntegerField(db_column='Number30', null=True)
number31 = FloatField(db_column='Number31', null=True)
number32 = FloatField(db_column='Number32', null=True)
number33 = FloatField(db_column='Number33', null=True)
number34 = FloatField(db_column='Number34', null=True)
number35 = FloatField(db_column='Number35', null=True)
number36 = FloatField(db_column='Number36', null=True)
number37 = FloatField(db_column='Number37', null=True)
number38 = FloatField(db_column='Number38', null=True)
number39 = FloatField(db_column='Number39', null=True)
number4 = FloatField(db_column='Number4', null=True)
number40 = FloatField(db_column='Number40', null=True)
number41 = FloatField(db_column='Number41', null=True)
number42 = FloatField(db_column='Number42', null=True)
number5 = FloatField(db_column='Number5', null=True)
number6 = FloatField(db_column='Number6', null=True)
number7 = FloatField(db_column='Number7', null=True)
number8 = IntegerField(db_column='Number8', null=True)
number9 = FloatField(db_column='Number9', null=True)
positionstate = CharField(db_column='PositionState', null=True)
remarks = TextField(db_column='Remarks', null=True)
text1 = TextField(db_column='Text1', null=True)
text10 = CharField(db_column='Text10', null=True)
text11 = CharField(db_column='Text11', null=True)
text12 = CharField(db_column='Text12', null=True)
text13 = CharField(db_column='Text13', null=True)
text14 = CharField(db_column='Text14', null=True)
text15 = CharField(db_column='Text15', null=True)
text2 = TextField(db_column='Text2', null=True)
text3 = TextField(db_column='Text3', null=True)
text4 = CharField(db_column='Text4', null=True)
text5 = CharField(db_column='Text5', null=True)
text6 = CharField(db_column='Text6', null=True)
text7 = CharField(db_column='Text7', null=True)
text8 = CharField(db_column='Text8', null=True)
text9 = CharField(db_column='Text9', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
topdistance = FloatField(db_column='TopDistance', null=True)
version = IntegerField(db_column='Version', null=True)
yesno1 = BitField(db_column='YesNo1', null=True) # bit
yesno2 = BitField(db_column='YesNo2', null=True) # bit
yesno3 = BitField(db_column='YesNo3', null=True) # bit
yesno4 = BitField(db_column='YesNo4', null=True) # bit
yesno5 = BitField(db_column='YesNo5', null=True) # bit
yesno6 = BitField(db_column='YesNo6', null=True) # bit
yesno7 = BitField(db_column='YesNo7', null=True) # bit
class Meta:
db_table = 'collectionobjectattribute'
class Collectionobject(BaseModel):
altcatalognumber = CharField(db_column='AltCatalogNumber', null=True)
availability = CharField(db_column='Availability', null=True)
catalognumber = CharField(db_column='CatalogNumber', index=True, null=True)
catalogeddate = DateField(db_column='CatalogedDate', index=True, null=True)
catalogeddateprecision = IntegerField(
db_column='CatalogedDatePrecision', null=True)
catalogeddateverbatim = CharField(
db_column='CatalogedDateVerbatim', null=True)
catalogerid = ForeignKeyField(
db_column='CatalogerID', null=True, rel_model=Agent,
to_field='agentid')
collectingeventid = ForeignKeyField(
db_column='CollectingEventID', null=True, rel_model=Collectingevent,
to_field='collectingeventid')
collectionid = ForeignKeyField(
db_column='CollectionID', rel_model=Collection,
to_field='usergroupscopeid')
collectionmemberid = IntegerField(
db_column='CollectionMemberID', index=True)
collectionobjectattributeid = ForeignKeyField(
db_column='CollectionObjectAttributeID', null=True,
rel_model=Collectionobjectattribute,
to_field='collectionobjectattributeid')
collectionobjectid = PrimaryKeyField(db_column='CollectionObjectID')
countamt = IntegerField(db_column='CountAmt', null=True)
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='collectionobject_createdbyagentid_set',
to_field='agentid')
deaccessioned = BitField(db_column='Deaccessioned', null=True) # bit
description = CharField(db_column='Description', null=True)
fieldnumber = CharField(db_column='FieldNumber', index=True, null=True)
guid = CharField(db_column='GUID', index=True, null=True)
integer1 = IntegerField(db_column='Integer1', null=True)
integer2 = IntegerField(db_column='Integer2', null=True)
inventorydate = DateField(db_column='InventoryDate', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='collectionobject_modifiedbyagentid_set',
to_field='agentid')
modifier = CharField(db_column='Modifier', null=True)
name = CharField(db_column='Name', null=True)
notifications = CharField(db_column='Notifications', null=True)
number1 = FloatField(db_column='Number1', null=True)
number2 = FloatField(db_column='Number2', null=True)
ocr = TextField(db_column='OCR', null=True)
objectcondition = CharField(db_column='ObjectCondition', null=True)
projectnumber = CharField(db_column='ProjectNumber', null=True)
remarks = TextField(db_column='Remarks', null=True)
restrictions = CharField(db_column='Restrictions', null=True)
text1 = TextField(db_column='Text1', null=True)
text2 = TextField(db_column='Text2', null=True)
text3 = TextField(db_column='Text3', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
totalvalue = DecimalField(db_column='TotalValue', null=True)
version = IntegerField(db_column='Version', null=True)
yesno1 = BitField(db_column='YesNo1', null=True) # bit
yesno2 = BitField(db_column='YesNo2', null=True) # bit
yesno3 = BitField(db_column='YesNo3', null=True) # bit
yesno4 = BitField(db_column='YesNo4', null=True) # bit
yesno5 = BitField(db_column='YesNo5', null=True) # bit
yesno6 = BitField(db_column='YesNo6', null=True) # bit
class Meta:
db_table = 'collectionobject'
class Storagetreedefitem(BaseModel):
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='storagetreedefitem_createdbyagentid_set',
to_field='agentid')
fullnameseparator = CharField(db_column='FullNameSeparator', null=True)
isenforced = BitField(db_column='IsEnforced', null=True) # bit
isinfullname = BitField(db_column='IsInFullName', null=True) # bit
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='storagetreedefitem_modifiedbyagentid_set',
to_field='agentid')
name = CharField(db_column='Name')
parentitemid = ForeignKeyField(
db_column='ParentItemID', null=True, rel_model='self',
to_field='storagetreedefitemid')
rankid = IntegerField(db_column='RankID')
remarks = TextField(db_column='Remarks', null=True)
storagetreedefid = ForeignKeyField(
db_column='StorageTreeDefID', rel_model=Storagetreedef,
to_field='storagetreedefid')
storagetreedefitemid = PrimaryKeyField(db_column='StorageTreeDefItemID')
textafter = CharField(db_column='TextAfter', null=True)
textbefore = CharField(db_column='TextBefore', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
title = CharField(db_column='Title', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'storagetreedefitem'
class Storage(BaseModel):
abbrev = CharField(db_column='Abbrev', null=True)
acceptedid = ForeignKeyField(
db_column='AcceptedID', null=True, rel_model='self',
to_field='storageid')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='storage_createdbyagentid_set', to_field='agentid')
fullname = CharField(db_column='FullName', index=True, null=True)
highestchildnodenumber = IntegerField(
db_column='HighestChildNodeNumber', null=True)
isaccepted = BitField(db_column='IsAccepted') # bit
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='storage_modifiedbyagentid_set', to_field='agentid')
name = CharField(db_column='Name', index=True)
nodenumber = IntegerField(db_column='NodeNumber', null=True)
number1 = IntegerField(db_column='Number1', null=True)
number2 = IntegerField(db_column='Number2', null=True)
parentid = ForeignKeyField(
db_column='ParentID', null=True, rel_model='self',
related_name='storage_parentid_set', to_field='storageid')
rankid = IntegerField(db_column='RankID')
remarks = TextField(db_column='Remarks', null=True)
storageid = PrimaryKeyField(db_column='StorageID')
storagetreedefid = ForeignKeyField(
db_column='StorageTreeDefID', rel_model=Storagetreedef,
to_field='storagetreedefid')
storagetreedefitemid = ForeignKeyField(
db_column='StorageTreeDefItemID', rel_model=Storagetreedefitem,
to_field='storagetreedefitemid')
text1 = CharField(db_column='Text1', null=True)
text2 = CharField(db_column='Text2', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
timestampversion = DateTimeField(db_column='TimestampVersion', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'storage'
class Preptype(BaseModel):
collectionid = ForeignKeyField(
db_column='CollectionID', rel_model=Collection,
to_field='usergroupscopeid')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
to_field='agentid')
isloanable = BitField(db_column='IsLoanable') # bit
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='preptype_modifiedbyagentid_set', to_field='agentid')
name = CharField(db_column='Name')
preptypeid = PrimaryKeyField(db_column='PrepTypeID')
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'preptype'
class Preparation(BaseModel):
collectionmemberid = IntegerField(
db_column='CollectionMemberID', index=True)
collectionobjectid = ForeignKeyField(
db_column='CollectionObjectID', rel_model=Collectionobject,
to_field='collectionobjectid')
countamt = IntegerField(db_column='CountAmt', null=True)
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
to_field='agentid')
description = CharField(db_column='Description', null=True)
integer1 = IntegerField(db_column='Integer1', null=True)
integer2 = IntegerField(db_column='Integer2', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='preparation_modifiedbyagentid_set', to_field='agentid')
number1 = FloatField(db_column='Number1', null=True)
number2 = FloatField(db_column='Number2', null=True)
preptypeid = ForeignKeyField(
db_column='PrepTypeID', rel_model=Preptype, to_field='preptypeid')
preparationid = PrimaryKeyField(db_column='PreparationID')
preparedbyid = ForeignKeyField(
db_column='PreparedByID', null=True, rel_model=Agent,
related_name='agent_preparedbyid_set', to_field='agentid')
prepareddate = DateField(db_column='PreparedDate', index=True, null=True)
prepareddateprecision = IntegerField(
db_column='PreparedDatePrecision', null=True)
remarks = TextField(db_column='Remarks', null=True)
samplenumber = CharField(db_column='SampleNumber', null=True)
status = CharField(db_column='Status', null=True)
storageid = ForeignKeyField(
db_column='StorageID', null=True, rel_model=Storage,
to_field='storageid')
storagelocation = CharField(db_column='StorageLocation', null=True)
text1 = TextField(db_column='Text1', null=True)
text2 = TextField(db_column='Text2', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
version = IntegerField(db_column='Version', null=True)
yesno1 = BitField(db_column='YesNo1', null=True) # bit
yesno2 = BitField(db_column='YesNo2', null=True) # bit
yesno3 = BitField(db_column='YesNo3', null=True) # bit
class Meta:
db_table = 'preparation'
class Taxontreedef(BaseModel):
taxontreedefid = PrimaryKeyField(db_column='TaxonTreeDefID')
class Meta:
db_table = 'taxontreedef'
class Taxontreedefitem(BaseModel):
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='taxontreedefitem_createdbyagentid_set',
to_field='agentid')
formattoken = CharField(
db_column='FormatToken', null=True)
fullnameseparator = CharField(db_column='FullNameSeparator', null=True)
isenforced = BitField(db_column='IsEnforced', null=True) # bit
isinfullname = BitField(db_column='IsInFullName', null=True) # bit
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='taxontreedefitem_modifiedbyagentid_set',
to_field='agentid')
name = CharField(db_column='Name')
parentitemid = ForeignKeyField(
db_column='ParentItemID', null=True, rel_model='self',
to_field='taxontreedefitemid')
rankid = IntegerField(db_column='RankID')
remarks = TextField(db_column='Remarks', null=True)
taxontreedefid = ForeignKeyField(
db_column='TaxonTreeDefID', rel_model=Taxontreedef,
to_field='taxontreedefid')
taxontreedefitemid = PrimaryKeyField(db_column='TaxonTreeDefItemID')
textafter = CharField(db_column='TextAfter', null=True)
textbefore = CharField(db_column='TextBefore', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
title = CharField(db_column='Title', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'taxontreedefitem'
class Taxon(BaseModel):
acceptedid = ForeignKeyField(
db_column='AcceptedID', null=True, rel_model='self',
to_field='taxonid')
author = CharField(db_column='Author', null=True)
colstatus = CharField(db_column='COLStatus', null=True)
citesstatus = CharField(db_column='CitesStatus', null=True)
commonname = CharField(db_column='CommonName', index=True, null=True)
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='taxon_createdbyagentid_set', to_field='agentid')
cultivarname = CharField(db_column='CultivarName', null=True)
environmentalprotectionstatus = CharField(
db_column='EnvironmentalProtectionStatus', index=True, null=True)
esastatus = CharField(db_column='EsaStatus', null=True)
fullname = CharField(db_column='FullName', index=True, null=True)
guid = CharField(db_column='GUID', index=True, null=True)
groupnumber = CharField(db_column='GroupNumber', null=True)
highestchildnodenumber = IntegerField(
db_column='HighestChildNodeNumber', null=True)
hybridparent1id = ForeignKeyField(
db_column='HybridParent1ID', null=True, rel_model='self',
related_name='taxon_hybridparent1id_set', to_field='taxonid')
hybridparent2id = ForeignKeyField(
db_column='HybridParent2ID', null=True, rel_model='self',
related_name='taxon_hybridparent2id_set', to_field='taxonid')
isaccepted = BitField(db_column='IsAccepted') # bit
ishybrid = BitField(db_column='IsHybrid') # bit
isisnumber = CharField(db_column='IsisNumber', null=True)
labelformat = CharField(db_column='LabelFormat', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='taxon_modifiedbyagentid_set', to_field='agentid')
name = CharField(db_column='Name', index=True)
ncbitaxonnumber = CharField(db_column='NcbiTaxonNumber', null=True)
nodenumber = IntegerField(db_column='NodeNumber', null=True)
number1 = IntegerField(db_column='Number1', null=True)
number2 = IntegerField(db_column='Number2', null=True)
number3 = FloatField(db_column='Number3', null=True)
number4 = FloatField(db_column='Number4', null=True)
number5 = FloatField(db_column='Number5', null=True)
parentid = ForeignKeyField(
db_column='ParentID', null=True, rel_model='self',
related_name='taxon_parentid_set', to_field='taxonid')
rankid = IntegerField(db_column='RankID')
remarks = TextField(db_column='Remarks', null=True)
source = CharField(db_column='Source', null=True)
taxonid = PrimaryKeyField(db_column='TaxonID')
taxontreedefid = ForeignKeyField(
db_column='TaxonTreeDefID', rel_model=Taxontreedef,
to_field='taxontreedefid')
taxontreedefitemid = ForeignKeyField(
db_column='TaxonTreeDefItemID', rel_model=Taxontreedefitem,
to_field='taxontreedefitemid')
taxonomicserialnumber = CharField(
db_column='TaxonomicSerialNumber', index=True, null=True)
text1 = CharField(db_column='Text1', null=True)
text2 = CharField(db_column='Text2', null=True)
text3 = TextField(db_column='Text3', null=True)
text4 = TextField(db_column='Text4', null=True)
text5 = TextField(db_column='Text5', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
unitind1 = CharField(db_column='UnitInd1', null=True)
unitind2 = CharField(db_column='UnitInd2', null=True)
unitind3 = CharField(db_column='UnitInd3', null=True)
unitind4 = CharField(db_column='UnitInd4', null=True)
unitname1 = CharField(db_column='UnitName1', null=True)
unitname2 = CharField(db_column='UnitName2', null=True)
unitname3 = CharField(db_column='UnitName3', null=True)
unitname4 = CharField(db_column='UnitName4', null=True)
usfwscode = CharField(db_column='UsfwsCode', null=True)
version = IntegerField(db_column='Version', null=True)
yesno1 = BitField(db_column='YesNo1', null=True) # bit
yesno2 = BitField(db_column='YesNo2', null=True) # bit
yesno3 = BitField(db_column='YesNo3', null=True) # bit
class Meta:
db_table = 'taxon'
class Determination(BaseModel):
addendum = CharField(db_column='Addendum', null=True)
alternatename = CharField(db_column='AlternateName', index=True, null=True)
collectionmemberid = IntegerField(
db_column='CollectionMemberID', index=True)
collectionobjectid = ForeignKeyField(
db_column='CollectionObjectID', rel_model=Collectionobject,
to_field='collectionobjectid')
confidence = CharField(db_column='Confidence', null=True)
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='determination_createdbyagentid_set', to_field='agentid')
determinationid = PrimaryKeyField(db_column='DeterminationID')
determineddate = DateField(
db_column='DeterminedDate', index=True, null=True)
determineddateprecision = IntegerField(
db_column='DeterminedDatePrecision', null=True)
determinerid = ForeignKeyField(
db_column='DeterminerID', null=True, rel_model=Agent,
related_name='agent_determinerid_set', to_field='agentid')
featureorbasis = CharField(db_column='FeatureOrBasis', null=True)
guid = CharField(db_column='GUID', index=True, null=True)
iscurrent = BitField(db_column='IsCurrent') # bit
method = CharField(db_column='Method', null=True)
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='determination_modifiedbyagentid_set', to_field='agentid')
nameusage = CharField(db_column='NameUsage', null=True)
number1 = FloatField(db_column='Number1', null=True)
number2 = FloatField(db_column='Number2', null=True)
preferredtaxonid = ForeignKeyField(
db_column='PreferredTaxonID', null=True, rel_model=Taxon,
to_field='taxonid')
qualifier = CharField(db_column='Qualifier', null=True)
remarks = TextField(db_column='Remarks', null=True)
subspqualifier = CharField(db_column='SubSpQualifier', null=True)
taxonid = ForeignKeyField(
db_column='TaxonID', null=True, rel_model=Taxon,
related_name='taxon_taxonid_set', to_field='taxonid')
text1 = TextField(db_column='Text1', null=True)
text2 = TextField(db_column='Text2', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
typestatusname = CharField(
db_column='TypeStatusName', index=True, null=True)
varqualifier = CharField(db_column='VarQualifier', null=True)
version = IntegerField(db_column='Version', null=True)
yesno1 = BitField(db_column='YesNo1', null=True) # bit
yesno2 = BitField(db_column='YesNo2', null=True) # bit
class Meta:
db_table = 'determination'
class Picklist(BaseModel):
collectionid = ForeignKeyField(
db_column='CollectionID', rel_model=Collection,
to_field='usergroupscopeid')
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='picklist_createdbyagentid_set', to_field='agentid')
fieldname = CharField(db_column='FieldName', null=True)
filterfieldname = CharField(db_column='FilterFieldName', null=True)
filtervalue = CharField(db_column='FilterValue', null=True)
formatter = CharField(db_column='Formatter', null=True)
issystem = BitField(db_column='IsSystem') # bit
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='picklist_modifiedbyagentid_set', to_field='agentid')
name = CharField(db_column='Name', index=True)
picklistid = PrimaryKeyField(db_column='PickListID')
readonly = BitField(db_column='ReadOnly') # bit
sizelimit = IntegerField(db_column='SizeLimit', null=True)
sorttype = IntegerField(db_column='SortType', null=True)
tablename = CharField(db_column='TableName', null=True)
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
type = IntegerField(db_column='Type')
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'picklist'
class Picklistitem(BaseModel):
createdbyagentid = ForeignKeyField(
db_column='CreatedByAgentID', null=True, rel_model=Agent,
related_name='picklistitem_createdbyagentid_set', to_field='agentid')
modifiedbyagentid = ForeignKeyField(
db_column='ModifiedByAgentID', null=True, rel_model=Agent,
related_name='picklistitem_modifiedbyagentid_set', to_field='agentid')
ordinal = IntegerField(
db_column='Ordinal', null=True)
picklistid = ForeignKeyField(
db_column='PickListID', rel_model=Picklist, to_field='picklistid')
picklistitemid = PrimaryKeyField(db_column='PickListItemID')
timestampcreated = DateTimeField(db_column='TimestampCreated')
timestampmodified = DateTimeField(db_column='TimestampModified', null=True)
title = CharField(db_column='Title')
value = CharField(db_column='Value', null=True)
version = IntegerField(db_column='Version', null=True)
class Meta:
db_table = 'picklistitem'
DivisionProxy.initialize(Division)
TaxontreedefProxy.initialize(Taxontreedef)
TaxonProxy.initialize(Taxon)
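# Example query (sketch; assumes database.init(...) has been called as above):
#   for obj in Collectionobject.select().where(Collectionobject.catalognumber == '00001'):
#       print(obj.catalognumber, obj.catalogeddate)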
/Djblets-3.3.tar.gz/Djblets-3.3/docs/releasenotes/0.7.33.rst
============================
Djblets 0.7.33 Release Notes
============================
**Release date**: March 1, 2016
Upgrade Instructions
====================
To upgrade to Djblets 0.7.33, run::
pip install Djblets==0.7.33
or::
easy_install Djblets==0.7.33
Security Updates
================
* Fixed a Self-XSS vulnerability in the :py:class:`djblets.datagrid` column
headers.
A recently-discovered vulnerability in the datagrid templates allows an
attacker to generate a URL to any datagrid page containing malicious code in
a column sorting value. If the user visits that URL and then clicks that
column, the code will execute.
The cause of the vulnerability was due to a template not escaping
user-provided values.
This vulnerability was reported by Jose Carlos Exposito Bueno (0xlabs).
Contributors
============
* Christian Hammond
* Jose Carlos Exposito Bueno (0xlabs)
/Invariant-Attention-0.1.0.tar.gz/Invariant-Attention-0.1.0/README.md
# Invariant Attention [](https://twitter.com/intent/tweet?text=Wow:&url=https%3A%2F%2Fgithub.com%2FRishit-dagli%2FInvariant-Attention)

[](https://github.com/Rishit-dagli/Invariant-Attention/actions/workflows/tests.yml)
[](https://github.com/Rishit-dagli/Invariant-Attention/actions/workflows/python-publish.yml)
[](https://github.com/psf/black)
[](https://colab.research.google.com/github/Rishit-dagli/Invariant-Attention/blob/main/example/invariant-attention-example.ipynb)

[](https://github.com/Rishit-dagli/Invariant-Attention/stargazers)
[](https://github.com/Rishit-dagli)
[](https://twitter.com/intent/follow?screen_name=rishit_dagli)
Invariant Point Attention, as used in the structure module of AlphaFold2 from the paper [Highly accurate protein structure prediction with AlphaFold](https://www.nature.com/articles/s41586-021-03819-2#Abs1) for coordinate refinement, is a form of attention that acts on a set of frames and is invariant under global Euclidean transformations on those frames.
This repository also includes an Invariant Point Attention-based transformer block, which is an Invariant Point Attention layer followed by a feedforward network.

## Installation
Run the following to install:
```shell
pip install invariant-attention
```
## Developing invariant-attention
To install `invariant-attention`, along with tools you need to develop and test, run the following in your virtualenv:
```sh
git clone https://github.com/Rishit-dagli/invariant-attention.git
# or clone your own fork
cd invariant-attention
pip install -e .[dev]
```
To run rank and shape tests run any of the following:
```py
pytest invariant_attention --verbose
```
## Usage
Running a standalone attention block; this module can also be used without the pairwise representation:
```python
attn = InvariantPointAttention(
dim=64, # single (and pairwise) representation dimension
heads=8, # number of attention heads
scalar_key_dim=16, # scalar query-key dimension
scalar_value_dim=16, # scalar value dimension
point_key_dim=4, # point query-key dimension
point_value_dim=4, # point value dimension
)
single_repr = tf.random.normal((1, 256, 64)) # (batch x seq x dim)
pairwise_repr = tf.random.normal((1, 256, 256, 64)) # (batch x seq x seq x dim)
mask = tf.ones((1, 256), dtype=tf.bool)  # (batch x seq)
rotations = repeat(
tf.eye(3), "... -> b n ...", b=1, n=256
)
translations = tf.zeros((1, 256, 3))
attn_out = attn(
single_repr,
pairwise_repr,
rotations=rotations,
translations=translations,
mask=mask,
) # (1, 256, 64)
```
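One way to sanity-check the invariance claim (this snippet is not part of the library; it reuses the tensors defined above, applies a fixed global rotation to the frames, and compares the outputs):
```python
import numpy as np
theta = 0.3  # rotate all frames by the same global rotation about the z-axis
R = tf.constant(
    [[np.cos(theta), -np.sin(theta), 0.0],
     [np.sin(theta), np.cos(theta), 0.0],
     [0.0, 0.0, 1.0]],
    dtype=tf.float32,
)
rotated_rotations = tf.einsum("ij,bnjk->bnik", R, rotations)
rotated_translations = tf.einsum("ij,bnj->bni", R, translations)
attn_out_rotated = attn(
    single_repr,
    pairwise_repr,
    rotations=rotated_rotations,
    translations=rotated_translations,
    mask=mask,
)
# attn_out and attn_out_rotated should agree up to floating point tolerance.
```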
Running an `IPABlock` (Invariant Point Attention Block), which is an IPA layer followed by a feedforward network and includes normalization layers:
```python
block = IPABlock(
dim=64,
heads=8,
scalar_key_dim=16,
scalar_value_dim=16,
point_key_dim=4,
point_value_dim=4,
)
seq = tf.random.normal((1, 256, 64))
pairwise_repr = tf.random.normal((1, 256, 256, 64))
mask = tf.ones((1, 256), dtype=tf.bool)
rotations = repeat(tf.eye(3), "... -> b n ...", b=1, n=256)
translations = tf.zeros((1, 256, 3))
block_out = block(
seq,
pairwise_repr=pairwise_repr,
rotations=rotations,
translations=translations,
mask=mask,
)
updates = tf.keras.layers.Dense(6)(block_out)
quaternion_update, translation_update = tf.split(
updates, num_or_size_splits=2, axis=-1
) # (1, 256, 3), (1, 256, 3)
```
Running an `IPATransformer` which is a stack of `IPABlock` and feedforward layers:
```python
seq = tf.random.normal((1, 256, 32))
pairwise_repr = tf.random.normal((1, 256, 256, 32))
mask = tf.ones((1, 256), dtype=tf.bool)
translations = tf.zeros((1, 256, 3))
model = IPATransformer(
dim=32,
depth=2,
num_tokens=None,
predict_points=False,
detach_rotations=True,
)
outputs = model(
single_repr=seq,
translations=translations,
quaternions=tf.random.normal((1, 256, 4)),
pairwise_repr=pairwise_repr,
mask=mask,
) # (1, 256, 32), (1, 256, 3), (1, 256, 4)
```
## Want to Contribute 🙋♂️?
Awesome! If you want to contribute to this project, you're always welcome! See [Contributing Guidelines](CONTRIBUTING.md). You can also take a look at [open issues](https://github.com/Rishit-dagli/invariant-attention/issues) for getting more information about current or upcoming tasks.
## Want to discuss? 💬
Have any questions, doubts or want to present your opinions, views? You're always welcome. You can [start discussions](https://github.com/Rishit-dagli/invariant-attention/discussions).
## Citation
```bibtex
@article{jumper2021highly,
title={Highly accurate protein structure prediction with AlphaFold},
author={Jumper, John and Evans, Richard and Pritzel, Alexander and Green, Tim and Figurnov, Michael and Ronneberger, Olaf and Tunyasuvunakool, Kathryn and Bates, Russ and {\v{Z}}{\'\i}dek, Augustin and Potapenko, Anna and others},
journal={Nature},
volume={596},
number={7873},
pages={583--589},
year={2021},
publisher={Nature Publishing Group}
}
```
## License
```
Copyright 2022 Rishit Dagli
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
``` | PypiClean |
/GTW-1.2.6.tar.gz/GTW-1.2.6/media/js/V5a/gallery.js |
;
( function ($) {
"use strict";
$.gallery = $.merge
( function gallery (opts, context) {
var O = $.merge ({}, $.gallery.defaults, opts);
var S = $.merge
( { inline : "." + O.inline_class }
, $.gallery.selectors
, O ["selectors"]
);
var controls = $.merge
( {}
, $.gallery.controls
, O ["controls"]
);
var g_els = $.query (S.gallery, context);
function one_gallery (gallery_el) {
var current = 0;
var inline_el = $.query1 (S.inline, gallery_el);
var photo_el = $.query1 (S.selected_photo, gallery_el);
var th_container_el = $.query1 (S.thumbnails, gallery_el);
var play_controls = $.query (controls.play, gallery_el);
var thumbnail_els = $.query (S.thumb, th_container_el);
var timer_cb;
var cb =
{ head : function (ev) {
return show (0, ev);
}
, next : function (ev) {
return show (current + 1, ev);
}
, photo_click : function (ev) {
var result;
if (timer_cb) {
result = cb.stop (ev);
} else {
var box = this.getBoundingClientRect ();
var w_fifth = box.width / 5;
var x = ev.clientX;
if (x < box.left + w_fifth) {
result = cb.prev (ev);
} else if (x > box.left + 2 * w_fifth) {
result = cb.next (ev);
};
$.prevent_default (ev);
};
return result;
}
, play : function (ev) {
if (timer_cb) {
return cb.stop (ev);
} else {
return cb.start (ev);
};
}
, prev : function (ev) {
return show (current - 1, ev);
}
, show : function (ev) {
var i = this.getAttribute ("data-index");
cb.stop (ev);
return show (i, ev);
}
, start : function (ev) {
if (! timer_cb) {
timer_cb = window.setInterval (cb.next, O.delay);
$.for_each
( play_controls
, function (play_control) {
var btn = $.query1 (S.btn, play_control);
btn.classList.add (O.btn_class_stop);
btn.classList.remove (O.btn_class_play);
}
);
};
}
, stop : function (ev) {
if (timer_cb) {
window.clearInterval (timer_cb);
timer_cb = null;
$.for_each
( play_controls
, function (play_control) {
var btn = $.query1 (S.btn, play_control);
btn.classList.add (O.btn_class_play);
btn.classList.remove (O.btn_class_stop);
}
);
};
$.prevent_default (ev);
}
, tail : function (ev) {
return show (-1, ev);
}
};
function show (index, ev) {
var len = thumbnail_els.length;
var alt = photo_el.alt;
var old_th_el = thumbnail_els [current], th_el;
var old_index = current;
var rel_pos = O.rel_pos;
old_th_el.classList.remove (O.selected_class);
if (index < 0) {
index += len;
};
current = index % len;
th_el = thumbnail_els [current];
photo_el.src = O.url_transformer (th_el.src);
photo_el.title = (alt ? alt + ":" : "Photo") +
" " + (current+1) + "/" + len;
th_el.classList.add (O.selected_class);
$.scroll_to_h
(th_el, old_index < current ? rel_pos : 1 - rel_pos);
};
for (var ck in controls) {
if (ck in cb) {
( function (k) {
$.$$( controls [k], gallery_el).bind
( "click"
, function (ev) {
(k === "play") || cb.stop (ev);
return cb [k] (ev);
}
);
}
) (ck);
};
};
$.bind (photo_el, "click", cb.photo_click);
$.$$ (thumbnail_els)
.bind ("click", cb.show)
.for_each
( function (el, i) {
el.setAttribute ("data-index", i);
}
);
th_container_el.classList.add (O.inline_class);
show (0);
};
$.for_each (g_els, one_gallery);
}
, { controls :
{ head : "button.first"
, next : "button.next"
, play : "button.play"
, prev : "button.prev"
, tail : "button.last"
}
, defaults :
{ btn_class_play : "fa-play"
, btn_class_stop : "fa-stop"
, delay : 2000
, inline_class : "inline"
, play_class : "playing"
, rel_pos : 1/3
, selected_class : "selected"
, url_transformer : function (name)
{ return name.replace (/\/th\//, "/im/"); }
}
, selectors :
{ btn : ".fa"
, gallery : ".gallery"
, selected_photo : ".photo img"
, thumb : "img"
, thumbnails : ".thumbnails"
}
}
);
} ($V5a)
);
// __END__ V5a/gallery.js | PypiClean |
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/auth/__init__.py | import inspect
import re
from django.apps import apps as django_apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.middleware.csrf import rotate_token
from django.utils.crypto import constant_time_compare
from django.utils.module_loading import import_string
from django.utils.translation import LANGUAGE_SESSION_KEY
from .signals import user_logged_in, user_logged_out, user_login_failed
SESSION_KEY = '_auth_user_id'
BACKEND_SESSION_KEY = '_auth_user_backend'
HASH_SESSION_KEY = '_auth_user_hash'
REDIRECT_FIELD_NAME = 'next'
def load_backend(path):
return import_string(path)()
def _get_backends(return_tuples=False):
backends = []
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
backends.append((backend, backend_path) if return_tuples else backend)
if not backends:
raise ImproperlyConfigured(
'No authentication backends have been defined. Does '
'AUTHENTICATION_BACKENDS contain anything?'
)
return backends
def get_backends():
return _get_backends(return_tuples=False)
def _clean_credentials(credentials):
"""
Clean a dictionary of credentials of potentially sensitive info before
sending to less secure functions.
Not comprehensive - intended for user_login_failed signal
"""
SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)
CLEANSED_SUBSTITUTE = '********************'
for key in credentials:
if SENSITIVE_CREDENTIALS.search(key):
credentials[key] = CLEANSED_SUBSTITUTE
return credentials
def _get_user_session_key(request):
# This value in the session is always serialized to a string, so we need
# to convert it back to Python whenever we access it.
return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY])
def authenticate(request=None, **credentials):
"""
If the given credentials are valid, return a User object.
"""
for backend, backend_path in _get_backends(return_tuples=True):
try:
inspect.getcallargs(backend.authenticate, request, **credentials)
except TypeError:
# This backend doesn't accept these credentials as arguments. Try the next one.
continue
try:
user = backend.authenticate(request, **credentials)
except PermissionDenied:
# This backend says to stop in our tracks - this user should not be allowed in at all.
break
if user is None:
continue
# Annotate the user object with the path of the backend.
user.backend = backend_path
return user
# The credentials supplied are invalid to all backends, fire signal
user_login_failed.send(sender=__name__, credentials=_clean_credentials(credentials), request=request)
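# Typical usage (illustrative sketch), e.g. inside a login view:
#
#   from django.contrib.auth import authenticate, login
#
#   user = authenticate(request, username=username, password=password)
#   if user is not None:
#       login(request, user)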
def login(request, user, backend=None):
"""
Persist a user id and a backend in the request. This way a user doesn't
have to reauthenticate on every request. Note that data set during
the anonymous session is retained when the user logs in.
"""
session_auth_hash = ''
if user is None:
user = request.user
if hasattr(user, 'get_session_auth_hash'):
session_auth_hash = user.get_session_auth_hash()
if SESSION_KEY in request.session:
if _get_user_session_key(request) != user.pk or (
session_auth_hash and
not constant_time_compare(request.session.get(HASH_SESSION_KEY, ''), session_auth_hash)):
# To avoid reusing another user's session, create a new, empty
# session if the existing session corresponds to a different
# authenticated user.
request.session.flush()
else:
request.session.cycle_key()
try:
backend = backend or user.backend
except AttributeError:
backends = _get_backends(return_tuples=True)
if len(backends) == 1:
_, backend = backends[0]
else:
raise ValueError(
'You have multiple authentication backends configured and '
'therefore must provide the `backend` argument or set the '
'`backend` attribute on the user.'
)
else:
if not isinstance(backend, str):
raise TypeError('backend must be a dotted import path string (got %r).' % backend)
request.session[SESSION_KEY] = user._meta.pk.value_to_string(user)
request.session[BACKEND_SESSION_KEY] = backend
request.session[HASH_SESSION_KEY] = session_auth_hash
if hasattr(request, 'user'):
request.user = user
rotate_token(request)
user_logged_in.send(sender=user.__class__, request=request, user=user)
def logout(request):
"""
Remove the authenticated user's ID from the request and flush their session
data.
"""
# Dispatch the signal before the user is logged out so the receivers have a
# chance to find out *who* logged out.
user = getattr(request, 'user', None)
if not getattr(user, 'is_authenticated', True):
user = None
user_logged_out.send(sender=user.__class__, request=request, user=user)
# remember language choice saved to session
language = request.session.get(LANGUAGE_SESSION_KEY)
request.session.flush()
if language is not None:
request.session[LANGUAGE_SESSION_KEY] = language
if hasattr(request, 'user'):
from django.contrib.auth.models import AnonymousUser
request.user = AnonymousUser()
def get_user_model():
"""
Return the User model that is active in this project.
"""
try:
return django_apps.get_model(settings.AUTH_USER_MODEL, require_ready=False)
except ValueError:
raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL
)
def get_user(request):
"""
Return the user model instance associated with the given request session.
If no user is retrieved, return an instance of `AnonymousUser`.
"""
from .models import AnonymousUser
user = None
try:
user_id = _get_user_session_key(request)
backend_path = request.session[BACKEND_SESSION_KEY]
except KeyError:
pass
else:
if backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
user = backend.get_user(user_id)
# Verify the session
if hasattr(user, 'get_session_auth_hash'):
session_hash = request.session.get(HASH_SESSION_KEY)
session_hash_verified = session_hash and constant_time_compare(
session_hash,
user.get_session_auth_hash()
)
if not session_hash_verified:
request.session.flush()
user = None
return user or AnonymousUser()
def get_permission_codename(action, opts):
"""
Return the codename of the permission for the specified action.
"""
return '%s_%s' % (action, opts.model_name)
def update_session_auth_hash(request, user):
"""
Updating a user's password logs out all sessions for the user.
Take the current request and the updated user object from which the new
session hash will be derived and update the session hash appropriately to
prevent a password change from logging out the session from which the
password was changed.
"""
request.session.cycle_key()
if hasattr(user, 'get_session_auth_hash') and request.user == user:
request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
default_app_config = 'django.contrib.auth.apps.AuthConfig' | PypiClean |
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/geometry/transformations/reflection.py | from compas.geometry import dot_vectors
from compas.geometry import cross_vectors
from compas.geometry import normalize_vector
from compas.geometry.transformations import identity_matrix
from compas.geometry.transformations import Transformation
class Reflection(Transformation):
"""Class representing a reflection transformation.
A reflection transformation mirrors points at a plane.
Parameters
----------
matrix : list[list[float]], optional
A 4x4 matrix (or similar) representing a reflection.
Examples
--------
>>> point = [1, 1, 1]
>>> normal = [0, 0, 1]
>>> R1 = Reflection.from_plane((point, normal))
>>> R2 = Transformation([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 2], [0, 0, 0, 1]])
>>> R1 == R2
True
"""
def __init__(self, matrix=None):
if matrix:
pass
super(Reflection, self).__init__(matrix=matrix)
def __repr__(self):
return "Reflection({0!r})".format(self.matrix)
@classmethod
def from_plane(cls, plane):
"""Construct a reflection transformation that mirrors wrt the given plane.
Parameters
----------
plane : [point, vector] | :class:`~compas.geometry.Plane`
The reflection plane.
Returns
-------
:class:`~compas.geometry.Reflection`
The reflection transformation.
"""
point, normal = plane
normal = normalize_vector((list(normal)))
matrix = identity_matrix(4)
for i in range(3):
for j in range(3):
matrix[i][j] -= 2.0 * normal[i] * normal[j]
for i in range(3):
matrix[i][3] = 2 * dot_vectors(point, normal) * normal[i]
R = cls()
R.matrix = matrix
return R
@classmethod
def from_frame(cls, frame):
"""Construct a reflection transformation that mirrors wrt the given frame.
Parameters
----------
frame : [point, vector, vector] | :class:`~compas.geometry.Frame`
The reflection frame.
Returns
-------
:class:`~compas.geometry.Reflection`
The reflection transformation.
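Examples
--------
>>> from compas.geometry import Frame
>>> frame = Frame([1, 1, 1], [1, 0, 0], [0, 1, 0])
>>> R = Reflection.from_frame(frame)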
"""
if isinstance(frame, (tuple, list)):
point = frame[0]
zaxis = cross_vectors(frame[1], frame[2])
else:
point = frame.point
zaxis = frame.zaxis
return cls.from_plane((point, zaxis)) | PypiClean |
/ORE_strhub-0.0.1-py3-none-any.whl/strhub/models/trba/system.py |
from functools import partial
from typing import Sequence, Any, Optional
import torch
import torch.nn.functional as F
from pytorch_lightning.utilities.types import STEP_OUTPUT
from timm.models.helpers import named_apply
from torch import Tensor
from strhub.models.base import CrossEntropySystem, CTCSystem
from strhub.models.utils import init_weights
from .model import TRBA as Model
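# TRBA trains the attention-based decoder autoregressively with cross-entropy,
# while TRBC reuses the same backbone with a CTC head (use_ctc=True).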
class TRBA(CrossEntropySystem):
def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
img_size: Sequence[int], num_fiducial: int, output_channel: int, hidden_size: int,
**kwargs: Any) -> None:
super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
self.save_hyperparameters()
self.max_label_length = max_label_length
img_h, img_w = img_size
self.model = Model(img_h, img_w, len(self.tokenizer), num_fiducial,
output_channel=output_channel, hidden_size=hidden_size, use_ctc=False)
named_apply(partial(init_weights, exclude=['Transformation.LocalizationNetwork.localization_fc2']), self.model)
@torch.jit.ignore
def no_weight_decay(self):
return {'model.Prediction.char_embeddings.weight'}
def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
max_length = self.max_label_length if max_length is None else min(max_length, self.max_label_length)
text = images.new_full([1], self.bos_id, dtype=torch.long)
return self.model.forward(images, max_length, text)
def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
images, labels = batch
encoded = self.tokenizer.encode(labels, self.device)
inputs = encoded[:, :-1] # remove <eos>
targets = encoded[:, 1:] # remove <bos>
max_length = encoded.shape[1] - 2 # exclude <bos> and <eos> from count
logits = self.model.forward(images, max_length, inputs)
loss = F.cross_entropy(logits.flatten(end_dim=1), targets.flatten(), ignore_index=self.pad_id)
self.log('loss', loss)
return loss
class TRBC(CTCSystem):
def __init__(self, charset_train: str, charset_test: str, max_label_length: int,
batch_size: int, lr: float, warmup_pct: float, weight_decay: float,
img_size: Sequence[int], num_fiducial: int, output_channel: int, hidden_size: int,
**kwargs: Any) -> None:
super().__init__(charset_train, charset_test, batch_size, lr, warmup_pct, weight_decay)
self.save_hyperparameters()
self.max_label_length = max_label_length
img_h, img_w = img_size
self.model = Model(img_h, img_w, len(self.tokenizer), num_fiducial,
output_channel=output_channel, hidden_size=hidden_size, use_ctc=True)
named_apply(partial(init_weights, exclude=['Transformation.LocalizationNetwork.localization_fc2']), self.model)
def forward(self, images: Tensor, max_length: Optional[int] = None) -> Tensor:
# max_label_length is unused in CTC prediction
return self.model.forward(images, None)
def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
images, labels = batch
loss = self.forward_logits_loss(images, labels)[1]
self.log('loss', loss)
return loss | PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/cometd/longPollTransportJsonEncoded.js | if(!dojo._hasResource["dojox.cometd.longPollTransportJsonEncoded"]){
dojo._hasResource["dojox.cometd.longPollTransportJsonEncoded"]=true;
dojo.provide("dojox.cometd.longPollTransportJsonEncoded");
dojo.require("dojox.cometd._base");
dojox.cometd.longPollTransportJsonEncoded=new function(){
this._connectionType="long-polling";
this._cometd=null;
this.check=function(_1,_2,_3){
return ((!_3)&&(dojo.indexOf(_1,"long-polling")>=0));
};
this.tunnelInit=function(){
var _4={channel:"/meta/connect",clientId:this._cometd.clientId,connectionType:this._connectionType,id:""+this._cometd.messageId++};
_4=this._cometd._extendOut(_4);
this.openTunnelWith([_4]);
};
this.tunnelCollapse=function(){
if(!this._cometd._initialized){
return;
}
if(this._cometd._advice&&this._cometd._advice["reconnect"]=="none"){
return;
}
if(this._cometd._status=="connected"){
setTimeout(dojo.hitch(this,function(){
this._connect();
}),this._cometd._interval());
}else{
setTimeout(dojo.hitch(this._cometd,function(){
this.init(this.url,this._props);
}),this._cometd._interval());
}
};
this._connect=function(){
if(!this._cometd._initialized){
return;
}
if(this._cometd._polling){
return;
}
if((this._cometd._advice)&&(this._cometd._advice["reconnect"]=="handshake")){
this._cometd._status="unconnected";
this._initialized=false;
this._cometd.init(this._cometd.url,this._cometd._props);
}else{
if(this._cometd._status=="connected"){
var _5={channel:"/meta/connect",connectionType:this._connectionType,clientId:this._cometd.clientId,id:""+this._cometd.messageId++};
if(this._cometd.connectTimeout>=this._cometd.expectedNetworkDelay){
_5.advice={timeout:(this._cometd.connectTimeout-this._cometd.expectedNetworkDelay)};
}
_5=this._cometd._extendOut(_5);
this.openTunnelWith([_5]);
}
}
};
this.deliver=function(_6){
};
this.openTunnelWith=function(_7,_8){
this._cometd._polling=true;
var _9={url:(_8||this._cometd.url),postData:dojo.toJson(_7),contentType:"text/json;charset=UTF-8",handleAs:this._cometd.handleAs,load:dojo.hitch(this,function(_a){
this._cometd._polling=false;
this._cometd.deliver(_a);
this._cometd._backon();
this.tunnelCollapse();
}),error:dojo.hitch(this,function(_b){
this._cometd._polling=false;
var _c={failure:true,error:_b,advice:this._cometd._advice};
this._cometd._publishMeta("connect",false,_c);
this._cometd._backoff();
this.tunnelCollapse();
})};
var _d=this._cometd._connectTimeout();
if(_d>0){
_9.timeout=_d;
}
this._poll=dojo.rawXhrPost(_9);
};
this.sendMessages=function(_e){
for(var i=0;i<_e.length;i++){
_e[i].clientId=this._cometd.clientId;
_e[i].id=""+this._cometd.messageId++;
_e[i]=this._cometd._extendOut(_e[i]);
}
return dojo.rawXhrPost({url:this._cometd.url||dojo.config["cometdRoot"],handleAs:this._cometd.handleAs,load:dojo.hitch(this._cometd,"deliver"),postData:dojo.toJson(_e),contentType:"text/json;charset=UTF-8",error:dojo.hitch(this,function(_f){
this._cometd._publishMeta("publish",false,{messages:_e});
}),timeout:this._cometd.expectedNetworkDelay});
};
this.startup=function(_10){
if(this._cometd._status=="connected"){
return;
}
this.tunnelInit();
};
this.disconnect=function(){
var _11={channel:"/meta/disconnect",clientId:this._cometd.clientId,id:""+this._cometd.messageId++};
_11=this._cometd._extendOut(_11);
dojo.rawXhrPost({url:this._cometd.url||dojo.config["cometdRoot"],handleAs:this._cometd.handleAs,postData:dojo.toJson([_11]),contentType:"text/json;charset=UTF-8"});
};
this.cancelConnect=function(){
if(this._poll){
this._poll.cancel();
this._cometd._polling=false;
this._cometd._publishMeta("connect",false,{cancel:true});
this._cometd._backoff();
this.disconnect();
this.tunnelCollapse();
}
};
};
dojox.cometd.longPollTransport=dojox.cometd.longPollTransportJsonEncoded;
dojox.cometd.connectionTypes.register("long-polling",dojox.cometd.longPollTransport.check,dojox.cometd.longPollTransportJsonEncoded);
dojox.cometd.connectionTypes.register("long-polling-json-encoded",dojox.cometd.longPollTransport.check,dojox.cometd.longPollTransportJsonEncoded);
} | PypiClean |
/Flaskel-3.1.0rc2-py3-none-any.whl/flaskel/flaskel.py | import os.path
import typing as t
import flask
from vbcore.datastruct import ObjectDict
from vbcore.datastruct.lazy import Dumper
from vbcore.http import httpcode
from vbcore.http.headers import HeaderEnum
from vbcore.json import JsonDecoder, JsonEncoder
from vbcore.types import OptStr
from werkzeug.routing import Rule
from werkzeug.utils import safe_join
from flaskel.utils.datastruct import Pagination
cap: "Flaskel" = t.cast("Flaskel", flask.current_app)
request: "Request" = t.cast("Request", flask.request)
class Config(flask.Config, ObjectDict):
def __init__(self, root_path: str, defaults: t.Optional[dict] = None):
flask.Config.__init__(self, root_path, defaults)
ObjectDict.__init__(self)
class DumpUrls(Dumper):
def __init__(self, app: flask.Flask):
super().__init__(app.url_map.iter_rules())
@property
def rules(self) -> t.Iterator[Rule]:
return self.data
def dump(self) -> str:
output = []
for rule in self.rules:
methods = ",".join(sorted(rule.methods))
output.append(f"{rule.endpoint:35s} {methods:40s} {rule}")
return "\n".join(sorted(output))
class Request(flask.Request):
@property
def id(self) -> t.Optional[str]: # pylint: disable=invalid-name
hdr = cap.config.REQUEST_ID_HEADER
if hasattr(flask.g, "request_id"):
return flask.g.request_id
if hdr in flask.request.headers:
return flask.request.headers[hdr]
if not hdr:
return None
flask_header_name = f"HTTP_{hdr.upper().replace('-', '_')}"
return flask.request.environ.get(flask_header_name)
def get_json(self, *args, allow_empty=False, **kwargs) -> ObjectDict:
payload = super().get_json(*args, **kwargs)
if payload is None:
if not allow_empty:
flask.abort(httpcode.BAD_REQUEST, "No JSON in request")
payload = ObjectDict() # pragma: no cover
return ObjectDict.normalize(payload)
class Response(flask.Response):
@classmethod
def no_content(cls, status=httpcode.NO_CONTENT, headers=None) -> "Response":
response = flask.make_response(bytes())
response.headers.update(headers or {})
response.headers.pop(HeaderEnum.CONTENT_TYPE)
response.headers.pop(HeaderEnum.CONTENT_LENGTH)
response.status_code = status
return t.cast("Response", response)
@classmethod
def set_sendfile_headers(cls, response: "Response", file_path: str) -> "Response":
hdr = HeaderEnum
conf = cap.config
response.headers[hdr.X_ACCEL_REDIRECT] = os.path.abspath(file_path)
response.headers[hdr.X_ACCEL_CHARSET] = conf.ACCEL_CHARSET or "utf-8"
response.headers[hdr.X_ACCEL_BUFFERING] = (
"yes" if conf.ACCEL_BUFFERING else "no"
)
if conf.ACCEL_LIMIT_RATE:
response.headers[hdr.X_ACCEL_LIMIT_RATE] = conf.ACCEL_LIMIT_RATE
if conf.SEND_FILE_MAX_AGE_DEFAULT:
response.headers[hdr.X_ACCEL_EXPIRES] = conf.SEND_FILE_MAX_AGE_DEFAULT
return response
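# Illustrative nginx counterpart (a sketch; paths and location names are
# assumptions, not part of this package): the URI set in X-Accel-Redirect is
# resolved by nginx against a location marked `internal`, so the proxy, not
# Flask, streams the file body, e.g.
#   location /protected/ { internal; alias /var/app/files/; }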
@classmethod
def send_file(cls, directory: str, filename: str, **kwargs) -> "Response":
kwargs.setdefault("as_attachment", True)
file_path = safe_join(directory, filename)
try:
response = flask.send_file(file_path, etag=True, conditional=True, **kwargs)
except IOError as exc:
cap.logger.warning(str(exc))
return flask.abort(httpcode.NOT_FOUND)
if cap.config.USE_X_SENDFILE is True and cap.config.ENABLE_ACCEL is True:
# the following headers work with an nginx-compatible proxy
return cls.set_sendfile_headers(response, file_path) # type: ignore
return response # type: ignore
def get_json(self, *args, **kwargs) -> ObjectDict:
return ObjectDict.normalize(super().get_json(*args, **kwargs))
@classmethod
def pagination_headers(cls, total: int, pagination: Pagination) -> t.Dict[str, int]:
return {
HeaderEnum.X_PAGINATION_COUNT: total,
HeaderEnum.X_PAGINATION_PAGE: pagination.page or 1,
HeaderEnum.X_PAGINATION_NUM_PAGES: pagination.pages(total),
HeaderEnum.X_PAGINATION_PAGE_SIZE: pagination.per_page(),
}
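# Illustrative sketch of a paginated endpoint using this helper (the Pagination
# construction and the `query` object below are assumptions, not package API):
#
#   @app.get("/items")
#   def list_items():
#       pagination = Pagination(page=1, page_size=20)
#       total = query.count()
#       headers = Response.pagination_headers(total, pagination)
#       return flask.jsonify(query.all()), httpcode.SUCCESS, headers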
class Flaskel(flask.Flask):
config_class = Config
request_class = Request
response_class = Response
_json_encoder = JsonEncoder
_json_decoder = JsonDecoder
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.version: OptStr = None
self.config: Config | PypiClean |
/KratosOptimizationApplication-9.4-cp310-cp310-win_amd64.whl/KratosMultiphysics/OptimizationApplication/processes/optimization_problem_ascii_output_process.py | from datetime import datetime
from typing import Union, Any
import KratosMultiphysics as Kratos
from KratosMultiphysics.OptimizationApplication.responses.response_function import ResponseFunction
from KratosMultiphysics.OptimizationApplication.controls.control import Control
from KratosMultiphysics.OptimizationApplication.execution_policies.execution_policy import ExecutionPolicy
from KratosMultiphysics.OptimizationApplication.utilities.component_data_view import ComponentDataView
from KratosMultiphysics.OptimizationApplication.utilities.optimization_problem import OptimizationProblem
from KratosMultiphysics.OptimizationApplication.utilities.helper_utilities import GetAllComponentFullNamesWithData
from KratosMultiphysics.OptimizationApplication.utilities.helper_utilities import GetComponentHavingDataByFullName
def Factory(_: Kratos.Model, parameters: Kratos.Parameters, optimization_problem: OptimizationProblem) -> Kratos.OutputProcess:
if not parameters.Has("settings"):
raise RuntimeError(f"OptimizationProblemAsciiOutputProcess instantiation requires a \"settings\" in parameters [ parameters = {parameters}].")
return OptimizationProblemAsciiOutputProcess(parameters["settings"], optimization_problem)
class Header:
def __init__(self, header_name: str, value: Any, format_info: dict):
header_name = header_name.strip()
header_length = len(header_name)
if isinstance(value, bool):
value_length = max(len(format_info[type(value)][0]), len(format_info[type(value)][1]))
value_format_post_fix = "s"
self.__value_converter = lambda x: format_info[type(value)][1] if x else format_info[type(value)][0]
elif isinstance(value, int):
value_length = len(("{:" + str(format_info[type(value)]) + "d}").format(value))
value_format_post_fix = "d"
self.__value_converter = lambda x: int(x)
elif isinstance(value, float):
value_length = len(("{:0." + str(format_info[type(value)]) + "e}").format(value))
value_format_post_fix = f".{format_info[type(value)]}e"
self.__value_converter = lambda x: float(x)
else:
value_length = format_info[str]
value_format_post_fix = "s"
self.__value_converter = lambda x: str(x)
if header_length > value_length:
self.__header_name = header_name
self.__value_format = "{:>" + str(header_length) + value_format_post_fix + "}"
else:
self.__header_name = ("{:>" + str(value_length) + "s}").format(header_name)
self.__value_format = "{:>" + str(value_length) + value_format_post_fix + "}"
def GetHeaderName(self) -> str:
return self.__header_name
def GetValueStr(self, value: Any) -> str:
return self.__value_format.format(self.__value_converter(value))
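# Illustrative example of the fixed-width formatting implemented above:
#   h = Header("mass", 1.0, {int: 7, float: 9, bool: ["no", "yes"], str: 10})
#   h.GetHeaderName() -> "           mass"  (right-aligned to the 15-char value width)
#   h.GetValueStr(2.5) -> "2.500000000e+00"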
class OptimizationProblemAsciiOutputProcess(Kratos.OutputProcess):
def GetDefaultParameters(self):
return Kratos.Parameters(
"""
{
"output_file_name" : "SPECIFY_OUTPUT_FILE_NAME",
"write_kratos_version" : true,
"write_time_stamp" : true,
"write_initial_values" : true,
"list_of_output_components": ["all"],
"format_info": {
"int_length" : 7,
"float_precision": 9,
"bool_values" : ["no", "yes"],
"string_length" : 10
}
}
"""
)
def __init__(self, parameters: Kratos.Parameters, optimization_problem: OptimizationProblem):
Kratos.OutputProcess.__init__(self)
self.optimization_problem = optimization_problem
parameters.RecursivelyValidateAndAssignDefaults(self.GetDefaultParameters())
self.output_file_name = parameters["output_file_name"].GetString()
if not self.output_file_name.endswith(".csv"):
self.output_file_name += ".csv"
self.write_kratos_version = parameters["write_kratos_version"].GetBool()
self.write_time_stamp = parameters["write_time_stamp"].GetBool()
self.write_initial_values = parameters["write_initial_values"].GetBool()
self.format_info = {
int : parameters["format_info"]["int_length"].GetInt(),
float: parameters["format_info"]["float_precision"].GetInt(),
bool : parameters["format_info"]["bool_values"].GetStringArray(),
str : parameters["format_info"]["string_length"].GetInt(),
}
if len(self.format_info[bool]) != 2:
raise RuntimeError("The \"bool_values\" should have only two strings corresponding to False and True values in the mentioned order.")
self.list_of_components: 'list[Union[str, ResponseFunction, Control, ExecutionPolicy]]' = []
list_of_component_names = parameters["list_of_output_components"].GetStringArray()
if len(list_of_component_names) == 1 and list_of_component_names[0] == "all":
list_of_component_names = GetAllComponentFullNamesWithData(optimization_problem)
for component_name in list_of_component_names:
self.list_of_components.append(GetComponentHavingDataByFullName(component_name, optimization_problem))
self.list_of_headers: 'list[tuple[Any, dict[str, Header]]]' = []
self.initialized_headers = False
def IsOutputStep(self) -> bool:
return True
def PrintOutput(self) -> None:
if not self.initialized_headers:
# now get the buffered data headers
self.list_of_headers = self._GetHeaders(lambda x: x.GetBufferedData())
# write the header information
self._WriteHeaders()
self.initialized_headers = True
if self._IsWritingProcess():
# now write step data
with open(self.output_file_name, "a") as file_output:
# write the step
file_output.write("{:>7d}".format(self.optimization_problem.GetStep()))
# write the values
for component, header_info_dict in self.list_of_headers:
componend_data_view = ComponentDataView(component, self.optimization_problem)
buffered_dict = componend_data_view.GetBufferedData()
for k, header in header_info_dict.items():
file_output.write(", " + header.GetValueStr(buffered_dict[k]))
file_output.write("\n")
def ExecuteFinalize(self):
if self._IsWritingProcess():
with open(self.output_file_name, "a") as file_output:
file_output.write("# End of file")
def _IsWritingProcess(self):
if Kratos.IsDistributedRun():
data_communicator: Kratos.DataCommunicator = Kratos.ParallelEnvironment.GetDefaultDataCommunicator()
return data_communicator.Rank() == 0
else:
return True
def _WriteHeaders(self):
if (self._IsWritingProcess()):
kratos_version = "not_given"
if (self.write_kratos_version):
kratos_version = str(Kratos.KratosGlobals.Kernel.Version())
time_stamp = "not_specified"
if (self.write_time_stamp):
time_stamp = str(datetime.now())
msg_header = ""
msg_header = f"{msg_header}# Optimization problem ascii output\n"
msg_header = f"{msg_header}# Kratos version: {kratos_version}\n"
msg_header = f"{msg_header}# Timestamp : {time_stamp}\n"
msg_header = f"{msg_header}# -----------------------------------------------\n"
if self.write_initial_values:
msg_header = f"{msg_header}# --------------- Initial values ----------------\n"
initial_headers = self._GetHeaders(lambda x: x.GetUnBufferedData())
# now write the initial value container data
for component, header_info_dict in initial_headers:
componend_data_view = ComponentDataView(component, self.optimization_problem)
buffered_dict = componend_data_view.GetUnBufferedData()
component_name = componend_data_view.GetComponentName()
msg_header = f"{msg_header}# \t" + component_name + ":\n"
for k, header in header_info_dict.items():
component_name_header = header.GetHeaderName().strip()[len(component_name)+1:]
msg_header = f"{msg_header}# \t\t" + component_name_header + ": " + header.GetValueStr(buffered_dict[k]).strip() + "\n"
msg_header = f"{msg_header}# ------------ End of initial values ------------\n"
msg_header = f"{msg_header}# -----------------------------------------------\n"
msg_header = f"{msg_header}# ------------ Start of step values -------------\n"
msg_header = f"{msg_header}# Headers:\n"
msg_header = f"{msg_header}# STEP"
for _, header_info_dict in self.list_of_headers:
for header in header_info_dict.values():
msg_header = f"{msg_header}, " + header.GetHeaderName()
msg_header = f"{msg_header}\n"
# write the header
with open(self.output_file_name, "w") as file_output:
file_output.write(msg_header)
def _GetHeaders(self, dict_getter_method) -> 'list[tuple[Any, dict[str, Header]]]':
list_of_headers: 'list[tuple[Any, dict[str, Header]]]' = []
for component in self.list_of_components:
componend_data_view = ComponentDataView(component, self.optimization_problem)
values_map = dict_getter_method(componend_data_view).GetMap()
header_info_dict: 'dict[str, Header]' = {}
component_name = componend_data_view.GetComponentName()
for k, v in values_map.items():
if isinstance(v, (bool, int, float, str)):
header_name = component_name + ":" + k[k.rfind("/") + 1:]
if header_name in [header.GetHeaderName().strip() for header in header_info_dict.values()]:
Kratos.Logger.PrintWarning(self.__class__.__name__, "Second value with same header name = \"" + header_name + "\" found.")
header_info_dict[k] = Header(header_name, v, self.format_info)
list_of_headers.append([component, header_info_dict])
return list_of_headers | PypiClean |
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/geonode/js/ol-2.13/lib/OpenLayers/Events.js | * @requires OpenLayers/Util.js
*/
/**
* Namespace: OpenLayers.Event
* Utility functions for event handling.
*/
OpenLayers.Event = {
/**
* Property: observers
* {Object} A hashtable cache of the event observers. Keyed by
* element._eventCacheID
*/
observers: false,
/**
* Constant: KEY_SPACE
* {int}
*/
KEY_SPACE: 32,
/**
* Constant: KEY_BACKSPACE
* {int}
*/
KEY_BACKSPACE: 8,
/**
* Constant: KEY_TAB
* {int}
*/
KEY_TAB: 9,
/**
* Constant: KEY_RETURN
* {int}
*/
KEY_RETURN: 13,
/**
* Constant: KEY_ESC
* {int}
*/
KEY_ESC: 27,
/**
* Constant: KEY_LEFT
* {int}
*/
KEY_LEFT: 37,
/**
* Constant: KEY_UP
* {int}
*/
KEY_UP: 38,
/**
* Constant: KEY_RIGHT
* {int}
*/
KEY_RIGHT: 39,
/**
* Constant: KEY_DOWN
* {int}
*/
KEY_DOWN: 40,
/**
* Constant: KEY_DELETE
* {int}
*/
KEY_DELETE: 46,
/**
* Method: element
* Cross browser event element detection.
*
* Parameters:
* event - {Event}
*
* Returns:
* {DOMElement} The element that caused the event
*/
element: function(event) {
return event.target || event.srcElement;
},
/**
* Method: isSingleTouch
* Determine whether event was caused by a single touch
*
* Parameters:
* event - {Event}
*
* Returns:
* {Boolean}
*/
isSingleTouch: function(event) {
return event.touches && event.touches.length == 1;
},
/**
* Method: isMultiTouch
* Determine whether event was caused by a multi touch
*
* Parameters:
* event - {Event}
*
* Returns:
* {Boolean}
*/
isMultiTouch: function(event) {
return event.touches && event.touches.length > 1;
},
/**
* Method: isLeftClick
* Determine whether event was caused by a left click.
*
* Parameters:
* event - {Event}
*
* Returns:
* {Boolean}
*/
isLeftClick: function(event) {
return (((event.which) && (event.which == 1)) ||
((event.button) && (event.button == 1)));
},
/**
* Method: isRightClick
* Determine whether event was caused by a right mouse click.
*
* Parameters:
* event - {Event}
*
* Returns:
* {Boolean}
*/
isRightClick: function(event) {
return (((event.which) && (event.which == 3)) ||
((event.button) && (event.button == 2)));
},
/**
* Method: stop
* Stops an event from propagating.
*
* Parameters:
* event - {Event}
* allowDefault - {Boolean} If true, we stop the event chain but
* still allow the default browser behaviour (text selection,
* radio-button clicking, etc). Default is false.
*/
stop: function(event, allowDefault) {
if (!allowDefault) {
OpenLayers.Event.preventDefault(event);
}
if (event.stopPropagation) {
event.stopPropagation();
} else {
event.cancelBubble = true;
}
},
/**
* Method: preventDefault
* Cancels the event if it is cancelable, without stopping further
* propagation of the event.
*
* Parameters:
* event - {Event}
*/
preventDefault: function(event) {
if (event.preventDefault) {
event.preventDefault();
} else {
event.returnValue = false;
}
},
/**
* Method: findElement
*
* Parameters:
* event - {Event}
* tagName - {String}
*
* Returns:
* {DOMElement} The first node with the given tagName, starting from the
* node the event was triggered on and traversing the DOM upwards
*/
findElement: function(event, tagName) {
var element = OpenLayers.Event.element(event);
while (element.parentNode && (!element.tagName ||
(element.tagName.toUpperCase() != tagName.toUpperCase()))){
element = element.parentNode;
}
return element;
},
/**
* Method: observe
*
* Parameters:
* elementParam - {DOMElement || String}
* name - {String}
* observer - {function}
* useCapture - {Boolean}
*/
observe: function(elementParam, name, observer, useCapture) {
var element = OpenLayers.Util.getElement(elementParam);
useCapture = useCapture || false;
if (name == 'keypress' &&
(navigator.appVersion.match(/Konqueror|Safari|KHTML/)
|| element.attachEvent)) {
name = 'keydown';
}
//if observers cache has not yet been created, create it
if (!this.observers) {
this.observers = {};
}
//if not already assigned, make a new unique cache ID
if (!element._eventCacheID) {
var idPrefix = "eventCacheID_";
if (element.id) {
idPrefix = element.id + "_" + idPrefix;
}
element._eventCacheID = OpenLayers.Util.createUniqueID(idPrefix);
}
var cacheID = element._eventCacheID;
//if there is not yet a hash entry for this element, add one
if (!this.observers[cacheID]) {
this.observers[cacheID] = [];
}
//add a new observer to this element's list
this.observers[cacheID].push({
'element': element,
'name': name,
'observer': observer,
'useCapture': useCapture
});
//add the actual browser event listener
if (element.addEventListener) {
element.addEventListener(name, observer, useCapture);
} else if (element.attachEvent) {
element.attachEvent('on' + name, observer);
}
},
/**
* Method: stopObservingElement
* Given the id of an element to stop observing, cycle through the
* element's cached observers, calling stopObserving on each one,
* skipping those entries which can no longer be removed.
*
* Parameters:
* elementParam - {DOMElement || String}
*/
stopObservingElement: function(elementParam) {
var element = OpenLayers.Util.getElement(elementParam);
var cacheID = element._eventCacheID;
this._removeElementObservers(OpenLayers.Event.observers[cacheID]);
},
/**
* Method: _removeElementObservers
*
* Parameters:
* elementObservers - {Array(Object)} Array of (element, name,
* observer, usecapture) objects,
* taken directly from hashtable
*/
_removeElementObservers: function(elementObservers) {
if (elementObservers) {
for(var i = elementObservers.length-1; i >= 0; i--) {
var entry = elementObservers[i];
OpenLayers.Event.stopObserving.apply(this, [
entry.element, entry.name, entry.observer, entry.useCapture
]);
}
}
},
/**
* Method: stopObserving
*
* Parameters:
* elementParam - {DOMElement || String}
* name - {String}
* observer - {function}
* useCapture - {Boolean}
*
* Returns:
* {Boolean} Whether or not the event observer was removed
*/
stopObserving: function(elementParam, name, observer, useCapture) {
useCapture = useCapture || false;
var element = OpenLayers.Util.getElement(elementParam);
var cacheID = element._eventCacheID;
if (name == 'keypress') {
if ( navigator.appVersion.match(/Konqueror|Safari|KHTML/) ||
element.detachEvent) {
name = 'keydown';
}
}
// find element's entry in this.observers cache and remove it
var foundEntry = false;
var elementObservers = OpenLayers.Event.observers[cacheID];
if (elementObservers) {
// find the specific event type in the element's list
var i=0;
while(!foundEntry && i < elementObservers.length) {
var cacheEntry = elementObservers[i];
if ((cacheEntry.name == name) &&
(cacheEntry.observer == observer) &&
(cacheEntry.useCapture == useCapture)) {
elementObservers.splice(i, 1);
if (elementObservers.length == 0) {
delete OpenLayers.Event.observers[cacheID];
}
foundEntry = true;
break;
}
i++;
}
}
//actually remove the event listener from browser
if (foundEntry) {
if (element.removeEventListener) {
element.removeEventListener(name, observer, useCapture);
} else if (element && element.detachEvent) {
element.detachEvent('on' + name, observer);
}
}
return foundEntry;
},
/**
* Method: unloadCache
* Cycle through all the element entries in the events cache and call
* stopObservingElement on each.
*/
unloadCache: function() {
// check for OpenLayers.Event before checking for observers, because
// OpenLayers.Event may be undefined in IE if no map instance was
// created
if (OpenLayers.Event && OpenLayers.Event.observers) {
for (var cacheID in OpenLayers.Event.observers) {
var elementObservers = OpenLayers.Event.observers[cacheID];
OpenLayers.Event._removeElementObservers.apply(this,
[elementObservers]);
}
OpenLayers.Event.observers = false;
}
},
CLASS_NAME: "OpenLayers.Event"
};
/* prevent memory leaks in IE */
OpenLayers.Event.observe(window, 'unload', OpenLayers.Event.unloadCache, false);
/**
* Class: OpenLayers.Events
*/
OpenLayers.Events = OpenLayers.Class({
/**
* Constant: BROWSER_EVENTS
* {Array(String)} supported events
*/
BROWSER_EVENTS: [
"mouseover", "mouseout",
"mousedown", "mouseup", "mousemove",
"click", "dblclick", "rightclick", "dblrightclick",
"resize", "focus", "blur",
"touchstart", "touchmove", "touchend",
"keydown"
],
/**
* Property: listeners
* {Object} Hashtable of Array(Function): events listener functions
*/
listeners: null,
/**
* Property: object
* {Object} the code object issuing application events
*/
object: null,
/**
* Property: element
* {DOMElement} the DOM element receiving browser events
*/
element: null,
/**
* Property: eventHandler
* {Function} bound event handler attached to elements
*/
eventHandler: null,
/**
* APIProperty: fallThrough
* {Boolean}
*/
fallThrough: null,
/**
* APIProperty: includeXY
* {Boolean} Should the .xy property automatically be created for browser
* mouse events? In general, this should be false. If it is true, then
* mouse events will automatically generate a '.xy' property on the
* event object that is passed. (Prior to OpenLayers 2.7, this was true
* by default.) Otherwise, you can call the getMousePosition on the
* relevant events handler on the object available via the 'evt.object'
* property of the evt object. So, for most events, you can call:
* function named(evt) {
* this.xy = this.object.events.getMousePosition(evt)
* }
*
* This option typically defaults to false for performance reasons:
* when creating an events object whose primary purpose is to manage
* relatively positioned mouse events within a div, it may make
* sense to set it to true.
*
* This option is also used to control whether the events object caches
* offsets. If this is false, it will not: the reason for this is that
* it is only expected to be called many times if the includeXY property
* is set to true. If you set this to true, you are expected to clear
* the offset cache manually (using this.clearMouseCache()) if:
* the border of the element changes
* the location of the element in the page changes
*/
includeXY: false,
/**
* APIProperty: extensions
* {Object} Event extensions registered with this instance. Keys are
* event types, values are {OpenLayers.Events.*} extension instances or
* {Boolean} for events that an instantiated extension provides in
* addition to the one it was created for.
*
* Extensions create an event in addition to browser events, which usually
* fires when a sequence of browser events is completed. Extensions are
* automatically instantiated when a listener is registered for an event
* provided by an extension.
*
* Extensions are created in the <OpenLayers.Events> namespace using
* <OpenLayers.Class>, and named after the event they provide.
* The constructor receives the target <OpenLayers.Events> instance as
* argument. Extensions that need to capture browser events before they
* propagate can register their listeners events using <register>, with
* {extension: true} as 4th argument.
*
* If an extension creates more than one event, an alias for each event
* type should be created and reference the same class. The constructor
* should set a reference in the target's extensions registry to itself.
*
* Below is a minimal extension that provides the "foostart" and "fooend"
* event types, which replace the native "click" event type if clicked on
* an element with the css class "foo":
*
* (code)
* OpenLayers.Events.foostart = OpenLayers.Class({
* initialize: function(target) {
* this.target = target;
* this.target.register("click", this, this.doStuff, {extension: true});
* // only required if extension provides more than one event type
* this.target.extensions["foostart"] = true;
* this.target.extensions["fooend"] = true;
* },
* destroy: function() {
* var target = this.target;
* target.unregister("click", this, this.doStuff);
* delete this.target;
* // only required if extension provides more than one event type
* delete target.extensions["foostart"];
* delete target.extensions["fooend"];
* },
* doStuff: function(evt) {
* var propagate = true;
* if (OpenLayers.Event.element(evt).className === "foo") {
* propagate = false;
* var target = this.target;
* target.triggerEvent("foostart");
* window.setTimeout(function() {
* target.triggerEvent("fooend");
* }, 1000);
* }
* return propagate;
* }
* });
* // only required if extension provides more than one event type
* OpenLayers.Events.fooend = OpenLayers.Events.foostart;
* (end)
*
*/
extensions: null,
/**
* Property: extensionCount
* {Object} Keys are event types (like in <listeners>), values are the
* number of extension listeners for each event type.
*/
extensionCount: null,
/**
* Method: clearMouseListener
* A version of <clearMouseCache> that is bound to this instance so that
* it can be used with <OpenLayers.Event.observe> and
* <OpenLayers.Event.stopObserving>.
*/
clearMouseListener: null,
/**
* Constructor: OpenLayers.Events
* Construct an OpenLayers.Events object.
*
* Parameters:
* object - {Object} The js object to which this Events object is being added
* element - {DOMElement} A dom element to respond to browser events
* eventTypes - {Array(String)} Deprecated. Array of custom application
* events. A listener may be registered for any named event, regardless
* of the values provided here.
* fallThrough - {Boolean} Allow events to fall through after these have
* been handled?
* options - {Object} Options for the events object.
*/
initialize: function (object, element, eventTypes, fallThrough, options) {
OpenLayers.Util.extend(this, options);
this.object = object;
this.fallThrough = fallThrough;
this.listeners = {};
this.extensions = {};
this.extensionCount = {};
this._msTouches = [];
// if a dom element is specified, add a listeners list
// for browser events on the element and register them
if (element != null) {
this.attachToElement(element);
}
},
/**
* APIMethod: destroy
*/
destroy: function () {
for (var e in this.extensions) {
if (typeof this.extensions[e] !== "boolean") {
this.extensions[e].destroy();
}
}
this.extensions = null;
if (this.element) {
OpenLayers.Event.stopObservingElement(this.element);
if(this.element.hasScrollEvent) {
OpenLayers.Event.stopObserving(
window, "scroll", this.clearMouseListener
);
}
}
this.element = null;
this.listeners = null;
this.object = null;
this.fallThrough = null;
this.eventHandler = null;
},
/**
* APIMethod: addEventType
* Deprecated. Any event can be triggered without adding it first.
*
* Parameters:
* eventName - {String}
*/
addEventType: function(eventName) {
},
/**
* Method: attachToElement
*
* Parameters:
* element - {HTMLDOMElement} a DOM element to attach browser events to
*/
attachToElement: function (element) {
if (this.element) {
OpenLayers.Event.stopObservingElement(this.element);
} else {
// keep a bound copy of handleBrowserEvent() so that we can
// pass the same function to both Event.observe() and .stopObserving()
this.eventHandler = OpenLayers.Function.bindAsEventListener(
this.handleBrowserEvent, this
);
// to be used with observe and stopObserving
this.clearMouseListener = OpenLayers.Function.bind(
this.clearMouseCache, this
);
}
this.element = element;
var msTouch = !!window.navigator.msMaxTouchPoints;
var type;
for (var i = 0, len = this.BROWSER_EVENTS.length; i < len; i++) {
type = this.BROWSER_EVENTS[i];
// register the event cross-browser
OpenLayers.Event.observe(element, type, this.eventHandler
);
if (msTouch && type.indexOf('touch') === 0) {
this.addMsTouchListener(element, type, this.eventHandler);
}
}
// disable dragstart in IE so that mousedown/move/up works normally
OpenLayers.Event.observe(element, "dragstart", OpenLayers.Event.stop);
},
/**
* APIMethod: on
* Convenience method for registering listeners with a common scope.
* Internally, this method calls <register> as shown in the examples
* below.
*
* Example use:
* (code)
* // register a single listener for the "loadstart" event
* events.on({"loadstart": loadStartListener});
*
* // this is equivalent to the following
* events.register("loadstart", undefined, loadStartListener);
*
* // register multiple listeners to be called with the same `this` object
* events.on({
* "loadstart": loadStartListener,
* "loadend": loadEndListener,
* scope: object
* });
*
* // this is equivalent to the following
* events.register("loadstart", object, loadStartListener);
* events.register("loadend", object, loadEndListener);
* (end)
*
* Parameters:
* object - {Object}
*/
on: function(object) {
for(var type in object) {
if(type != "scope" && object.hasOwnProperty(type)) {
this.register(type, object.scope, object[type]);
}
}
},
/**
* APIMethod: register
* Register an event on the events object.
*
* When the event is triggered, the 'func' function will be called, in the
* context of 'obj'. Imagine we were to register an event, specifying an
* OpenLayers.Bounds Object as 'obj'. When the event is triggered, the
* context in the callback function will be our Bounds object. This means
* that within our callback function, we can access the properties and
* methods of the Bounds object through the "this" variable. So our
* callback could execute something like:
* : leftStr = "Left: " + this.left;
*
* or
*
* : centerStr = "Center: " + this.getCenterLonLat();
*
* Parameters:
* type - {String} Name of the event to register
* obj - {Object} The object to bind the context to for the callback.
* If no object is specified, default is the Events's 'object' property.
* func - {Function} The callback function. If no callback is
* specified, this function does nothing.
* priority - {Boolean|Object} If true, adds the new listener to the
* *front* of the events queue instead of to the end.
*
* Valid options for priority:
* extension - {Boolean} If true, then the event will be registered as
* extension event. Extension events are handled before all other
* events.
*/
register: function (type, obj, func, priority) {
if (type in OpenLayers.Events && !this.extensions[type]) {
this.extensions[type] = new OpenLayers.Events[type](this);
}
if (func != null) {
if (obj == null) {
obj = this.object;
}
var listeners = this.listeners[type];
if (!listeners) {
listeners = [];
this.listeners[type] = listeners;
this.extensionCount[type] = 0;
}
var listener = {obj: obj, func: func};
if (priority) {
listeners.splice(this.extensionCount[type], 0, listener);
if (typeof priority === "object" && priority.extension) {
this.extensionCount[type]++;
}
} else {
listeners.push(listener);
}
}
},
/**
* APIMethod: registerPriority
* Same as register() but adds the new listener to the *front* of the
* events queue instead of to the end.
*
* TODO: get rid of this in 3.0 - Decide whether listeners should be
* called in the order they were registered or in reverse order.
*
*
* Parameters:
* type - {String} Name of the event to register
* obj - {Object} The object to bind the context to for the callback.
* If no object is specified, default is the Events's
* 'object' property.
* func - {Function} The callback function. If no callback is
* specified, this function does nothing.
*/
registerPriority: function (type, obj, func) {
this.register(type, obj, func, true);
},
/**
* APIMethod: un
* Convenience method for unregistering listeners with a common scope.
* Internally, this method calls <unregister> as shown in the examples
* below.
*
* Example use:
* (code)
* // unregister a single listener for the "loadstart" event
* events.un({"loadstart": loadStartListener});
*
* // this is equivalent to the following
* events.unregister("loadstart", undefined, loadStartListener);
*
* // unregister multiple listeners with the same `this` object
* events.un({
* "loadstart": loadStartListener,
* "loadend": loadEndListener,
* scope: object
* });
*
* // this is equivalent to the following
* events.unregister("loadstart", object, loadStartListener);
* events.unregister("loadend", object, loadEndListener);
* (end)
*/
un: function(object) {
for(var type in object) {
if(type != "scope" && object.hasOwnProperty(type)) {
this.unregister(type, object.scope, object[type]);
}
}
},
/**
* APIMethod: unregister
*
* Parameters:
* type - {String}
* obj - {Object} If none specified, defaults to this.object
* func - {Function}
*/
unregister: function (type, obj, func) {
if (obj == null) {
obj = this.object;
}
var listeners = this.listeners[type];
if (listeners != null) {
for (var i=0, len=listeners.length; i<len; i++) {
if (listeners[i].obj == obj && listeners[i].func == func) {
listeners.splice(i, 1);
break;
}
}
}
},
/**
* Method: remove
* Remove all listeners for a given event type. If type is not registered,
* does nothing.
*
* Parameters:
* type - {String}
*/
remove: function(type) {
if (this.listeners[type] != null) {
this.listeners[type] = [];
}
},
/**
* APIMethod: triggerEvent
* Trigger a specified registered event.
*
* Parameters:
* type - {String}
* evt - {Event || Object} will be passed to the listeners.
*
* Returns:
* {Boolean} The last listener return. If a listener returns false, the
* chain of listeners will stop getting called.
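*
* Example use:
* (code)
* // notify listeners registered for "loadend" on this events object;
* // the event properties passed here are illustrative
* events.triggerEvent("loadend", {response: someResponse});
* (end)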
*/
triggerEvent: function (type, evt) {
var listeners = this.listeners[type];
// fast path
if(!listeners || listeners.length == 0) {
return undefined;
}
// prep evt object with object & div references
if (evt == null) {
evt = {};
}
evt.object = this.object;
evt.element = this.element;
if(!evt.type) {
evt.type = type;
}
// execute all callbacks registered for specified type
// get a clone of the listeners array to
// allow for splicing during callbacks
listeners = listeners.slice();
var continueChain;
for (var i=0, len=listeners.length; i<len; i++) {
var callback = listeners[i];
// bind the context to callback.obj
continueChain = callback.func.apply(callback.obj, [evt]);
if ((continueChain != undefined) && (continueChain == false)) {
// if callback returns false, execute no more callbacks.
break;
}
}
// don't fall through to other DOM elements
if (!this.fallThrough) {
OpenLayers.Event.stop(evt, true);
}
return continueChain;
},
/**
* Method: handleBrowserEvent
* Basically just a wrapper to the triggerEvent() function, but takes
* care to set a property 'xy' on the event with the current mouse
* position.
*
* Parameters:
* evt - {Event}
*/
handleBrowserEvent: function (evt) {
var type = evt.type, listeners = this.listeners[type];
if(!listeners || listeners.length == 0) {
            // no one's listening, bail out
return;
}
// add clientX & clientY to all events - corresponds to average x, y
var touches = evt.touches;
if (touches && touches[0]) {
var x = 0;
var y = 0;
var num = touches.length;
var touch;
for (var i=0; i<num; ++i) {
touch = this.getTouchClientXY(touches[i]);
x += touch.clientX;
y += touch.clientY;
}
evt.clientX = x / num;
evt.clientY = y / num;
}
if (this.includeXY) {
evt.xy = this.getMousePosition(evt);
}
this.triggerEvent(type, evt);
},
/**
* Method: getTouchClientXY
     * WebKit has a few bugs with clientX/clientY. This method detects them
     * and calculates the correct values.
*
* Parameters:
* evt - {Touch} a Touch object from a TouchEvent
*
* Returns:
* {Object} An object with only clientX and clientY properties with the
* calculated values.
*/
getTouchClientXY: function (evt) {
        // olMockWin is used to override window, for testing
var win = window.olMockWin || window,
winPageX = win.pageXOffset,
winPageY = win.pageYOffset,
x = evt.clientX,
y = evt.clientY;
if (evt.pageY === 0 && Math.floor(y) > Math.floor(evt.pageY) ||
evt.pageX === 0 && Math.floor(x) > Math.floor(evt.pageX)) {
            // iOS4 includes the scroll offset in clientX/Y
x = x - winPageX;
y = y - winPageY;
} else if (y < (evt.pageY - winPageY) || x < (evt.pageX - winPageX) ) {
// Some Android browsers have totally bogus values for clientX/Y
// when scrolling/zooming a page
x = evt.pageX - winPageX;
y = evt.pageY - winPageY;
}
evt.olClientX = x;
evt.olClientY = y;
return {
clientX: x,
clientY: y
};
},
/**
* APIMethod: clearMouseCache
* Clear cached data about the mouse position. This should be called any
* time the element that events are registered on changes position
* within the page.
*/
clearMouseCache: function() {
this.element.scrolls = null;
this.element.lefttop = null;
this.element.offsets = null;
},
/**
* Method: getMousePosition
*
* Parameters:
* evt - {Event}
*
* Returns:
* {<OpenLayers.Pixel>} The current xy coordinate of the mouse, adjusted
* for offsets
*/
getMousePosition: function (evt) {
if (!this.includeXY) {
this.clearMouseCache();
} else if (!this.element.hasScrollEvent) {
OpenLayers.Event.observe(window, "scroll", this.clearMouseListener);
this.element.hasScrollEvent = true;
}
if (!this.element.scrolls) {
var viewportElement = OpenLayers.Util.getViewportElement();
this.element.scrolls = [
window.pageXOffset || viewportElement.scrollLeft,
window.pageYOffset || viewportElement.scrollTop
];
}
if (!this.element.lefttop) {
this.element.lefttop = [
(document.documentElement.clientLeft || 0),
(document.documentElement.clientTop || 0)
];
}
if (!this.element.offsets) {
this.element.offsets = OpenLayers.Util.pagePosition(this.element);
}
return new OpenLayers.Pixel(
(evt.clientX + this.element.scrolls[0]) - this.element.offsets[0]
- this.element.lefttop[0],
(evt.clientY + this.element.scrolls[1]) - this.element.offsets[1]
- this.element.lefttop[1]
);
},
/**
* Method: addMsTouchListener
*
* Parameters:
* element - {DOMElement} The DOM element to register the listener on
* type - {String} The event type
* handler - {Function} the handler
*/
addMsTouchListener: function (element, type, handler) {
var eventHandler = this.eventHandler;
var touches = this._msTouches;
function msHandler(evt) {
handler(OpenLayers.Util.applyDefaults({
stopPropagation: function() {
for (var i=touches.length-1; i>=0; --i) {
touches[i].stopPropagation();
}
},
preventDefault: function() {
for (var i=touches.length-1; i>=0; --i) {
touches[i].preventDefault();
}
},
type: type
}, evt));
}
switch (type) {
case 'touchstart':
return this.addMsTouchListenerStart(element, type, msHandler);
case 'touchend':
return this.addMsTouchListenerEnd(element, type, msHandler);
case 'touchmove':
return this.addMsTouchListenerMove(element, type, msHandler);
default:
throw 'Unknown touch event type';
}
},
/**
* Method: addMsTouchListenerStart
*
* Parameters:
* element - {DOMElement} The DOM element to register the listener on
* type - {String} The event type
* handler - {Function} the handler
*/
addMsTouchListenerStart: function(element, type, handler) {
var touches = this._msTouches;
var cb = function(e) {
var alreadyInArray = false;
for (var i=0, ii=touches.length; i<ii; ++i) {
if (touches[i].pointerId == e.pointerId) {
alreadyInArray = true;
break;
}
}
if (!alreadyInArray) {
touches.push(e);
}
e.touches = touches.slice();
handler(e);
};
OpenLayers.Event.observe(element, 'MSPointerDown', cb);
// Need to also listen for end events to keep the _msTouches list
// accurate
var internalCb = function(e) {
for (var i=0, ii=touches.length; i<ii; ++i) {
if (touches[i].pointerId == e.pointerId) {
touches.splice(i, 1);
break;
}
}
};
OpenLayers.Event.observe(element, 'MSPointerUp', internalCb);
},
/**
* Method: addMsTouchListenerMove
*
* Parameters:
* element - {DOMElement} The DOM element to register the listener on
* type - {String} The event type
* handler - {Function} the handler
*/
addMsTouchListenerMove: function (element, type, handler) {
var touches = this._msTouches;
var cb = function(e) {
//Don't fire touch moves when mouse isn't down
if (e.pointerType == e.MSPOINTER_TYPE_MOUSE && e.buttons == 0) {
return;
}
if (touches.length == 1 && touches[0].pageX == e.pageX &&
touches[0].pageY == e.pageY) {
// don't trigger event when pointer has not moved
return;
}
for (var i=0, ii=touches.length; i<ii; ++i) {
if (touches[i].pointerId == e.pointerId) {
touches[i] = e;
break;
}
}
e.touches = touches.slice();
handler(e);
};
OpenLayers.Event.observe(element, 'MSPointerMove', cb);
},
/**
* Method: addMsTouchListenerEnd
*
* Parameters:
* element - {DOMElement} The DOM element to register the listener on
* type - {String} The event type
* handler - {Function} the handler
*/
addMsTouchListenerEnd: function (element, type, handler) {
var touches = this._msTouches;
var cb = function(e) {
for (var i=0, ii=touches.length; i<ii; ++i) {
if (touches[i].pointerId == e.pointerId) {
touches.splice(i, 1);
break;
}
}
e.touches = touches.slice();
handler(e);
};
OpenLayers.Event.observe(element, 'MSPointerUp', cb);
},
CLASS_NAME: "OpenLayers.Events"
}); | PypiClean |
/IpfsML-1.0.2.tar.gz/IpfsML-1.0.2/nft_storage/model/forbidden_error_response.py | import re # noqa: F401
import sys # noqa: F401
from nft_storage.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from nft_storage.exceptions import ApiAttributeError
def lazy_import():
from nft_storage.model.forbidden_error_response_error import ForbiddenErrorResponseError
globals()['ForbiddenErrorResponseError'] = ForbiddenErrorResponseError
class ForbiddenErrorResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
             and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
             and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'ok': (bool,), # noqa: E501
'error': (ForbiddenErrorResponseError,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'ok': 'ok', # noqa: E501
'error': 'error', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""ForbiddenErrorResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
ok (bool): [optional] if omitted the server will use the default value of False # noqa: E501
error (ForbiddenErrorResponseError): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ForbiddenErrorResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
ok (bool): [optional] if omitted the server will use the default value of False # noqa: E501
error (ForbiddenErrorResponseError): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/MqttSqliteLogger-1.0.tar.gz/MqttSqliteLogger-1.0/mqttsqlite/core/logs_controller.py | import json
from mqttsqlite.settings.private_settings import MANAGEMENT_PASSWORD, QUERY_PASSWORD
from .utils import Payload, Utils
from mqttsqlite.orm.models import Log
from datetime import datetime, timedelta
class LogController(object):
def add_entry(self, msg):
payload = Payload()
if hasattr(msg, 'topic') and hasattr(msg, 'payload'):
log_register = Log.create(timestamp=datetime.now(), topic=msg.topic, value=str(msg.payload))
payload.topic = 'topic'
payload.result = 'OK'
else:
payload.result = 'KO'
return payload.get_json()
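    # Illustrative use (hypothetical): called from an MQTT on_message callback,
    # where `msg` exposes `topic` and `payload` attributes:
    #     LogController().add_entry(msg)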
def __get_logs_newer_than(self, topic, date_initial):
query = Log.select().where((Log.timestamp.between(datetime.now() - timedelta(seconds=date_initial), datetime.now())) & (Log.topic == topic))
query_logs = []
for log in query:
query_logs.append({'timestamp': log.timestamp.strftime("%Y-%m-%d %H:%M:%S"), 'value': log.value})
return query_logs
def __delete_entries_from_topic_older_than(self, topic, date):
query = Log.delete().where((Log.timestamp <= datetime.now() - timedelta(seconds=date)) & (Log.topic == topic))
result = query.execute()
if result:
return result
else:
return '0'
def __delete_last_entry_from_topic(self, topic):
try:
last_entry = Log.select().where(Log.topic == topic).order_by(Log.id.desc()).get()
result = last_entry.delete_instance()
if result == 1:
return '1'
else:
return '0'
except Log.DoesNotExist:
return '1'
except:
return '0'
def __get_last_entry_from_topic(self, topic):
try:
result = Log.select().where(Log.topic == topic).order_by(Log.timestamp.desc()).get()
return {'timestamp': result.timestamp.strftime("%Y-%m-%d %H:%M:%S"), 'value': result.value}
except:
return {}
def __time_operations_with_topic_entries(self, operation, msg):
if operation == 'delete':
single = self.__delete_last_entry_from_topic
multiple = self.__delete_entries_from_topic_older_than
else:
single = self.__get_last_entry_from_topic
multiple = self.__get_logs_newer_than
topic = msg.topic.split('/')
payload = Payload()
received_data = json.loads(msg.payload)
if topic[-1] == 'last':
payload = Utils().validate_data(received_data, QUERY_PASSWORD, ['password', 'client'])
if payload.result == 'OK':
payload.values = [single(received_data['topic'])]
elif topic[-1] == 'minutes' or topic[-1] == 'hours' or topic[-1] == 'days':
payload = Utils().validate_data(received_data, QUERY_PASSWORD, ['password', 'client'], options=True)
if payload.result == 'OK':
try:
received_options = int(received_data['options'])
except:
payload.result = 'KO'
payload.error = 'Invalid Options Value'
return payload.get_json()
if topic[-1] == 'minutes':
payload.values = multiple(received_data['topic'], 60 * received_options)
elif topic[-1] == 'hours':
payload.values = multiple(received_data['topic'], 3600 * received_options)
elif topic[-1] == 'days':
payload.values = multiple(received_data['topic'], 86400 * received_options)
else:
payload.result = 'KO'
payload.error = 'Invalid unit time'
return payload.get_json()
def get_topic_entries(self, msg):
return self.__time_operations_with_topic_entries('get', msg)
def delete_topic_entries(self, msg):
return self.__time_operations_with_topic_entries('delete', msg) | PypiClean |
/ApiLogicServer-9.2.18-py3-none-any.whl/api_logic_server_cli/sqlacodegen_wrapper/sqlacodegen/sqlacodegen/main.py | from __future__ import unicode_literals, division, print_function, absolute_import
import argparse
import io
import sys
import pkg_resources
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData
from sqlacodegen_wrapper.sqlacodegen_wrapper import CodeGenerator
def main(calling_args=None):
if calling_args:
args = calling_args
else:
parser = argparse.ArgumentParser(description="Generates SQLAlchemy model code from an existing database.")
parser.add_argument("url", nargs="?", help="SQLAlchemy url to the database")
parser.add_argument("--version", action="store_true", help="print the version number and exit")
parser.add_argument("--schema", help="load tables from an alternate schema")
parser.add_argument("--tables", help="tables to process (comma-separated, default: all)")
parser.add_argument("--noviews", action="store_true", help="ignore views")
parser.add_argument("--noindexes", action="store_true", help="ignore indexes")
parser.add_argument("--noconstraints", action="store_true", help="ignore constraints")
parser.add_argument("--nojoined", action="store_true", help="don't autodetect joined table inheritance")
parser.add_argument("--noinflect", action="store_true", help="don't try to convert tables names to singular form")
parser.add_argument("--noclasses", action="store_true", help="don't generate classes, only tables")
parser.add_argument("--outfile", help="file to write output to (default: stdout)")
args = parser.parse_args()
if args.version:
version = pkg_resources.get_distribution('sqlacodegen').parsed_version
print(version.public)
return
if not args.url:
print('You must supply a url\n', file=sys.stderr)
parser.print_help()
return
# Use reflection to fill in the metadata
engine = create_engine(args.url)
try:
# dirty hack for sqlite TODO review ApiLogicServer
engine.execute("""PRAGMA journal_mode = OFF""")
except:
pass
metadata = MetaData(engine)
tables = args.tables.split(',') if args.tables else None
metadata.reflect(engine, args.schema, not args.noviews, tables)
# Write the generated model code to the specified file or standard output
outfile = io.open(args.outfile, 'w', encoding='utf-8') if args.outfile else sys.stdout
generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined,
args.noinflect, args.noclasses, nocomments=args.nocomments)
generator.render(outfile)
class DotDict(dict):
""" APiLogicServer dot.notation access to dictionary attributes """
# thanks: https://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary/28463329
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
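# Illustrative use of DotDict (values are hypothetical):
#     d = DotDict({'url': 'sqlite:///sample.sqlite'})
#     d.url == d['url']       # attribute access mirrors key access
#     d.missing is None       # unknown attributes return None instead of raising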
def sqlacodegen(db_url: str, models_file: str):
""" ApiLogicServer entry for in-process invocation """
calling_args = DotDict({})
calling_args.url = db_url
calling_args.outfile = models_file
calling_args.version = False
main(calling_args)
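# Illustrative in-process call (paths are hypothetical):
#     sqlacodegen('sqlite:///sample.sqlite', 'models.py')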
# print("imported")
# main() | PypiClean |
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/glances/plugins/glances_cpu.py | from glances.timer import getTimeSinceLastUpdate
from glances.compat import iterkeys
from glances.cpu_percent import cpu_percent
from glances.globals import LINUX, WINDOWS, SUNOS
from glances.plugins.glances_core import Plugin as CorePlugin
from glances.plugins.glances_plugin import GlancesPlugin
import psutil
# Fields description
# description: human readable description
# short_name: short name to use in the UI
# unit: unit type
# rate: is it a rate? If yes, divide by time_since_update when displayed
# min_symbol: Auto unit should be used if the value is greater than 1 'X' (K, M, G)...
fields_description = {
'total': {'description': 'Sum of all CPU percentages (except idle).', 'unit': 'percent'},
'system': {
'description': 'percent time spent in kernel space. System CPU time is the \
time spent running code in the Operating System kernel.',
'unit': 'percent',
},
'user': {
'description': 'CPU percent time spent in user space. \
User CPU time is the time spent on the processor running your program\'s code (or code in libraries).',
'unit': 'percent',
},
'iowait': {
'description': '*(Linux)*: percent time spent by the CPU waiting for I/O \
operations to complete.',
'unit': 'percent',
},
'dpc': {
'description': '*(Windows)*: time spent servicing deferred procedure calls (DPCs)',
'unit': 'percent',
},
'idle': {
        'description': 'percent of time the CPU is not used by any program. Every program or task \
that runs on a computer system occupies a certain amount of processing \
time on the CPU. If the CPU has completed all tasks it is idle.',
'unit': 'percent',
},
'irq': {
'description': '*(Linux and BSD)*: percent time spent servicing/handling \
hardware/software interrupts. Time servicing interrupts (hardware + \
software).',
'unit': 'percent',
},
'nice': {
'description': '*(Unix)*: percent time occupied by user level processes with \
a positive nice value. The time the CPU has spent running users\' \
processes that have been *niced*.',
'unit': 'percent',
},
'steal': {
'description': '*(Linux)*: percentage of time a virtual CPU waits for a real \
CPU while the hypervisor is servicing another virtual processor.',
'unit': 'percent',
},
'ctx_switches': {
'description': 'number of context switches (voluntary + involuntary) per \
second. A context switch is a procedure that a computer\'s CPU (central \
processing unit) follows to change from one task (or process) to \
another while ensuring that the tasks do not conflict.',
'unit': 'number',
'rate': True,
'min_symbol': 'K',
'short_name': 'ctx_sw',
},
'interrupts': {
'description': 'number of interrupts per second.',
'unit': 'number',
'rate': True,
'min_symbol': 'K',
'short_name': 'inter',
},
'soft_interrupts': {
'description': 'number of software interrupts per second. Always set to \
0 on Windows and SunOS.',
'unit': 'number',
'rate': True,
'min_symbol': 'K',
'short_name': 'sw_int',
},
'syscalls': {
'description': 'number of system calls per second. Always 0 on Linux OS.',
'unit': 'number',
'rate': True,
'min_symbol': 'K',
'short_name': 'sys_call',
},
'cpucore': {'description': 'Total number of CPU core.', 'unit': 'number'},
'time_since_update': {'description': 'Number of seconds since last update.', 'unit': 'seconds'},
}
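# For fields flagged with 'rate': True, update_local() below stores the delta
# since the previous update, so a per-second value can be derived, illustratively:
#     ctx_switches_per_sec = stats['ctx_switches'] / stats['time_since_update']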
# SNMP OID
# percentage of user CPU time: .1.3.6.1.4.1.2021.11.9.0
# percentage of system CPU time: .1.3.6.1.4.1.2021.11.10.0
# percentage of idle CPU time: .1.3.6.1.4.1.2021.11.11.0
snmp_oid = {
'default': {
'user': '1.3.6.1.4.1.2021.11.9.0',
'system': '1.3.6.1.4.1.2021.11.10.0',
'idle': '1.3.6.1.4.1.2021.11.11.0',
},
'windows': {'percent': '1.3.6.1.2.1.25.3.3.1.2'},
'esxi': {'percent': '1.3.6.1.2.1.25.3.3.1.2'},
'netapp': {
'system': '1.3.6.1.4.1.789.1.2.1.3.0',
'idle': '1.3.6.1.4.1.789.1.2.1.5.0',
'cpucore': '1.3.6.1.4.1.789.1.2.1.6.0',
},
}
# Define the history items list
# - 'name' define the stat identifier
# - 'y_unit' define the Y label
items_history_list = [
{'name': 'user', 'description': 'User CPU usage', 'y_unit': '%'},
{'name': 'system', 'description': 'System CPU usage', 'y_unit': '%'},
]
class Plugin(GlancesPlugin):
"""Glances CPU plugin.
'stats' is a dictionary that contains the system-wide CPU utilization as a
percentage.
"""
def __init__(self, args=None, config=None):
"""Init the CPU plugin."""
super(Plugin, self).__init__(
args=args, config=config, items_history_list=items_history_list, fields_description=fields_description
)
# We want to display the stat in the curse interface
self.display_curse = True
# Call CorePlugin in order to display the core number
try:
self.nb_log_core = CorePlugin(args=self.args).update()["log"]
except Exception:
self.nb_log_core = 1
@GlancesPlugin._check_decorator
@GlancesPlugin._log_result_decorator
def update(self):
"""Update CPU stats using the input method."""
# Grab stats into self.stats
if self.input_method == 'local':
stats = self.update_local()
elif self.input_method == 'snmp':
stats = self.update_snmp()
else:
stats = self.get_init_value()
# Update the stats
self.stats = stats
return self.stats
def update_local(self):
"""Update CPU stats using psutil."""
# Grab CPU stats using psutil's cpu_percent and cpu_times_percent
# Get all possible values for CPU stats: user, system, idle,
# nice (UNIX), iowait (Linux), irq (Linux, FreeBSD), steal (Linux 2.6.11+)
# The following stats are returned by the API but not displayed in the UI:
# softirq (Linux), guest (Linux 2.6.24+), guest_nice (Linux 3.2.0+)
# Init new stats
stats = self.get_init_value()
stats['total'] = cpu_percent.get()
# Standards stats
# - user: time spent by normal processes executing in user mode; on Linux this also includes guest time
# - system: time spent by processes executing in kernel mode
# - idle: time spent doing nothing
# - nice (UNIX): time spent by niced (prioritized) processes executing in user mode
# on Linux this also includes guest_nice time
# - iowait (Linux): time spent waiting for I/O to complete.
# This is not accounted in idle time counter.
# - irq (Linux, BSD): time spent for servicing hardware interrupts
# - softirq (Linux): time spent for servicing software interrupts
# - steal (Linux 2.6.11+): time spent by other operating systems running in a virtualized environment
# - guest (Linux 2.6.24+): time spent running a virtual CPU for guest operating systems under
# the control of the Linux kernel
# - guest_nice (Linux 3.2.0+): time spent running a niced guest (virtual CPU for guest operating systems
# under the control of the Linux kernel)
# - interrupt (Windows): time spent for servicing hardware interrupts ( similar to “irq” on UNIX)
# - dpc (Windows): time spent servicing deferred procedure calls (DPCs)
cpu_times_percent = psutil.cpu_times_percent(interval=0.0)
for stat in cpu_times_percent._fields:
stats[stat] = getattr(cpu_times_percent, stat)
# Additional CPU stats (number of events not as a %; psutil>=4.1.0)
# - ctx_switches: number of context switches (voluntary + involuntary) since boot.
# - interrupts: number of interrupts since boot.
# - soft_interrupts: number of software interrupts since boot. Always set to 0 on Windows and SunOS.
# - syscalls: number of system calls since boot. Always set to 0 on Linux.
cpu_stats = psutil.cpu_stats()
        # By storing time data we enable per-second rate calculations in the
        # XML/RPC API, which would otherwise be overly difficult work
        # for users of the API
stats['time_since_update'] = getTimeSinceLastUpdate('cpu')
# Core number is needed to compute the CTX switch limit
stats['cpucore'] = self.nb_log_core
# Previous CPU stats are stored in the cpu_stats_old variable
if not hasattr(self, 'cpu_stats_old'):
# Init the stats (needed to have the key name for export)
for stat in cpu_stats._fields:
# @TODO: better to set it to None but should refactor views and UI...
stats[stat] = 0
else:
# Others calls...
for stat in cpu_stats._fields:
if getattr(cpu_stats, stat) is not None:
stats[stat] = getattr(cpu_stats, stat) - getattr(self.cpu_stats_old, stat)
# Save stats to compute next step
self.cpu_stats_old = cpu_stats
return stats
def update_snmp(self):
"""Update CPU stats using SNMP."""
# Init new stats
stats = self.get_init_value()
# Update stats using SNMP
if self.short_system_name in ('windows', 'esxi'):
# Windows or VMWare ESXi
            # You can find the CPU utilization of a Windows system by querying the OID
            # It also gives the number of cores (number of elements in the table)
try:
cpu_stats = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name], bulk=True)
except KeyError:
self.reset()
# Iter through CPU and compute the idle CPU stats
stats['nb_log_core'] = 0
stats['idle'] = 0
for c in cpu_stats:
if c.startswith('percent'):
stats['idle'] += float(cpu_stats['percent.3'])
stats['nb_log_core'] += 1
if stats['nb_log_core'] > 0:
stats['idle'] = stats['idle'] / stats['nb_log_core']
stats['idle'] = 100 - stats['idle']
stats['total'] = 100 - stats['idle']
else:
# Default behavior
try:
stats = self.get_stats_snmp(snmp_oid=snmp_oid[self.short_system_name])
except KeyError:
stats = self.get_stats_snmp(snmp_oid=snmp_oid['default'])
if stats['idle'] == '':
self.reset()
return self.stats
# Convert SNMP stats to float
for key in iterkeys(stats):
stats[key] = float(stats[key])
stats['total'] = 100 - stats['idle']
return stats
def update_views(self):
"""Update stats views."""
# Call the father's method
super(Plugin, self).update_views()
# Add specifics information
# Alert and log
for key in ['user', 'system', 'iowait', 'dpc', 'total']:
if key in self.stats:
self.views[key]['decoration'] = self.get_alert_log(self.stats[key], header=key)
# Alert only
for key in ['steal']:
if key in self.stats:
self.views[key]['decoration'] = self.get_alert(self.stats[key], header=key)
# Alert only but depend on Core number
for key in ['ctx_switches']:
if key in self.stats:
self.views[key]['decoration'] = self.get_alert(
self.stats[key], maximum=100 * self.stats['cpucore'], header=key
)
# Optional
for key in ['nice', 'irq', 'idle', 'steal', 'ctx_switches', 'interrupts', 'soft_interrupts', 'syscalls']:
if key in self.stats:
self.views[key]['optional'] = True
def msg_curse(self, args=None, max_width=None):
"""Return the list to display in the UI."""
# Init the return message
ret = []
# Only process if stats exist and plugin not disable
if not self.stats or self.args.percpu or self.is_disabled():
return ret
# Some tag to enable/disable stats (example: idle_tag triggered on Windows OS)
idle_tag = 'user' not in self.stats
# First line
# Total + (idle) + ctx_sw
msg = '{}'.format('CPU')
ret.append(self.curse_add_line(msg, "TITLE"))
trend_user = self.get_trend('user')
trend_system = self.get_trend('system')
        if trend_user is None or trend_system is None:
trend_cpu = None
else:
trend_cpu = trend_user + trend_system
msg = ' {:4}'.format(self.trend_msg(trend_cpu))
ret.append(self.curse_add_line(msg))
# Total CPU usage
msg = '{:5.1f}%'.format(self.stats['total'])
ret.append(self.curse_add_line(msg, self.get_views(key='total', option='decoration')))
# Idle CPU
if 'idle' in self.stats and not idle_tag:
msg = ' {:8}'.format('idle')
ret.append(self.curse_add_line(msg, optional=self.get_views(key='idle', option='optional')))
msg = '{:4.1f}%'.format(self.stats['idle'])
ret.append(self.curse_add_line(msg, optional=self.get_views(key='idle', option='optional')))
# ctx_switches
# On WINDOWS/SUNOS the ctx_switches is displayed in the third line
if not WINDOWS and not SUNOS:
ret.extend(self.curse_add_stat('ctx_switches', width=15, header=' '))
# Second line
# user|idle + irq + interrupts
ret.append(self.curse_new_line())
# User CPU
if not idle_tag:
ret.extend(self.curse_add_stat('user', width=15))
elif 'idle' in self.stats:
ret.extend(self.curse_add_stat('idle', width=15))
# IRQ CPU
ret.extend(self.curse_add_stat('irq', width=14, header=' '))
# interrupts
ret.extend(self.curse_add_stat('interrupts', width=15, header=' '))
# Third line
# system|core + nice + sw_int
ret.append(self.curse_new_line())
# System CPU
if not idle_tag:
ret.extend(self.curse_add_stat('system', width=15))
else:
ret.extend(self.curse_add_stat('core', width=15))
# Nice CPU
ret.extend(self.curse_add_stat('nice', width=14, header=' '))
# soft_interrupts
if not WINDOWS and not SUNOS:
ret.extend(self.curse_add_stat('soft_interrupts', width=15, header=' '))
else:
ret.extend(self.curse_add_stat('ctx_switches', width=15, header=' '))
# Fourth line
# iowait + steal + syscalls
ret.append(self.curse_new_line())
if 'iowait' in self.stats:
# IOWait CPU
ret.extend(self.curse_add_stat('iowait', width=15))
elif 'dpc' in self.stats:
# DPC CPU
ret.extend(self.curse_add_stat('dpc', width=15))
# Steal CPU usage
ret.extend(self.curse_add_stat('steal', width=14, header=' '))
# syscalls: number of system calls since boot. Always set to 0 on Linux. (do not display)
if not LINUX:
ret.extend(self.curse_add_stat('syscalls', width=15, header=' '))
# Return the message with decoration
return ret | PypiClean |
/Aesthete-0.4.2.tar.gz/Aesthete-0.4.2/aesthete/glypher/PhraseGroup.py | import glypher as g
import exceptions
import copy
import sys
import draw
import gutils
from aobject.utils import debug_print
from Phrase import *
from Symbol import *
from Spacer import *
import Parser
from sympy.series import limits
from sympy.core.sympify import SympifyError
import Dynamic
from glypher import \
GLYPHER_PG_LEAD_ALL, \
GLYPHER_PG_LEAD_MID, \
GLYPHER_PG_LEAD_VERT, \
GLYPHER_PG_LEAD_HORI
ac = gutils.array_close
fc = gutils.float_close
class GlypherPhraseGroup (GlypherPhrase) :
phrases = None
phrase = None
lead_phrase = None
first_highlighted_pg_over_active = False
target_phrases = None
alts_phrases = None
get_sympy_code = None
to_string_code = None
to_latex_code = None
alternatives_cat = None
ignore_targets = None
#def to_string(self) :
# return '(' + self.mes[len(self.mes)-1] + '|' + '|'.join([t+'='+self.target_phrases[t].to_string() for t in self.target_phrases])
lhs_target = None
def set_lhs_target(self, lhs_target) :
"""Sets (or with None, unsets) the LHS target, for set_lhs."""
if lhs_target is None :
self.lhs_target = None
elif lhs_target in self.target_phrases :
self.lhs_target = lhs_target
else :
raise IndexError("lhs_target should be a target.")
def set_lhs(self, lhs) :
"""If lhs_target is set, puts lhs there, otherwise returns False."""
if self.lhs_target is not None :
self[self.lhs_target].adopt(lhs)
return True
return False
rhs_target = None
def set_rhs_target(self, rhs_target) :
"""Sets (or with None, unsets) the LHS target, for set_rhs."""
if rhs_target is None :
self.rhs_target = None
elif rhs_target in self.target_phrases :
self.rhs_target = rhs_target
else :
raise IndexError("rhs_target should be a target.")
def set_rhs(self, rhs) :
"""If rhs_target is set, puts rhs there, otherwise returns False."""
if self.rhs_target is not None :
self[self.rhs_target].adopt(rhs)
return True
return False
def __setitem__(self, key, value) :
"""Add a new TargetPhrase called key in value."""
self.add_target(value, key)
def __getitem__(self, key) :
"""Retrieve a Target (or child) by key."""
# If this is an int, then let Phrase find it, otherwise it should be a
# string (of some sort) and a Target of ours.
if isinstance(key, int) :
return GlypherPhrase.__getitem__(self, key)
elif not isinstance(key, basestring) :
raise(TypeError("For pg[target], target must be str not " + str(type(key))))
if key in self.target_phrases :
return self.get_target(key)
raise(IndexError("Target "+key+" not found in PhraseGroup"))
# Stop looking up tree to find edit mode
#def is_edit_mode(self) :
# return self.edit_mode
# Stop upward search for all binary expressions, except SpaceArray
stop_for_binary_expression_default = True
stop_for_binary_expression_exceptions = ('space_array', 'equality')
def to_latex(self) :
if not self.get_visible() : return ""
elif self.get_blank() : return " "
if self.to_latex_code :
return Dynamic.eval_for_sympy(self, self.to_latex_code)
return GlypherPhrase.to_latex(self)
def to_string(self, mode = "string") :
if not self.get_visible() : return unicode("")
elif self.get_blank() : return unicode(" ")
if self.to_string_code :
return Dynamic.eval_for_sympy(self, self.to_string_code)
return GlypherPhrase.to_string(self, mode=mode)
def get_sympy(self) :
if self.get_sympy_code :
return Dynamic.eval_for_sympy(self, self.get_sympy_code)
return GlypherPhrase.get_sympy(self)
def draw_alternatives(self, cr) :
pass
def next_alternative(self) :
self._alternative_in_dir(go_next=True)
def _alternative_in_dir(self, go_next = True) :
cat = self.alternatives_cat
if cat is not None :
alts = g.find_phrasegroup_alternatives(cat)
if self.mes[-1] in alts and len(alts) > 1 :
pos = alts.index(self.mes[-1])
pos = (len(alts) + pos + (1 if go_next else -1)) % len(alts)
new_name = alts[pos]
operands = []
targets = [self.lhs_target, self.rhs_target] + \
self.target_phrases.keys()
for tgt in targets :
if tgt is None :
continue
tgt = self.target_phrases[tgt]
if len(tgt) == 0 :
continue
op = tgt.get_entities()[0]
op.orphan()
operands.append(op)
self.parent.exchange(self, Parser.make_phrasegroup(self.parent,
new_name,
operands=operands))
def prev_alternative(self) :
self._alternative_in_dir(go_next=False)
def recalc_basebox(self) :
GlypherPhrase.recalc_basebox(self)
pbasebox = self.config[0].get_basebox()
if self.lead_phrase is None : return pbasebox
b = self.lead_phrase.config[0].get_basebox()
#if len(self.lead_phrase.get_entities()) > 0 :
# debug_print(self.lead_phrase.get_entities()[0].format_me())
# debug_print(self.lead_phrase.get_entities()[0].get_basebox())
# debug_print(self.lead_phrase.format_me())
# debug_print(b)
# debug_print('-'*30)
la = self.get_p('lead_application')
self.config[0].basebox = \
(b[0] if la[0] else pbasebox[0],\
b[1] if la[1] else pbasebox[1],\
b[2] if la[2] else pbasebox[2],\
b[3] if la[3] else pbasebox[3],\
b[4] if la[4] else pbasebox[4],\
b[5] if la[5] else pbasebox[5])
def set_lead(self, lead, application = (True,True,True,True)) :
self.lead_phrase = lead
self.set_p('lead_application', application)
def get_xml(self, name = None, top = True, targets = None, full = False) :
if targets is None :
targets = self.target_phrases
if full :
root = GlypherPhrase.get_xml(self, name, top, targets=targets,
full=False)
else :
root = ET.Element(self.get_name())
root.set('type', self.mes[-1])
tgts = ET.Element('targets')
for t in self.target_phrases :
if t in self.ignore_targets :
continue
r = self.target_phrases[t].get_xml(name='target', top=False,
full=False)
r.set('name', t)
tgts.append(r)
if len(tgts) > 0 :
root.append(tgts)
if self.lhs_target is not None :
root.set('lhs', self.lhs_target)
if self.rhs_target is not None :
root.set('rhs', self.rhs_target)
return root
def child_change(self) :
"""Called if a child changes in a non-geometric sense."""
GlypherPhrase.child_change(self)
if self.included() :
self.make_simplifications()
def make_simplifications(self) :
pass
def add_alts(self, phrase, name) :
ap = make_alts_phrase()
phrase.adopt(ap)
self.alts_phrases[name] = ap
return ap
def add_target(self, phrase, name, stay_enterable = False) :
"""Add a Target, that is, a TargetPhrase which looks a bit funny and can
be directly addressed from the PhraseGroup by a string. It sits Inside
the passed Phrase (i.e. TP=P.IN()=TP.IN()) and, by default, should be
indistinguishable to the end-user through the GUI."""
if not isinstance(name, basestring) :
raise TypeError("Target names in PGs should always be str/unicode")
if not isinstance(phrase, GlypherPhrase) :
raise TypeError("""
Only Phrases may be given to be turned into Targets
""")
# Generate a TargetPhrase
tp = make_target_phrase()
# Ensure it will delete its parent automatically
tp.set_deletable(2)
# Avoid users falling into the nesting gap
if not stay_enterable :
phrase.set_enterable(False)
# Put it in
phrase.adopt(tp)
# Add it to the dictionary
self.target_phrases[name] = tp
# Tell tp who the pg is
tp.pg = self
# Give it a name for ease of finding
tp.set_name(name)
def get_target(self, name) :
return self.target_phrases[name]
# Potentially confusing name similarity
def get_alts(self, name) :
return self.alts_phrases[name]
def get_alt(self, name) :
return self.alts_phrases[name].active_child
def inside_a(self, what, ignore=()) :
if self.am(what) : return self
if self.included() and len(set(self.mes) & set(ignore))>0 :
return self.get_parent().inside_a(what, ignore)
return None
def set_highlight_group(self, highlight_group) : self.set_p('highlight_group', highlight_group)
def get_highlight_group(self) : return self.get_p('highlight_group')
def __init__(self, parent, phrase_defs = [], area = (0,0,0,0), lead_phrase = None, phraser=None, highlight_group = True) :
self.phrases = {}
self.target_phrases = {}
self.alts_phrases = {}
self.ignore_targets = []
GlypherPhrase.__init__(self, parent, area)
self.add_properties({'lead_application' :(True,True,True,True,True,True),
})
#self.add_properties({'local_space' : True})
#debug_print((self,self.mes))
self.set_highlight_group(highlight_group)
self.mes.append('phrasegroup')
self.set_enterable(False)
if phraser == None : phraser = GlypherExpression if g.math_mode else GlypherPhrase
self.phraser = phraser
test = phraser(parent)
self.set_p('phraser', test.mes[len(test.mes)-1] if phraser else None)
del test
if isinstance(phrase_defs, dict) :
for name in phrase_defs : self.append_phrase_to_group(name, phrase_defs[name])
else :
for ind, pd in enumerate(phrase_defs) :
glyph = pd['o']
pd['x'] = ind
self.phrases[pd['n']] = [glyph,pd]
# Make sure that appending doesn't bring the left edge forward
#old_left = self.old_config.bbox[0]
#adj = self.get_adj(ind)
#if pd['n'] == 'col1' : adj = 10
#glyph.translate(adj, 0)
self.append(glyph, row=pd['r'] if 'r' in pd else 0, override_in=True, move=(True,True), align=pd['a'] if 'a' in pd else ('l','m'))
#if self.config[0].bbox[0] > old_left : self.config[0].bbox[0] = old_left
#self.feed_up()
self.characteristics.append('_bodmasable')
#def elevate_entities(self, new_parent, adopt = False, to_front = False) :
# #debug_print(self.lead_phrase)
# if self.lead_phrase is not None :
# return self.get_phrase(self.lead_phrase).elevate_entities(new_parent, adopt, to_front)
def get_phrase(self, phrase) :
return self.phrases[phrase][0]
def add_phrase(self, phr, name) :
self.phrases[name] = [phr, {'x':0}]
phr.set_name(name)
def set_child_active(self, active, desc) :
GlypherPhrase.set_child_active(self, active, desc)
ancs = desc.get_ancestors()
if not active : self.first_highlighted_pg_over_active = False
else :
for anc in ancs :
if anc == self and anc.get_highlight_group() : self.first_highlighted_pg_over_active = True; break
if anc.am('phrasegroup') and anc.get_highlight_group() :
self.first_highlighted_pg_over_active = False; break
def get_adj(self, loc) :
adj = loc
for phr in self.phrases :
p = self.phrases[phr]
#debug_print(p[0].to_string() + ' ' +str(p[0].config[0].bbox))
if loc > p[1]['x'] : adj += p[0].config[0].bbox[2]-p[0].config[0].bbox[0]
return adj
def append_phrase_to_group(self, name, pd) :
phraser = self.phraser
adj = self.get_adj(pd['x'])
if 'g' in pd : phraser = pd['g']
pos = (self.config[0].bbox[0], self.config[0].bbox[3])
glyph = phraser(self, (pos[0]+adj,self.get_topline()+pd['y'],pos[0]+adj,self.get_baseline()+pd['y']),\
pd['ls'] if 'ls' in pd else 1.0,\
pd['fs'] if 'fs' in pd else 1.0,\
            pd['a'] if 'a' in pd else ('l','b'))
glyph.x_offset = adj
glyph.y_offset = pd['y']
self.phrases[name] = [glyph,pd]
# Make sure that appending doesn't bring the left edge forward
old_left = self.old_config.bbox[0]
self.append(glyph, row=pd['r'] if 'r' in pd else 0, override_in=True, move=(True,True), align=pd['a'] if 'a' in pd else ('l','m'))
if self.config[0].bbox[0] > old_left : self.config[0].bbox[0] = old_left
self.feed_up()
def decorate(self, cr) :
self.draw_topbaseline(cr)
if not self.get_visible() : return
if g.additional_highlighting and self.get_attached() :
cr.save()
cr.move_to(self.config[0].bbox[0]-2, self.config[0].bbox[3]+2)
draw.draw_full_blush(cr, self.config[0].bbox[2]-self.config[0].bbox[0]+4, self.config[0].bbox[3]-self.config[0].bbox[1]+4, (0.8,0.95,0.95))
cr.set_source_rgba(0.6, 0.9, 0.9, 1.0)
area=(self.config[0].bbox[0]-2, self.config[0].bbox[2]+2, self.config[0].bbox[1]-2, self.config[0].bbox[3]+2)
draw.trace_rounded(cr, area, 5)
cr.stroke()
cr.restore()
elif self.get_highlight_group() and\
self.first_highlighted_pg_over_active and self.show_decoration() :
cr.save()
#cr.set_line_width(2.0)
#cr.rectangle(self.config[0].bbox[0]-2, self.config[0].bbox[1]-2, self.config[0].bbox[2]-self.config[0].bbox[0]+4, self.config[0].bbox[3]-self.config[0].bbox[1]+4)
#cr.set_source_rgba(0.9, 0.8, 0.6, 0.8)
cr.move_to(self.config[0].bbox[0]-2, self.config[0].bbox[1]-8)
draw.draw_inverse_blush(cr, self.config[0].bbox[2]-self.config[0].bbox[0]+4, self.config[0].bbox[3]-self.config[0].bbox[1]-2, (0.9,0.8,0.6))
if g.stroke_mode :
cr.fill_preserve()
cr.set_source_rgba(0.5, 0.5, 0.4, 0.6)
cr.stroke()
else : cr.fill()
cr.restore()
#if you want to run any phrase functions, you should always run through the expr() fn,
#and below is why.
class GlypherCompoundPhrase(GlypherPhraseGroup) :
phrase_name = ''
in_ready = False
def __init__(self, parent, phrase_defs, area = (0,0,0,0), phrase = None, phraser = GlypherPhrase) :
self.phrase_name = phrase
GlypherPhraseGroup.__init__(self, parent, phrase_defs, area, phrase, phraser, highlight_group=False)
if phrase is not None : self.set_expr(phrase)
#self.in_ready = True
#self.IN()._out = self
#self.phrases[phrase][0].set_deletable(2) # Send delete requests for rhs to me
#self.set_recommending(self.IN())
#get_caret().enter_phrase(self.expr())
#def IN(self) : return self.phrases[self.phrase_name][0].IN() if self.in_ready else self
def set_expr(self, phrase) :
self.phrase_name = phrase
#debug_print(self.phrases)
self.in_ready = True
self.set_in(self.get_target(self.phrase_name))
self.set_lead(self.get_target(phrase).IN(), GLYPHER_PG_LEAD_ALL)
self.recalc_bbox()
class GlypherBracketedPhrase(GlypherCompoundPhrase) :
left_bracket = None
right_bracket = None
is_suspend_collapse_checks = False
collapse_condition = None
stop_for_binary_expression_default = False
stop_for_binary_expression_exceptions = ()
def set_bracket_shapes(self, bracket_shapes) :
self.suspend_recommending()
brkt_shape = bracket_shapes[0]
phrase = self.left_bracket
for i in (0,1) :
symbol = GlypherSymbol(self, brkt_shape, ink=True)
symbol.set_attachable(False)
phrase.IN().adopt(symbol)
brkt_shape = bracket_shapes[1]; phrase = self.right_bracket
self.set_p('bracket_shapes', bracket_shapes)
self.resume_recommending()
def get_bracket_shapes(self) :
return self.get_p('bracket_shapes')
def __init__(self, parent, area = (0,0,0,0), line_size_coeff = 1.0, font_size_coeff = 1.0, align = ('l','m'), no_fices = False,\
auto = True, keep_space = False, hidden_spacing = (0,0), expr = None, bracket_shapes = ('(',')') ) :
#pds = {}
# pass no_fices
#pds['left_bracket'] = { 'x' : 0 , 'y' : 0, 'a' : ('l','m') }
#pds['expression'] = { 'x' : 1 , 'y' : 0, 'a' : align }
#pds['right_bracket'] = { 'x' : 2 , 'y' : 0, 'a' : ('l','m') }
self.suspend_collapse_checks()
GlypherCompoundPhrase.__init__(self, parent, [], area)
self.no_bracket = set()
self.set_p('no_bracket', self.no_bracket)
self.mes.append('bracketed_phrase')
self.no_bracket.add('fraction')
self.no_bracket.add('symbol')
self.no_bracket.add('square_root')
self.no_bracket.add('matrix')
self.no_bracket.add('reference')
self.no_bracket.add('constant')
self.left_bracket = GlypherPhrase(self); self.add_phrase(self.left_bracket, 'left_bracket')
self.right_bracket = GlypherPhrase(self); self.add_phrase(self.right_bracket, 'right_bracket')
#self.left_space = GlypherSpace(self, (hidden_spacing[0],1))
#self.right_space = GlypherSpace(self, (hidden_spacing[1],1))
#self.left_space = GlypherSymbol(None, '-')
#self.right_space = GlypherSymbol(None, '-')
self.expression = GlypherPhrase(self)
self.expression.set_p('align_as_entity', True)
#self.expression_out = GlypherPhrase(self, align_as_entity=True)
self.append(self.left_bracket, override_in=True, move=(True,True), align=('l','m'))
#self.append(self.left_space, override_in=True, move=(True,True), align=('l','m'))
self.append(self.expression)
#self.append(self.expression_out, override_in=True, move=(True,True), align=align)
#self.expression_out.adopt(self.expression)
#self.append(self.right_space, override_in=True, move=(True,True), align=('l','m'))
self.append(self.right_bracket, override_in=True, move=(True,True), align=('l','m'))
#self.target_phrases['expression'] = self.expression
self.add_target(self.expression, 'expression')
self.set_enterable(False)
self.set_expr('expression')
self.set_lead(self.expression.IN(), GLYPHER_PG_LEAD_MID)
self.set_p('keep_space', keep_space)
#self.left_space.hide()
#self.right_space.hide()
brkt_shape = bracket_shapes[0]
phrase = self.left_bracket
for i in (0,1) :
phrase.set_enterable(False)
phrase.set_attachable(False)
phrase = self.right_bracket
self.set_bracket_shapes(bracket_shapes)
#if expr is not None :
# self.phrases['expression'][0].append(expr)
if auto : self.set_auto_bracket(True)
#debug_print(self.left_bracket.format_loc())
#self.set_auto_bracket(False)
#debug_print(self.right_bracket.format_loc())
#debug_print(self.expression.format_loc())
self.resume_collapse_checks()
if expr is not None :
self.expression.append(expr)
self.check_collapse()
self.set_recommending(self.get_target('expression'))
def set_auto_bracket(self, auto_bracket) : self.set_p('auto_bracket', auto_bracket)
def get_auto_bracket(self) : return self.get_p('auto_bracket')
def set_no_bracket(self, no_bracket) : self.set_p('no_bracket', no_bracket)
def get_no_bracket(self) : return self.get_p('no_bracket')
def set_collapse_condition(self, collapse_condition) : self.set_p('collapse_condition', collapse_condition)
def get_collapse_condition(self) : return self.get_p('collapse_condition')
def set_collapsed(self, collapsed) : self.set_p('collapsed', collapsed)
def get_collapsed(self) : return self.get_p('collapsed')
# This ents0 arg allows us to decide should_collapse based on a different
# entity
def should_collapse(self, ents0 = None) :
ents = self.get_target('expression').get_entities()
if ents0 is None and len(ents) == 1 :
ents0 = ents[0]
if ents0 is not None :
while ents0.OUT().mes[-1] in ('phrase', 'target_phrase') and len(ents0) == 1 :
ents0 = ents0[0]
#debug_print(ents)
# ents0.is_wordlike() or
return len(ents) == 0 or (ents0 and \
(len(set(ents0.mes) & self.get_no_bracket())>0 or \
ents0.is_wordlike() or ents0.get_p('force_no_bracket')) \
)
def suspend_collapse_checks(self) :
self.is_suspend_collapse_checks = True
def resume_collapse_checks(self) :
self.is_suspend_collapse_checks = False
self.check_collapse()
def check_collapse(self) :
cc = self.get_collapse_condition()
if self.get_auto_bracket() and not self.is_suspend_collapse_checks :
if self.should_collapse() \
or (cc and cc()) :
self.brackets_collapse()
else :
self.brackets_restore()
def brackets_collapse(self) :
ks = self.get_p('keep_space')
if isinstance(ks, tuple) :
ksl = ks[0]; ksr = ks[1]
else :
ksl = ks; ksr = ks
if self.left_bracket.get_visible() and not ksl : self.left_bracket.hide()#; self.left_space.show()
if not self.left_bracket.get_blank() : self.left_bracket.blank()
if self.right_bracket.get_visible() and not ksr : self.right_bracket.hide()#; self.right_space.show()
if not self.right_bracket.get_blank() : self.right_bracket.blank()
self.set_collapsed(True)
def brackets_restore(self) :
ks = self.get_p('keep_space')
if isinstance(ks, tuple) :
ksl = ks[0]; ksr = ks[1]
else :
ksl = ks; ksr = ks
if not self.left_bracket.get_visible() and not ksl : self.left_bracket.show()#; self.left_space.show()
if self.left_bracket.get_blank() : self.left_bracket.unblank()
if not self.right_bracket.get_visible() and not ksr : self.right_bracket.show()#; self.right_space.show()
if self.right_bracket.get_blank() : self.right_bracket.unblank()
self.set_collapsed(False)
def child_change(self) :
self.check_collapse()
GlypherCompoundPhrase.child_change(self)
_altering = False
def child_altered(self, child = None) :
GlypherCompoundPhrase.child_altered(self, child)
if self.in_ready and not self._altering and not self.is_suspend_collapse_checks : #and False :#RMV
self._altering = True
for b in (self.left_bracket, self.right_bracket) :
#break
#if not b or not b.visible : break
if not b : break
sc = (self.IN().config[0].basebox[5]-self.IN().config[0].basebox[3])
#bc = b.get_scaled_font_size()
bc = (b.config[0].basebox[5]-b.config[0].basebox[3])
if not fc(.8*sc, bc) :
if b.config[0].get_changes() != "" :
raise(RuntimeError('Rescaling parentheses for an un-reset bracket bounding box'))
b.set_font_size_scaling((.8*sc/bc)*b.get_size_scaling())
bc = (b.config[0].basebox[5]-b.config[0].basebox[3])
self._altering = False
class GlypherBODMASBracketedPhrase(GlypherBracketedPhrase) :
def set_bodmas_sensitivity(self, bodmas_sensitivity) : self.set_p('bodmas_sensitivity', bodmas_sensitivity)
def get_bodmas_sensitivity(self) : return self.get_p('bodmas_sensitivity')
def __init__(self, parent, area = (0,0,0,0), line_size_coeff = 1.0, font_size_coeff = 1.0, align = ('l','m'), no_fices = False) :
GlypherBracketedPhrase.__init__(self, parent, area, line_size_coeff, font_size_coeff, align, no_fices)
def should_collapse(self, ents0 = None) :
# TODO: move 'expr' to 'inside'
ents = self.IN().get_entities()
if ents0 is None and len(ents) == 1 :
ents0 = ents[0]
return GlypherBracketedPhrase.should_collapse(self, ents0=ents0) or \
(ents0 and ents0.am_c('_bodmasable') and ents0.get_bodmas_level() < self.get_bodmas_sensitivity())
def child_change(self) :
GlypherBracketedPhrase.child_change(self)
self.check_collapse()
#debug_print(self.entities[0].get_bodmas_level())
class GlypherTargetPhraseError(RuntimeError) :
tp = None
def __init__(self, tp, err = None) :
self.tp = tp
tp.set_error_note(err)
RuntimeError.__init__(self, err)
class GlypherTargetPhrase(GlypherPhrase) :
pg = None
hl_anc = False
error = False
def __init__(self, parent, area = (0,0,0,0), line_size_coeff = 1.0, font_size_coeff = 1.0, align = ('l','m'), auto_fices = False) :
GlypherPhrase.__init__(self, parent, area, line_size_coeff, font_size_coeff, align, auto_fices)
self.mes.append('target_phrase')
self.characteristics.append('_bodmasable')
self.characteristics.append('_in_phrase')
self.add_properties({'blank_ratio' : 0.15, 'attachable' : True,
'local_space' : True})
def get_phrasegroup(self) :
return self.pg
def get_bodmas_level(self) :
ents = self.get_entities()
#debug_print(self.entities)
if (len(ents) == 1 and ents[0].am_c('_bodmasable')) :
return ents[0].get_bodmas_level()
else : return 100
def decorate(self, cr) :
if g.show_rectangles and self.show_decoration() :
cr.save()
cr.set_line_width(2.0)
cr.set_source_rgba(0.5, 0.5, 0.8, 0.4)
cr.rectangle(self.config[0].bbox[0]-2, self.config[0].bbox[1]-2, self.config[0].bbox[2]-self.config[0].bbox[0]+4, self.config[0].bbox[3]-self.config[0].bbox[1]+4)
cr.stroke()
cr.restore()
cr.set_source_rgba(0.5, 0.5, 0.8, 1.0)
cr.move_to(self.config[0].bbox[0]-4, self.config[0].basebox[4])
cr.line_to(self.config[0].bbox[2]+4, self.config[0].basebox[4])
cr.stroke()
if not self.is_enterable() : return
hl_anc = None
# If this is in an unhighlighted highlight group, don't show it, otherwise if the first highlighted group is
# above it, show it
for anc in self.get_ancestors() :
if anc.am('phrasegroup') :
if anc.first_highlighted_pg_over_active : hl_anc = anc; break
#else : hl_anc = None; break
elif anc.get_highlight_group() : hl_anc = None; break
self.hl_anc = hl_anc
if not hl_anc and not self.error : return
cr.save()
red = 1.0 if self.error else 0.4
cr.set_source_rgba(red, 0.4, 0.2, 0.1 if g.show_rectangles else 0.2)
area=(self.config[0].bbox[0]-2, self.config[0].bbox[2]+2, self.config[0].bbox[1]-2, self.config[0].bbox[3]+2)
draw.trace_rounded(cr, area, 5)
if len(self.get_entities()) == 0 :
cr.fill_preserve()
cr.set_source_rgba(red, 0.4, 0.2, 0.2 if g.show_rectangles else 0.4)
cr.stroke()
cr.restore()
def get_sympy(self) :
#if len(self.IN().entities) == 0 :
# raise GlypherTargetPhraseError(self, "Please enter something!")
try :
sy = GlypherPhrase.get_sympy(self)
self.set_error_note()
return sy
except GlypherTargetPhraseError :
self.set_error_note()
raise
except exceptions.RuntimeError as e:
self.set_error_note("Problem with sympy parsing : " +str(e))
raise GlypherTargetPhraseError(self, str(e))
except SympifyError as e:
self.set_error_note("Sympy complained : " +str(e))
raise GlypherTargetPhraseError(self, str(e))
except :
self.set_error_note("Problem with sympy parsing : " +str(sys.exc_info()[1]))
raise GlypherTargetPhraseError(self, str(sys.exc_info()[1]))
ref_target_phrase = None
def make_target_phrase () :
global ref_target_phrase
if ref_target_phrase is None :
ref_target_phrase = GlypherTargetPhrase(None)
return copy.deepcopy(ref_target_phrase)
ref_bracketed_phrase = None
def make_bracketed_phrase () :
global ref_bracketed_phrase
if ref_bracketed_phrase is None :
ref_bracketed_phrase = GlypherBracketedPhrase(None)
return copy.deepcopy(ref_bracketed_phrase)
g.phrasegroups['phrasegroup'] = GlypherPhraseGroup
g.phrasegroups['bracketed_phrase'] = GlypherBracketedPhrase
g.phrasegroups['target_phrase'] = GlypherTargetPhrase | PypiClean |
/KratosCoSimulationApplication-9.4-cp39-cp39-win_amd64.whl/KratosMultiphysics/CoSimulationApplication/solver_wrappers/sdof/sdof_static_solver.py | import KratosMultiphysics
from KratosMultiphysics.CoSimulationApplication.function_callback_utility import GenericCallFunction
# Other imports
import json
import os
class SDoFStaticSolver(object):
def __init__(self, input_name):
        # mimicking two constructors
if isinstance(input_name, dict):
parameters = input_name
elif isinstance(input_name, str):
if not input_name.endswith(".json"):
input_name += ".json"
with open(input_name,'r') as ProjectParameters:
parameters = json.load(ProjectParameters)
else:
raise Exception("The input has to be provided as a dict or a string")
default_settings = {
"system_parameters":{
"stiffness" : 4000.0
},
"initial_values":{
"displacement" : 0.0,
},
"boundary_conditions":{
"external_load" : 5000.0
},
"solver_parameters": {
"buffer_size" : 1
},
"output_parameters":{
"write_output_file": False,
"file_name" : "sdof_static_solver/results_sdof.dat"
}}
RecursivelyValidateAndAssignDefaults(default_settings, parameters)
self.stiffness = parameters["system_parameters"]["stiffness"]
self.initial_displacement = parameters["initial_values"]["displacement"]
self.force = parameters["boundary_conditions"]["external_load"]
self.buffer_size = parameters["solver_parameters"]["buffer_size"]
self.output_file_name = parameters["output_parameters"]["file_name"]
self.write_output_file = parameters["output_parameters"]["write_output_file"]
def Initialize(self):
initial_values = self.initial_displacement
self.dx = initial_values
if self.write_output_file:
if os.path.isfile(self.output_file_name):
os.remove(self.output_file_name)
self.InitializeOutput()
self.time = 0.0
def InitializeOutput(self):
with open(self.output_file_name, "w") as results_sdof_static:
results_sdof_static.write("displacement" + "\n")
self.OutputSolutionStep()
def OutputSolutionStep(self):
if self.write_output_file:
with open(self.output_file_name, "a") as results_sdof_static:
#outputs results
results_sdof_static.write(str(self.dx) + "\n")
def AdvanceInTime(self, current_time):
self.time = 0.0
return self.time
def SolveSolutionStep(self):
self.dx = self.force/self.stiffness
KratosMultiphysics.Logger.PrintInfo('SDoFStaticSolver', 'Force Imported = ', self.force)
KratosMultiphysics.Logger.PrintInfo('SDoFStaticSolver', 'Structure Stiffness = ', self.stiffness)
KratosMultiphysics.Logger.PrintInfo('SDoFStaticSolver', 'New Displacement = ', self.dx)
def CalculateReaction(self, buffer_idx=0):
reaction = self.stiffness * (self.dx)
return reaction
def GetSolutionStepValue(self, identifier, buffer_idx=0):
if identifier == "DISPLACEMENT":
return self.dx
elif identifier == "REACTION":
return self.CalculateReaction()
else:
raise Exception("Identifier is unknown!")
def SetSolutionStepValue(self, identifier, value, buffer_idx=0):
if identifier == "DISPLACEMENT":
self.dx= value
elif identifier == "LOAD":
self.force = 0.0
self.force = value
elif identifier == "ROOT_POINT_DISPLACEMENT":
self.root_point_displacement = 0.0
self.root_point_displacement = value
else:
raise Exception("Identifier is unknown!")
def ValidateAndAssignDefaults(defaults, settings, recursive=False):
for key, val in settings.items():
# check if the current entry also exists in the defaults
if not key in defaults.keys():
err_msg = 'The item with name "' + key + '" is present in this '
err_msg += 'settings\nbut NOT in the defaults!\n'
err_msg += 'settings are:\n'
err_msg += json.dumps(settings, indent=4)
err_msg += '\ndefaults are:\n'
err_msg += json.dumps(defaults, indent=4)
raise Exception(err_msg)
# check if the type is the same in the defaults
if type(settings[key]) != type(defaults[key]):
err_msg = 'The type of the item with name "' + key + '" (type: "'
err_msg += str(type(settings[key]).__name__)+'") in this '
err_msg += 'settings\nis NOT the same as in the defaults (type: "'
err_msg += str(type(defaults[key]).__name__)+'")!\n'
err_msg += 'settings are:\n'
err_msg += json.dumps(settings, indent=4)
err_msg += '\ndefaults are:\n'
err_msg += json.dumps(defaults, indent=4)
raise Exception(err_msg)
# loop the defaults and add the missing entries
for key_d, val_d in defaults.items():
if key_d not in settings: # add the default in case the setting is not present
settings[key_d] = val_d
elif recursive and type(val_d) is dict:
RecursivelyValidateAndAssignDefaults(val_d, settings[key_d])
def RecursivelyValidateAndAssignDefaults(defaults, settings):
ValidateAndAssignDefaults(defaults, settings, recursive=True) | PypiClean |
/Dre_binomial_distribution-0.1.tar.gz/Dre_binomial_distribution-0.1/Dre_binomial_distributions/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
TODO: Fill out all functions below
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
            k (int): number of successes for which to calculate the probability
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n) | PypiClean |
/Homie4-0.4.0.tar.gz/Homie4-0.4.0/homie/support/network_information.py |
import logging
import netifaces  # required by __build_ip_to_interface_dict and get_local_mac_for_ip
import socket
logger = logging.getLogger(__name__)
""" or use psutils
import psutil
mac_addresses = []
nics = psutil.net_if_addrs()
nics.pop('lo') # remove loopback since it doesnt have a real mac address
for i in nics:
for j in nics[i]:
if j.family == 17: # AF_LINK
mac_addresses.append(j.address)
"""
class Network_Information(object):
"""Util for getting a interface' ip to a specific host and the corresponding mac address."""
def __init__(self):
self.ip_to_interface = self.__build_ip_to_interface_dict()
def __build_ip_to_interface_dict(self):
"""Build a map of IPv4-Address to Interface-Name (like 'eth0')"""
map = {}
for interface in netifaces.interfaces():
try:
ifInfo = netifaces.ifaddresses(interface)[netifaces.AF_INET]
for addrInfo in ifInfo:
addr = addrInfo.get("addr")
if addr:
map[addr] = interface
except Exception:
pass
return map
def get_local_ip(self, targetHost, targetPort):
"""Gets the local ip to reach the given ip.
That can be influenced by the system's routing table.
A socket is opened and closed immediately to achieve that."""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((targetHost, targetPort))
except Exception as e:
logger.exception(
"Cannot create socket to target " + targetHost + ":" + targetPort
)
else:
ip = s.getsockname()[0]
s.close()
return ip
def get_local_mac_for_ip(self, ip):
"""Get the mac address for that given ip."""
logger.debug("Interfaces found: %s", self.ip_to_interface)
logger.debug("Looking for IP: %s", ip)
mac_addr = None
if_name = self.ip_to_interface.get(ip)
try:
link = netifaces.ifaddresses(if_name)[netifaces.AF_LINK]
except (KeyError, TypeError):
logger.warning("Could not determine MAC for: %s", if_name)
else:
logger.debug("Found link: %s", link)
if len(link) > 1:
logger.warning("Conflict: Multiple interfaces found for IP: %s!", ip)
mac_addr = link[0].get("addr")
return mac_addr | PypiClean |
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ace/theme-merbivore_soft.js | ace.define("ace/theme/merbivore_soft",["require","exports","module","ace/lib/dom"], function(require, exports, module) {
exports.isDark = true;
exports.cssClass = "ace-merbivore-soft";
exports.cssText = ".ace-merbivore-soft .ace_gutter {\
background: #262424;\
color: #E6E1DC\
}\
.ace-merbivore-soft .ace_print-margin {\
width: 1px;\
background: #262424\
}\
.ace-merbivore-soft {\
background-color: #1C1C1C;\
color: #E6E1DC\
}\
.ace-merbivore-soft .ace_cursor {\
color: #FFFFFF\
}\
.ace-merbivore-soft .ace_marker-layer .ace_selection {\
background: #494949\
}\
.ace-merbivore-soft.ace_multiselect .ace_selection.ace_start {\
box-shadow: 0 0 3px 0px #1C1C1C;\
border-radius: 2px\
}\
.ace-merbivore-soft .ace_marker-layer .ace_step {\
background: rgb(102, 82, 0)\
}\
.ace-merbivore-soft .ace_marker-layer .ace_bracket {\
margin: -1px 0 0 -1px;\
border: 1px solid #404040\
}\
.ace-merbivore-soft .ace_marker-layer .ace_active-line {\
background: #333435\
}\
.ace-merbivore-soft .ace_gutter-active-line {\
background-color: #333435\
}\
.ace-merbivore-soft .ace_marker-layer .ace_selected-word {\
border: 1px solid #494949\
}\
.ace-merbivore-soft .ace_invisible {\
color: #404040\
}\
.ace-merbivore-soft .ace_entity.ace_name.ace_tag,\
.ace-merbivore-soft .ace_keyword,\
.ace-merbivore-soft .ace_meta,\
.ace-merbivore-soft .ace_meta.ace_tag,\
.ace-merbivore-soft .ace_storage {\
color: #FC803A\
}\
.ace-merbivore-soft .ace_constant,\
.ace-merbivore-soft .ace_constant.ace_character,\
.ace-merbivore-soft .ace_constant.ace_character.ace_escape,\
.ace-merbivore-soft .ace_constant.ace_other,\
.ace-merbivore-soft .ace_support.ace_type {\
color: #68C1D8\
}\
.ace-merbivore-soft .ace_constant.ace_character.ace_escape {\
color: #B3E5B4\
}\
.ace-merbivore-soft .ace_constant.ace_language {\
color: #E1C582\
}\
.ace-merbivore-soft .ace_constant.ace_library,\
.ace-merbivore-soft .ace_string,\
.ace-merbivore-soft .ace_support.ace_constant {\
color: #8EC65F\
}\
.ace-merbivore-soft .ace_constant.ace_numeric {\
color: #7FC578\
}\
.ace-merbivore-soft .ace_invalid,\
.ace-merbivore-soft .ace_invalid.ace_deprecated {\
color: #FFFFFF;\
background-color: #FE3838\
}\
.ace-merbivore-soft .ace_fold {\
background-color: #FC803A;\
border-color: #E6E1DC\
}\
.ace-merbivore-soft .ace_comment,\
.ace-merbivore-soft .ace_meta {\
font-style: italic;\
color: #AC4BB8\
}\
.ace-merbivore-soft .ace_entity.ace_other.ace_attribute-name {\
color: #EAF1A3\
}\
.ace-merbivore-soft .ace_indent-guide {\
background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAEklEQVQImWOQkpLyZfD09PwPAAfYAnaStpHRAAAAAElFTkSuQmCC) right repeat-y\
}";
var dom = require("../lib/dom");
dom.importCssString(exports.cssText, exports.cssClass);
}); | PypiClean |
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_hr-hr.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"nedjelja",
"ponedjeljak",
"utorak",
"srijeda",
"\u010detvrtak",
"petak",
"subota"
],
"MONTH": [
"sije\u010dnja",
"velja\u010de",
"o\u017eujka",
"travnja",
"svibnja",
"lipnja",
"srpnja",
"kolovoza",
"rujna",
"listopada",
"studenoga",
"prosinca"
],
"SHORTDAY": [
"ned",
"pon",
"uto",
"sri",
"\u010det",
"pet",
"sub"
],
"SHORTMONTH": [
"sij",
"velj",
"o\u017eu",
"tra",
"svi",
"lip",
"srp",
"kol",
"ruj",
"lis",
"stu",
"pro"
],
"fullDate": "EEEE, d. MMMM y.",
"longDate": "d. MMMM y.",
"medium": "d. MMM y. HH:mm:ss",
"mediumDate": "d. MMM y.",
"mediumTime": "HH:mm:ss",
"short": "dd.MM.y. HH:mm",
"shortDate": "dd.MM.y.",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "kn",
"DECIMAL_SEP": ",",
"GROUP_SEP": ".",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "-",
"negSuf": "\u00a0\u00a4",
"posPre": "",
"posSuf": "\u00a0\u00a4"
}
]
},
"id": "hr-hr",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (vf.v == 0 && i % 10 == 1 && i % 100 != 11 || vf.f % 10 == 1 && vf.f % 100 != 11) { return PLURAL_CATEGORY.ONE; } if (vf.v == 0 && i % 10 >= 2 && i % 10 <= 4 && (i % 100 < 12 || i % 100 > 14) || vf.f % 10 >= 2 && vf.f % 10 <= 4 && (vf.f % 100 < 12 || vf.f % 100 > 14)) { return PLURAL_CATEGORY.FEW; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/Markups-4.0.0.tar.gz/Markups-4.0.0/docs/standard_markups.rst | ================
Built-in markups
================
These markups are available by default:
Markdown markup
===============
Markdown_ markup uses Python-Markdown_ as a backend (version 2.6 or later
is required).
There are several ways to enable `Python-Markdown extensions`_.
* List extensions in a file named :file:`markdown-extensions.yaml` or
:file:`markdown-extensions.txt` in the :ref:`configuration directory
<configuration-directory>`. The extensions will be automatically applied
to all documents.
* If :file:`markdown-extensions.yaml` or :file:`markdown-extensions.txt`
  is placed into the working directory, all documents in that directory will
  get the extensions that are listed in that file.
* If the first line of a document contains ":samp:`Required extensions:
  {ext1 ext2 ...}`", that list will be applied to that document.
* Finally, one can programmatically pass a list of extension names to the
  :class:`markups.MarkdownMarkup` constructor (see the sketch below).
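A minimal sketch of the programmatic route is shown below; the ``extensions``
keyword argument and the ``convert``/``get_document_body`` calls are assumed
here from the library's public API rather than taken from this page:
.. code-block:: python
   import markups
   # Pass extension names directly instead of relying on a configuration file.
   markup = markups.MarkdownMarkup(extensions=["sane_lists", "toc"])
   converted = markup.convert("# Title\n\nSome *text* here.")
   print(converted.get_document_body())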
The YAML file should be a list of extensions, possibly with configuration
options, for example:
.. code-block:: yaml
- smarty:
substitutions:
left-single-quote: "‚"
right-single-quote: "‘"
smart_dashes: False
- toc:
permalink: True
separator: "_"
toc_depth: 3
- sane_lists
Or using a JSON-like syntax:
.. code-block:: yaml
["smarty", "sane_lists"]
YAML support works only when the PyYAML_ module is installed.
The txt file is a simple list of extensions, separated by newlines. Lines
starting with ``#`` are treated as comments and ignored. It is possible to
specify string options in brackets, for example::
toc(title=Contents)
sane_lists
The same syntax to specify options works in the ``Required extensions``
line. You can put it into a comment to make it invisible in the output::
<!-- Required extensions: toc(title=Contents) sane_lists -->
The `Math Markdown extension`_ is enabled by default. This extension
supports a syntax for LaTeX-style math formulas (powered by MathJax_).
The delimiters are:
================ ===============
Inline math Standalone math
================ ===============
``$...$`` [#f1]_ ``$$...$$``
``\(...\)`` ``\[...\]``
================ ===============
.. [#f1] To enable single-dollar-sign delimiter, one should add
``mdx_math(enable_dollar_delimiter=1)`` to the extensions list.
The `Python-Markdown Extra`_ set of extensions is enabled by default.
To disable it, one can enable virtual ``remove_extra`` extension
(which also completely disables LaTeX formulas support).
The default file extension associated with Markdown markup is ``.mkd``,
though many other extensions (including ``.md`` and ``.markdown``) are
supported as well.
.. _Markdown: https://daringfireball.net/projects/markdown/
.. _Python-Markdown: https://python-markdown.github.io/
.. _MathJax: https://www.mathjax.org/
.. _`Python-Markdown extensions`: https://python-markdown.github.io/extensions/
.. _PyYAML: https://pypi.org/project/PyYAML/
.. _`Math Markdown extension`: https://github.com/mitya57/python-markdown-math
.. _`Python-Markdown Extra`: https://python-markdown.github.io/extensions/extra/
.. autoclass:: markups.MarkdownMarkup
reStructuredText markup
========================
This markup provides support for reStructuredText_ language (the language
this documentation is written in). It uses Docutils_ Python module.
The file extension associated with reStructuredText markup is ``.rst``.
.. _reStructuredText: https://docutils.sourceforge.io/rst.html
.. _Docutils: https://docutils.sourceforge.io/
.. autoclass:: markups.ReStructuredTextMarkup
Textile markup
==============
This markup provides support for Textile_ language. It uses python-textile_
module.
The file extension associated with Textile markup is ``.textile``.
.. _Textile: https://en.wikipedia.org/wiki/Textile_(markup_language)
.. _python-textile: https://github.com/textile/python-textile
.. autoclass:: markups.TextileMarkup
AsciiDoc markup
===============
This markup provides support for AsciiDoc_ language. It uses asciidoc-py_
module.
The file extension associated with AsciiDoc markup is ``.adoc``.
.. _AsciiDoc: https://asciidoc.org
.. _asciidoc-py: https://asciidoc-py.github.io
.. autoclass:: markups.AsciiDocMarkup
| PypiClean |
/CLinters-1.3.5.tar.gz/CLinters-1.3.5/hooks/utils.py | """fns for clang-format, clang-tidy, oclint"""
import difflib
import os
import re
import shutil
import subprocess as sp
import sys
from typing import List
class Command:
"""Super class that all commands inherit"""
def __init__(self, command: str, look_behind: str, args: List[str]):
self.args = args
self.look_behind = look_behind
self.command = command
# Will be [] if not run using pre-commit or if there are no committed files
self.files = self.get_added_files()
self.edit_in_place = False
self.stdout = b""
self.stderr = b""
self.returncode = 0
def check_installed(self):
"""Check if command is installed and fail exit if not."""
path = shutil.which(self.command)
if path is None:
website = "https://github.com/pocc/pre-commit-hooks#example-usage"
problem = self.command + " not found"
details = """Make sure {} is installed and on your PATH.\nFor more info: {}""".format(
self.command, website
) # noqa: E501
self.raise_error(problem, details)
def get_added_files(self):
"""Find added files using git."""
added_files = sys.argv[1:] # 1: don't include the hook file
# cfg files are used by uncrustify and won't be source files
added_files = [f for f in added_files if os.path.exists(f) and not f.endswith(".cfg")]
# Taken from https://github.com/pre-commit/pre-commit-hooks/blob/master/pre_commit_hooks/util.py
# If no files are provided and if this is used as a command,
# Find files the same way pre-commit does.
if len(added_files) == 0:
cmd = ["git", "diff", "--staged", "--name-only", "--diff-filter=A"]
sp_child = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
if sp_child.stderr or sp_child.returncode != 0:
self.raise_error(
"Problem determining which files are being committed using git.", sp_child.stderr.decode()
)
added_files = sp_child.stdout.decode().splitlines()
return added_files
def parse_args(self, args: List[str]):
"""Parse the args into usable variables"""
self.args = list(args[1:]) # don't include calling function
for arg in args:
if arg in self.files and not arg.startswith("-"):
self.args.remove(arg)
if arg.startswith("--version"):
# If --version is passed in as 2 arguments, where the second is version
if arg == "--version" and args.index(arg) != len(args) - 1:
expected_version = args[args.index(arg) + 1]
# Expected split of --version=8.0.0 or --version 8.0.0 with as many spaces as needed
else:
expected_version = arg.replace(" ", "").replace("=", "").replace("--version", "")
actual_version = self.get_version_str()
self.assert_version(actual_version, expected_version)
# All commands other than clang-tidy or oclint require files, --version ok
is_cmd_clang_analyzer = self.command == "clang-tidy" or self.command == "oclint"
has_args = self.files or self.args or "version" in self.args
if not has_args and not is_cmd_clang_analyzer:
self.raise_error("Missing arguments", "No file arguments found and no files are pending commit.")
def add_if_missing(self, new_args: List[str]):
"""Add a default if it's missing from the command. This library
exists to force checking, so prefer those options.
len(new_args) should be 1, or 2 for options like --key=value
If first arg is missing, add new_args to command's args
Do not change an option - in those cases return."""
new_arg_key = new_args[0].split("=")[0]
for arg in self.args:
existing_arg_key = arg.split("=")[0]
if existing_arg_key == new_arg_key:
return
self.args += new_args
def assert_version(self, actual_ver: str, expected_ver: str):
"""--version hook arg enforces specific versions of tools."""
expected_len = len(expected_ver) # allows for fuzzy versions
if expected_ver not in actual_ver[:expected_len]:
problem = "Version of " + self.command + " is wrong"
details = """Expected version: {}
Found version: {}
Edit your pre-commit config or use a different version of {}.""".format(
expected_ver, actual_ver, self.command
)
self.raise_error(problem, details)
# If the version is correct, exit normally
sys.exit(0)
def raise_error(self, problem: str, details: str):
"""Raise a formatted error."""
format_list = [self.command, problem, details]
stderr_str = """Problem with {}: {}\n{}\n""".format(*format_list)
# All strings are generated by this program, so decode should be safe
self.stderr = stderr_str.encode()
self.returncode = 1
sys.stderr.buffer.write(self.stderr)
sys.exit(self.returncode)
def get_version_str(self):
"""Get the version string like 8.0.0 for a given command."""
args = [self.command, "--version"]
sp_child = sp.run(args, stdout=sp.PIPE, stderr=sp.PIPE)
version_str = str(sp_child.stdout, encoding="utf-8")
# After version like `8.0.0` is expected to be '\n' or ' '
regex = self.look_behind + r"((?:\d+\.)+[\d+_\+\-a-z]+)"
search = re.search(regex, version_str)
if not search:
details = """The version format for this command has changed.
Create an issue at github.com/pocc/pre-commit-hooks."""
self.raise_error("getting version", details)
version = search.group(1)
return version
class StaticAnalyzerCmd(Command):
"""Commmands that analyze code and are not formatters.s"""
def __init__(self, command: str, look_behind: str, args: List[str]):
super().__init__(command, look_behind, args)
def run_command(self, args: List[str]):
"""Run the command and check for errors. Args includes options and filepaths"""
args = [self.command, *args]
sp_child = sp.run(args, stdout=sp.PIPE, stderr=sp.PIPE)
self.stdout += sp_child.stdout
self.stderr += sp_child.stderr
self.returncode = sp_child.returncode
def exit_on_error(self):
if self.returncode != 0:
sys.stderr.buffer.write(self.stdout + self.stderr)
sys.exit(self.returncode)
class FormatterCmd(Command):
"""Commands that format code: clang-format, uncrustify"""
def __init__(self, command: str, look_behind: str, args: List[str]):
super().__init__(command, look_behind, args)
self.file_flag = None
def set_diff_flag(self):
self.no_diff_flag = "--no-diff" in self.args
if self.no_diff_flag:
self.args.remove("--no-diff")
def compare_to_formatted(self, filename_str: str) -> None:
"""Compare the expected formatted output to file contents."""
# This string encode is from argparse, so we should be able to trust it.
filename = filename_str.encode()
actual = self.get_filelines(filename_str)
expected = self.get_formatted_lines(filename_str)
if self.edit_in_place:
# If edit in place is used, the formatter will fix in place with
# no stdout. So compare the before/after file for hook pass/fail
expected = self.get_filelines(filename_str)
diff = list(
difflib.diff_bytes(difflib.unified_diff, actual, expected, fromfile=b"original", tofile=b"formatted")
)
if len(diff) > 0:
if not self.no_diff_flag:
header = filename + b"\n" + 20 * b"=" + b"\n"
self.stderr += header + b"\n".join(diff) + b"\n"
self.returncode = 1
def get_filename_opts(self, filename: str):
"""uncrustify, to get stdout like clang-format, requires -f flag"""
if self.file_flag and not self.edit_in_place:
return [self.file_flag, filename]
return [filename]
def get_formatted_lines(self, filename: str) -> List[bytes]:
"""Get the expected output for a command applied to a file."""
filename_opts = self.get_filename_opts(filename)
args = [self.command, *self.args, *filename_opts]
child = sp.run(args, stdout=sp.PIPE, stderr=sp.PIPE)
if len(child.stderr) > 0 or child.returncode != 0:
problem = f"Unexpected Stderr/return code received when analyzing {filename}.\nArgs: {args}"
self.raise_error(problem, child.stdout.decode() + child.stderr.decode())
if child.stdout == b"":
return []
return child.stdout.split(b"\x0a")
def get_filelines(self, filename: str):
"""Get the lines in a file."""
if not os.path.exists(filename):
self.raise_error(f"File {filename} not found", "Check your path to the file.")
with open(filename, "rb") as f:
filetext = f.read()
return filetext.split(b"\x0a") | PypiClean |
/MJOLNIRGui-0.9.10.tar.gz/MJOLNIRGui-0.9.10/src/main/python/Views/MolecularCalculationManager.py | import sys
sys.path.append('..')
try:
from MJOLNIRGui.src.main.python._tools import ProgressBarDecoratorArguments, loadUI
import MJOLNIRGui.src.main.python._tools as _GUItools
except ImportError:
from _tools import ProgressBarDecoratorArguments, loadUI
import _tools as _GUItools
from os import path
from PyQt5 import QtWidgets,uic,QtGui,QtCore
import numpy as np
from MJOLNIR._tools import calculateMolarMass,symbols,_relative_atomic_masses
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
# Handles all functionality related to the MolecularCalculationManager.
masses = _relative_atomic_masses.split(' ')
class ElementModel(QtCore.QAbstractTableModel):
def __init__(self, *args, elements=None, view=None, **kwargs):
super(ElementModel, self).__init__(*args, **kwargs)
self.view = view
self.reset(elements)
def data(self, index, role):
if role == Qt.DisplayRole:
column = index.column()
if column == 0:
return self.names[index.row()]
elif column == 1:
return self.numbers[index.row()]
elif column == 2:
return self.masses[index.row()]
def getData(self,*args,**kwargs):
return self.data(*args,**kwargs)
def rowCount(self, index):
return len(self.names)
def columnCount(self,index):
return 3
def reset(self,elements):
if elements is None:
elements = {}
self.elements = elements
if len(elements)==0:
self.names = []
self.numbers = []
self.masses = []
else:
self.names = list(elements.keys())
self.numbers = list(elements.values())
self.masses = [masses[list(symbols).index(name)] for name in self.names]
self.layoutChanged.emit()
self.dataChanged.emit(self.index(0,0),self.index(len(self.names),2))
def headerData(self, section, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return ["Element", "Amount", "Mass [g/mol]"][section]
def flags(self,index):
return QtCore.Qt.ItemIsSelectable
def checkValidSampleFormula(self,text,sampleMassL):
try:
mass,elements = calculateMolarMass(text,returnElements=True)
self.setStyleSheet("color: black;")
except AttributeError as e:
self.setStyleSheet("color: red;")
mass = 0
elements = {}
if len(text)!=0 and mass==0: # A string was provided but no mass found, typically error in input
self.setStyleSheet("color: red;")
elif len(text)==0:
mass,elements = calculateMolarMass(self.placeholderText(),returnElements=True)
sampleMassL.setText(sampleMassL.textformat.format(mass))
self.parent().elementModel.reset(elements)
MolecularCalculationManagerBase, MolecularCalculationManagerForm = loadUI('MolecularCalculationManager.ui')
# All of this connects the buttons and their functions to the main window.
class MolecularCalculationManager(MolecularCalculationManagerBase, MolecularCalculationManagerForm):
def __init__(self, parent=None, guiWindow=None):
super(MolecularCalculationManager, self).__init__(parent)
self.setupUi(self)
self.guiWindow = guiWindow
self.setWindowIcon(QtGui.QIcon(self.guiWindow.AppContext.get_resource('Icons/Own/balance.png')))
self.elementModel = ElementModel()
self.initMolecularCalculationManager()
def initMolecularCalculationManager(self):
self.setup()
def setup(self):
sampleFormLE = self.MolecularCalculationManager_sampleFormula_lineEdit
sampleMassL = self.MolecularCalculationManager_sampleMolarMass_label
sampleMassL.textformat = "{:.6f} g/mol"
# Connect the check validater when text is edited
sampleFormLE.textChanged.connect(lambda text:checkValidSampleFormula(sampleFormLE,text,sampleMassL))
self.MolecularCalculationManager_molarMass_tableView.setModel(self.elementModel)
# Call textChanged with empty text to update list view
sampleFormLE.textChanged.emit('') | PypiClean |
/ORCHISM.1.0.tar.gz/ORCHISM.1.0/code/TOOLS/bfgs.py | #
# ------------------------------------------------------------------------------
# This source code is governed by the CeCILL licence
#
#*******************************************************************************
import os
from orchis_config import Config
from time import localtime, strftime
import numpy as np
#from TOOLS import io
import io
# ==============================================================================
# Write the input NetCDF file for BFGS
#
# ------------------------------------------------------------------------------
def write_bfgs_in(Opti, logfile):
#import numpy as np
# -- Define the dimensions of the NetCDF file
dims = [{'name': 'parameter', 'size':1}]
dims.append({'name': 'n', 'size':Opti.n})
dims.append({'name': 'dim_wa', 'size': Opti.BFGS['size_wa']})
dims.append({'name': 'dim_iwa', 'size': Opti.BFGS['size_iwa']})
dims.append({'name': 'dim_isave', 'size': Opti.BFGS['size_isave']})
dims.append({'name': 'dim_dsave', 'size': Opti.BFGS['size_dsave']})
dims.append({'name': 'dim_lsave', 'size': Opti.BFGS['size_lsave']})
# -- Define the global attributes of the NetCDF file
gattr = [{'name': 'date', 'value':strftime("%a, %d %b %Y %H:%M:%S +0000", localtime())}]
# -- Define BFGS parameters
paraBFGS = {}
#- Constants
paraBFGS['factr'] = {'datatype': 'd', 'ndims':1, 'dim_name':('parameter',), \
'dim_size': 1, 'value': Opti.BFGS['factr'], \
'attr_name':['longname'], 'attr_value':['Tolerance in the termination test']}
paraBFGS['pgtol'] = {'datatype': 'd', 'ndims':1, 'dim_name':('parameter',), \
'dim_size': 1, 'value': Opti.BFGS['pgtol'], \
'attr_name':['longname'], 'attr_value':['Tolerance in the termination test']}
paraBFGS['m'] = {'datatype':'i','ndims':1, 'dim_name':('parameter',), \
'dim_size': 1, 'value': Opti.BFGS['m'], \
'attr_name':['longname'], 'attr_value':['Number of corrections in the memory matrix']}
paraBFGS['iprint'] = {'datatype': 'i', 'ndims':1, 'dim_name':('parameter',), \
'dim_size': 1, 'value': Opti.BFGS['iprint'], \
'attr_name':['longname'], 'attr_value':['Control the frequency and type of outputs']}
# the value for task is written as an attribute as it is a string
paraBFGS['task'] = {'datatype':'c', 'ndims':1, 'dim_name':('parameter',), \
'dim_size': 1, 'value': '-', \
'attr_name':['longname','value'], 'attr_value':['Define the BFGS task to perform',Opti.BFGS['task']]}
#- Optimization parameters
# Parameter values
paraBFGS['x'] = {'datatype': 'd', 'ndims':1, 'dim_name':('n',), \
'dim_size': Opti.n, 'value': Opti.chi['all'], \
'attr_name':['longname'], 'attr_value':['Values of the optimization parameters']}
# lower bounds
paraBFGS['l'] = {'datatype': 'd', 'ndims':1, 'dim_name':('n',), \
'dim_size': len(Opti.chi_lb['all']), 'value': Opti.chi_lb['all'], \
'attr_name':['longname'], 'attr_value':['Lower bound of the optimization parameters']}
# upper bounds
paraBFGS['u'] = {'datatype': 'd', 'ndims':1, 'dim_name':('n',), \
'dim_size': len(Opti.chi_ub['all']), 'value': Opti.chi_ub['all'], \
'attr_name':['longname'], 'attr_value':['Upper bound of the optimization parameters']}
# Type of bounds
paraBFGS['nbd'] = {'datatype': 'i', 'ndims':1, 'dim_name':('n',), \
'dim_size': len(Opti.BFGS['nbd']), 'value': Opti.BFGS['nbd'], \
'attr_name':['longname'], 'attr_value':['Type of bounds']}
# Misfit function
paraBFGS['f'] = {'datatype': 'd', 'ndims':1, 'dim_name':('parameter',), \
'dim_size': 1, 'value': Opti.MF, \
'attr_name':['longname'], 'attr_value':['Value of the misfit function']}
# Misfit function
paraBFGS['g'] = {'datatype': 'd', 'ndims':1, 'dim_name':('n',), \
'dim_size': Opti.n, 'value': Opti.gradMF, \
'attr_name':['longname'], 'attr_value':['Value of the gradient of misfit function']}
#- Some BFGS working variables
paraBFGS['wa'] = {'datatype': 'd', 'ndims':1, 'dim_name':('dim_wa',), \
'dim_size': Opti.BFGS['size_wa'], 'value': Opti.BFGS['wa'], \
'attr_name':['longname'], 'attr_value':['BFGS workspace']}
paraBFGS['iwa'] = {'datatype': 'i', 'ndims':1, 'dim_name':('dim_iwa',), \
'dim_size': Opti.BFGS['size_iwa'], 'value': Opti.BFGS['iwa'], \
'attr_name':['longname'], 'attr_value':['BFGS workspace']}
paraBFGS['isave'] = {'datatype': 'i', 'ndims':1, 'dim_name':('dim_isave',), \
'dim_size': Opti.BFGS['size_isave'], 'value': Opti.BFGS['isave'], \
'attr_name':['longname'], 'attr_value':['Some BFGS info on the optimization']}
paraBFGS['dsave'] = {'datatype': 'd', 'ndims':1, 'dim_name':('dim_dsave',), \
'dim_size': Opti.BFGS['size_dsave'], 'value': Opti.BFGS['dsave'], \
'attr_name':['longname'], 'attr_value':['Some BFGS info on the optimization']}
paraBFGS['lsave'] = {'datatype': 'i', 'ndims':1, 'dim_name':('dim_lsave',), \
'dim_size': Opti.BFGS['size_lsave'], 'value': Opti.BFGS['lsave'], \
'attr_name':['longname'], 'attr_value':['Some BFGS info on the bounds at exit']}
# the value for csave is written as an attribute as it is a string
paraBFGS['csave'] = {'datatype':'c', 'ndims':1, 'dim_name':('parameter',), \
'dim_size': 1, 'value': '-', \
'attr_name':['longname','value'], 'attr_value':['BFGS character working array',Opti.BFGS['csave']]}
# -- Write the file
# global attributes + dimensions
    #print 'BFGS: writing attributes and dimensions'
io.writenc(os.path.join(Config.PATH_MAIN_TMP,Opti.BFGS['input']),gattr = gattr, dims = dims)
# variables
var_order = ['factr', 'pgtol', 'm', 'iprint', 'task', \
'x', 'l', 'u', 'nbd', 'f', 'g', \
'wa', 'iwa','isave', 'dsave', 'lsave', 'csave' ]
#io.writenc(BFGS['input'], vars = paraBFGS , append = 1, var_order = var_order)
for name in var_order:
        print 'Writing', name
io.writenc(os.path.join(Config.PATH_MAIN_TMP,Opti.BFGS['input']), vars = {name:paraBFGS[name]} , append = 1)
# END write_bfgs_in
# ==============================================================================
# ==============================================================================
# Read the output NetCDF file from BFGS
#
# ------------------------------------------------------------------------------
def read_bfgs_out(opti_varname, Opti, logfile):
var_order = ['factr', 'pgtol', 'm', 'iprint', 'task', \
'x', 'l', 'u', 'nbd', 'f', 'g', \
'wa', 'iwa','isave', 'dsave', 'lsave', 'csave' ]
# - Read the NetCDF File
[vars, gattr, dims] = io.readnc(os.path.join(Config.PATH_MAIN_TMP,Opti.BFGS['output']))
# - Modify the BFGS structure
for name in var_order:
if name == 'task' or name == 'csave':
ind = vars[name]['attr_name'].index('value')
Opti.BFGS[name] = vars[name]['attr_value'][ind]
else:
Opti.BFGS[name] = vars[name]['value']
# - Modify the Opti structure
ind = [-1]
#print 'BFGS READ'
for i in range(len(opti_varname)):
name = opti_varname[i]
n = Opti.xmask[name].count()
if n>0:
ind = np.arange(ind[len(ind)-1]+1, ind[len(ind)-1]+n+0.1)
ind = ind.astype(np.int32).tolist()
idxOK = np.ma.masked_array(range(len(Opti.xmask[name])),Opti.xmask[name].mask).compressed()
np.put(Opti.chi[name], idxOK, np.take(vars['x']['value'], ind) )
else:
Opti.chi[name] = np.array(Config.missval[0], np.float64)
# - Task history
Opti.BFGS['task_hist'] = [Opti.BFGS['task_hist'][0] + ';'+ Opti.BFGS['task']]
# END read_bfgs_out
# ==============================================================================
# ==============================================================================
# Write informations
#
# ------------------------------------------------------------------------------
def write_infos(Opti, logfile, case = None ):
logfile.write('\n')
if case == 'input':
print ' #### BFGS : inputs ###'
logfile.write(' #### BFGS : inputs ###\n')
if case == 'output':
print ' #### BFGS : outputs ###'
logfile.write(' #### BFGS : outputs ###\n')
x = []
ub = []
lb = []
for name in Opti.name :
x.extend(Opti.chi[name].ravel().tolist())
ub.extend(Opti.chi_ub[name].ravel().tolist())
lb.extend(Opti.chi_lb[name].ravel().tolist())
print ' + task : '+Opti.BFGS['task']
print ' + x : '+str(x)
print ' + ub : '+str(ub)
print ' + lb : '+str(lb)
print ' + f : '+str(Opti.MF)
print ' + g : '+str(Opti.gradMF)
print ' + projected gradient : '+str(Opti.BFGS['dsave'][12])
print ' + previous f : '+str(Opti.BFGS['dsave'][1])
print ' + maximum relative step length imposed in line search : ' + str(Opti.BFGS['dsave'][11])
print ' + relative step length imposed in line search : ' + str(Opti.BFGS['dsave'][13])
logfile.write(' + task : '+Opti.BFGS['task'] +'\n')
logfile.write(' + x : '+str(x) +'\n')
logfile.write(' + ub : '+str(ub) +'\n')
logfile.write(' + lb : '+str(lb) +'\n')
logfile.write(' + f : '+str(Opti.MF) +'\n')
logfile.write(' + g : '+str(Opti.gradMF) +'\n')
logfile.write(' + projected gradient : '+str(Opti.BFGS['dsave'][12]) +'\n')
logfile.write(' + previous f : '+str(Opti.BFGS['dsave'][1]) +'\n')
logfile.write(' + maximum relative step length imposed in line search : ' + str(Opti.BFGS['dsave'][11]) + '\n')
logfile.write(' + relative step length imposed in line search : ' + str(Opti.BFGS['dsave'][13]) +'\n')
if case == 'output':
print
print
print ' ## BFGS - dsave(12) : maximum relative step length imposed in line search :'
print ' ',Opti.BFGS['dsave'][11]
print ' ## BFGS - dsave(14) : relative step length imposed in line search :'
print ' ',Opti.BFGS['dsave'][13]
print
print
logfile.write( '\n ## BFGS - dsave(12) : maximum relative step length imposed in line search :'+str(Opti.BFGS['dsave'][11])+'\n')
logfile.write( '\n ## BFGS - dsave(14) : relative step length imposed in line search :'+str(Opti.BFGS['dsave'][13])+'\n')
# END write_infos
# ==============================================================================
# --- Modify the Opti class containing informations to pass to the optimization
# --- algorithm
#Opti.BFGS = Opti.BFGS
## nparas = 0
## for i in range(len(vars['opti_varname'])):
## name = vars['opti_varname'][i]
## nparas = nparas+len(vars[name]['value'].ravel())
## Opti.lb = np.zeros(nparas, np.float64)
## Opti.ub = np.zeros(nparas, np.float64)
## ind = [-1]
## for i in range(len(vars['opti_varname'])):
## name = vars['opti_varname'][i]
## lb = vars[name]['min']
## ub = vars[name]['max']
## n = len(vars[name]['value'].ravel())
## ind = np.arange(ind[len(ind)-1]+1, ind[len(ind)-1]+n+0.1).tolist()
## np.put(Opti.lb,ind, np.resize(lb,(n,)))
## np.put(Opti.ub,ind, np.resize(ub,(n,)))
# END initopti
# ============================================================================== | PypiClean |
/ImageD11-1.9.9.tar.gz/ImageD11-1.9.9/webgui/data.js | var vertices = [0.024007, -0.170263, -0.025806,
-0.049983, -0.047993, 0.160247,
0.050915, 0.048949, -0.160937,
-0.024233, 0.172786, 0.024761,
-0.075482, 0.123279, 0.184939,
0.232187, -0.043675, -0.026195,
0.158404, 0.078971, 0.158919,
-0.157509, -0.078819, -0.160103,
-0.233164, 0.044241, 0.024861,
-0.026661, -0.219245, 0.134534,
0.026592, 0.221117, -0.135283,
0.256379, -0.214613, -0.051693,
0.107879, 0.030082, 0.319389,
-0.133550, -0.249565, -0.185575,
-0.283923, -0.004976, 0.184885,
0.134382, 0.250848, 0.184280,
-0.106315, -0.031037, -0.320499,
-0.107435, -0.030175, -0.320259,
-0.257232, 0.215670, 0.050381,
0.048285, -0.342039, -0.051398,
-0.100651, -0.096719, 0.320515,
-0.048034, 0.343359, 0.049960,
0.101321, 0.097365, -0.320899,
0.130197, -0.140750, 0.294647,
0.205603, -0.263564, 0.108876,
-0.184402, -0.298549, -0.025187,
-0.259052, -0.176352, 0.160794,
0.259097, 0.175927, -0.161482,
0.184526, 0.299220, 0.024101,
-0.131180, 0.141730, -0.295037,
-0.206157, 0.264480, -0.109624,
0.098758, -0.293228, -0.211600,
-0.126751, 0.074400, 0.345232,
0.124936, -0.073109, -0.346223,
-0.099225, 0.295076, 0.210101,
-0.002909, -0.391139, 0.109218,
-0.077086, -0.268221, 0.295210,
0.077372, 0.268864, -0.295346,
0.002403, 0.391883, -0.109985,
0.307151, -0.166013, -0.211841,
-0.082860, -0.200892, -0.345823,
-0.308053, 0.166804, 0.210349,
0.083642, 0.202586, 0.344977,
-0.389259, -0.052420, -0.133805,
-0.388076, -0.060561, -0.133805,
-0.390571, -0.041531, -0.133805,
-0.388490, -0.057850, -0.133805,
-0.390272, -0.044257, -0.133805,
-0.389953, -0.046980, -0.133805,
-0.392704, -0.007597, -0.133805,
0.334478, 0.212857, 0.217639,
0.149356, -0.244450, -0.371666,
-0.150327, 0.246473, 0.370565,
0.280854, -0.385948, -0.076960,
-0.109500, -0.421340, -0.211108,
-0.056491, 0.019469, -0.480166,
-0.280650, 0.386888, 0.075472,
0.055160, -0.019249, 0.480408,
0.110255, 0.422241, 0.209728,
-0.334111, -0.053676, 0.347148,
-0.160146, -0.470155, -0.050680,
-0.080169, 0.190231, -0.454629,
0.229915, -0.435386, 0.083355,
0.309372, 0.224264, -0.321640,
0.079725, -0.189226, 0.455360,
0.160130, 0.470671, 0.049136,
-0.229891, 0.435432, -0.084506,
-0.310102, -0.224875, 0.321696,
0.053416, 0.439948, -0.270165,
-0.053133, -0.440155, 0.270035,
0.151760, 0.145706, -0.480579,
0.072614, -0.513990, -0.076399,
-0.071842, 0.514658, 0.075120,
-0.151532, -0.145479, 0.481467,
-0.032055, -0.151870, -0.505564,
-0.331674, 0.338402, 0.235309,
-0.058865, -0.372345, -0.371296,
0.331433, -0.337512, -0.237263,
-0.359313, 0.117820, 0.370464,
0.358173, -0.117083, -0.371907,
0.033009, 0.154289, 0.505474,
0.059291, 0.374234, 0.369948,
0.176000, -0.024313, -0.505833,
0.122787, -0.465007, -0.237055,
-0.177594, 0.025001, 0.505610,
-0.123253, 0.466248, 0.235247,
0.490609, 0.131904, -0.187796,
-0.288269, 0.063157, -0.454323,
0.342849, 0.377647, 0.183276,
-0.438243, 0.307945, -0.084138,
0.438925, -0.307826, 0.083221,
0.288451, -0.063094, 0.455255,
-0.342321, -0.378203, -0.185023,
-0.492822, -0.132567, 0.187329,
-0.515921, 0.039010, 0.210108,
0.266170, 0.108971, 0.479097,
0.292627, 0.329472, 0.343634,
-0.265342, -0.109231, -0.479636,
-0.489488, 0.259222, 0.075807,
-0.291577, -0.329184, -0.345316,
0.489561, -0.259063, -0.077075,
0.516242, -0.038585, -0.211848,
-0.083472, 0.091661, -0.553992,
0.128081, 0.316839, -0.455288,
-0.020995, 0.562776, -0.084790,
0.021928, -0.563222, 0.083979,
-0.128015, -0.317108, 0.456212,
-0.103913, 0.361175, -0.429286,
0.285778, 0.395544, -0.296016,
0.211446, 0.518478, -0.110693,
-0.179095, 0.483961, -0.244187,
0.179252, -0.484355, 0.244114,
0.104432, -0.361127, 0.430174,
-0.211533, -0.519260, 0.110154,
-0.285668, -0.396618, 0.296364,
0.466927, 0.303322, -0.162071,
0.392641, 0.426241, 0.023285,
-0.312423, 0.233928, -0.429061,
-0.387398, 0.356333, -0.243841,
0.387900, -0.356899, 0.243800,
0.312718, -0.234063, 0.429889,
-0.393415, -0.426821, -0.024415,
-0.468576, -0.303951, 0.161820,
0.200079, -0.195483, -0.531539,
0.173799, -0.416147, -0.397245,
-0.174057, 0.417996, 0.395603,
-0.201614, 0.197635, 0.531032,
-0.007981, -0.323414, -0.531104,
0.382262, -0.288504, -0.397647,
-0.383283, 0.289753, 0.395785,
0.008549, 0.325691, 0.530698,
0.396191, 0.413111, 0.248832,
-0.540494, 0.209906, 0.235650,
-0.240803, -0.280223, -0.505205,
0.242034, 0.281057, 0.504453,
0.540448, -0.209434, -0.237674,
0.382953, 0.101189, -0.506571,
-0.005419, 0.067311, -0.639797,
0.381770, 0.103816, -0.507477,
-0.304962, 0.558103, 0.100838,
0.085840, 0.593528, 0.234601,
-0.085786, -0.593108, -0.236604,
0.305447, -0.558007, -0.102571,
0.004449, -0.066166, 0.641411,
-0.384911, -0.102407, 0.508306,
0.103831, 0.488148, -0.429820,
0.029309, 0.610955, -0.244758,
0.359477, 0.272750, -0.481040,
-0.029343, 0.238587, -0.614279,
-0.104265, -0.488900, 0.430870,
0.137204, 0.641444, 0.074422,
-0.253945, 0.606496, -0.059083,
-0.029225, -0.612379, 0.245125,
-0.135693, -0.642230, -0.076243,
0.253810, -0.607416, 0.058212,
0.029339, -0.238036, 0.616327,
-0.361009, -0.273757, 0.482706,
-0.336081, 0.405079, -0.403448,
0.442929, 0.474521, -0.136774,
0.337151, -0.405855, 0.404562,
-0.444134, -0.476222, 0.136293,
0.540809, 0.180363, -0.347363,
-0.237577, 0.111912, -0.613780,
-0.462096, 0.479262, -0.058755,
0.318486, 0.549406, 0.208732,
-0.318546, -0.549410, -0.210575,
0.463445, -0.479218, 0.057990,
0.237666, -0.111734, 0.615997,
-0.543448, -0.181619, 0.348273,
-0.672859, -0.039803, 0.050172,
0.524523, 0.285150, 0.317353,
-0.646263, 0.180419, -0.083945,
-0.214612, -0.060458, -0.639614,
-0.498657, -0.065107, -0.453792,
-0.524772, -0.285825, -0.319164,
-0.567213, -0.009845, 0.370413,
-0.513381, 0.430500, 0.100986,
0.268487, 0.500999, 0.369173,
0.647941, -0.180752, 0.083327,
-0.267667, -0.500440, -0.371100,
0.513766, -0.430972, -0.102593,
-0.520086, 0.106597, -0.428665,
0.648599, 0.210727, -0.028431,
-0.595216, 0.228858, -0.243635,
0.574814, 0.333286, 0.157016,
0.597062, -0.229522, 0.243866,
-0.575984, -0.334236, -0.158746,
0.522112, -0.107032, 0.429824,
-0.651239, -0.211446, 0.027333,
-0.261333, 0.282561, -0.588431,
0.516862, 0.351544, -0.322186,
-0.410601, 0.527583, -0.218188,
0.368828, 0.597349, 0.048367,
0.224658, -0.367176, -0.556946,
-0.225391, 0.369393, 0.556149,
0.412140, -0.528802, 0.218738,
0.261796, -0.282886, 0.590654,
-0.369885, -0.598544, -0.050089,
-0.519378, -0.353304, 0.322682,
-0.697176, 0.131316, 0.075739,
0.474696, 0.236338, 0.477919,
-0.474024, -0.236977, -0.479420,
0.698871, -0.131251, -0.077201,
-0.053117, 0.409510, -0.588803,
0.335792, 0.443870, -0.455533,
-0.202187, 0.654924, -0.218875,
0.187327, 0.689629, -0.085277,
0.451264, -0.364665, -0.427069,
0.203876, -0.656207, 0.218851,
-0.187588, -0.691323, 0.084458,
0.053565, -0.410118, 0.591298,
-0.337076, -0.445637, 0.457145,
-0.189947, -0.231452, -0.665231,
-0.591245, 0.161792, 0.395802,
-0.564233, 0.382186, 0.261046,
-0.216796, -0.451667, -0.530958,
0.217920, 0.452781, 0.529649,
0.564550, -0.381617, -0.263232,
0.191775, 0.232630, 0.665428,
0.591209, -0.161016, -0.397827,
0.178730, 0.365204, -0.614580,
-0.044319, 0.733454, -0.059270,
0.045412, -0.735216, 0.058728,
-0.178837, -0.366205, 0.617485,
0.042673, -0.274337, -0.690956,
0.016060, -0.494999, -0.556700,
0.433015, -0.239461, -0.557403,
-0.406828, 0.461774, 0.420952,
0.406319, -0.460384, -0.423056,
-0.434290, 0.240996, 0.556393,
-0.015377, 0.497324, 0.555785,
-0.042498, 0.277288, 0.691477,
0.250791, -0.146568, -0.691246,
-0.197999, 0.589039, 0.420888,
0.197904, -0.587853, -0.422844,
-0.252665, 0.148665, 0.691703,
-0.251987, 0.149511, 0.692066,
-0.445259, -0.014723, -0.613787,
-0.670517, 0.351212, -0.059097,
0.501084, 0.456742, 0.342479,
-0.448234, -0.016580, -0.613752,
-0.500437, -0.457607, -0.344711,
0.672531, -0.351891, 0.057636,
0.698546, 0.258263, -0.188006,
-0.618720, 0.399801, -0.218374,
-0.469013, 0.155398, -0.588456,
0.550693, 0.504369, 0.182321,
-0.552040, -0.505867, -0.184602,
0.471570, -0.155814, 0.590416,
0.622839, -0.400565, 0.218406,
-0.702677, -0.259809, 0.188125,
-0.285065, 0.453209, -0.562958,
0.080566, 0.658831, -0.404424,
0.418778, 0.645318, -0.111252,
0.492723, 0.522953, -0.296534,
-0.359772, 0.575738, -0.378571,
-0.079829, -0.661446, 0.405858,
0.286319, -0.454573, 0.565747,
0.361949, -0.577768, 0.379428,
-0.421567, -0.647341, 0.110848,
-0.495157, -0.525101, 0.297149,
-0.078632, -0.102955, -0.773024,
-0.747657, 0.083361, 0.235098,
-0.722020, 0.301955, 0.100704,
0.451071, 0.407790, 0.503183,
-0.423433, -0.188071, -0.639102,
0.424376, 0.187966, 0.638551,
-0.449536, -0.408680, -0.504720,
0.723143, -0.302681, -0.102293,
0.749834, -0.082158, -0.237533,
-0.033658, -0.173287, -0.785577,
-0.077091, 0.580135, -0.563232,
-0.152475, 0.702484, -0.378612,
0.312623, 0.614127, -0.430425,
0.237461, 0.737662, -0.245196,
-0.154710, -0.538494, 0.592159,
0.152436, -0.705573, 0.379568,
0.077750, -0.582314, 0.566151,
-0.237453, -0.740928, 0.245640,
-0.312651, -0.617923, 0.431776,
-0.493172, 0.325964, -0.562770,
-0.165595, -0.402649, -0.690564,
-0.615217, 0.332656, 0.421101,
0.675126, 0.429626, -0.162635,
-0.567775, 0.449043, -0.378149,
0.601144, 0.552793, 0.022325,
0.167301, 0.404411, 0.690412,
0.614980, -0.332680, -0.423529,
0.570553, -0.450187, 0.379287,
0.495660, -0.327287, 0.565404,
-0.603597, -0.554748, -0.023788,
-0.678061, -0.432177, 0.162741,
0.069308, -0.053598, -0.824735,
-0.379011, 0.681054, 0.285889,
-0.783108, -0.070372, -0.267413,
0.011661, 0.716634, 0.419783,
-0.461215, 0.019762, 0.691231,
-0.010650, -0.715992, -0.422133,
0.379957, -0.681311, -0.288429,
-0.069846, 0.057160, 0.827465,
-0.433415, 0.698439, -0.193134,
0.567180, 0.399090, -0.481788,
-0.210322, 0.330988, -0.747940,
0.345968, 0.767596, 0.073870,
0.067077, -0.446059, -0.716555,
0.457238, -0.410860, -0.583129,
-0.457883, 0.412712, 0.581513,
-0.345485, -0.770544, -0.074886,
-0.066366, 0.448734, 0.716530,
0.437150, -0.700617, 0.193298,
0.211318, -0.331542, 0.751815,
-0.570068, -0.402795, 0.484068,
-0.771887, 0.254525, 0.260947,
-0.399034, -0.359299, -0.664851,
0.400010, 0.360005, 0.663963,
0.773664, -0.253802, -0.263052,
-0.138964, -0.182699, -0.824833,
-0.588388, 0.552984, 0.286177,
-0.642478, 0.112882, 0.556095,
0.193722, 0.624159, 0.554790,
-0.192356, -0.623065, -0.556615,
0.641817, -0.111883, -0.557514,
0.589050, -0.553465, -0.288762,
0.141311, 0.183924, 0.826373,
-0.394213, 0.033661, -0.772981,
-0.693884, 0.522502, -0.033076,
0.476467, 0.628035, 0.367979,
-0.476933, -0.628760, -0.370332,
0.696628, -0.523922, 0.032214,
0.252252, 0.242089, -0.799754,
-0.119262, 0.856448, 0.125541,
-0.777530, -0.137652, 0.375062,
0.120926, -0.858417, -0.127402,
0.386235, 0.491232, -0.614933,
-0.226662, 0.825025, -0.193142,
-0.002517, 0.457456, -0.748457,
-0.276799, -0.071822, 0.829274,
-0.253345, -0.243293, 0.803997,
0.162920, 0.860249, -0.060171,
-0.308620, 0.624133, -0.537675,
0.469678, 0.693093, -0.271165,
-0.418084, 0.204311, -0.747534,
0.748874, 0.306016, -0.348516,
-0.163089, -0.863857, 0.059353,
0.228576, -0.828723, 0.193703,
-0.642949, 0.571214, -0.193407,
0.002832, -0.459159, 0.752607,
-0.387717, -0.494922, 0.618582,
0.527760, 0.675477, 0.207755,
-0.449740, -0.750953, -0.109407,
0.310801, -0.626700, 0.540196,
-0.471844, -0.697018, 0.271873,
-0.527551, -0.678249, -0.210287,
0.645966, -0.573432, 0.192908,
0.420277, -0.204983, 0.751732,
-0.753039, -0.309627, 0.349472,
-0.676774, 0.028373, -0.587787,
-0.826546, 0.273387, -0.218020,
0.732699, 0.412157, 0.316085,
-0.734616, -0.413173, -0.318674,
0.831073, -0.274105, 0.218312,
-0.885071, -0.168434, 0.053813,
-0.904515, 0.004180, 0.075153,
-0.877483, 0.225159, -0.058862,
0.301674, -0.097381, -0.850782,
0.669326, 0.063252, 0.611476,
0.652854, 0.160557, 0.611476,
0.655032, 0.151426, 0.611476,
0.663236, 0.110068, 0.611476,
0.651718, 0.165111, 0.611476,
0.653959, 0.155996, 0.611476,
0.646853, 0.183244, 0.611476,
0.645558, 0.187755, 0.611476,
0.649349, 0.174195, 0.611476,
0.682792, 0.363962, 0.476728,
-0.221147, 0.760461, 0.445842,
0.222562, -0.759728, -0.448204,
-0.656641, -0.143965, -0.613626,
-0.303791, 0.099403, 0.852253,
-0.683358, -0.364590, -0.478860,
0.882303, -0.223697, 0.057673,
0.130734, 0.706558, -0.563670,
0.057634, 0.829411, -0.378884,
-0.196751, 0.040083, -0.892647,
-0.056037, -0.833419, 0.380857,
-0.131193, -0.710650, 0.567116,
-0.700781, 0.199296, -0.562225,
-0.775186, 0.321927, -0.377484,
0.856571, 0.336716, -0.029266,
0.782317, 0.460573, 0.156237,
-0.785932, -0.462237, -0.158082,
0.705102, -0.199853, 0.565408,
0.780338, -0.322780, 0.379156,
-0.442111, 0.374849, -0.722054,
0.724914, 0.477458, -0.322431,
-0.861142, -0.339514, 0.028317,
-0.591285, 0.619658, -0.352391,
0.576583, 0.723977, 0.047458,
-0.578679, -0.727315, -0.049296,
0.595488, -0.622018, 0.353862,
0.444632, -0.376406, 0.726648,
-0.729286, -0.481248, 0.323840,
0.299560, -0.489643, -0.742646,
-0.299885, 0.492466, 0.741768,
-0.796278, 0.425629, 0.286194,
-0.823687, 0.204463, 0.420580,
-0.347977, -0.310657, -0.824717,
-0.374591, -0.530830, -0.690242,
0.376416, 0.531417, 0.689111,
0.350140, 0.311217, 0.825017,
0.824768, -0.204105, -0.423764,
0.798976, -0.424707, -0.288659,
0.390036, 0.173265, 0.848769,
-0.928548, 0.176908, 0.101258,
0.632829, 0.314827, 0.637175,
-0.632199, -0.315697, -0.638967,
0.932340, -0.174978, -0.102827,
-0.111239, 0.038007, -0.957083,
-0.135471, 0.208510, -0.932065,
0.640892, 0.276407, -0.666278,
-0.113372, 0.036295, -0.957650,
-0.509931, 0.820586, -0.008222,
0.270931, 0.891107, 0.259038,
0.118017, -0.396701, -0.875788,
-0.270458, -0.893675, -0.261482,
0.512565, -0.823112, 0.007145,
-0.481535, 0.584588, 0.606170,
0.090649, -0.617719, -0.742511,
0.508428, -0.362040, -0.742691,
0.135478, -0.208826, 0.938157,
-0.089744, 0.620156, 0.741253,
0.481081, -0.583465, -0.608507,
-0.509100, 0.363896, 0.741997,
0.205048, 0.583863, -0.748660,
-0.117417, 0.400371, 0.877249,
-0.019447, 0.951966, -0.193945,
-0.645609, -0.279378, 0.670671,
0.325785, -0.268675, -0.876174,
-0.381328, 0.894378, 0.038868,
-0.271738, 0.712114, 0.606159,
0.273857, -0.709654, -0.608178,
-0.724248, 0.369852, -0.536901,
-0.327492, 0.271995, 0.877627,
0.832602, 0.508442, -0.003922,
0.094777, 0.163052, -0.957831,
0.482765, 0.197740, -0.825459,
0.020178, -0.957140, 0.193974,
-0.625757, 0.078070, -0.747099,
-0.206578, -0.587433, 0.753798,
-0.849601, 0.444233, -0.192880,
0.929738, 0.214954, -0.214466,
0.037426, 0.935268, 0.285269,
-0.352268, 0.900485, 0.151110,
0.709287, 0.582750, 0.341686,
-0.037973, -0.936987, -0.287244,
0.729675, -0.371503, 0.540388,
0.354147, -0.902716, -0.152566,
-0.095811, -0.163424, 0.963878,
-0.709914, -0.585930, -0.344284,
-0.486980, -0.199502, 0.831073,
0.856023, -0.445630, 0.192973,
0.630170, -0.077465, 0.752301,
-0.936805, -0.216407, 0.215042,
-0.956071, -0.044642, 0.235409,
0.659259, 0.534850, 0.502183,
-0.605931, -0.095839, -0.773477,
-0.659131, -0.536299, -0.504513,
-0.403958, 0.851586, 0.311187,
-0.013183, 0.887429, 0.445037,
0.071919, 0.334302, -0.932424,
0.459777, 0.369587, -0.799949,
-0.512220, -0.029178, 0.851518,
0.013776, -0.888079, -0.447992,
0.404719, -0.853157, -0.313744,
-0.611872, 0.723902, 0.311340,
-0.510945, -0.028256, 0.856601,
0.168959, 0.795414, 0.579703,
-0.693527, 0.063920, 0.716031,
-0.539545, 0.667973, -0.511549,
-0.168815, -0.794935, -0.582025,
-0.072129, -0.335591, 0.939004,
0.700240, 0.648790, -0.297452,
-0.466416, 0.545167, -0.696623,
-0.462954, -0.371619, 0.805421,
0.613065, -0.725694, -0.314149,
-0.257379, 0.672325, -0.696465,
-0.650038, 0.247358, -0.721089,
-0.332811, 0.794078, -0.512241,
-0.798260, 0.492674, -0.352256,
0.520437, 0.740345, -0.431030,
0.445026, 0.864391, -0.245689,
0.544495, -0.671544, 0.515088,
0.469841, -0.547999, 0.701430,
-0.705987, -0.652711, 0.298223,
0.336210, -0.798625, 0.515000,
0.259755, -0.676408, 0.701487,
-0.521492, -0.746898, 0.433083,
0.805172, -0.494363, 0.354015,
0.654644, -0.248895, 0.726706,
0.750961, -0.668617, 0.105552,
0.263709, 0.539274, -0.815401,
-0.321914, -0.091117, -0.957944,
-0.850380, -0.014090, 0.555429,
0.403536, 0.750782, 0.553436,
-0.402152, -0.751415, -0.555715,
0.608979, 0.487000, 0.662754,
0.582671, 0.266762, 0.798027,
-0.581271, -0.267202, -0.799097,
-0.608318, -0.487376, -0.664435,
-0.090540, -0.525037, -0.876252,
-0.690607, 0.455236, 0.606375,
-0.689211, 0.457705, 0.606939,
0.690020, -0.455380, -0.608825,
0.092488, 0.527813, 0.876281,
-0.847365, 0.376657, 0.446250,
-0.323556, -0.481956, -0.850365,
-0.924267, 0.387271, 0.238039,
0.326017, 0.483171, 0.850053,
0.848835, -0.376399, -0.449155,
0.048019, 0.505163, -0.907163,
0.436139, 0.540028, -0.774255,
-0.836959, 0.147257, -0.600919,
0.081004, -0.231292, 1.012626,
0.008284, -0.195911, 1.023327,
-0.048137, -0.508147, 0.913999,
-0.439231, -0.543778, 0.780283,
-0.390397, 0.423055, -0.880934,
0.774093, 0.526494, -0.481953,
-0.183689, 0.549490, -0.881328,
-0.673742, 0.417893, -0.695716,
0.593839, 0.617698, -0.615435,
-0.747784, 0.540876, -0.512023,
-0.124987, 0.304526, -1.004323,
-0.662599, 0.676389, 0.471578,
-0.117424, -0.745467, -0.742307,
-0.717013, 0.236439, 0.742101,
0.119188, 0.747135, 0.740509,
0.716498, -0.235025, -0.743340,
0.367943, 0.217982, -0.969736,
0.665271, -0.675299, -0.474967,
0.393812, -0.425590, 0.887800,
0.181688, 0.754388, -0.723125,
-0.050363, 0.798386, -0.697203,
-0.781199, -0.529696, 0.485095,
0.337939, 0.833379, -0.564357,
-0.125310, 0.921270, -0.512659,
0.184345, -0.552836, 0.888294,
0.678225, -0.421199, 0.701568,
0.754476, -0.543288, 0.515013,
-0.597507, -0.623690, 0.619976,
-0.884379, -0.097652, -0.587637,
0.914728, 0.318404, 0.450091,
-0.181724, -0.760194, 0.728498,
0.127324, -0.927222, 0.516026,
0.050461, -0.803949, 0.702577,
-0.341065, -0.838926, 0.567977,
-0.908142, 0.072849, -0.562274,
-0.890687, -0.100532, -0.587330,
0.534622, -0.236719, -0.898221,
-0.556352, -0.438361, -0.824544,
0.558142, 0.438540, 0.824045,
0.864630, 0.271136, 0.610560,
-0.866047, -0.271927, -0.613178,
0.558809, -0.312578, -0.902586,
-0.504908, 0.755748, 0.631905,
0.115366, -0.789228, -0.768157,
-0.113643, 0.791471, 0.766478,
0.506031, -0.754807, -0.634417,
-0.560389, 0.315222, 0.902769,
0.603377, -0.298820, 0.887892,
-0.530507, -0.218516, -0.957971,
0.584633, 0.658569, 0.687984,
0.532728, 0.217818, 0.959181,
-0.583570, -0.659505, -0.690243,
-0.897993, 0.328368, 0.606536,
-0.697059, 0.589047, -0.670764,
-0.300196, -0.653160, -0.875447,
0.301370, 0.654467, 0.874950,
0.900105, -0.326447, -0.609532,
0.570617, 0.787352, -0.590103,
-0.206889, 0.720278, -0.855922,
0.703123, -0.593103, 0.676083,
0.209520, -0.725419, 0.862980,
-0.841108, 0.550533, -0.556873,
-0.901113, -0.064376, 0.715514,
0.348164, 0.956539, 0.549876,
-0.904396, -0.064032, 0.723824,
-0.122023, 0.475993, 1.052405,
0.509894, 0.416940, -0.958033,
0.888796, -0.293075, 0.701592,
0.374713, -0.611854, -0.927727,
-0.514277, -0.420509, 0.967169,
-0.374681, 0.615588, 0.927402,
-0.782063, -0.000598, -0.905259,
-0.788573, -0.001523, -0.906766,
-0.438233, 0.763775, -0.830445,
0.813746, -0.171034, 0.888940,
0.444136, -0.769671, 0.838088,
-0.830051, 0.339494, -0.854540,
-0.765141, 0.578279, 0.791769,
0.766076, -0.577172, -0.794700,
0.837799, -0.342448, 0.863227,
0.278570, 0.825405, 0.899570,
-0.275012, -0.824919, -0.901306,
-0.149270, -0.735981, -1.004739,
-0.178122, -0.733609, 1.004418,
0.767064, 0.564187, 0.821805,
0.741104, 0.344986, 0.956866,
-0.739934, -0.345294, -0.958782,
-0.767051, -0.566382, -0.824648,
-0.786115, 0.608534, 0.775254,
0.462508, 0.758235, -0.907353,
-0.465866, -0.766036, 0.917252,
-0.951368, -0.112752, 0.874850,
0.607042, -0.656532, -0.953402,
-0.956022, -0.113154, 0.885615,
-0.723687, -0.549684, 0.968898,
0.816971, 0.505411, -1.031294,
-0.772059, 0.643978, 1.036305]; | PypiClean |
/Marcellus-1.1.3.tar.gz/Marcellus-1.1.3/marcellus/sendmail.py |
from email import Encoders
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import os
import re
import smtplib
def send_mail(from_, to_, subject, message, server, login, password, files=[], images=[],
html='', cc=[], bcc=[], reply_to=None, charset='utf-8', ssl=False):
"""
IN
from_ (<e-mail address>, <name>)
to_ [(<e-mail address>, <name>), ...]
subject <str>
message <str>
server <str> (smtp.domain.com, smtp.domain.com:<port>)
login <str>
password <str>
files [<str>, ...] (optional)
[(<StringIO>, <str>), ...] (optional)
images [(<StringIO>, <str>, <str>), ...] (optional)
html <str> (optional)
cc [(<e-mail address>, <name>), ...]
bcc [(<e-mail address>, <name>), ...]
reply_to <str> => None
charset <str> => utf-8
ssl <bool> => False
For example,
send_mail(('[email protected]', 'Pepe Pérez'),
['[email protected]', 'María López'), ([email protected], 'María')],
'Hello, world!', 'How you doing?\n\nPepe',
'smtp.gmail.com',
'[email protected]', 'ilovemarialopez')
"""
msg = MIMEText(message)
msg.set_type('text/plain')
msg.set_charset(charset)
msg_root = None
if html:
msg_root = MIMEMultipart('related')
msg_root.preamble = 'This is a multi-part message in MIME format.'
if html:
msg_alt = MIMEMultipart('alternative')
msg_alt.attach(msg)
msg_html = MIMEText(html)
msg_html.set_type('text/html')
msg_html.set_charset(charset)
msg_alt.attach(msg_html)
msg_root.attach(msg_alt)
msg = msg_root
files_obj = []
if files:
if not msg_root:
msg_root = MIMEMultipart()
msg_root.attach(msg)
msg = msg_root
for f in files:
part = MIMEBase('application', 'octet-stream')
if isinstance(f, (str, unicode,)):
f_name = os.path.basename(f)
f = open(f, 'rb')
files_obj.append(f)
else:
# tuple (<file-like>, <file name>,)
f, f_name = f
f.seek(0)
part.set_payload(f.read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="{0}"'.format(f_name))
msg.attach(part)
if images:
if not msg_root:
msg_root = MIMEMultipart()
msg_root.attach(msg)
msg = msg_root
# f=StringIO
for f, file_name, cid in images:
f.seek(0)
part_img = MIMEImage(f.read(), name=file_name)
# Encoders.encode_base64(part_img)
part_img.add_header('Content-ID', '<{0}>'.format(cid))
msg.attach(part_img)
msg['Subject'] = Header(subject, charset)
msg['From'] = '{0} <{1}>'.format(from_[1], from_[0])
nombres = ['{0} <{1}>'.format(dst[1], dst[0]) for dst in to_]
msg['To'] = ';'.join(nombres)
msg['Cc'] = ';'.join(['{0} <{1}>'.format(dst[1], dst[0]) for dst in cc])
# reply-to
if reply_to:
msg.add_header('Reply-to', reply_to)
# identify the port in given host (smtp.domain.com:123)
m_server = re.search(r'^([\w\-\.]+)(:\d+)?', server)
port = m_server.group(2)
if port:
port = dict(port=int(port.replace(':', '')))
else:
port = {}
SMTP_ = smtplib.SMTP
if ssl:
SMTP_ = smtplib.SMTP_SSL
s = SMTP_(m_server.group(1), **port)
try:
s.set_debuglevel(False)
# authenticate to the server
s.ehlo()
# STARTTLS?
if s.has_extn('STARTTLS'):
s.starttls()
# re-identify ourselves over TLS connection
s.ehlo()
# login
if isinstance(login, unicode):
login = login.encode('utf-8')
if isinstance(password, unicode):
password = password.encode('utf-8')
s.login(login, password)
emails_dst_to = [dst[0] for dst in to_]
emails_dst_cc = [dst[0] for dst in cc]
emails_dst_bcc = [dst[0] for dst in bcc]
emails_dst = emails_dst_to + emails_dst_cc + emails_dst_bcc
# send e-mail
result = s.sendmail(from_[0], emails_dst, msg.as_string())
# close files (if there's any)
for f in files_obj:
f.close()
return result
finally:
s.quit()
def normalizar_a_html(texto):
return texto.\
replace('\'', ''').\
replace('&', '&').\
replace('#', '#').\
replace('á', 'á').\
replace('Á', 'Á').\
replace('é', 'é').\
replace('É', 'É').\
replace('í', 'í').\
replace('Í', 'Í').\
replace('ó', 'ó').\
replace('Ó', 'Ó').\
replace('ú', 'ú').\
replace('Ú', 'Ú').\
replace('ü', 'ü').\
replace('Ü', 'Ü').\
replace('ñ', 'ñ').\
replace('Ñ', 'Ñ').\
replace('<', '<').\
replace('>', '>').\
replace('¡', '¡').\
replace('?', '¿').\
replace('"', '"').\
replace('%', '%') | PypiClean |
/CherryPy-18.8.0.tar.gz/CherryPy-18.8.0/CHANGES.rst | v18.8.0
-------
* :issue:`1974`: Dangerous characters received in a host header
encoded using RFC 2047 are now elided by default. Currently,
dangerous characters are defined as CR and LF. The original
value is still available as ``cherrypy.request.headers['Host'].raw``
if needed.
v18.7.0
-------
* :pr:`1923`: Drop support for Python 3.5.
* :pr:`1945`: Fixed compatibility on Python 3.11.
v18.6.1
-------
* :issue:`1849` via :pr:`1879`: Fixed XLF flag in gzip header
emitted by gzip compression tool per
:rfc:`1952#section-2.3.1` -- by :user:`webknjaz`.
* :issue:`1874`: Restricted depending on pywin32 only under
CPython so that it won't get pulled-in under PyPy
-- by :user:`webknjaz`.
* :issue:`1920`: Bumped minimum version of PyWin32 to 227.
Block pywin32 install on Python 3.10 and later.
v18.6.0
-------
* :issue:`1776` via :pr:`1851`: Add support for UTF-8 encoded attachment
file names in ``Content-Disposition`` header via :rfc:`6266#appendix-D`.
v18.5.0
-------
* :issue:`1827`: Fixed issue where bytes values in a ``HeaderMap``
would be converted to strings.
* :pr:`1826`: Rely on
`jaraco.collections <https://pypi.org/project/jaraco.collections>`_
for its case-insensitive dictionary support.
v18.4.0
-------
* :pr:`1715`: Fixed issue in cpstats where the ``data/`` endpoint
would fail with encoding errors on Python 3.
* :pr:`1821`: Simplify the passthrough of parameters to
``CPWebCase.getPage`` to cheroot. CherryPy now requires
cheroot 8.2.1 or later.
v18.3.0
-------
* :pr:`1806`: Support handling multiple exceptions when processing hooks as
reported in :issue:`1770`.
v18.2.0
-------
* File-based sessions no longer attempt to remove the lock files
when releasing locks, instead deferring to the default behavior
of zc.lockfile. Fixes :issue:`1391` and :issue:`1779`.
* :pr:`1794`: Add native support for ``308 Permanent Redirect``
usable via ``raise cherrypy.HTTPRedirect('/new_uri', 308)``.
v18.1.2
-------
* Fixed :issue:`1377` via :pr:`1785`: Restore a native WSGI-less
HTTP server support.
* :pr:`1769`: Reduce log level for non-error events in win32.py
v18.1.1
-------
* :pr:`1774` reverts :pr:`1759` as new evidence emerged that
the original behavior was intentional. Re-opens :issue:`1758`.
v18.1.0
-------
* :issue:`1758` via :pr:`1759`: In the bus, when awaiting a
state change, only publish after the state has changed.
v18.0.1
-------
* :issue:`1738` via :pr:`1736`: Restore support for 'bytes'
in response headers.
* Substantial removal of Python 2 compatibility code.
v18.0.0
-------
* :issue:`1730`: Drop support for Python 2.7. CherryPy 17 will
remain an LTS release for bug and security fixes.
* Drop support for Python 3.4.
v17.4.2
-------
* Fixed :issue:`1377` by backporting :pr:`1785` via :pr:`1786`:
Restore a native WSGI-less HTTP server support.
v17.4.1
-------
* :issue:`1738` via :pr:`1755`: Restore support for 'bytes'
in response headers (backport from v18.0.1).
v17.4.0
-------
* :commit:`a95e619f`: When setting Response Body, reject Unicode
values, making behavior on Python 2 the same as on Python 3.
* Other inconsequential refactorings.
v17.3.0
-------
* :issue:`1193` via :pr:`1729`: Rely on zc.lockfile for
session concurrency support.
v17.2.0
-------
* :issue:`1690` via :pr:`1692`: Prevent orphaned Event object in cached
304 response.
v17.1.0
-------
* :issue:`1694` via :pr:`1695`: Add support for accepting uploaded files
with non-ascii filenames per RFC 5987.
v17.0.0
-------
* :issue:`1673`: CherryPy now allows namespace packages for
its dependencies. Environments that cannot handle namespace
packgaes like py2exe will need to add such support or pin to
older CherryPy versions.
v16.0.3
-------
* :issue:`1722`: Pinned the ``tempora`` dependency against
version 1.13 to avoid pulling in namespace packages.
v16.0.2
-------
* :issue:`1716` via :pr:`1717`: Fixed handling of url-encoded parameters
in digest authentication handling, correcting regression in v14.2.0.
* :issue:`1719` via :commit:`1d41828`: Digest-auth tool will now return
a status code of 401 for when a scheme other than 'digest' is
indicated.
v16.0.0
-------
* :issue:`1688` via :commit:`38ad1da`: Removed ``basic_auth`` and
``digest_auth`` tools and the ``httpauth`` module, which have been
officially deprecated earlier in v14.0.0.
* Removed deprecated properties:
- ``cherrypy._cpreqbody.Entity.type`` deprecated in favor of
:py:attr:`cherrypy._cpreqbody.Entity.content_type`
- ``cherrypy._cprequest.Request.body_params`` deprecated in favor of
:py:attr:`cherrypy._cprequest.RequestBody.params`
* :issue:`1377`: In _cp_native server, set ``req.status`` using bytes
(fixed in :pr:`1712`).
* :issue:`1697` via :commit:`841f795`: Fixed error on Python 3.7 with
AutoReloader when ``__file__`` is ``None``.
* :issue:`1713` via :commit:`15aa80d`: Fix warning emitted during
test run.
* :issue:`1370` via :commit:`38f199c`: Fail with HTTP 400 for invalid
headers.
v15.0.0
-------
* :issue:`1708`: Removed components from webtest that were
removed in the refactoring of cheroot.test.webtest for
cheroot 6.1.0.
v14.2.0
-------
* :issue:`1680` via :pr:`1683`: Basic Auth and Digest Auth
tools now support :rfc:`7617` UTF-8 charset decoding where
possible, using latin-1 as a fallback.
v14.1.0
-------
* :cr-pr:`37`: Add support for peercreds lookup over UNIX domain socket.
This enables the app to automatically identify "who's on the other
end of the wire".
This is how you enable it::
server.peercreds: True
server.peercreds_resolve: True
The first option puts the remote peer's numeric data into WSGI env vars:
the connecting process's PID, user id and group id.
The second option resolves those into user and group names.
To prevent expensive syscalls, the data is cached on a per-connection
basis.
v14.0.1
-------
* :issue:`1700`: Improve windows pywin32 dependency declaration via
conditional extras.
v14.0.0
-------
* :issue:`1688`: Officially deprecated ``basic_auth`` and ``digest_auth``
tools and the ``httpauth`` module, triggering DeprecationWarnings
if they're used. Applications should instead adapt to use the
more recent ``auth_basic`` and ``auth_digest`` tools.
This deprecated functionality will be removed in a subsequent
release soon.
* Removed ``DeprecatedTool`` and the long-deprecated and disabled
``tidy`` and ``nsgmls`` tools. See `the rationale
<https://github.com/cherrypy/cherrypy/pull/1689#issuecomment-362924962>`_
for this change.
v13.1.0
-------
* :issue:`1231` via :pr:`1654`: CaseInsensitiveDict now re-uses the
generalized functionality from ``jaraco.collections`` to
provide a more complete interface for a CaseInsensitiveDict
and HeaderMap.
Users are encouraged to use the implementation from
`jaraco.collections <https://pypi.org/project/jaraco.collections>`_
except when dealing with headers in CherryPy.
v13.0.1
-------
* :pr:`1671`: Restore support for installing CherryPy into
environments hostile to namespace packages, broken since
the 11.1.0 release.
v13.0.0
-------
* :issue:`1666`: Drop support for Python 3.3.
v12.0.2
-------
* :issue:`1665`: In request processing, when an invalid cookie is
received, render the actual error message reported rather
than guessing (sometimes incorrectly) what error occurred.
v12.0.1
-------
* Fixed issues importing :py:mod:`cherrypy.test.webtest` (by creating
a module and importing classes from :py:mod:`cheroot`) and added a
corresponding :py:class:`DeprecationWarning`.
v12.0.0
-------
* Drop support for Python 3.1 and 3.2.
* :issue:`1625`: Removed response timeout and timeout monitor and
related exceptions, as it is not possible to interrupt a request.
Servers that wish to exit a request prematurely are
recommended to monitor ``response.time`` and raise an
exception or otherwise act accordingly.
Servers that previously disabled timeouts by invoking
``cherrypy.engine.timeout_monitor.unsubscribe()`` will now
crash. For forward-compatibility with this release on older
versions of CherryPy, disable
timeouts using the config option::
'engine.timeout_monitor.on': False,
Or test for the presence of the timeout_monitor attribute::
with contextlib2.suppress(AttributeError):
cherrypy.engine.timeout_monitor.unsubscribe()
Additionally, the ``TimeoutError`` exception has been removed,
as it's no longer called anywhere. If your application
benefits from this Exception, please comment in the linked
ticket describing the use case, and we'll help devise a
solution or bring the exception back.
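For servers that adopt the ``response.time`` approach suggested above, a
minimal sketch of such a check (illustrative only, not part of CherryPy;
the tool name and the 10-second budget are arbitrary)::

    import time
    import cherrypy

    def _enforce_deadline(budget=10):
        # response.time is set when the response object is created
        if time.time() - cherrypy.response.time > budget:
            raise cherrypy.HTTPError(503, 'request exceeded its time budget')

    cherrypy.tools.deadline = cherrypy.Tool('before_finalize', _enforce_deadline)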
v11.3.0
-------
* Bump to cheroot 5.9.0.
* ``cherrypy.test.webtest`` module is now merged with the
``cheroot.test.webtest`` module. The CherryPy name is retained
for now for compatibility and will be removed eventually.
v11.2.0
-------
* ``cherrypy.engine.subscribe`` now may be called without a
callback, in which case it returns a decorator expecting the
callback (see the sketch after this list).
* :pr:`1656`: Images are now compressed using lossless compression
and consume less space.
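A short sketch of the decorator form of ``subscribe`` (illustrative only)::

    @cherrypy.engine.subscribe('start')
    def on_engine_start():
        cherrypy.log('engine started')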
v11.1.0
-------
* :pr:`1611`: Expose default status logic for a redirect as
``HTTPRedirect.default_status``.
* :pr:`1615`: ``HTTPRedirect.status`` is now an instance property and
derived from the value in ``args``. Although it was previously
possible to set the property on an instance, and this change
prevents that possibility, CherryPy never relied on that behavior
and we presume no applications depend on that interface.
* :issue:`1627`: Fixed issue in proxy tool where more than one port would
appear in the ``request.base`` and thus in ``cherrypy.url``.
* :pr:`1645`: Added new log format markers:
- ``i`` holds a per-request UUID4
- ``z`` outputs UTC time in the format of RFC 3339
- ``cherrypy._cprequest.Request.unique_id.uuid4`` now has lazily
invocable UUID4
* :issue:`1646`: Improve http status conversion helper.
* :pr:`1638`: Always use backslash for path separator when processing
paths in staticdir.
* :issue:`1190`: Fix gzip, caching, and staticdir tools integration. Makes
cache of gzipped content valid.
* Requires cheroot 5.8.3 or later.
* Also, many improvements around continuous integration and code
quality checks.
This release contained an unintentional regression in environments that
are hostile to namespace packages, such as Pex, Celery, and py2exe.
See :pr:`1671` for details.
v11.0.0
-------
* :issue:`1607`: Dropped support for Python 2.6.
v10.2.2
-------
* :issue:`1595`: Fixed over-eager normalization of paths in cherrypy.url.
v10.2.1
-------
* Remove unintended dependency on ``graphviz`` in Python
2.6.
v10.2.0
-------
* :pr:`1580`: ``CPWSGIServer.version`` now reported as
``CherryPy/x.y.z Cheroot/x.y.z``. Bump to cheroot 5.2.0.
* The codebase is now :pep:`8` compliant; the flake8 linter is `enabled in TravisCI by
default <https://github.com/cherrypy/cherrypy/commit/b6e752b>`_.
* Max line restriction is now set to 120 for flake8 linter.
* :pep:`257` linter runs as separate allowed failure job in Travis CI.
* A few bugs related to undeclared variables have been fixed.
* ``pre-commit`` testing goes faster due to enabled caching.
v10.1.1
-------
* :issue:`1342`: Fix AssertionError on shutdown.
v10.1.0
-------
* Bump to cheroot 5.1.0.
* :issue:`794`: Prefer setting max-age for session cookie
expiration, moving MSIE hack into a function
documenting its purpose.
v10.0.0
-------
* :issue:`1332`: CherryPy now uses `portend
<https://pypi.org/project/portend>`_ for checking and
waiting on ports for startup and teardown checks. The
following names are no longer present:
- cherrypy._cpserver.client_host
- cherrypy._cpserver.check_port
- cherrypy._cpserver.wait_for_free_port
- cherrypy._cpserver.wait_for_occupied_port
- cherrypy.process.servers.check_port
- cherrypy.process.servers.wait_for_free_port
- cherrypy.process.servers.wait_for_occupied_port
Use this functionality from the portend package directly.
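For example, a rough equivalent using portend directly (a sketch assuming
its ``free`` and ``occupied`` helpers)::

    import portend

    # wait up to 5 seconds for the port to become free
    portend.free('localhost', 8080, timeout=5)

    # wait up to 5 seconds for something to start listening on the port
    portend.occupied('localhost', 8080, timeout=5)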
v9.0.0
------
* :issue:`1481`: Move functionality from cherrypy.wsgiserver to
the `cheroot 5.0 <https://pypi.org/project/Cheroot/5.0.1/>`_
project.
v8.9.1
------
* :issue:`1537`: Restore dependency on pywin32 for Python 3.6.
v8.9.0
------
* :pr:`1547`: Replaced ``cherryd`` distutils script with a setuptools
console entry point.
When running CherryPy in daemon mode, the forked process no
longer changes directory to ``/``. If that behavior is something
on which your application relied and should rely, please file
a ticket with the project.
v8.8.0
------
* :pr:`1528`: Allow a timeout of 0 for the server.
v8.7.0
------
* :issue:`645`: Setting a bind port of 0 will bind to an ephemeral port.
v8.6.0
------
* :issue:`1538` and :issue:`1090`: Removed cruft from the setup script and
instead rely on `include_package_data
<https://setuptools.readthedocs.io/en/latest/setuptools.html?highlight=include_package_data#new-and-changed-setup-keywords>`_
to ensure the relevant files are included in the package.
Note: this change causes LICENSE.md to no longer
be included in the installed package.
v8.5.0
------
* The pyOpenSSL support is now included on Python 3 builds,
removing the last disparity between Python 2 and Python 3
in the CherryPy package. This change is one small step
in consideration of :issue:`1399`. This change also fixes RPM
builds, as reported in :issue:`1149`.
v8.4.0
------
* :issue:`1532`: Also release wheels for Python 2, enabling
offline installation.
v8.3.1
------
* :issue:`1537`: Disable dependency on pypiwin32 on Python 3.6
until a viable build of pypiwin32 can be made on that
Python version.
v8.3.0
------
* Consolidated some documentation and include the more
concise readme in the package long description, as found
on PyPI.
v8.2.0
------
* :issue:`1463`: CherryPy tests are now run under pytest and
invoked using tox.
v8.1.3
------
* :issue:`1530`: Fix the issue with TypeError being swallowed by
decorated handlers.
v8.1.2
------
* :issue:`1508`
v8.1.1
------
* :issue:`1497`: Handle errors thrown by ``ssl_module: 'builtin'``
when client opens connection to HTTPS port using HTTP.
* :issue:`1350`: Fix regression introduced in v6.1.0 where environment
construction for WSGIGateway_u0 was passing one parameter
and not two.
* Other miscellaneous fixes.
v8.1.0
------
* :issue:`1473`: ``HTTPError`` now also works as a context manager.
* :issue:`1487`: The sessions tool now accepts a ``storage_class``
parameter, which supersedes the new deprecated
``storage_type`` parameter. The ``storage_class`` should
be the actual Session subclass to be used.
* Releases now use ``setuptools_scm`` to track the release
versions. Therefore, releases can be cut by simply tagging
a commit in the repo. Versions numbers are now stored in
exactly one place.
v8.0.1
------
* :issue:`1489` via :pr:`1493`: Additionally reject anything else that's
not bytes.
* :issue:`1492`: systemd socket activation.
v8.0.0
------
* :issue:`1483`: Remove Deprecated constructs:
- ``cherrypy.lib.http`` module.
- ``unrepr``, ``modules``, and ``attributes`` in
``cherrypy.lib``.
* :pr:`1476`: Drop support for python-memcached<1.58
* :issue:`1401`: Handle NoSSLErrors.
* :issue:`1489`: In ``wsgiserver.WSGIGateway.respond``, the application
must now yield bytes and not text, as the spec requires.
If text is received, it will now raise a ValueError instead
of silently encoding using ISO-8859-1.
* Removed unicode filename from the package, working around
:gh:`pypa/pip#3894 <pypa/pip/issues/3894>` and :gh:`pypa/setuptools#704
<pypa/setuptools/issues/704>`.
v7.1.0
------
* :pr:`1458`: Implement systemd's socket activation mechanism for
CherryPy servers, based on work sponsored by Endless Computers.
Socket Activation allows one to set up a system so that
systemd will sit on a port and start services
'on demand' (a little bit like inetd and xinetd
used to do).
v7.0.0
------
Removed the long-deprecated backward compatibility for
legacy config keys in the engine. Use the config for the
namespaced-plugins instead:
- autoreload_on -> autoreload.on
- autoreload_frequency -> autoreload.frequency
- autoreload_match -> autoreload.match
- reload_files -> autoreload.files
- deadlock_poll_frequency -> timeout_monitor.frequency
v6.2.1
------
* :issue:`1460`: Fix KeyError in Bus.publish when signal handlers
set in config.
v6.2.0
------
* :issue:`1441`: Added tool to automatically convert request
params based on type annotations (primarily in
Python 3). For example::
@cherrypy.tools.params()
def resource(self, limit: int):
assert isinstance(limit, int)
v6.1.1
------
* Issue :issue:`1411`: Fix issue where autoreload fails when
the host interpreter for CherryPy was launched using
``python -m``.
v6.1.0
------
* Combined wsgiserver2 and wsgiserver3 modules into a
single module, ``cherrypy.wsgiserver``.
v6.0.2
------
* Issue :pr:`1445`: Correct additional typos.
v6.0.1
------
* Issue :issue:`1444`: Correct typos in ``@cherrypy.expose``
decorators.
v6.0.0
------
* Setuptools is now required to build CherryPy. Pure
distutils installs are no longer supported. This change
allows CherryPy to depend on other packages and re-use
code from them. It's still possible to install
pre-built CherryPy packages (wheels) using pip without
Setuptools.
* `six <https://pypi.io/project/six>`_ is now a
requirement and subsequent requirements will be
declared in the project metadata.
* :issue:`1440`: Back out changes from :pr:`1432` attempting to
fix redirects with Unicode URLs, as it also had the
unintended consequence of causing the 'Location'
to be ``bytes`` on Python 3.
* ``cherrypy.expose`` now works on classes.
* ``cherrypy.config`` decorator is now used throughout
the code internally.
v5.6.0
------
* ``@cherrypy.expose`` now will also set the exposed
attribute on a class.
* Rewrote all tutorials and internal usage to prefer
the decorator usage of ``expose`` rather than setting
the attribute explicitly.
* Removed test-specific code from tutorials.
v5.5.0
------
* :issue:`1397`: Fix for filenames with semicolons and quote
characters in filenames found in headers.
* :issue:`1311`: Added decorator for registering tools.
* :issue:`1194`: Use simpler encoding rules for SCRIPT_NAME
and PATH_INFO environment variables in CherryPy Tree
allowing non-latin characters to pass even when
``wsgi.version`` is not ``u.0``.
* :issue:`1352`: Ensure that multipart fields are decoded even
when cached in a file.
v5.4.0
------
* ``cherrypy.test.webtest.WebCase`` now honors a
'WEBTEST_INTERACTIVE' environment variable to disable
interactive tests (still enabled by default). Set to '0'
or 'false' or 'False' to disable interactive tests.
* :issue:`1408`: Fix AttributeError when listiterator was accessed
using the ``next`` attribute.
* :issue:`748`: Removed ``cherrypy.lib.sessions.PostgresqlSession``.
* :pr:`1432`: Fix errors with redirects to Unicode URLs.
v5.3.0
------
* :issue:`1202`: Add support for specifying a certificate authority when
serving SSL using the built-in SSL support.
* Use ssl.create_default_context when available.
* :issue:`1392`: Catch platform-specific socket errors on OS X.
* :issue:`1386`: Fix parsing of URIs containing ``://`` in the path part.
v5.2.0
------
* :issue:`1410`: Moved hosting to Github
(`cherrypy/cherrypy <https://github.com/cherrypy/cherrypy>`_).
v5.1.0
------
* Bugfix issue :issue:`1315` for the ``test_HTTP11_pipelining`` test in Python 3.5.
* Bugfix issue :issue:`1382` regarding keyword argument support for Python 3
in the config file.
* Bugfix issue :issue:`1406` for the ``test_2_KeyboardInterrupt`` test in Python 3.5,
by monkey-patching the HTTPRequest, given a bug in CPython
that affects the test suite (https://bugs.python.org/issue23377).
* Add an additional parameter ``raise_subcls`` to the test helpers
``openURL`` and ``CPWebCase.getPage`` to allow finer control over
which exceptions can be raised.
* Add support for direct keywords in the calls (e.g. ``foo=bar``) in
the config file under Python 3.
* Add additional validation to determine if the process is running
as a daemon in ``cherrypy.process.plugins.SignalHandler`` to allow
the test suite to run under CI tools.
v5.0.1
------
* Bugfix for NameError following :issue:`94`.
v5.0.0
------
* Removed deprecated support for ``ssl_certificate`` and
``ssl_private_key`` attributes and implicit construction
of SSL adapter on Python 2 WSGI servers.
* Default SSL Adapter on Python 2 is the builtin SSL adapter,
matching Python 3 behavior.
* Pull request :issue:`94`: In proxy tool, defer to Host header for
resolving the base if no base is supplied.
v4.0.0
------
* Drop support for Python 2.5 and earlier.
* No longer build Windows installers by default.
v3.8.2
------
* Pull Request :issue:`116`: Correct InternalServerError when null bytes in
static file path. Now responds with 404 instead.
v3.8.0
------
* Pull Request :issue:`96`: Pass ``exc_info`` to logger as keyword rather than
formatting the error and injecting into the message.
v3.7.0
------
* CherryPy daemon may now be invoked with ``python -m cherrypy`` in
addition to the ``cherryd`` script.
* Issue :issue:`1298`: Fix SSL handling on CPython 2.7 with builtin SSL module
and pyOpenSSL 0.14. This change will break PyPy for now.
* Several documentation fixes.
v3.6.0
------
* Fixed HTTP range headers for negative length larger than content size.
* Disabled universal wheel generation as wsgiserver has Python duality.
* Pull Request :issue:`42`: Correct TypeError in ``check_auth`` when encrypt is used.
* Pull Request :issue:`59`: Correct signature of HandlerWrapperTool.
* Pull Request :issue:`60`: Fix error in SessionAuth where login_screen was
incorrectly used.
* Issue :issue:`1077`: Support keyword-only arguments in dispatchers (Python 3).
* Issue :issue:`1019`: Allow logging host name in the access log.
* Pull Request :issue:`50`: Fixed race condition in session cleanup.
v3.5.0
------
* Issue :issue:`1301`: When the incoming queue is full, now reject additional
connections. This functionality was added to CherryPy 3.0, but
unintentionally lost in 3.1.
v3.4.0
------
* Miscellaneous quality improvements.
v3.3.0
------
CherryPy adopts semver.
| PypiClean |
/FicusFramework-3.1.0.post2.tar.gz/FicusFramework-3.1.0.post2/src/schedule/utils/log/TaskLogger.py | import datetime
import logging
import traceback
# local logger
from schedule.utils.log import TaskLogFileAppender
local_log = logging.getLogger('Ficus')
class TaskLogger:
def __init__(self,log_file_name) -> None:
self._log_file_name = log_file_name
def log(self, append_log: str):
"""
Write a log entry.
:param append_log: the content to record
:return:
"""
call_info = self._get_filename_function_line(limit=2)
# print(call_info)
self._log_detail(call_info, append_log)
def error(self, e: Exception):
"""
Record an exception.
:param e: the raised exception
:return:
"""
call_info = self._get_filename_function_line(limit=2)
self._log_detail(call_info, str(e) if e is not None else "")
def _log_detail(self,call_info, append_log):
"""
Write the formatted content to the log file.
:param call_info:
:param append_log:
:return:
"""
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
format_append_log = f"{now} {call_info} {append_log if append_log is not None else ''}"
log_file_name = self._log_file_name or TaskLogFileAppender.get_log_file_name()
if log_file_name is not None and log_file_name.strip() != "":
TaskLogFileAppender.append_log(log_file_name, format_append_log)
else:
local_log.info(format_append_log)
def _stack_tuple_to_function_line_filename(self,stackTuple):
''' stackTuple: (filename, line number, function name, the source code on that line)
'''
filename = stackTuple[0]
linenumber = stackTuple[1]
funcname = stackTuple[2]
filename = filename if "<string>" != filename else "dynamic script"
import threading
return f"[{filename}#{funcname}]-[{linenumber}]-[{threading.current_thread().name}]"
def _get_filename_function_line(self, limit=1):  # limit=1 extracts the caller's position; note that extract_stack receives limit=limit+1
stackTuple = traceback.extract_stack(limit=limit + 1)[0]
return self._stack_tuple_to_function_line_filename(stackTuple) | PypiClean |
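# Illustrative usage (a sketch; the log file path, messages and do_work() are
# made-up placeholders):
#
# logger = TaskLogger("/tmp/task-1234.log")
# logger.log("task started")
# try:
#     do_work()
# except Exception as e:
#     logger.error(e)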
/Nbdler-3.0.3.tar.gz/Nbdler-3.0.3/nbdler/client/__init__.py | from . import aiohttp, requests
from .abstract import AbstractClient
from collections import defaultdict
__all__ = ['get_policy', 'ClientPolicy']
_solutions = defaultdict(list)
_name_solution = {}
class ClientPolicy:
def __init__(self, **specified_mapping):
self._specified = {k.lower(): v.lower() for k, v in specified_mapping.items()}
def get_solution(self, protocol):
""" 返回根据策略决定的客户端处理模块。
Args:
protocol: 要处理的协议
Returns:
返回客户端处理方案
"""
sol_name = self._specified.get(protocol, None)
if sol_name is None:
# Use the most recently registered client handler for this protocol as the default policy
sol_name = _solutions.get(protocol, [None])[-1]
if sol_name is None:
raise NotImplementedError(f'No handler policy found for protocol {protocol}.')
solution = _name_solution.get(sol_name, None)
if solution is None:
raise NotImplementedError(f'No client handler named {sol_name} was found.')
return solution
def __iter__(self):
return iter(self._specified.items())
class ProtocolSolution:
def __init__(self, module):
self._module = module
@property
def name(self):
return self._module.NAME
@property
def supported_protocols(self):
return self._module.PROTOCOL_SUPPORT
def is_async(self):
return self._module.ASYNC_EXECUTE
@property
def dlopen(self):
return self._module.ClientHandler.dlopen
def get_client(self, *args, **kwargs):
return self._module.ClientHandler(*args, **kwargs)
def get_session(self, *args, **kwargs):
return self._module.ClientSession(*args, **kwargs)
def get_policy(**kwargs):
return ClientPolicy(**kwargs)
def register(module):
""" 注册下载客户端处理模块。
客户端模块规范:
1. 客户端处理程序要求继承abstract_base.py中的AbstractClient类
2. 使用类变量NAME作为客户端的唯一标识名称,尽量避免与其他客户端重名,
重名的处理策略是后注册覆盖前注册。
3. 使用ClientHandler作为客户端的类名,或通过赋值该模块变量名实现
4. 使用ClientSession作为客户端会话,必须存在该变量,若不需要会话则赋值noop函数,
客户端会话创建不提供参数,若需要提供使用functions.partial传递定义
Args:
module: 协议处理解决方案
"""
global _solutions, _name_solution
solution = ProtocolSolution(module)
for protocol in solution.supported_protocols:
_solutions[protocol].append(solution.name)
_name_solution[solution.name] = solution
def main():
# Multithreaded HTTP/HTTPS, using the requests library
register(requests)
# Asynchronous HTTP/HTTPS, using the aiohttp library
register(aiohttp)
# Register the download clients
main() | PypiClean |
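# Illustrative usage (a sketch; the handler names assume the NAME attributes of
# the bundled requests/aiohttp modules):
#
# policy = get_policy(http='requests', https='aiohttp')
# solution = policy.get_solution('https')
# session = solution.get_session()
# client = solution.get_client(...)  # arguments depend on the chosen handler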
/EllucianEthosPythonClient-0.2.17.tar.gz/EllucianEthosPythonClient-0.2.17/versioneer.py | from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory.
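    For example (illustrative names only): with parentdir_prefix "myproject-",
    a tree unpacked into "myproject-1.2.3/" yields version "1.2.3".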
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
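    For example (illustrative values only): a clean checkout 3 commits past
    tag "1.2" at commit abc1234 renders as "1.2+3.gabc1234"; the same tree
    with uncommitted changes renders as "1.2+3.gabc1234.dirty".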
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
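    For example (illustrative values only): a clean checkout 3 commits past
    tag "1.2" at commit abc1234 renders as "1.2.post3+gabc1234".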
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version",
"date": None}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if 'py2exe' in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1) | PypiClean |
/EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/baseline_processor_request_py3.py |
from msrest.serialization import Model
class BaselineProcessorRequest(Model):
"""BaselineProcessorRequest.
:param include_in_cap: Indicates if the meter will be included in savings
calculations
:type include_in_cap: bool
:param adjust_area: Indicates if floor area adjustments should be made
:type adjust_area: bool
:param adjusted_cost_type_id: The type of cost adjustment to perform.
Possible values include 1 (current average unit cost) and 5 (net cost
difference)
:type adjusted_cost_type_id: int
:param adjust_winter: Indicates if winter weather adjustments should be
made
:type adjust_winter: bool
:param adjust_summer: Indicates if summer weather adjustments should be
made
:type adjust_summer: bool
:param baseline_months: The number of months in the baseline.
Setting this to anything other than 12 will turn off weather adjustments
     regardless of the settings in adjustWinter and adjustSummer
:type baseline_months: int
:param baseline_start_date: The baseline's start date
:type baseline_start_date: datetime
:param savings_start_date: The savings period's start date
:type savings_start_date: datetime
:param cooling_temp: The cooling balance point
:type cooling_temp: int
:param heating_temp: The heating balance point
:type heating_temp: int
:param use_extended_weather: Indicates if extended weather should be used.
Extended weather uses additional months prior to the baseline start date
to
determine weather sensitivity.
:type use_extended_weather: bool
:param extended_weather_start_date: The start date of the extended
weather. This should be 1, 2 or 3 years prior to the baseline start date.
Extended weather uses additional months prior to the baseline start date
to
determine weather sensitivity.
:type extended_weather_start_date: datetime
:param baseline_bills: Indicates if each bill in the baseline should be
included in the baseline regression
:type baseline_bills:
list[~energycap.sdk.models.BaselineProcessorRequestBaselineBill]
"""
_attribute_map = {
'include_in_cap': {'key': 'includeInCAP', 'type': 'bool'},
'adjust_area': {'key': 'adjustArea', 'type': 'bool'},
'adjusted_cost_type_id': {'key': 'adjustedCostTypeId', 'type': 'int'},
'adjust_winter': {'key': 'adjustWinter', 'type': 'bool'},
'adjust_summer': {'key': 'adjustSummer', 'type': 'bool'},
'baseline_months': {'key': 'baselineMonths', 'type': 'int'},
'baseline_start_date': {'key': 'baselineStartDate', 'type': 'iso-8601'},
'savings_start_date': {'key': 'savingsStartDate', 'type': 'iso-8601'},
'cooling_temp': {'key': 'coolingTemp', 'type': 'int'},
'heating_temp': {'key': 'heatingTemp', 'type': 'int'},
'use_extended_weather': {'key': 'useExtendedWeather', 'type': 'bool'},
'extended_weather_start_date': {'key': 'extendedWeatherStartDate', 'type': 'iso-8601'},
'baseline_bills': {'key': 'baselineBills', 'type': '[BaselineProcessorRequestBaselineBill]'},
}
def __init__(self, *, include_in_cap: bool=None, adjust_area: bool=None, adjusted_cost_type_id: int=None, adjust_winter: bool=None, adjust_summer: bool=None, baseline_months: int=None, baseline_start_date=None, savings_start_date=None, cooling_temp: int=None, heating_temp: int=None, use_extended_weather: bool=None, extended_weather_start_date=None, baseline_bills=None, **kwargs) -> None:
super(BaselineProcessorRequest, self).__init__(**kwargs)
self.include_in_cap = include_in_cap
self.adjust_area = adjust_area
self.adjusted_cost_type_id = adjusted_cost_type_id
self.adjust_winter = adjust_winter
self.adjust_summer = adjust_summer
self.baseline_months = baseline_months
self.baseline_start_date = baseline_start_date
self.savings_start_date = savings_start_date
self.cooling_temp = cooling_temp
self.heating_temp = heating_temp
self.use_extended_weather = use_extended_weather
self.extended_weather_start_date = extended_weather_start_date
self.baseline_bills = baseline_bills | PypiClean |
/Gewel-0.3.1-cp39-cp39-manylinux_2_24_x86_64.whl/gewel/_timekeeper.py | from typing import Iterable, Protocol, Union
import tvx
# noinspection PyPropertyDefinition
class TimekeeperProtocol(Protocol):
@property
def time(self) -> float: ...
def set_time(self, time: float): ...
def _manage_time(self, duration: float, update_time: bool): ...
class TimekeeperMixin:
"""
This mixin is used by classes that need to keep track
of a next-action time during the scripting phase. Next-action
time is the time that the next action, for example, the motion
created by the next call to :py:meth:`~XYDrawable.move_to`, will
begin.
See :ref:`draw_update_time` for more on next-action time.
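    A minimal illustrative sketch (assuming two timekeeper objects ``a`` and
    ``b`` already exist)::

        a.wait(2.0)      # a's next action now starts 2 seconds later
        b.wait_for(a)    # b will not act before a's next-action time
        sync([a, b])     # both now share the latest next-action time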
"""
def _manage_time(self: 'TimekeeperProtocol', duration: float, update_time: bool):
if update_time:
self.set_time(self.time + duration)
def wait(self, duration: float) -> None:
"""
Wait for a specified amount of time. This updates
the object's next-action time by adding a constant
amount of time to it.
See :ref:`draw_update_time` for more on next-action time.
Parameters
----------
duration
How long to wait, in seconds.
"""
self._manage_time(duration, True)
def wait_until(self: 'TimekeeperProtocol', at_least: float) -> None:
"""
Update the next-action time so that it is at least
the given time. If it is already greater than that,
change nothing.
See :ref:`draw_update_time` for more on next-action time.
Parameters
----------
at_least
The minimum new next-action time.
"""
if at_least > self.time:
self._manage_time(at_least - self.time, True)
def wait_for(
self,
other: Union['TimekeeperMixin', Iterable['TimekeeperMixin']]
) -> None:
"""
Wait for another object to finish whatever action it is currently
        doing. This method is used at scripting time to ensure that an
object updates its next-action time so that it is no earlier than
the next-action time of another object.
See :ref:`draw_update_time` for more on next-action time.
Parameters
----------
other
The object to wait for. Or, an iterable collection of
objects. If iterable, then wait for the one with the
latest time.
"""
if isinstance(other, TimekeeperMixin):
self.wait_until(getattr(other, 'time'))
else:
for o in other:
self.wait_for(o)
def ramp_attr_to(
self: 'TimekeeperProtocol',
name: str,
to: float,
duration: float,
update_time: bool = True
) -> None:
"""
Change the value of an attribute of the object from the
value it has at the current next-action time to a new value
by ramping it linearly between the old and new values over
the course of a given duration.
See :ref:`draw_update_time` for more on next-action time.
Parameters
----------
name
Name of the attribute to update.
to
Value to change to.
duration
Time in seconds over which the value ramps from the
old to the new value.
update_time
If ``True``, update the object's next-action time so
that it is ``duration`` later than it was.
"""
frm = getattr(self, name)
if isinstance(frm, tvx.Tvf):
frm_val = frm(self.time)
else:
frm_val = frm
rmp = tvx.ramp(frm_val, to, self.time, duration)
ct = tvx.cut(frm, self.time, rmp)
setattr(self, name, ct)
self._manage_time(duration, update_time)
def sync(tks: Iterable[Union[TimekeeperMixin, TimekeeperProtocol]]) -> None:
"""
Synchronize objects so that all of their next-action times are
set to the latest next-action time of any of them.
See :ref:`draw_update_time` for more on next-action time.
Parameters
----------
tks
The objects to synchronize.
"""
if not isinstance(tks, list):
tks = list(tks)
max_time = max([tk.time for tk in tks])
for tk in tks:
tk.wait_until(max_time)
def all_wait_for(waiters: Iterable[TimekeeperMixin], waited_on: TimekeeperMixin) -> None:
"""
    Have all of the objects in a group wait for another object
    (or, if an iterable is given, for the latest of those objects).
Parameters
----------
waiters
Objects that should wait.
waited_on
        The object (or objects) to be waited on.
"""
for w in waiters:
w.wait_for(waited_on) | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/jinja2/jinja2/exceptions.py | from ._compat import imap
from ._compat import implements_to_string
from ._compat import PY2
from ._compat import text_type
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode("utf-8")
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode("utf-8", "replace")
def __unicode__(self):
return self.message or u""
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist.
.. versionchanged:: 2.11
If the given name is :class:`Undefined` and no message was
provided, an :exc:`UndefinedError` is raised.
"""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self, name)
if message is None:
from .runtime import Undefined
if isinstance(name, Undefined):
name._fail_with_undefined_error()
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionchanged:: 2.11
If a name in the list of names is :class:`Undefined`, a message
about it being undefined is shown rather than the empty string.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
from .runtime import Undefined
parts = []
for name in names:
if isinstance(name, Undefined):
parts.append(name._undefined_message)
else:
parts.append(name)
message = u"none of the templates given were found: " + u", ".join(
imap(text_type, parts)
)
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = "line %d" % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, " " + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(" " + line.strip())
return u"\n".join(lines)
def __reduce__(self):
# https://bugs.python.org/issue1692335 Exceptions that take
# multiple required arguments have problems with pickling.
# Without this, raises TypeError: __init__() missing 1 required
# positional argument: 'lineno'
return self.__class__, (self.message, self.lineno, self.name, self.filename)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
""" | PypiClean |
/BuildSimHubAPI-2.0.0-py3-none-any.whl/buildsimdata/convert_hourly_to_weekly.py | import pandas as pd
from datetime import datetime as dt
import logging
import calendar
import matplotlib.pyplot as plt
import numpy as np
from pyod.models.knn import KNN
from pyod.models.lof import LOF
from pyod.models.pca import PCA
# from pyod.models.mcd import MCD
# from pyod.models.cblof import CBLOF
# from pyod.models.hbos import HBOS
def generate_df_frame(processedarr, num):
plot_df = pd.DataFrame(
{'x': range(0, 24), 'Monday': np.asarray(processedarr[num][0]), 'Tuesday': np.asarray(processedarr[num][1]),
'Wednesday': np.asarray(processedarr[num][2]), 'Thursday': np.asarray(processedarr[num][3]),
'Friday': np.asarray(processedarr[num][4]), 'Saturday': np.asarray(processedarr[num][5]),
'Sunday': np.asarray(processedarr[num][6])})
return plot_df
def generate_graph(row_fields, data_after):
plt.style.use('seaborn-darkgrid')
palette = plt.get_cmap('Set1')
num = 0
for index in range(len(row_fields) - 1):
plt_df = generate_df_frame(data_after, num)
num += 1
plt.subplot(3, 3, num)
i = 0
for v in plt_df.drop('x', axis=1):
plt.plot(plt_df['x'], plt_df[v], marker='', color=palette(i), linewidth=2.4, alpha=0.9,
label=row_fields[index + 1])
i = i + 1
plt.suptitle("Plot of average lighting power consuption at different hour across different days", fontsize=13,
fontweight=0, color='black', style='italic', y=1.02)
plt.show()
def main(model_selector):
model = {1: KNN(), 2: LOF(), 3: PCA()}
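    # model_selector chooses the pyod outlier detector used below:
    # 1 = k-nearest neighbours (KNN), 2 = local outlier factor (LOF),
    # 3 = principal component analysis (PCA).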
df = pd.read_csv('pow_lighting.csv') # original dataframe
    df = df.dropna()  # after removing NA rows
row_fields = df.keys() # the data fields
data_store = [] # store the df data fields -> weekday -> hour
    no_outlier = []  # store the inlier data
    data_after = []  # calc the average of each hour's data and use it to generate the graph
for i in range(1, len(row_fields)):
data_store.append(list())
no_outlier.append(list())
data_after.append(list())
for j in range(7):
data_store[i - 1].append(list())
no_outlier[i - 1].append(list())
data_after[i - 1].append(list())
data_after[i - 1][j] = []
for z in range(24):
data_store[i - 1][j].append(list())
no_outlier[i - 1][j].append(list())
for index, row in df.iterrows():
date_str = row[row_fields[0]]
templist = date_str.split('/')
date_str = templist[0] + '/' + templist[1] + '/20' + templist[2]
date_obj = dt.strptime(date_str, '%m/%d/%Y %H:%M')
weekday = date_obj.weekday()
hour_in_date = date_obj.hour
for m in range(1, len(row_fields)):
data_store[m - 1][weekday][hour_in_date].append(row[row_fields[m]])
for i in range(len(data_store)):
for j in range(len(data_store[i])):
for z in range(len(data_store[i][j])):
clf = model[model_selector]
print(data_store[i][j][z])
X_train = np.asarray(data_store[i][j][z]).reshape(-1, 1)
clf.fit(X_train)
y_train_pred = clf.labels_
for index in range(len(data_store[i][j][z])):
if y_train_pred[index] == 0:
no_outlier[i][j][z].append(data_store[i][j][z][index])
for i in range(len(data_store)):
for j in range(len(data_store[i])):
for z in range(len(data_store[i][j])):
data_after[i][j].append(float(sum(no_outlier[i][j][z]) / len(no_outlier[i][j][z])))
generate_graph(row_fields, data_after)
main(1) | PypiClean |
/Ezpub-karjakak-1.3.3.tar.gz/Ezpub-karjakak-1.3.3/README.md | # Ezpub [cli-environment]
## Tool to help developer to publish package to PyPI
## Installation
```
pip3 install Ezpub-karjakak
```
## Usage
**Create a token environment variable and save it for publishing with twine [the token is keyed in via a tkinter simpledialog so that it stays hidden].**
```
ezpub -t None
```
**Delete saved token.**
```
ezpub -t d
```
**Create and save a token [from an environment variable].**
```
# Windows
ezpub -t %VARTOKEN%
# MacOS X
ezpub -t $VARTOKEN
```
**Build the package and create [build, dist, and package.egg-info] for uploading to PyPI.**
```
# Window
ezpub -b "\package-path"
# MacOS X
ezpub -b /package_path
```
**TAKE NOTE:**
* **Ezpub will try to move any existing [build, dist, and package.egg-info] to a created archive folder and create new ones.**
* **If an exception occurs, the user needs to remove them manually.**
**Publish to PyPI.**
```
# For Windows only
ezpub -p \package-path\dist\*
# For MacOS X
ezpub -p "package_path/dist/*"
```
**TAKE NOTE:**
* **If the token is not created yet, ~~it will start process "-t" automatically~~ the user will be prompted to create one first.**
* **Some firewalls do not allow moving files to the archive folder; you may need to exclude Ezpub from them.**
* **You can move the files manually and use `py -m build` instead (a sketch follows these notes). [Please see the source code for assurance]**
* **MacOS X:**
* **Extra secure with locking.**
* **Dependency:**
* **twine**
* **Clien**
* **filepmon**
* **filfla**
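**Manual fallback (an illustrative sketch only; assumes `build` and `twine` are installed and that the old [build, dist, and package.egg-info] folders were moved or removed by hand):**
```
# Windows
py -m build
py -m twine upload dist/*
```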
## Links
* **https://packaging.python.org/tutorials/packaging-projects/**
* **https://twine.readthedocs.io/en/latest/** | PypiClean |
/FeatherStore-0.2.1-py3-none-any.whl/featherstore/_table/insert.py | import pandas as pd
import pyarrow as pa
from featherstore.connection import Connection
from featherstore._table import _raise_if
from featherstore._table import _table_utils
def can_insert_table(table, df):
Connection._raise_if_not_connected()
_raise_if.table_not_exists(table)
_raise_if.df_is_not_pandas_table(df)
if isinstance(df, pd.Series):
cols = [df.name]
else:
cols = df.columns.tolist()
_raise_if.index_name_not_same_as_stored_index(df, table._table_data)
_raise_if.col_names_contains_duplicates(cols)
_raise_if.index_values_contains_duplicates(df.index)
_raise_if.index_type_not_same_as_stored_index(df, table._table_data)
_raise_if.cols_does_not_match(df, table._table_data)
def insert_data(df, *, to):
index_name = _table_utils.get_index_name(df)
_raise_if_rows_in_old_data(to, df, index_name)
df = _table_utils.concat_arrow_tables(to, df)
df = _table_utils.sort_arrow_table(df, by=index_name)
return df
def _raise_if_rows_in_old_data(old_df, df, index_name):
index = df[index_name]
old_index = old_df[index_name]
is_in = pa.compute.is_in(index, value_set=old_index)
rows_in_old_df = pa.compute.any(is_in).as_py()
if rows_in_old_df:
raise ValueError("Some rows already in stored table")
def create_partitions(df, rows_per_partition, partition_names, all_partition_names):
partitions = _table_utils.make_partitions(df, rows_per_partition)
new_partition_names = _insert_new_partition_ids(partitions, partition_names,
all_partition_names)
partitions = _table_utils.assign_ids_to_partitions(partitions, new_partition_names)
return partitions
def _insert_new_partition_ids(partitioned_df, partition_names, all_partition_names):
num_partitions = len(partitioned_df)
num_partition_names = len(partition_names)
num_names_to_make = num_partitions - num_partition_names
subsequent_partition = _table_utils.get_next_item(item=partition_names[-1],
sequence=all_partition_names)
new_partition_names = _make_partition_names(num_names_to_make,
partition_names,
subsequent_partition)
return new_partition_names
def _make_partition_names(num_names, partition_names, subsequent_partition):
last_id = _table_utils.convert_partition_id_to_int(partition_names[-1])
subsequent_partition_exists = subsequent_partition is not None
if subsequent_partition_exists:
subsequent_id = _table_utils.convert_partition_id_to_int(subsequent_partition)
increment = (subsequent_id - last_id) / (num_names + 1)
else: # Called only when partition_names[-1] is the end of the table
increment = 1
new_partition_names = partition_names.copy()
for partition_num in range(1, num_names + 1):
new_partition_id = last_id + increment * partition_num
new_partition_id = _table_utils.convert_int_to_partition_id(new_partition_id)
new_partition_names.append(new_partition_id)
return sorted(new_partition_names)
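# Illustrative example (hypothetical partition ids): with the last existing name
# having id 3, a subsequent partition of id 4, and two names to create, the
# increment is (4 - 3) / 3, giving interpolated ids of roughly 3.33 and 3.67;
# these are converted back to partition ids so the new partitions sort between
# the existing neighbours without renaming them.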
def has_still_default_index(table, df):
has_default_index = table._table_data["has_default_index"]
if not has_default_index:
return False
index_name = table._table_data["index_name"]
    rows = df[index_name]
    if len(rows) == 0:
        # Checked before rows[0] below so that an empty insert does not raise IndexError.
        return True
    last_stored_value = _table_utils.get_last_stored_index_value(table._partition_data)
    first_row_value = rows[0].as_py()
    rows_are_continuous = all(a.as_py() + 1 == b.as_py() for a, b in zip(rows, rows[1:]))
    _has_still_default_index = (first_row_value > last_stored_value
                                and rows_are_continuous)
return _has_still_default_index | PypiClean |
/Myghty-1.2.tar.gz/Myghty-1.2/lib/myghty/importer.py |
import string, os, sys, imp, re, stat, types, time, weakref, __builtin__
"""
module loading and management. loads modules by file paths directly, as well
as via module names. keeps track of last modified time to provide a "reasonable"
level of "reload when changed" behavior without restarting the application. By
"reasonable" we include the module itself, but not its parent package or any
of its named dependencies.
in the case of file-based modules, which correspond to compiled templates as well
as module components resolved via file paths, they are kept out
of sys.modules so they can be cleanly reloaded when modified, and removed from
memory when they fall out of scope. To maintain "importability" of these
modules, the builtin __import__ method is overridden application-wide to
search for these modules in a local weak dictionary first before proceeding to
normal import behavior.
in the case of modules located by a package/module name,
they are loaded into sys.modules via the default __import__ method
and are reloaded via reload(). For these modules, the "singleton" behavior of
Python's regular module system applies. This behavior includes the caveats that old
attributes stay lying around, and the module is reloaded "in place" which in rare
circumstances could affect code executing against the module. The advantage is that
the module's parent packages all remain pointing to the correctly reloaded module
and no exotic synchronization-intensive "reconnection" of newly reloaded modules
to their packages needs to happen.
The "importability" of a module loaded here is usually not even an issue as it
typcially is only providing Myghty components which are solely invoked by the Interpreter.
However, in addition to the case where the developer is explicitly importing
from a module that also provides Myghty components, the other case when the module requires
import is when a class defined within it is deserialized, such as from a cache or session
object; hence the need to override __import__ as well as maintaining the structure
of packages.
"""
modules = weakref.WeakValueDictionary()
# override __import__ to look in our own local module dict first
builtin_importer = __builtin__.__import__
if sys.version_info >= (2, 5):
def import_module(name, globals = None, locals = None, fromlist = None, level = -1):
if level == -1:
try:
return modules[name].module
except KeyError:
pass
return builtin_importer(name, globals, locals, fromlist, level)
else:
def import_module(name, globals = None, locals = None, fromlist = None):
try:
return modules[name].module
except KeyError:
return builtin_importer(name, globals, locals, fromlist)
__builtin__.__import__ = import_module
class ModValue:
"""2.3 is not letting us make a weakref to a module. so create a lovely
circular reference thingy and weakref to that."""
def __init__(self, module):
self.module = module
module.__modvalue__ = self
def module(name):
"""imports a module by string name via normal module importing, attaches timestamp information"""
if name == '__main__':
return sys.modules[name]
mod = builtin_importer(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
if not hasattr(mod, "__modified_time"):
mod.__modified_time = modulemodtime(mod)
mod.__is_file = False
return mod
def filemodule(path, id = None, reload = True, forcereload = False):
"""loads a module directly from a file path."""
if id is None:
id = re.sub(r'\W+','_',path)
if not forcereload:
try:
module = modules[id].module
if not reload or module.__modified_time >= modulemodtime(module):
return module
except KeyError:
pass
modfile = open(path, 'r')
try:
#print "loading: " + path
# Check mtime before loading module, so that modified_time
# is guaranteed not to be later than the mtime of the loaded
# version of the file.
modified_time = os.fstat(modfile.fileno())[stat.ST_MTIME]
module = imp.load_source(id, path, modfile)
del sys.modules[id]
modules[id] = ModValue(module)
module.__modified_time = modified_time
module.__is_file = True
return module
finally:
modfile.close()
def reload_module(module):
"""reloads any module that was loaded with filemodule(), if its
modification time has changed.
"""
if not hasattr(module, '__modified_time'):
# if we didnt load it, we dont change it
return module
elif module.__modified_time < modulemodtime(module):
if module.__is_file is False:
#print "regular reload: " + module.__name__
# Get mtime before reload to ensure it is <= the actual mtime
# of the reloaded module.
modified_time = modulemodtime(module)
reload(module)
module.__modified_time = modified_time
return module
else:
file = module.__file__
file = re.sub(r'\.pyc$|\.pyo$', '.py', file)
return filemodule(file, id = module.__name__, forcereload = True)
else:
return module
def mod_time(module):
try:
return module.__modified_time
except AttributeError:
return modulemodtime(module)
def modulemodtime(module):
"""returns the modified time of a module's source file"""
try:
file = module.__file__
pyfile = re.sub(r'\.pyc$|\.pyo$', '.py', file)
if os.access(pyfile, os.F_OK):
file = pyfile
#print "checking time on " + file
st = os.stat(file)
return st[stat.ST_MTIME]
except AttributeError:
return None
class ObjectPathIterator:
"""walks a file path looking for a python module. once it loads the
python module, then continues walking the path into module's attributes."""
def __init__(self, path, reload = True):
self.path = path
self.reload = reload
self.module = None
self.objpath = []
if isinstance(path, types.ModuleType):
self.module = path
if reload:
reload_module(self.module)
self.last_modified = None
def get_unit(self, tokens, stringtokens = [], moduletokens = []):
if isinstance(self.path, str):
return self.get_string_unit(tokens + stringtokens)
else:
return self.get_attr_unit(tokens + moduletokens)
def get_string_unit(self, tokens):
for token in tokens:
path = self.path + "/" + token
#print "check path " + repr(path)
if self._check_module(path):
return (self.path, token)
if not os.access(path, os.F_OK):
continue
self.path = path
return (self.path, token)
else:
raise StopIteration
def get_attr_unit(self, tokens):
for token in tokens:
try:
#print "check attr path " + repr(self.path) + " " + token
attr = getattr(self.path, token)
if isinstance(attr, types.ModuleType):
raise AttributeError(token)
self.path = attr
self.objpath.append(token)
return (self.path, token)
except AttributeError:
continue
else:
self.path = None
raise StopIteration
def _check_module(self, path):
try:
st = os.stat(path + ".py")
except OSError:
return False
if stat.S_ISREG(st[stat.ST_MODE]):
self.path = filemodule(path + ".py", reload = self.reload)
self.module = self.path
self.last_modified = mod_time(self.module)
return True | PypiClean |
/CoilMQ-1.0.1.tar.gz/CoilMQ-1.0.1/coilmq/scheduler.py | import abc
import random
__authors__ = ['"Hans Lellelid" <[email protected]>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
class SubscriberPriorityScheduler(object):
""" Abstract base class for choosing which recipient (subscriber) should receive a message. """
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def choice(self, subscribers, message):
"""
Chooses which subscriber (from list) should recieve specified message.
@param subscribers: Collection of subscribed connections eligible to receive message.
@type subscribers: C{list} of L{coilmq.server.StompConnection}
@param message: The message to be delivered.
@type message: L{stompclient.frame.Frame}
@return: A selected subscriber from the list or None if no subscriber could be chosen (e.g. list is empty).
@rtype: L{coilmq.server.StompConnection}
"""
class QueuePriorityScheduler(object):
"""
Abstract base class for objects that provide a way to prioritize the queues.
"""
def choice(self, queues, connection):
"""
Choose which queue to select for messages to specified connection.
@param queues: A C{dict} mapping queue name to queues (sets of frames) to which
specified connection is subscribed.
@type queues: C{dict} of C{str} to C{set} of L{stompclient.frame.Frame}
@param connection: The connection that is going to be delivered the frame(s).
@type connection: L{coilmq.server.StompConnection}
@return: A selected queue destination (name) or None if queues C{dict} is empty.
@rtype: C{str}
"""
raise NotImplementedError
class RandomSubscriberScheduler(SubscriberPriorityScheduler):
""" A delivery scheduler that chooses a random subscriber for message recipient. """
def choice(self, subscribers, message):
"""
Chooses a random connection from subscribers to deliver specified message.
@param subscribers: Collection of subscribed connections to destination.
@type subscribers: C{list} of L{coilmq.server.StompConnection}
@param message: The message to be delivered.
@type message: L{stompclient.frame.Frame}
@return: A random subscriber from the list or None if list is empty.
@rtype: L{coilmq.server.StompConnection}
"""
if not subscribers:
return None
return random.choice(subscribers)
class FavorReliableSubscriberScheduler(SubscriberPriorityScheduler):
"""
A random delivery scheduler which prefers reliable subscribers.
"""
def choice(self, subscribers, message):
"""
Choose a random connection, favoring those that are reliable from
subscriber pool to deliver specified message.
@param subscribers: Collection of subscribed connections to destination.
@type subscribers: C{list} of L{coilmq.server.StompConnection}
@param message: The message to be delivered.
@type message: L{stompclient.frame.Frame}
@return: A random subscriber from the list or None if list is empty.
@rtype: L{coilmq.server.StompConnection}
"""
if not subscribers:
return None
reliable_subscribers = [
s for s in subscribers if s.reliable_subscriber]
if reliable_subscribers:
return random.choice(reliable_subscribers)
else:
return random.choice(subscribers)
class RandomQueueScheduler(QueuePriorityScheduler):
"""
Implementation of L{QueuePriorityScheduler} that selects a random queue from the list.
"""
def choice(self, queues, connection):
"""
Chooses a random queue for messages to specified connection.
@param queues: A C{dict} mapping queue name to queues (sets of frames) to which
specified connection is subscribed.
@type queues: C{dict} of C{str} to C{set} of L{stompclient.frame.Frame}
@param connection: The connection that is going to be delivered the frame(s).
@type connection: L{coilmq.server.StompConnection}
@return: A random queue destination or None if list is empty.
@rtype: C{str}
"""
if not queues:
return None
return random.choice(list(queues.keys())) | PypiClean |
/Kapok-0.2.1-cp35-cp35m-win_amd64.whl/kapok/cohoptp.py | import time
import numpy as np
import numpy.linalg as linalg
def pdopt(tm, om, numph=30, step=50, reg=0.0, returnall=False):
"""Phase diversity coherence optimization.
Solves an eigenvalue problem in order to find the complex coherences with
maximum separation (|a - b|) in the complex plane. Of these two
coherences, one should in theory represent the coherence with the
smallest ground contribution present in the data (the 'high' coherence).
The other then represents the coherence with the largest ground
contribution present in the data (the 'low' coherence).
Arguments:
tm (array): The polarimetric covariance (T) matrix of the data,
with dimensions: [az, rng, num_pol, num_pol]. Note that in the
HDF5 file, covariance matrix elements below the diagonal are
zero-valued, in order to save disk space. The (j,i) elements
should therefore calculated from the complex conjugate of the
(i,j) elements using the kapok.lib.makehermitian() function before
the matrix is passed to this function. Note: This should be the
average matrix of the two tracks forming the baseline, assuming
polarimetric stationarity.
om (array): The polarimetric interferometric (Omega) matrix of the
data, with dimensions [az, rng, num_pol, num_pol].
numph (int): The number of phase shifts to calculate coherences for.
The higher the number, the smaller the spacing of the coherences
around the coherence region perimeter. The smaller the number,
the faster the computation time. Default: 30.
step (int): Block size (in pixels) used for linalg.eig. Higher values
will use more memory but can run a little faster.
Default: 50.
reg (float): Regularization factor. The tm matrix is added to
the matrix reg*Tr(tm)*I, where Tr(tm) is the trace of tm, and I
is the identity matrix. Similarly, the omega matrix is added to
the matrix reg*Tr(om)*I. This regularization reduces the spread
of the coherence region for pixels where the backscatter is
highly polarization dependent.
returnall (bool): True/False flag. Set to true to return the
weight vectors for the optimized coherences, as well as the
pair of minor axis coherences (optimized coherence pair with
minimum separation in the complex plane). Default: False.
Returns:
gammamax (array): The optimized coherence with the max eigenvalue.
gammamin (array): The optimized coherence with the min eigenvalue.
        gammaminormax (array): Of the coherences with the minimum separation
            in the complex plane (e.g., along the minor axis of an elliptical
            coherence region), this will be the one with the max eigenvalue.
            Only returned if returnall == True.
        gammaminormin (array): Of the coherences with the minimum separation
            in the complex plane (e.g., along the minor axis of an elliptical
            coherence region), this will be the one with the min eigenvalue.
Only returned if returnall == True.
wmax (array): The weight vector for the max eigenvalue coherence, if
returnall == True.
wmin (array): The weight vector for the min eigenvalue coherence, if
returnall == True.
"""
dim = np.shape(tm)
# Matrix regularization:
if reg > 0:
regmat = np.zeros(dim, dtype='complex64')
regmat[:,:] = np.eye(dim[2])
regmat = regmat * reg * np.trace(tm, axis1=2, axis2=3)[:,:,np.newaxis,np.newaxis]
tm = tm + regmat
regmat = np.zeros(dim, dtype='complex64')
regmat[:,:] = np.eye(dim[2])
regmat = regmat * reg * np.trace(om, axis1=2, axis2=3)[:,:,np.newaxis,np.newaxis]
om = om + regmat
del regmat
# Arrays to store coherence separation, and the two complex coherence values.
cohsize = (dim[0],dim[1]) # number of az, rng pixels
cohdiff = np.zeros(cohsize,dtype='float32')
gammamax = np.zeros(cohsize,dtype='complex64')
gammamin = np.zeros(cohsize,dtype='complex64')
# Arrays to store minor axis coherences.
mincohdiff = np.ones(cohsize,dtype='float32') * 99
gammaminormax = np.zeros(cohsize,dtype='complex64')
gammaminormin = np.zeros(cohsize,dtype='complex64')
# Arrays to store polarimetric weighting vectors for the optimized coherences.
weightsize = (dim[0],dim[1],dim[3])
wmax = np.zeros(weightsize,dtype='complex64')
wmin = np.zeros(weightsize,dtype='complex64')
# Main Loop
for Ph in np.arange(0,numph): # loop through rotation angles
Pr = Ph * np.pi / numph # phase shift to be applied
print('kapok.cohopt.pdopt | Current Progress: '+str(np.round(Pr/np.pi*100,decimals=2))+'%. ('+time.ctime()+') ', end='\r')
for az in range(0,dim[0],step):
azend = az + step
if azend > dim[0]:
azend = dim[0]
for rng in range(0,dim[1],step):
rngend = rng + step
if rngend > dim[1]:
rngend = dim[1]
omblock = om[az:azend,rng:rngend]
tmblock = tm[az:azend,rng:rngend]
z12 = omblock.copy()
# Apply phase shift to omega matrix:
z12 = z12*np.exp(1j*Pr)
z12 = 0.5 * (z12 + np.rollaxis(np.conj(z12),3,start=2))
# Check if any pixels have singular covariance matrices.
# If so, set those matrices to the identity, to keep an
# exception from being thrown by linalg.inv().
det = linalg.det(tmblock)
ind = (det == 0)
if np.any(ind):
tmblock[ind] = np.eye(dim[3])
# Solve the eigenvalue problem:
nu, w = linalg.eig(np.einsum('...ij,...jk->...ik', linalg.inv(tmblock), z12))
wH = np.rollaxis(np.conj(w),3,start=2)
Tmp = np.einsum('...ij,...jk->...ik', omblock, w)
Tmp12 = np.einsum('...ij,...jk->...ik', wH, Tmp)
Tmp = np.einsum('...ij,...jk->...ik', tmblock, w)
Tmp11 = np.einsum('...ij,...jk->...ik', wH, Tmp)
azind = np.tile(np.arange(0,w.shape[0]),(w.shape[1],1)).T
rngind = np.tile(np.arange(0,w.shape[1]),(w.shape[0],1))
lmin = np.argmin(nu,axis=2)
gmin = Tmp12[azind,rngind,lmin,lmin] / np.abs(Tmp11[azind,rngind,lmin,lmin])
lmax = np.argmax(nu,axis=2)
gmax = Tmp12[azind,rngind,lmax,lmax] / np.abs(Tmp11[azind,rngind,lmax,lmax])
ind = (np.abs(gmax-gmin) > cohdiff[az:azend,rng:rngend])
# If we've found the coherences with the best separation
# so far, save them.
if np.any(ind):
(azupdate, rngupdate) = np.where(ind)
cohdiff[az+azupdate,rng+rngupdate] = np.abs(gmax-gmin)[azupdate,rngupdate]
gammamax[az+azupdate,rng+rngupdate] = gmax[azupdate,rngupdate]
gammamin[az+azupdate,rng+rngupdate] = gmin[azupdate,rngupdate]
if returnall:
wmax[az+azupdate,rng+rngupdate,:] = np.squeeze(w[azupdate,rngupdate,:,lmax[azupdate,rngupdate]])
wmin[az+azupdate,rng+rngupdate,:] = np.squeeze(w[azupdate,rngupdate,:,lmin[azupdate,rngupdate]])
# If returnall is True, also check if this coherence pair
# has the smallest separation found so far.
if returnall:
ind = (np.abs(gmax-gmin) < mincohdiff[az:azend,rng:rngend])
if np.any(ind):
(azupdate, rngupdate) = np.where(ind)
mincohdiff[az+azupdate,rng+rngupdate] = np.abs(gmax-gmin)[azupdate,rngupdate]
gammaminormax[az+azupdate,rng+rngupdate] = gmax[azupdate,rngupdate]
gammaminormin[az+azupdate,rng+rngupdate] = gmin[azupdate,rngupdate]
print('kapok.cohopt.pdopt | Optimization complete. ('+time.ctime()+') ')
if returnall:
return gammamax, gammamin, gammaminormax, gammaminormin, wmax, wmin
else:
return gammamax, gammamin
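# Usage sketch (editor's addition, not part of the original module). The array
# shapes are assumptions; in practice tm and om come from a kapok Scene.
#
#     # tm, om: complex64 arrays of shape [az, rng, 3, 3], tm Hermitian
#     gamma_high, gamma_low = pdopt(tm, om, numph=30, step=50, reg=0.0)
#     # With returnall=True the minor-axis pair and weight vectors are returned too:
#     # gh, gl, gminor_hi, gminor_lo, wmax, wmin = pdopt(tm, om, returnall=True)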
def pdopt_pixel(tm, om, numph=60, reg=0.0):
"""Phase diversity coherence optimization for a single pixel.
Same functionality as the pdopt function above, but for a single pixel
only. This is the function called when plotting a coherence region.
Arguments:
tm (array): The polarimetric covariance (T) matrix of the data,
with dimensions: [num_pol, num_pol]. Note that in the
HDF5 file, covariance matrix elements below the diagonal are
zero-valued, in order to save disk space. The (j,i) elements
            should therefore be calculated from the complex conjugate of the
(i,j) elements using the kapok.lib.makehermitian() function before
the matrix is passed to this function. Note: This should be the
average matrix of the two tracks forming the baseline, assuming
polarimetric stationarity.
om (array): The polarimetric interferometric (Omega) matrix of the
data, with dimensions [num_pol, num_pol].
numph (int): The number of phase shifts to calculate coherences for.
The higher the number, the smaller the spacing of the coherences
around the coherence region perimeter. The smaller the number,
            the faster the computation time. Default: 60.
reg (float): Regularization factor. The tm matrix is added to
the matrix reg*Tr(tm)*I, where Tr(tm) is the trace of tm, and I
is the identity matrix. Similarly, the omega matrix is added to
the matrix reg*Tr(om)*I. This regularization reduces the spread
of the coherence region for pixels where the backscatter is
highly polarization dependent.
Returns:
gammamax (complex): the optimized coherence with the max eigenvalue.
gammamin (complex): the optimized coherence with the min eigenvalue.
gammaregion (array): Every coherence from the solved eigenvalue
problems. These coherences will lie around the edge of the
coherence region.
"""
cohdiff = 0
gammaregion = np.empty((numph*2 + 1),dtype='complex')
# Matrix regularization:
if reg > 0:
tm = tm + reg*np.trace(tm)*np.eye(3)
om = om + reg*np.trace(om)*np.eye(3)
for Ph in range(0,numph): # loop through rotation angles
Pr = Ph * np.pi / numph # phase shift to be applied
# Apply phase shift to omega matrix:
z12 = om.copy()*np.exp(1j*Pr)
z12 = 0.5 * (z12 + np.transpose(np.conj(z12)))
# Solve the eigenvalue problem:
nu, w = linalg.eig(np.dot(linalg.inv(tm),z12))
wH = np.transpose(np.conj(w))
Tmp = np.dot(om,w)
Tmp12 = np.dot(wH,Tmp)
Tmp = np.dot(tm,w)
Tmp11 = np.dot(wH,Tmp)
l = np.argmin(nu)
gmin = Tmp12[l,l] / np.abs(Tmp11[l,l]) # min eigenvalue coherence
l = np.argmax(nu)
gmax = Tmp12[l,l] / np.abs(Tmp11[l,l]) # max eigenvalue coherence
gammaregion[Ph] = gmin
gammaregion[Ph+numph] = gmax
if (np.abs(gmax-gmin) > cohdiff):
cohdiff = np.abs(gmax-gmin)
gammamax = gmax
gammamin = gmin
gammaregion[-1] = gammaregion[0] # copy the first coherence to the end of the array, for a continuous coherence region plot
    return gammamax, gammamin, gammaregion
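# Usage sketch (editor's addition): plotting a single-pixel coherence region from
# the perimeter coherences returned above. matplotlib is an assumption and is not
# imported by this module.
#
#     # tm, om: complex64 arrays of shape [3, 3] for one pixel
#     gmax, gmin, gregion = pdopt_pixel(tm, om, numph=60)
#     # import matplotlib.pyplot as plt
#     # plt.plot(np.real(gregion), np.imag(gregion))                  # region boundary
#     # plt.plot(np.real([gmax, gmin]), np.imag([gmax, gmin]), 'o')   # optimized pair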
# File: PMTD/maskrcnn_benchmark/apex/apex/contrib/multihead_attn/fast_self_multihead_attn_func.py (from CsuPMTD-1.0.27.tar.gz)
import torch
import fast_self_multihead_attn
import fast_self_multihead_attn_bias
import fast_self_multihead_attn_bias_additive_mask
class FastSelfAttnFunc(torch.autograd.Function) :
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, inputs, input_weights, output_weights, input_biases, output_biases, pad_mask, mask_additive, dropout_prob):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
use_mask = (pad_mask is not None)
if use_biases_t[0]:
if not mask_additive:
input_lin_results, \
softmax_results, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = \
fast_self_multihead_attn_bias.forward( \
use_mask, \
use_time_mask, \
is_training, \
heads, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
pad_mask if use_mask else null_tensor, \
dropout_prob)
else:
input_lin_results, \
softmax_results, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = \
fast_self_multihead_attn_bias_additive_mask.forward( \
use_mask, \
use_time_mask, \
is_training, \
heads, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
pad_mask if use_mask else null_tensor, \
dropout_prob)
else:
input_lin_results, \
softmax_results, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = \
fast_self_multihead_attn.forward( \
use_mask, \
use_time_mask, \
is_training, \
heads, \
inputs, \
input_weights, \
output_weights, \
pad_mask if use_mask else null_tensor, \
dropout_prob)
ctx.save_for_backward(use_biases_t, \
heads_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
use_biases_t, \
heads_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t = ctx.saved_tensors
if use_biases_t[0]:
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads = \
fast_self_multihead_attn_bias.backward( \
heads_t[0], \
output_grads, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t[0])
else:
input_bias_grads = None
output_bias_grads = None
input_grads, \
input_weight_grads, \
output_weight_grads = \
fast_self_multihead_attn.backward( \
heads_t[0], \
output_grads, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t[0])
        return None, None, None, input_grads, input_weight_grads, output_weight_grads, input_bias_grads, output_bias_grads, None, None, None
fast_self_attn_func = FastSelfAttnFunc.apply
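# Usage sketch (editor's addition, not part of the original module). The shapes
# and time-major layout are assumptions inferred from the argument names above;
# the fused CUDA extensions must be built and a GPU available.
#
#     # inputs: [seq_len, batch, embed_dim] half-precision CUDA tensor
#     # outputs = fast_self_attn_func(use_time_mask, is_training, heads,
#     #                               inputs, input_weights, output_weights,
#     #                               input_biases, output_biases,
#     #                               pad_mask, mask_additive, dropout_prob)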
# File: moose/utils/crypto.py (from Moose-0.9.9b3.tar.gz)
from __future__ import unicode_literals
import binascii
import hashlib
import hmac
import random
import struct
import time
from moose.conf import settings
from moose.utils import six
from moose.utils.encoding import force_bytes
from moose.utils.six.moves import range
# Use the system PRNG if possible
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def salted_hmac(key_salt, value, secret=None):
"""
Returns the HMAC-SHA1 of 'value', using a key generated from key_salt and a
secret (which defaults to settings.SECRET_KEY).
A different key_salt should be passed in for every application of HMAC.
"""
if secret is None:
secret = settings.SECRET_KEY
key_salt = force_bytes(key_salt)
secret = force_bytes(secret)
# We need to generate a derived key from our base key. We can do this by
# passing the key_salt and our base key through a pseudo-random function and
# SHA1 works nicely.
key = hashlib.sha1(key_salt + secret).digest()
# If len(key_salt + secret) > sha_constructor().block_size, the above
# line is redundant and could be replaced by key = key_salt + secret, since
# the hmac module does the same thing for keys longer than the block size.
# However, we need to ensure that we *always* do this.
return hmac.new(key, msg=force_bytes(value), digestmod=hashlib.sha1)
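# Illustrative sketch (editor's addition, not part of the original module):
# deriving a keyed digest with an explicit secret so settings.SECRET_KEY is not
# needed; the salt and secret values are made up.
#
#     mac = salted_hmac("myapp.token", "some value", secret="not-a-real-secret")
#     digest = mac.hexdigest()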
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s%s" % (
random.getstate(),
time.time(),
settings.SECRET_KEY)).encode('utf-8')
).digest())
return ''.join(random.choice(allowed_chars) for i in range(length))
if hasattr(hmac, "compare_digest"):
# Prefer the stdlib implementation, when available.
def constant_time_compare(val1, val2):
return hmac.compare_digest(force_bytes(val1), force_bytes(val2))
else:
def constant_time_compare(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths. Since Django only uses it to compare hashes of
known expected length, this is acceptable.
"""
if len(val1) != len(val2):
return False
result = 0
if six.PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
for x, y in zip(val1, val2):
result |= x ^ y
else:
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
def _bin_to_long(x):
"""
Convert a binary string into a long integer
This is a clever optimization for fast xor vector math
"""
return int(binascii.hexlify(x), 16)
def _long_to_bin(x, hex_format_string):
"""
Convert a long integer into a binary string.
hex_format_string is like "%020x" for padding 10 characters.
"""
return binascii.unhexlify((hex_format_string % x).encode('ascii'))
if hasattr(hashlib, "pbkdf2_hmac"):
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
"""
Implements PBKDF2 with the same API as Django's existing
implementation, using the stdlib.
This is used in Python 2.7.8+ and 3.4+.
"""
if digest is None:
digest = hashlib.sha256
if not dklen:
dklen = None
password = force_bytes(password)
salt = force_bytes(salt)
return hashlib.pbkdf2_hmac(
digest().name, password, salt, iterations, dklen)
else:
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
"""
Implements PBKDF2 as defined in RFC 2898, section 5.2
HMAC+SHA256 is used as the default pseudo random function.
As of 2014, 100,000 iterations was the recommended default which took
100ms on a 2.7Ghz Intel i7 with an optimized implementation. This is
probably the bare minimum for security given 1000 iterations was
recommended in 2001. This code is very well optimized for CPython and
is about five times slower than OpenSSL's implementation. Look in
django.contrib.auth.hashers for the present default, it is lower than
the recommended 100,000 because of the performance difference between
this and an optimized implementation.
"""
assert iterations > 0
if not digest:
digest = hashlib.sha256
password = force_bytes(password)
salt = force_bytes(salt)
hlen = digest().digest_size
if not dklen:
dklen = hlen
if dklen > (2 ** 32 - 1) * hlen:
raise OverflowError('dklen too big')
l = -(-dklen // hlen)
r = dklen - (l - 1) * hlen
hex_format_string = "%%0%ix" % (hlen * 2)
inner, outer = digest(), digest()
if len(password) > inner.block_size:
password = digest(password).digest()
password += b'\x00' * (inner.block_size - len(password))
inner.update(password.translate(hmac.trans_36))
outer.update(password.translate(hmac.trans_5C))
def F(i):
u = salt + struct.pack(b'>I', i)
result = 0
for j in range(int(iterations)):
dig1, dig2 = inner.copy(), outer.copy()
dig1.update(u)
dig2.update(dig1.digest())
u = dig2.digest()
result ^= _bin_to_long(u)
return _long_to_bin(result, hex_format_string)
T = [F(x) for x in range(1, l)]
        return b''.join(T) + F(l)[:r]
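# Editor's note: the block below is an illustrative, self-contained demo and is
# not part of the original module; the password/salt values are made up.
if __name__ == '__main__':
    # Derive a key with PBKDF2 (backed by hashlib.pbkdf2_hmac on modern Pythons).
    derived = pbkdf2('correct horse battery staple', 'salt1234', 1000)
    print(binascii.hexlify(derived))
    # Compare two digests without leaking timing information.
    print(constant_time_compare(derived, derived))
    # Generate a random 24-character token.
    print(get_random_string(24))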
# File: pyrogram/types/bots_and_keyboards/callback_query.py (from Hikka_Pyro-2.0.66-py3-none-any.whl)
from typing import Union, List, Match, Optional
import pyrogram
from pyrogram import raw, enums
from pyrogram import types
from ..object import Object
from ..update import Update
from ... import utils
class CallbackQuery(Object, Update):
"""An incoming callback query from a callback button in an inline keyboard.
If the button that originated the query was attached to a message sent by the bot, the field *message*
will be present. If the button was attached to a message sent via the bot (in inline mode), the field
*inline_message_id* will be present. Exactly one of the fields *data* or *game_short_name* will be present.
Parameters:
id (``str``):
Unique identifier for this query.
from_user (:obj:`~pyrogram.types.User`):
Sender.
chat_instance (``str``, *optional*):
Global identifier, uniquely corresponding to the chat to which the message with the callback button was
sent. Useful for high scores in games.
message (:obj:`~pyrogram.types.Message`, *optional*):
Message with the callback button that originated the query. Note that message content and message date will
not be available if the message is too old.
inline_message_id (``str``):
Identifier of the message sent via the bot in inline mode, that originated the query.
data (``str`` | ``bytes``, *optional*):
Data associated with the callback button. Be aware that a bad client can send arbitrary data in this field.
game_short_name (``str``, *optional*):
Short name of a Game to be returned, serves as the unique identifier for the game.
matches (List of regex Matches, *optional*):
A list containing all `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_ that match
the data of this callback query. Only applicable when using :obj:`Filters.regex <pyrogram.Filters.regex>`.
"""
def __init__(
self,
*,
client: "pyrogram.Client" = None,
id: str,
from_user: "types.User",
chat_instance: str,
message: "types.Message" = None,
inline_message_id: str = None,
data: Union[str, bytes] = None,
game_short_name: str = None,
matches: List[Match] = None
):
super().__init__(client)
self.id = id
self.from_user = from_user
self.chat_instance = chat_instance
self.message = message
self.inline_message_id = inline_message_id
self.data = data
self.game_short_name = game_short_name
self.matches = matches
@staticmethod
async def _parse(client: "pyrogram.Client", callback_query, users) -> "CallbackQuery":
message = None
inline_message_id = None
if isinstance(callback_query, raw.types.UpdateBotCallbackQuery):
chat_id = utils.get_peer_id(callback_query.peer)
message_id = callback_query.msg_id
message = client.message_cache[(chat_id, message_id)]
if not message:
message = await client.get_messages(chat_id, message_id)
elif isinstance(callback_query, raw.types.UpdateInlineBotCallbackQuery):
inline_message_id = utils.pack_inline_message_id(callback_query.msg_id)
# Try to decode callback query data into string. If that fails, fallback to bytes instead of decoding by
# ignoring/replacing errors, this way, button clicks will still work.
try:
data = callback_query.data.decode()
except (UnicodeDecodeError, AttributeError):
data = callback_query.data
return CallbackQuery(
id=str(callback_query.query_id),
from_user=types.User._parse(client, users[callback_query.user_id]),
message=message,
inline_message_id=inline_message_id,
chat_instance=str(callback_query.chat_instance),
data=data,
game_short_name=callback_query.game_short_name,
client=client
)
async def answer(self, text: str = None, show_alert: bool = None, url: str = None, cache_time: int = 0):
"""Bound method *answer* of :obj:`~pyrogram.types.CallbackQuery`.
Use this method as a shortcut for:
.. code-block:: python
await client.answer_callback_query(
callback_query.id,
text="Hello",
show_alert=True
)
Example:
.. code-block:: python
await callback_query.answer("Hello", show_alert=True)
Parameters:
text (``str``, *optional*):
Text of the notification. If not specified, nothing will be shown to the user, 0-200 characters.
show_alert (``bool`` *optional*):
If true, an alert will be shown by the client instead of a notification at the top of the chat screen.
Defaults to False.
url (``str`` *optional*):
URL that will be opened by the user's client.
If you have created a Game and accepted the conditions via @Botfather, specify the URL that opens your
game – note that this will only work if the query comes from a callback_game button.
Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter.
cache_time (``int`` *optional*):
The maximum amount of time in seconds that the result of the callback query may be cached client-side.
Telegram apps will support caching starting in version 3.14. Defaults to 0.
"""
return await self._client.answer_callback_query(
callback_query_id=self.id,
text=text,
show_alert=show_alert,
url=url,
cache_time=cache_time
)
async def edit_message_text(
self,
text: str,
parse_mode: Optional["enums.ParseMode"] = None,
disable_web_page_preview: bool = None,
reply_markup: "types.InlineKeyboardMarkup" = None
) -> Union["types.Message", bool]:
"""Edit the text of messages attached to callback queries.
Bound method *edit_message_text* of :obj:`~pyrogram.types.CallbackQuery`.
Parameters:
text (``str``):
New text of the message.
parse_mode (:obj:`~pyrogram.enums.ParseMode`, *optional*):
By default, texts are parsed using both Markdown and HTML styles.
You can combine both syntaxes together.
disable_web_page_preview (``bool``, *optional*):
Disables link previews for links in this message.
reply_markup (:obj:`~pyrogram.types.InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
Returns:
:obj:`~pyrogram.types.Message` | ``bool``: On success, if the edited message was sent by the bot, the edited
message is returned, otherwise True is returned (message sent via the bot, as inline query result).
Raises:
RPCError: In case of a Telegram RPC error.
"""
if self.inline_message_id is None:
return await self._client.edit_message_text(
chat_id=self.message.chat.id,
message_id=self.message.id,
text=text,
parse_mode=parse_mode,
disable_web_page_preview=disable_web_page_preview,
reply_markup=reply_markup
)
else:
return await self._client.edit_inline_text(
inline_message_id=self.inline_message_id,
text=text,
parse_mode=parse_mode,
disable_web_page_preview=disable_web_page_preview,
reply_markup=reply_markup
)
async def edit_message_caption(
self,
caption: str,
parse_mode: Optional["enums.ParseMode"] = None,
reply_markup: "types.InlineKeyboardMarkup" = None
) -> Union["types.Message", bool]:
"""Edit the caption of media messages attached to callback queries.
Bound method *edit_message_caption* of :obj:`~pyrogram.types.CallbackQuery`.
Parameters:
caption (``str``):
New caption of the message.
parse_mode (:obj:`~pyrogram.enums.ParseMode`, *optional*):
By default, texts are parsed using both Markdown and HTML styles.
You can combine both syntaxes together.
reply_markup (:obj:`~pyrogram.types.InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
Returns:
:obj:`~pyrogram.types.Message` | ``bool``: On success, if the edited message was sent by the bot, the edited
message is returned, otherwise True is returned (message sent via the bot, as inline query result).
Raises:
RPCError: In case of a Telegram RPC error.
"""
return await self.edit_message_text(caption, parse_mode, reply_markup=reply_markup)
async def edit_message_media(
self,
media: "types.InputMedia",
reply_markup: "types.InlineKeyboardMarkup" = None
) -> Union["types.Message", bool]:
"""Edit animation, audio, document, photo or video messages attached to callback queries.
Bound method *edit_message_media* of :obj:`~pyrogram.types.CallbackQuery`.
Parameters:
media (:obj:`~pyrogram.types.InputMedia`):
One of the InputMedia objects describing an animation, audio, document, photo or video.
reply_markup (:obj:`~pyrogram.types.InlineKeyboardMarkup`, *optional*):
An InlineKeyboardMarkup object.
Returns:
:obj:`~pyrogram.types.Message` | ``bool``: On success, if the edited message was sent by the bot, the edited
message is returned, otherwise True is returned (message sent via the bot, as inline query result).
Raises:
RPCError: In case of a Telegram RPC error.
"""
if self.inline_message_id is None:
return await self._client.edit_message_media(
chat_id=self.message.chat.id,
message_id=self.message.id,
media=media,
reply_markup=reply_markup
)
else:
return await self._client.edit_inline_media(
inline_message_id=self.inline_message_id,
media=media,
reply_markup=reply_markup
)
async def edit_message_reply_markup(
self,
reply_markup: "types.InlineKeyboardMarkup" = None
) -> Union["types.Message", bool]:
"""Edit only the reply markup of messages attached to callback queries.
Bound method *edit_message_reply_markup* of :obj:`~pyrogram.types.CallbackQuery`.
Parameters:
reply_markup (:obj:`~pyrogram.types.InlineKeyboardMarkup`):
An InlineKeyboardMarkup object.
Returns:
:obj:`~pyrogram.types.Message` | ``bool``: On success, if the edited message was sent by the bot, the edited
message is returned, otherwise True is returned (message sent via the bot, as inline query result).
Raises:
RPCError: In case of a Telegram RPC error.
"""
if self.inline_message_id is None:
return await self._client.edit_message_reply_markup(
chat_id=self.message.chat.id,
message_id=self.message.id,
reply_markup=reply_markup
)
else:
return await self._client.edit_inline_reply_markup(
inline_message_id=self.inline_message_id,
reply_markup=reply_markup
            )
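# Usage sketch (editor's addition, not part of the original module): a typical
# callback handler. The app/client setup is assumed to exist elsewhere.
#
#     # @app.on_callback_query()
#     # async def handle(client, callback_query):
#     #     await callback_query.answer("Working on it...", show_alert=False)
#     #     await callback_query.edit_message_text("Done!")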
# File: firefly_iii_client/model/link_type_single.py (from Firefly III API Python Client 1.5.6.post2)
import re  # noqa: F401
import sys # noqa: F401
from firefly_iii_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from firefly_iii_client.exceptions import ApiAttributeError
def lazy_import():
from firefly_iii_client.model.link_type_read import LinkTypeRead
globals()['LinkTypeRead'] = LinkTypeRead
class LinkTypeSingle(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (LinkTypeRead,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, data, *args, **kwargs): # noqa: E501
"""LinkTypeSingle - a model defined in OpenAPI
Args:
data (LinkTypeRead):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.data = data
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, data, *args, **kwargs): # noqa: E501
"""LinkTypeSingle - a model defined in OpenAPI
Args:
data (LinkTypeRead):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.data = data
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
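# Usage sketch (editor's addition, not part of the original module): this
# generated model is normally built while deserializing an API response, but it
# can also be constructed directly from a LinkTypeRead instance.
#
#     # from firefly_iii_client.model.link_type_read import LinkTypeRead
#     # single = LinkTypeSingle(data=link_type_read_instance)
#     # print(single.data)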
# File: MasterClass.py (from Compare_SQL_RedShift307_Updated-0.1.4-py3-none-any.whl)
from __future__ import generators
from multiprocessing import Pool
import datacompy
import psycopg2
import pandas as pd
import numpy as np
import datetime
import smtplib
import xlrd
import ast
import json
import os
import time
import boto3
import io
import pyodbc
from tabulate import tabulate
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import xml.etree.ElementTree as et
def ResultIter(cursor, arraysize=20):
'An iterator that uses fetchmany to keep memory usage down'
while True:
results = cursor.fetchmany(arraysize)
if not results:
break
for result in results:
yield result
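# Usage sketch (editor's addition, not part of the original module): stream rows
# from a DB-API cursor without loading the whole result set; the connection and
# table names are assumptions.
#
#     # cursor = conn.cursor()
#     # cursor.execute("SELECT * FROM some_table")
#     # for row in ResultIter(cursor, arraysize=500):
#     #     process(row)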
class Datavalidation:
    ''' Park the files within the drop zone. With this method we can drop multiple files in one go. The filestring parameter needs to be the ending part of the filename. '''
def UpLoadFilestoDropZone(self, bucketname, sourcedirectory, filestring):
s3 = boto3.resource('s3')
bucket_name = bucketname
directory = sourcedirectory
        files_dir = os.listdir(directory)
print(files_dir)
newlist = []
for names in files_dir:
if names.endswith(filestring):
newlist.append(names)
for filename in newlist:
s3.Bucket(bucket_name).upload_file(directory + filename, '%s/%s' % ('uip', filename))
''' Generate Bucket Names using the keywords '''
def bucketname(self, zone):
if zone == 'DropZone':
bucketname = 'edw-qa-s3-dropzone-bucket-2hd8ual2p76y'
elif zone == 'RawZone':
bucketname = 'edw-qa-s3-rawzone-bucket-1avmupg900hqh'
elif zone == 'ArchiveZone':
bucketname = 'edw-qa-s3-archive-bucket-1sesp8tdqgoq'
elif zone == 'RefinedZone':
bucketname = 'edw-qa-s3-refinedzone-bucket-1tw39k5srarek'
return bucketname
    ''' Generate the key value based on different buckets. Every bucket has a different way of representing the key values. For the refined zone, key generation is partial as it depends on the partition column and partition dates. '''
def KeyName(self, project, zone, etlrundate, FullFileName):
filename = FullFileName[
FullFileName.index('_') + 1:FullFileName.index('_', FullFileName.index('_') + 1, len(FullFileName))]
if zone == 'RawZone':
key = project + '/' + filename + '/partitioned_date=' + etlrundate + '/' + FullFileName + '.csv'
elif zone == 'DropZone':
key = project + '/' + FullFileName + '.zip'
elif zone == 'ArchiveZone':
key = project + '/' + filename + '/partitioned_date=' + etlrundate + '/' + FullFileName + '.zip'
elif zone == 'RefinedZone':
key = project + '/' + filename
return key
''' check if File Exists '''
def fileexists(self, bucketname, key):
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucketname)
filename = key
obj = list(bucket.objects.filter(Prefix=filename))
if len(obj) > 0:
result = 'File Exists'
else:
result = 'File does not Exists'
return result
    ''' Dataframe creation for a file within a directory (local or shared) '''
def sourcefiledataframe(self, sourcefilename, dict):
df_local = pd.read_csv(sourcefilename, quotechar='"', sep=',', dtype=dict, chunksize=500000, low_memory=False)
df_list = []
for df_ in df_local:
df_list += [df_.copy()]
df = pd.concat(df_list)
return (df)
    ''' Dataframe creation for an S3 file within a bucket '''
def s3fileprocessing(self, bucketname, key, dict):
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=bucketname,
Key=key)
df_s3 = pd.read_csv(io.BytesIO(obj['Body'].read()), quotechar='"', sep=',', dtype=dict, low_memory=False,
chunksize=500000)
df_list = []
for df_ in df_s3:
df_list += [df_.copy()]
df = pd.concat(df_list)
return (df)
    ''' Dataframe creation for parquet files within the Refined Zone. This has a limitation: currently it is not able to read the parquet file within S3. The file needs to be downloaded first and then the data frame created. This method best suits when we use Docker for testing. <<<Future Implementation>>> '''
''' Dataframe creation for Redshift tables (Non SCD, SCD & Analytical table) '''
def RedshiftDataframe(self, query, username, dbname, clusteridentifier, host):
client = boto3.client('redshift', region_name='us-east-1')
cluster_creds = client.get_cluster_credentials(DbUser=username, DbName=dbname,
ClusterIdentifier=clusteridentifier,
DurationSeconds=1800, AutoCreate=False)
conn = psycopg2.connect(host=host, port=5439, user=cluster_creds['DbUser'],
password=cluster_creds['DbPassword'],
database=dbname, sslmode='require')
df_list = []
for df_ in pd.read_sql_query(query, conn, chunksize=1000000):
df_list += [df_.copy()]
df = pd.concat(df_list)
return (df)
''' Dictionary Creation. This is required to handle the errors while creating the dataframes '''
''' Column Renaming Function & Data types conversion function '''
def column_renaming_function(self, df, sourcecolumnlist, hubcolumnlist):
for i in range(len(sourcecolumnlist)):
if df.columns[i] == sourcecolumnlist[i]:
df.rename(columns={df.columns[i]: hubcolumnlist[i]}, inplace=True)
df = df.columns.tolist()
return (df)
''' Change the data types of the columns '''
def data_type_change_fun(self, df, sourcecolumnlist, hubcolumnlist, hubdatatypelist):
for i in range(len(sourcecolumnlist)):
if df.columns[i] == hubcolumnlist[i]:
if hubdatatypelist[i] == 'timestamp':
df[hubcolumnlist[i]] = pd.to_datetime(df[hubcolumnlist[i]])
elif hubdatatypelist[i] == 'integer':
df[hubcolumnlist[i]] = pd.to_numeric(df[hubcolumnlist[i]])
elif hubdatatypelist[i] == 'float':
df[hubcolumnlist[i]] = pd.to_numeric(df[hubcolumnlist[i]])
else:
df[hubcolumnlist[i]] = df[hubcolumnlist[i]].astype(str)
return df
def DatavalidationReport_Function(self, df1, df2, primarykeycolumn):
compare = datacompy.Compare(
df1,
df2,
join_columns=primarykeycolumn,
abs_tol=0,
rel_tol=0,
df1_name='Source',
df2_name='Destination'
)
compare.matches(ignore_extra_columns=False)
return compare.report()
def s3_dataframe_DMSEQ(self, FullFileName):
key = 'CustomerRiskAttributes/DMSEQ/' + FullFileName + '.csv'
bucketname = 'edw-qa-s3-refinedzone-bucket-1tw39k5srarek'
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=bucketname, Key=key)
df_ = pd.read_csv(io.BytesIO(obj['Body'].read()))
df_ = df_.replace('', np.NaN)
df_ = df_.replace('NULL', np.NaN)
df_.fillna(value=pd.np.NaN, inplace=True)
df_.fillna(0, inplace=True)
df_ = df_.sort_values(by=['ReportId'])
return df_
def s3_dataframe_DMSEX(self, FullFileName):
key = 'CustomerRiskAttributes/DMSEX/' + FullFileName + '.csv'
bucketname = 'edw-qa-s3-refinedzone-bucket-1tw39k5srarek'
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=bucketname, Key=key)
df_ = pd.read_csv(io.BytesIO(obj['Body'].read()))
df_ = df_.replace('', np.NaN)
df_ = df_.replace('NULL', np.NaN)
df_.fillna(value=pd.np.NaN, inplace=True)
df_.fillna(0, inplace=True)
df_ = df_.sort_values(by=['ReportId'])
return df_
def s3_dataframe_LN(self, FullFileName):
key = 'CustomerRiskAttributes/LN/' + FullFileName + '.csv'
bucketname = 'edw-qa-s3-refinedzone-bucket-1tw39k5srarek'
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=bucketname, Key=key)
df_ = pd.read_csv(io.BytesIO(obj['Body'].read()))
df_ = df_.replace('', np.NaN)
df_ = df_.replace('NULL', np.NaN)
df_.fillna(value=pd.np.NaN, inplace=True)
df_.fillna(0, inplace=True)
df_ = df_.sort_values(by=['ReportId'])
return df_
def s3_dataframe_IDA(self, FullFileName):
key = 'CustomerRiskAttributes/IDA/' + FullFileName + '.csv'
bucketname = 'edw-qa-s3-refinedzone-bucket-1tw39k5srarek'
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=bucketname, Key=key)
df_ = pd.read_csv(io.BytesIO(obj['Body'].read()), na_filter=False)
df_ = df_.replace('', np.NaN)
df_ = df_.replace('NULL', np.NaN)
df_.fillna(value=pd.np.NaN, inplace=True)
df_.fillna(value=0, inplace=True)
df_ = df_.sort_values(by=['ReportId'])
return df_
def datatypeConversion_UIP(self, source_df, target_df):
sr_ss_tr_df_datatype = source_df.dtypes
ss_tr_df_datatype = pd.DataFrame(
{'column_name': sr_ss_tr_df_datatype.index, 'sql_Data_type': sr_ss_tr_df_datatype.values})
sr_rs_datatype = target_df.dtypes
df_rs_datatype = pd.DataFrame({'column_name': sr_rs_datatype.index, 'rs_Data_type': sr_rs_datatype.values})
mismatch_ss_tr_datatype = ss_tr_df_datatype.loc[
(ss_tr_df_datatype['sql_Data_type'] != df_rs_datatype['rs_Data_type'])]
mismatch_rs_datatype = df_rs_datatype.loc[
(ss_tr_df_datatype['sql_Data_type'] != df_rs_datatype['rs_Data_type'])]
mismatch_final_df = pd.merge(mismatch_rs_datatype, mismatch_ss_tr_datatype)
if mismatch_ss_tr_datatype.empty and mismatch_rs_datatype.empty and mismatch_final_df.empty:
print("Datatypes match")
else:
for i in range(len(mismatch_final_df)):
print(i)
col_name = mismatch_final_df.loc[i, "column_name"]
data_type = mismatch_final_df.loc[i, "rs_Data_type"]
source_df[col_name] = source_df[col_name].astype(data_type)
print("Printing converted column names")
print(i)
return source_df
def datatypeConversion(self, df_s3, df_xml):
s3_dtype_series = df_s3.dtypes
s3_dtype_df = pd.DataFrame({'column_name': s3_dtype_series.index, 's3_dtype': s3_dtype_series.values})
xml_dtype_series = df_xml.dtypes
xml_dtype_df = pd.DataFrame({'column_name': xml_dtype_series.index, 'xml_dtype': xml_dtype_series.values})
dtype_df = pd.merge(s3_dtype_df, xml_dtype_df)
for i in range(len(dtype_df)):
col_name = dtype_df.loc[i, "column_name"]
print(col_name)
data_type = dtype_df.loc[i, "s3_dtype"]
df_xml[col_name] = df_xml[col_name].astype(data_type)
return df_xml
def text_report(self, FullFileName):
ts = time.gmtime()
readable_time = time.strftime("%Y-%m-%d_%H-%M-%S", ts)
readable_date = time.strftime("%Y-%m-%d", ts)
# report_dir = '\\acaqaam02\c$\Python_Reports\DMA1059\ '+FullFileName + '_' + readable_date
report_dir = "xml_reports"
html_file_name = report_dir + r"/" + FullFileName + "_" + readable_time
if not os.path.exists(report_dir):
os.mkdir(report_dir)
text_file = open(html_file_name + '.txt', "w")
else:
text_file = open(html_file_name + '.txt', "w")
return text_file
def text_report_json(self):
ts = time.gmtime()
readable_time = time.strftime("%Y-%m-%d_%H-%M-%S", ts)
readable_date = time.strftime("%Y-%m-%d", ts)
report_dir = r"\\acaqaam02\\c$\\Python_Reports\\Json_reports_" + readable_date
html_file_name = report_dir + r"\\JsonReport_" + readable_time
if not os.path.exists(report_dir):
os.mkdir(report_dir)
text_file = open(html_file_name + '.txt', "w")
else:
text_file = open(html_file_name + '.txt', "w")
return text_file
def email_report(self, emailfrom, emailto, df_report, subject, email_text):
report_table = df_report.to_html()
body = """<html>
<head>
<title>""" + subject + """</title>
</head>
<body>
""" + email_text + """
""" + report_table + """
</body></html>"""
message = MIMEMultipart("alternative", None, [MIMEText(email_text, "plain"), MIMEText(body, "html")])
message['Subject'] = subject
message['from'] = emailfrom
message['To'] = emailto
smtpObj = smtplib.SMTP('smtp.aca.local')
smtpObj.sendmail(emailfrom, emailto, message.as_string())
def mismatched_data(self, df_source, df_target, column_list):
report_df = pd.DataFrame(columns=column_list)
# list_final_fail = []
fail_count = 0
pass_count = 0
col_num = 0
df = pd.concat([df_source, df_target]).drop_duplicates(keep=False)
mismatch_count = df.shape[0]
if mismatch_count == 0:
status = "PASS"
pass_count = df_source.shape[0]
else:
status = "FAIL"
fail_count = df.shape[0] // 2
pass_count = df_source.shape[0] - fail_count
print("\nTotal Count", pass_count + fail_count)
print("Pass Count", pass_count)
print("Fail Count", fail_count)
print("Validation is: ", status)
print("\nFail", df)
return df, status, pass_count, fail_count
def mismatched_data_list(self, df_source, df_target, list_final_fail):
df = pd.concat([df_source, df_target]).drop_duplicates(keep=False)
mismatch_count = df.shape[0]
if mismatch_count == 0:
status = "PASS"
else:
status = "FAIL"
temp = df.iloc[0]
list_final_fail.append(temp)
return list_final_fail
def convert_object_column_data_to_string(self, df):
dtype_series = df.dtypes
dtype_df = pd.DataFrame({'column_name': dtype_series.index, 'dtype': dtype_series.values})
for i in range(len(dtype_df)):
dtype = dtype_df.loc[i, "dtype"]
if dtype == 'object':
col_name = dtype_df.loc[i, "column_name"]
                df[col_name] = df[col_name].apply(lambda x: str(x))
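# Usage sketch (editor's addition, not part of the original module): a typical
# source-vs-target comparison. Bucket, key, query and credential values are
# made up.
#
#     # dv = Datavalidation()
#     # bucket = dv.bucketname('RawZone')
#     # key = dv.KeyName('uip', 'RawZone', '2020-01-01', 'prefix_tablename_20200101')
#     # print(dv.fileexists(bucket, key))
#     # df_src = dv.s3fileprocessing(bucket, key, dict={})
#     # df_tgt = dv.RedshiftDataframe("select * from schema.tablename", 'user',
#     #                               'dbname', 'cluster-id', 'host.amazonaws.com')
#     # print(dv.DatavalidationReport_Function(df_src, df_tgt, 'primary_key_col'))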
# File: metpy/calc/indices.py (from MetPy-1.5.1-py3-none-any.whl)
"""Contains calculation of various derived indices."""
import numpy as np
from .thermo import mixing_ratio, saturation_vapor_pressure
from .tools import _remove_nans, get_layer
from .. import constants as mpconsts
from ..package_tools import Exporter
from ..units import check_units, concatenate, units
from ..xarray import preprocess_and_wrap
exporter = Exporter(globals())
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', bottom='[pressure]', top='[pressure]')
def precipitable_water(pressure, dewpoint, *, bottom=None, top=None):
r"""Calculate precipitable water through the depth of a sounding.
Formula used is:
.. math:: -\frac{1}{\rho_l g} \int\limits_{p_\text{bottom}}^{p_\text{top}} r dp
from [Salby1996]_, p. 28
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
dewpoint : `pint.Quantity`
Atmospheric dewpoint profile
bottom: `pint.Quantity`, optional
Bottom of the layer, specified in pressure. Defaults to None (highest pressure).
top: `pint.Quantity`, optional
Top of the layer, specified in pressure. Defaults to None (lowest pressure).
Returns
-------
`pint.Quantity`
Precipitable water in the layer
Examples
--------
>>> pressure = np.array([1000, 950, 900]) * units.hPa
>>> dewpoint = np.array([20, 15, 10]) * units.degC
>>> pw = precipitable_water(pressure, dewpoint)
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
.. versionchanged:: 1.0
Signature changed from ``(dewpt, pressure, bottom=None, top=None)``
"""
# Sort pressure and dewpoint to be in decreasing pressure order (increasing height)
sort_inds = np.argsort(pressure)[::-1]
pressure = pressure[sort_inds]
dewpoint = dewpoint[sort_inds]
pressure, dewpoint = _remove_nans(pressure, dewpoint)
min_pressure = np.nanmin(pressure)
max_pressure = np.nanmax(pressure)
if top is None:
top = min_pressure
elif not min_pressure <= top <= max_pressure:
raise ValueError(f'The pressure and dewpoint profile ranges from {max_pressure} to '
f'{min_pressure}, after removing missing values. {top} is outside '
'this range.')
if bottom is None:
bottom = max_pressure
elif not min_pressure <= bottom <= max_pressure:
raise ValueError(f'The pressure and dewpoint profile ranges from {max_pressure} to '
f'{min_pressure}, after removing missing values. {bottom} is outside '
'this range.')
pres_layer, dewpoint_layer = get_layer(pressure, dewpoint, bottom=bottom,
depth=bottom - top)
w = mixing_ratio(saturation_vapor_pressure(dewpoint_layer), pres_layer)
# Since pressure is in decreasing order, pw will be the opposite sign of that expected.
pw = -np.trapz(w, pres_layer) / (mpconsts.g * mpconsts.rho_l)
return pw.to('millimeters')
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]')
def mean_pressure_weighted(pressure, *args, height=None, bottom=None, depth=None):
r"""Calculate pressure-weighted mean of an arbitrary variable through a layer.
Layer bottom and depth specified in height or pressure.
.. math:: MPW = \frac{\int_{p_s}^{p_b} A p dp}{\int_{p_s}^{p_b} p dp}
where:
* :math:`MPW` is the pressure-weighted mean of a variable.
* :math:`p_b` is the bottom pressure level.
* :math:`p_s` is the top pressure level.
* :math:`A` is the variable whose pressure-weighted mean is being calculated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile.
args : `pint.Quantity`
Parameters for which the weighted-continuous mean is to be calculated.
height : `pint.Quantity`, optional
Heights from sounding. Standard atmosphere heights assumed (if needed)
if no heights are given.
bottom: `pint.Quantity`, optional
The bottom of the layer in either the provided height coordinate
or in pressure. Don't provide in meters AGL unless the provided
height coordinate is meters AGL. Default is the first observation,
assumed to be the surface.
depth: `pint.Quantity`, optional
Depth of the layer in meters or hPa. Defaults to 100 hPa.
Returns
-------
list of `pint.Quantity`
list of layer mean value for each profile in args.
See Also
--------
weighted_continuous_average
Examples
--------
>>> from metpy.calc import mean_pressure_weighted
>>> from metpy.units import units
>>> p = [1000, 850, 700, 500] * units.hPa
>>> T = [30, 15, 5, -5] * units.degC
>>> mean_pressure_weighted(p, T)
[<Quantity(298.54368, 'kelvin')>]
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``heights`` parameter to ``height``
"""
# Split pressure profile from other variables to average
pres_prof, *others = get_layer(pressure, *args, height=height, bottom=bottom, depth=depth)
# Taking the integral of the weights (pressure) to feed into the weighting
# function. Said integral works out to this function:
pres_int = 0.5 * (pres_prof[-1] ** 2 - pres_prof[0] ** 2)
# Perform integration on the profile for each variable
return [np.trapz(var_prof * pres_prof, x=pres_prof) / pres_int for var_prof in others]
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]')
def weighted_continuous_average(pressure, *args, height=None, bottom=None, depth=None):
r"""Calculate weighted-continuous mean of an arbitrary variable through a layer.
Layer top and bottom specified in height or pressure.
Formula based on that from [Holton2004]_ pg. 76 and the NCL function ``wgt_vertical_n``
.. math:: WCA = \frac{\int_{p_s}^{p_b} A dp}{\int_{p_s}^{p_b} dp}
where:
* :math:`WCA` is the weighted continuous average of a variable.
* :math:`p_b` is the bottom pressure level.
* :math:`p_s` is the top pressure level.
* :math:`A` is the variable whose weighted continuous average is being calculated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile.
args : `pint.Quantity`
Parameters for which the weighted-continuous mean is to be calculated.
height : `pint.Quantity`, optional
Heights from sounding. Standard atmosphere heights assumed (if needed)
if no heights are given.
bottom: `pint.Quantity`, optional
The bottom of the layer in either the provided height coordinate
or in pressure. Don't provide in meters AGL unless the provided
height coordinate is meters AGL. Default is the first observation,
assumed to be the surface.
depth: `pint.Quantity`, optional
Depth of the layer in meters or hPa.
Returns
-------
list of `pint.Quantity`
list of layer mean value for each profile in args.
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
"""
# Split pressure profile from other variables to average
pres_prof, *others = get_layer(
pressure, *args, height=height, bottom=bottom, depth=depth
)
return [np.trapz(var_prof, x=pres_prof) / (pres_prof[-1] - pres_prof[0])
for var_prof in others]
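# Editor's note: a usage sketch for weighted_continuous_average, mirroring the
# Examples shown for mean_pressure_weighted above; the values are illustrative
# and no output is asserted.
#
#     # >>> from metpy.calc import weighted_continuous_average
#     # >>> from metpy.units import units
#     # >>> p = [1000, 850, 700, 500] * units.hPa
#     # >>> T = [30, 15, 5, -5] * units.degC
#     # >>> weighted_continuous_average(p, T)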
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[speed]', '[speed]', '[length]')
def bunkers_storm_motion(pressure, u, v, height):
r"""Calculate right-mover and left-mover supercell storm motions using the Bunkers method.
This is a physically based, shear-relative, and Galilean invariant method for predicting
supercell motion. Full atmospheric profiles of wind components, as well as pressure and
heights, need to be provided so that calculation can properly calculate the required
surface to 6 km mean flow.
The calculation in summary is (from [Bunkers2000]_):
* surface to 6 km non-pressure-weighted mean wind
* a deviation from the sfc to 6 km mean wind of 7.5 m s−1
* a 5.5 to 6 km mean wind for the head of the vertical wind shear vector
* a surface to 0.5 km mean wind for the tail of the vertical wind shear vector
Parameters
----------
pressure : `pint.Quantity`
Pressure from full profile
u : `pint.Quantity`
Full profile of the U-component of the wind
v : `pint.Quantity`
Full profile of the V-component of the wind
height : `pint.Quantity`
Full profile of height
Returns
-------
right_mover: (`pint.Quantity`, `pint.Quantity`)
Scalar U- and V- components of Bunkers right-mover storm motion
left_mover: (`pint.Quantity`, `pint.Quantity`)
Scalar U- and V- components of Bunkers left-mover storm motion
wind_mean: (`pint.Quantity`, `pint.Quantity`)
Scalar U- and V- components of surface to 6 km mean flow
Examples
--------
>>> from metpy.calc import bunkers_storm_motion, wind_components
>>> from metpy.units import units
>>> p = [1000, 925, 850, 700, 500, 400] * units.hPa
>>> h = [250, 700, 1500, 3100, 5720, 7120] * units.meters
>>> wdir = [165, 180, 190, 210, 220, 250] * units.degree
>>> sped = [5, 15, 20, 30, 50, 60] * units.knots
>>> u, v = wind_components(sped, wdir)
>>> bunkers_storm_motion(p, u, v, h)
(<Quantity([22.09618172 12.43406736], 'knot')>,
<Quantity([ 6.02861839 36.76517865], 'knot')>,
<Quantity([14.06240005 24.599623 ], 'knot')>)
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``heights`` parameter to ``height``
"""
# mean wind from sfc-6km
wind_mean = weighted_continuous_average(pressure, u, v, height=height,
depth=units.Quantity(6000, 'meter'))
wind_mean = units.Quantity.from_list(wind_mean)
# mean wind from sfc-500m
wind_500m = weighted_continuous_average(pressure, u, v, height=height,
depth=units.Quantity(500, 'meter'))
wind_500m = units.Quantity.from_list(wind_500m)
# mean wind from 5.5-6km
wind_5500m = weighted_continuous_average(
pressure, u, v, height=height,
depth=units.Quantity(500, 'meter'),
bottom=height[0] + units.Quantity(5500, 'meter'))
wind_5500m = units.Quantity.from_list(wind_5500m)
# Calculate the shear vector from sfc-500m to 5.5-6km
shear = wind_5500m - wind_500m
# Take the cross product of the wind shear and k, and divide by the vector magnitude and
# multiply by the deviation empirically calculated in Bunkers (2000) (7.5 m/s)
shear_cross = concatenate([shear[1], -shear[0]])
shear_mag = np.hypot(*shear)
rdev = shear_cross * (units.Quantity(7.5, 'm/s').to(u.units) / shear_mag)
# Add the deviations to the layer average wind to get the RM motion
right_mover = wind_mean + rdev
# Subtract the deviations to get the LM motion
left_mover = wind_mean - rdev
return right_mover, left_mover, wind_mean
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[speed]', '[speed]')
def bulk_shear(pressure, u, v, height=None, bottom=None, depth=None):
r"""Calculate bulk shear through a layer.
Layer top and bottom specified in meters or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
u : `pint.Quantity`
U-component of wind
v : `pint.Quantity`
V-component of wind
height : `pint.Quantity`, optional
Heights from sounding
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa. Defaults to 100 hPa.
bottom: `pint.Quantity`, optional
The bottom of the layer in height or pressure coordinates.
If using a height, it must be in the same coordinates as the given
heights (i.e., don't use meters AGL unless given heights
are in meters AGL.) Defaults to the highest pressure or lowest height given.
Returns
-------
u_shr: `pint.Quantity`
U-component of layer bulk shear
v_shr: `pint.Quantity`
V-component of layer bulk shear
Examples
--------
>>> from metpy.calc import bulk_shear, wind_components
>>> from metpy.units import units
>>> p = [1000, 925, 850, 700, 500] * units.hPa
>>> wdir = [165, 180, 190, 210, 220] * units.degree
>>> sped = [5, 15, 20, 30, 50] * units.knots
>>> u, v = wind_components(sped, wdir)
>>> bulk_shear(p, u, v)
(<Quantity(2.41943319, 'knot')>, <Quantity(11.6920573, 'knot')>)
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``heights`` parameter to ``height``
"""
_, u_layer, v_layer = get_layer(pressure, u, v, height=height,
bottom=bottom, depth=depth)
u_shr = u_layer[-1] - u_layer[0]
v_shr = v_layer[-1] - v_layer[0]
return u_shr, v_shr
@exporter.export
@preprocess_and_wrap(wrap_like='mucape')
@check_units('[energy] / [mass]', '[speed] * [speed]', '[speed]')
def supercell_composite(mucape, effective_storm_helicity, effective_shear):
r"""Calculate the supercell composite parameter.
The supercell composite parameter is designed to identify
environments favorable for the development of supercells,
and is calculated using the formula developed by
[Thompson2004]_:
.. math:: \text{SCP} = \frac{\text{MUCAPE}}{1000 \text{J/kg}} *
\frac{\text{Effective SRH}}{50 \text{m}^2/\text{s}^2} *
\frac{\text{Effective Shear}}{20 \text{m/s}}
The effective_shear term is set to zero below 10 m/s and
capped at 1 when effective_shear exceeds 20 m/s.
Parameters
----------
mucape : `pint.Quantity`
Most-unstable CAPE
effective_storm_helicity : `pint.Quantity`
Effective-layer storm-relative helicity
effective_shear : `pint.Quantity`
Effective bulk shear
Returns
-------
`pint.Quantity`
Supercell composite
Examples
--------
>>> from metpy.calc import supercell_composite
>>> from metpy.units import units
>>> supercell_composite(2500 * units('J/kg'), 125 * units('m^2/s^2'),
... 50 * units.knot).to_base_units()
<Quantity([6.25], 'dimensionless')>
"""
effective_shear = np.clip(np.atleast_1d(effective_shear), None, units.Quantity(20, 'm/s'))
effective_shear[effective_shear < units.Quantity(10, 'm/s')] = units.Quantity(0, 'm/s')
effective_shear = effective_shear / units.Quantity(20, 'm/s')
return ((mucape / units.Quantity(1000, 'J/kg'))
* (effective_storm_helicity / units.Quantity(50, 'm^2/s^2'))
* effective_shear).to('dimensionless')
@exporter.export
@preprocess_and_wrap(wrap_like='sbcape')
@check_units('[energy] / [mass]', '[length]', '[speed] * [speed]', '[speed]')
def significant_tornado(sbcape, surface_based_lcl_height, storm_helicity_1km, shear_6km):
r"""Calculate the significant tornado parameter (fixed layer).
The significant tornado parameter is designed to identify
environments favorable for the production of significant
tornadoes contingent upon the development of supercells.
It's calculated according to the formula used on the SPC
mesoanalysis page, updated in [Thompson2004]_:
.. math:: \text{SIGTOR} = \frac{\text{SBCAPE}}{1500 \text{J/kg}} * \frac{(2000 \text{m} -
\text{LCL}_\text{SB})}{1000 \text{m}} *
\frac{SRH_{\text{1km}}}{150 \text{m}^2/\text{s}^2} *
\frac{\text{Shear}_\text{6km}}{20 \text{m/s}}
The LCL height term is set to zero when the LCL is above 2000 m and
capped at 1 when the LCL is below 1000 m, and the shear term is set to 0
when the 6 km shear is below 12.5 m/s and capped at 1.5 when it
exceeds 30 m/s.
Parameters
----------
sbcape : `pint.Quantity`
Surface-based CAPE
surface_based_lcl_height : `pint.Quantity`
Surface-based lifted condensation level
storm_helicity_1km : `pint.Quantity`
Surface-1km storm-relative helicity
shear_6km : `pint.Quantity`
Surface-6km bulk shear
Returns
-------
`pint.Quantity`
Significant tornado parameter
Examples
--------
>>> from metpy.calc import significant_tornado
>>> from metpy.units import units
>>> significant_tornado(3000 * units('J/kg'), 750 * units.meters,
... 150 * units('m^2/s^2'), 25 * units.knot).to_base_units()
<Quantity([1.28611111], 'dimensionless')>
"""
surface_based_lcl_height = np.clip(np.atleast_1d(surface_based_lcl_height),
units.Quantity(1000., 'm'), units.Quantity(2000., 'm'))
surface_based_lcl_height = ((units.Quantity(2000., 'm') - surface_based_lcl_height)
/ units.Quantity(1000., 'm'))
shear_6km = np.clip(np.atleast_1d(shear_6km), None, units.Quantity(30, 'm/s'))
shear_6km[shear_6km < units.Quantity(12.5, 'm/s')] = units.Quantity(0, 'm/s')
shear_6km /= units.Quantity(20, 'm/s')
return ((sbcape / units.Quantity(1500., 'J/kg'))
* surface_based_lcl_height
* (storm_helicity_1km / units.Quantity(150., 'm^2/s^2'))
* shear_6km)
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[speed]', '[speed]', '[length]', '[speed]', '[speed]')
def critical_angle(pressure, u, v, height, u_storm, v_storm):
r"""Calculate the critical angle.
The critical angle is the angle between the 10m storm-relative inflow vector
and the 10m-500m shear vector. A critical angle near 90 degrees indicates
that a storm in this environment on the indicated storm motion vector
is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
showed that significantly tornadic supercells tend to occur in environments
with critical angles near 90 degrees.
Parameters
----------
pressure : `pint.Quantity`
Pressures from sounding
u : `pint.Quantity`
U-component of sounding winds
v : `pint.Quantity`
V-component of sounding winds
height : `pint.Quantity`
Heights from sounding
u_storm : `pint.Quantity`
U-component of storm motion
v_storm : `pint.Quantity`
V-component of storm motion
Returns
-------
`pint.Quantity`
Critical angle in degrees
Examples
--------
>>> from metpy.calc import critical_angle, wind_components
>>> from metpy.units import units
>>> p = [1000, 925, 850, 700, 500, 400] * units.hPa
>>> h = [250, 700, 1500, 3100, 5720, 7120] * units.meters
>>> wdir = [165, 180, 190, 210, 220, 250] * units.degree
>>> sped = [5, 15, 20, 30, 50, 60] * units.knots
>>> u, v = wind_components(sped, wdir)
>>> critical_angle(p, u, v, h, 7 * units.knots, 7 * units.knots)
<Quantity(67.0942521, 'degree')>
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``heights`` parameter to ``height``
"""
# Convert everything to m/s
u = u.to('m/s')
v = v.to('m/s')
u_storm = u_storm.to('m/s')
v_storm = v_storm.to('m/s')
sort_inds = np.argsort(pressure[::-1])
pressure = pressure[sort_inds]
height = height[sort_inds]
u = u[sort_inds]
v = v[sort_inds]
# Calculate sfc-500m shear vector
shr5 = bulk_shear(pressure, u, v, height=height, depth=units.Quantity(500, 'meter'))
# Make everything relative to the sfc wind orientation
umn = u_storm - u[0]
vmn = v_storm - v[0]
vshr = np.asarray([shr5[0].magnitude, shr5[1].magnitude])
vsm = np.asarray([umn.magnitude, vmn.magnitude])
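# cos(critical angle) is the normalized dot product of the sfc-500m shear vector
# and the storm-relative surface wind vector.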
angle_c = np.dot(vshr, vsm) / (np.linalg.norm(vshr) * np.linalg.norm(vsm))
critical_angle = units.Quantity(np.arccos(angle_c), 'radian')
return critical_angle.to('degrees') | PypiClean |
/Misago-0.36.1.tar.gz/Misago-0.36.1/misago/oauth2/exceptions.py | from django.utils.translation import gettext_lazy as _
class OAuth2Error(Exception):
pass
class OAuth2ProviderError(OAuth2Error):
pass
class OAuth2AccessDeniedError(OAuth2ProviderError):
message = _("The OAuth2 process was canceled by the provider.")
class OAuth2StateError(OAuth2Error):
recoverable = True
class OAuth2StateNotSetError(OAuth2StateError):
message = _("The OAuth2 session is missing state.")
class OAuth2StateNotProvidedError(OAuth2StateError):
message = _("The OAuth2 state was not sent by the provider.")
class OAuth2StateMismatchError(OAuth2StateError):
message = _(
"The OAuth2 state sent by the provider did not match one in the session."
)
class OAuth2CodeError(OAuth2Error):
recoverable = True
class OAuth2CodeNotProvidedError(OAuth2CodeError):
message = _("The OAuth2 authorization code was not sent by the provider.")
class OAuth2ProviderError(OAuth2Error):
recoverable = True
class OAuth2AccessTokenRequestError(OAuth2ProviderError):
message = _("Failed to connect to the OAuth2 provider to retrieve an access token.")
class OAuth2AccessTokenResponseError(OAuth2ProviderError):
message = _("The OAuth2 provider responded with error for an access token request.")
class OAuth2AccessTokenJSONError(OAuth2ProviderError):
message = _(
"The OAuth2 provider did not respond with a valid JSON "
"for an access token request."
)
class OAuth2AccessTokenNotProvidedError(OAuth2ProviderError):
message = _("JSON sent by the OAuth2 provider did not contain an access token.")
class OAuth2UserDataRequestError(OAuth2ProviderError):
message = _("Failed to connect to the OAuth2 provider to retrieve user profile.")
class OAuth2UserDataResponseError(OAuth2ProviderError):
message = _("The OAuth2 provider responded with error for user profile request.")
class OAuth2UserDataJSONError(OAuth2ProviderError):
message = _(
"The OAuth2 provider did not respond with a valid JSON "
"for user profile request."
)
class OAuth2UserIdNotProvidedError(OAuth2Error):
message = _("JSON sent by the OAuth2 provider did not contain a user id.")
class OAuth2UserAccountDeactivatedError(OAuth2Error):
recoverable = False
message = _(
"User account associated with the profile from the OAuth2 provider was "
"deactivated by the site administrator."
)
class OAuth2UserDataValidationError(OAuth2ProviderError):
recoverable = False
error_list: list[str]
message = _("User profile retrieved from the OAuth2 provider did not validate.")
def __init__(self, error_list: list[str]):
self.error_list = error_list | PypiClean |
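# Illustrative handling sketch (not part of this module): callers can branch on the
# ``recoverable`` flag and surface ``message`` / ``error_list`` to the user, e.g.:
#
#   try:
#       ...  # run the OAuth2 flow
#   except OAuth2Error as error:
#       if getattr(error, "recoverable", False):
#           ...  # allow the user to retry sign-in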
/Gbtestapi0.3-0.1a10-py3-none-any.whl/gailbot/services/organizer/organizer.py | from typing import Dict, List, Union, Callable
from .source import SourceObject, SourceManager
from gailbot.core.utils.logger import makelogger
from .settings import SettingManager, SettingObject, SettingDict
from gailbot.configs import default_setting_loader
logger = makelogger("organizer")
CONFIG = default_setting_loader()
DEFAULT_SETTING_NAME = CONFIG.profile_name
DEFAULT_SETTING = CONFIG.profile_data
DEFAULT_ENGINE_NAME = CONFIG.engine_name
DEFAULT_ENGINE_SETTING = CONFIG.engine_data
class Organizer:
def __init__(
self, setting_workspace: str, load_exist_setting: bool = False
) -> None:
self.setting_manager = SettingManager(setting_workspace, load_exist_setting)
self.source_manager = SourceManager()
def add_source(self, source_path: str, output: str) -> Union[str, bool]:
"""
Adds given source to the output directory
Args:
source_path: str: path to the source to add
output: str: path to the output directory
Returns:
Union[str, bool]: return the name if successfully added, false if not
"""
try:
name = self.source_manager.add_source(source_path, output)
assert name
return name
except Exception as e:
logger.error(e, exc_info=e)
return False
def remove_source(self, source_name: str) -> bool:
"""
Removes given source
Args:
source_path: str: path to the source to remove
Returns:
bool: True if successfully removed, false if not
"""
return self.source_manager.remove_source(source_name)
def is_source(self, source_name: str) -> bool:
"""
Determines if given name corresponds to an existing source
Args:
source_name: str: name of potential source
Returns:
bool: true if the name corresponds to an existing source, false if not
"""
return self.source_manager.is_source(source_name)
def get_source(self, source_name: str) -> Union[bool, SourceObject]:
"""
Accesses source with a given name
Args:
source_name: str: source name to access
Returns:
Source object associated with the given name or false if source object is not found
"""
return self.source_manager.get_source(source_name)
def get_source_outdir(self, source_name: str) -> Union[bool, str]:
"""
Accesses source output directory with a given name
Args:
source_name: str: source name to access
Returns:
a string stores the output of the source
"""
return self.source_manager.get_source_outdir(source_name)
def get_source_setting(self, source_name: str) -> SettingObject:
"""
Accesses the settings of a source with a given name
Args:
source_name: str: source name whose setting to access
Returns:
Source settings associated with the given name or false if source object is not found
"""
return self.source_manager.get_source_setting(source_name)
def is_setting_applied(self, source_name: str) -> bool:
"""
Determines if a given source has configured settings
Args:
source_name: str: source name to access
Returns:
bool: True if given source is configured, false if not
"""
return self.source_manager.is_source_configured(source_name)
def apply_setting_to_source(
self, source_name: str, setting_name: str, overwrite: bool = True
) -> bool:
"""apply setting to a source
Args:
sources (str): a string that identifies the source
setting (str): the setting name
overwrite (bool, optional): if true, overwrites the existing setting. Defaults to True.
Returns:
bool: return true if settings can be applied
"""
return self.source_manager.apply_setting_profile_to_source(
source_name, self.get_setting_obj(setting_name), overwrite
)
def apply_setting_to_sources(
self, sources: List[str], setting_name: str, overwrite: bool = True
) -> bool:
"""apply setting to a list of sources
Args:
sources (List[str]): a list of string that identifies the sources
setting (str): the setting name
overwrite (bool, optional): if true, overwrites the existing setting. Defaults to True.
Returns:
bool: return true if settings can be applied
"""
try:
for source in sources:
logger.info(f"organizer change {source} setting to {setting_name}")
assert self.apply_setting_to_source(source, setting_name, overwrite)
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def add_progress_display(self, source_name: str, displayer: Callable):
"""add a displayer function to the source to track the progress of the
source in the pipeline
Args:
source_name (str): the name of the source
displayer (Callable): a callable function that only takes in
one argument that stores the progress message
as a string
Returns:
bool: true if the displayer is added correctly, false otherwise
"""
return self.source_manager.add_progress_display(source_name, displayer)
def create_new_setting(self, setting_name: str, setting: SettingDict) -> bool:
"""create a new setting
Args:
name (str): the name of the setting
setting (Dict[str, str]): the setting content
Returns:
bool: return true if the setting can be created; if the setting uses
an existing name, the setting cannot be created
"""
return self.setting_manager.add_new_setting(setting_name, setting)
def save_setting_profile(self, setting_name: str) -> str:
"""save the setting locally on the disk
Args:
setting_name (str): the setting name of the setting
Returns:
bool: return true if the setting is saved correctly
"""
return self.setting_manager.save_setting(setting_name)
def rename_setting(self, setting_name: str, new_name: str) -> bool:
"""rename a setting
Args:
old_name (str): the old name that identifies the setting
new_name (str): the new name of the setting
Returns:
bool: return true if the setting can be renamed correctly,
return false if the new setting name has been taken
"""
try:
self.setting_manager.rename_setting(setting_name, new_name)
return True
except:
return False
def remove_setting(self, setting_name: str) -> bool:
"""remove a setting
Args:
setting_name (str): the name of the setting that will be removed
Returns:
bool: true if the setting is removed, false otherwise
"""
if not self.setting_manager.is_setting(setting_name):
return False
try:
assert self.setting_manager.remove_setting(setting_name)
sources = self.source_manager.get_sources_with_setting(setting_name)
for source in sources:
self.remove_setting_from_source(source)
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def update_setting(self, setting_name: str, new_setting: Dict[str, str]) -> bool:
"""updating the setting with new setting content
Args:
setting_name (str): the setting name that identifies the setting
new_setting (SettingDict): the content of the new settings
Returns:
bool: return true if the setting can be updated correctly
"""
return self.setting_manager.update_setting(setting_name, new_setting)
def get_setting_obj(self, setting_name: str) -> SettingObject:
"""get setting object that is identified by setting name
Args:
setting_name (str): the name that identifies the setting object
Returns:
SettingObject: a setting object that stores the setting data
"""
return self.setting_manager.get_setting(setting_name)
def get_setting_dict(self, setting_name: str) -> Union[bool, SettingDict]:
"""given a source name, return the setting content of the source
in a dictionary
Args:
source_name (str): name that identifies a source
Returns:
Union[bool, SettingDict]: if the source is found, returns its setting
content stored in a dictionary, else returns false
"""
return self.setting_manager.get_setting_dict(setting_name)
def is_setting(self, setting_name: str) -> bool:
"""check if a setting exists or not
Args:
name (str): names that identifies the settings
Returns:
bool: return true if the setting exists, false otherwise
"""
return self.setting_manager.is_setting(setting_name)
def is_setting_in_use(self, setting_name: str) -> bool:
"""check if a setting is being used by any source
Args:
setting_name (str): the name of the setting
Returns:
bool: return true if the setting is being used, false otherwise
"""
src_with_set = self.source_manager.get_sources_with_setting(setting_name)
return len(src_with_set) > 0
def remove_setting_from_source(self, source_name: str) -> bool:
"""given a source name, remove the current setting from the source,
set the setting of the source to default
Args:
source_name (str): the name that identifies the source
Returns:
bool: return true if the setting is removed successfully false otherwise
"""
return self.apply_setting_to_source(source_name, DEFAULT_SETTING_NAME, True)
def get_plugin_setting(self, name: str):
"""returns the plugin setting of the setting
Args:
setting_name (str): name that identifies a setting
Returns:
Union[bool, Dict[str, str]]: if the setting is found, return the
list of string that identifies which plugins are used, else return
false
"""
setting: SettingObject = self.setting_manager.get_setting(name)
if setting:
return setting.get_plugin_setting()
else:
return False
def get_configured_sources(self, sources: List[str] = None) -> List[SourceObject]:
given a list of source names, return a list of the SourceObject
instances that store the sources configured with settings
Args:
sources (List[str], optional): a list of source name, if not
given, return a list of configured source. Defaults to None.
Returns:
List[SourceObject]: a list of source object that stores the source data
"""
return self.source_manager.get_configured_sources(sources)
def get_engine_setting_names(self) -> List[str]:
"""get a list of available engine setting name
Returns:
List[str]: the list of engine setting name
"""
return self.setting_manager.get_engine_setting_names()
def add_new_engine(self, name, setting, overwrite=False) -> bool:
"""add a new engine setting
Args:
name (str): the name of the engine setting
setting (Dict[str, str]): the setting data stored in a dictionary
overwrite (bool, optional): if True, overwrite the existing
engine setting with the same name. Defaults to False.
Returns:
bool: return True if the engine setting is successfully created
"""
return self.setting_manager.add_new_engine(name, setting, overwrite)
def remove_engine_setting(self, name) -> bool:
remove the engine setting identified by name
Args:
name (str): the name of the engine setting to be removed
Returns:
bool: return True if the engine setting is successfully removed
"""
return self.setting_manager.remove_engine_setting(name)
def update_engine_setting(self, name, setting_data: Dict[str, str]) -> bool:
"""update the engine setting identified by name
Args:
name (str): the name of the engine setting to be updated
setting_data (Dict[str, str]): the content of the new setting
Returns:
bool: return True if the engine setting is successfully updated
"""
return self.setting_manager.update_engine_setting(name, setting_data)
def is_engine_setting(self, name: str):
check if the given name identifies an engine setting
Args:
name (str): the name of the engine setting
"""
return self.setting_manager.is_engine_setting(name)
def get_engine_setting_data(self, name: str) -> Union[bool, Dict[str, str]]:
get the engine setting data
Args:
name (str): the name of the engine setting
Returns:
Union[bool, Dict[str, str]]: if the engine setting name is available
return the engine setting data as stored in a dictionary, else return False
"""
return self.setting_manager.get_engine_setting_data(name)
def is_engine_setting_in_use(self, name: str) -> bool:
"""check if the engine setting identified by name is in use
Args:
name (str): the name of the engine setting
Returns:
bool: return true if the engine setting is in use, false otherwise
"""
return self.setting_manager.is_engine_setting_in_use(name)
def remove_all_settings(self) -> bool:
"""remove all settings except for the default setting
Returns:
bool: return true if the removal is successful
"""
try:
for setting in self.setting_manager.get_setting_names():
if setting != DEFAULT_SETTING_NAME:
assert self.remove_setting(setting)
for source in self.source_manager.get_configured_sources():
assert source.setting_name() == DEFAULT_SETTING_NAME
return True
except Exception as e:
logger.error(e, exc_info=e)
return False
def get_setting_names(self) -> List[str]:
"""return a list of available setting names
Returns:
List[str]: a list of available setting names
"""
return self.setting_manager.get_setting_names()
def get_all_settings_data(self) -> Dict[str, SettingDict]:
"""
return a dictionary that stores all setting data
"""
return self.setting_manager.get_all_settings_data()
def get_all_profile_names(self) -> List[str]:
"""
return a list of strings that stores all setting names
"""
return self.setting_manager.get_setting_names()
def get_default_engine_setting_name(self) -> str:
"""get the default setting name
Returns:
str: a string that represent the default setting
"""
return self.setting_manager.get_default_engine_setting_name()
def get_default_profile_setting_name(self) -> str:
"""get the default setting name
Returns:
str: a string that represent the default setting
"""
return self.setting_manager.get_default_profile_setting_name()
def set_default_setting(self, setting_name: str) -> bool:
"""set the default setting to setting_name
Args:
setting_name (str)
Returns:
bool:return true if the setting can be set, false otherwise
"""
return self.setting_manager.set_to_default_setting(setting_name)
def set_default_engine(self, engine_name: str) -> bool:
"""set the default setting to engine_name
Args:
engine_name (str)
Returns:
bool:return true if the setting can be set, false otherwise
"""
return self.setting_manager.set_to_default_engine_setting(engine_name)
def is_suite_in_use(self, suite_name: str) -> bool:
"""given a suite_name, check if this suite is used
in any of the setting
Args:
suite_name (str): the name of the plugin suite
Returns:
bool: return true if the suite is used in any of the setting,
false otherwise
"""
return self.setting_manager.is_suite_in_use(suite_name)
def get_profile_src_path(self, name: str):
"""get the path to the profile setting source
Args:
name (str): the name of the profile
"""
return self.setting_manager.get_profile_src_path(name)
def get_engine_src_path(self, name: str):
"""get the path to the engine setting source
Args:
name (str): the name of the engine
"""
return self.setting_manager.get_engine_src_path(name) | PypiClean |
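# Minimal usage sketch (illustrative only; the workspace path, source file and profile
# name below are hypothetical, and the SettingDict payload is elided):
#
#   organizer = Organizer("/tmp/gailbot_workspace", load_exist_setting=False)
#   source = organizer.add_source("/data/interview.wav", "/data/output")
#   organizer.create_new_setting("my_profile", {...})  # hypothetical profile data
#   organizer.apply_setting_to_source(source, "my_profile")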
/BoJo-0.1.2.tar.gz/BoJo-0.1.2/bojo/command_line.py |
import json
import sys
from datetime import datetime
from typing import List, Optional
import click
import dateparser
import sqlalchemy as sql
from bojo.config import should_use_verbose
from bojo.db import (
get_session,
Item,
ItemState,
ItemStateDict,
ItemSignifier,
ItemSignifierDict,
)
from bojo.render_utils import (
parse_choice,
parse_state,
parse_signifier,
render_items,
render_title,
NONE_STR,
)
from bojo.subcommands.list import list_command
if should_use_verbose():
STATE_OPTS = ', '.join(
[f'[{k}] {v.value}' for k, v in ItemStateDict.items()])
SIGNIFIER_OPTS = ', '.join(
[f'[{k}] {v.value}' for k, v in ItemSignifierDict.items()])
STATE_PROMPT = f'State ({STATE_OPTS})'
SIGNIFIER_PROMPT = f'Signifier ({SIGNIFIER_OPTS})'
else:
STATE_PROMPT = 'State'
SIGNIFIER_PROMPT = 'Signifier'
@click.group()
def cli():
"""A command-line bullet journal."""
pass
cli.add_command(list_command)
@cli.command(help='Provides information about annotation')
def info() -> None:
click.echo(Item.info())
@cli.command(help='Delete an item forever')
@click.argument('id', type=int)
def delete(id: int) -> None:
session = get_session()
item = session.query(Item).get(id)
if item is None:
raise RuntimeError(f'Item {id} not found')
click.echo(item)
if click.confirm('Do you want to delete this item?', abort=True):
session.delete(item)
session.commit()
click.echo('Deleted item')
@cli.command(help='Update item state')
@click.argument('state', type=str)
@click.argument('id', type=int)
def mark(state: str, id: int) -> None:
session = get_session()
item = session.query(Item).get(id)
if item is None:
raise RuntimeError(f'Item {id} not found')
state = parse_choice(state)
if isinstance(state, ItemState):
item.state = state
ostr = f'Marked item {id} as {state.value}'
elif isinstance(state, ItemSignifier):
item.signifier = state
ostr = f'Marked item {id} as {state.value}'
else:
item.signifier = None
ostr = f'Cleared signifier for item {id}'
session.commit()
click.echo(item)
click.echo(ostr)
@cli.command(help='Mark past items as complete')
def complete() -> None:
session = get_session()
items = session.query(Item) \
.filter(Item.time < datetime.now()) \
.filter(Item.state != ItemState.COMPLETE)
num_items = items.count()
if num_items:
if click.confirm(f'Mark {num_items} items as complete?', abort=True):
items.update({'state': ItemState.COMPLETE})
session.commit()
click.echo(f'Completed {num_items} items')
else:
click.echo('All past items are complete')
@cli.command(help='Run a text query on all items')
@click.argument('substring')
@click.option('-s', '--show-complete', is_flag=True, help='If set, show completed items')
def query(substring: str, show_complete: bool) -> None:
session = get_session()
query = session.query(Item).filter(Item.description.contains(substring))
if not show_complete:
query = query.filter(Item.state != ItemState.COMPLETE)
items = query.order_by(Item.time_updated.desc())
render_items(items, 'Matching Items', 'No matching items found')
@cli.command('export', help='Exports events to JSON')
@click.argument('file', default='-')
def export_func(file: str) -> None:
session = get_session()
all_items = [item.as_dict() for item in session.query(Item)]
with click.open_file(file, 'w') as f:
json.dump(all_items, f, indent=2)
@cli.command('import', help='Imports events from JSON')
@click.argument('file', default='-')
def import_func(file: str) -> None:
session = get_session()
click.get_text_stream('stdin')
all_items = []
with click.open_file(file, 'r') as f:
for item_str in json.load(f):
all_items.append(Item.from_dict(item_str))
session = get_session()
session.add_all(all_items)
session.commit()
click.echo(f'Added {len(all_items)} items')
@cli.command(help='Adds a new item')
@click.option('-d', '--description', prompt=f'Description',
help='The description of the item being added')
@click.option('-s', '--state', prompt=STATE_PROMPT,
help='The state of the item being added')
@click.option('--signifier', prompt=SIGNIFIER_PROMPT,
default=NONE_STR, help='The signifier of the item being added')
@click.option('-p', '--parent', prompt='Parent', default=NONE_STR,
help='The parent ID of the item being added')
@click.option('-t', '--time', prompt='Time', default=NONE_STR,
help='The time of the item being added')
def add(description: str, state: str, signifier: str, parent: str, time: str) -> None:
# Parses the parent.
if parent != NONE_STR:
parent = int(parent)
state = parse_state(state)
signifier = parse_signifier(signifier)
# Parses the time.
if time != NONE_STR:
time = dateparser.parse(time)
else:
time = None
# Creates the item to insert.
item = Item(description=description, state=state,
signifier=signifier, time=time, parent_id=parent)
click.echo(item)
if click.confirm('Do you want to add this item?', abort=True):
session = get_session()
session.add(item)
session.commit()
click.echo('Added item') | PypiClean |
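# Example invocations (illustrative; assumes the installed console script is named `bojo`):
#
#   bojo info                  # describe the annotation scheme
#   bojo complete              # mark past items as complete
#   bojo export items.json     # dump all items to JSON
#   bojo query report          # text search across open items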
/APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/schedulers/async_.py | from __future__ import annotations
import os
import platform
import random
from contextlib import AsyncExitStack
from datetime import datetime, timedelta, timezone
from logging import Logger, getLogger
from typing import Any, Callable, Iterable, Mapping, cast
from uuid import UUID, uuid4
import anyio
import attrs
from anyio import TASK_STATUS_IGNORED, create_task_group, move_on_after
from anyio.abc import TaskGroup, TaskStatus
from .._context import current_scheduler
from .._converters import as_async_datastore, as_async_eventbroker
from .._enums import CoalescePolicy, ConflictPolicy, JobOutcome, RunState
from .._events import (
Event,
JobReleased,
ScheduleAdded,
SchedulerStarted,
SchedulerStopped,
ScheduleUpdated,
)
from .._exceptions import (
JobCancelled,
JobDeadlineMissed,
JobLookupError,
ScheduleLookupError,
)
from .._structures import Job, JobResult, Schedule, Task
from ..abc import AsyncDataStore, AsyncEventBroker, Subscription, Trigger
from ..datastores.memory import MemoryDataStore
from ..eventbrokers.async_local import LocalAsyncEventBroker
from ..marshalling import callable_to_ref
from ..workers.async_ import AsyncWorker
_microsecond_delta = timedelta(microseconds=1)
_zero_timedelta = timedelta()
@attrs.define(eq=False)
class AsyncScheduler:
"""An asynchronous (AnyIO based) scheduler implementation."""
data_store: AsyncDataStore = attrs.field(
converter=as_async_datastore, factory=MemoryDataStore
)
event_broker: AsyncEventBroker = attrs.field(
converter=as_async_eventbroker, factory=LocalAsyncEventBroker
)
identity: str = attrs.field(kw_only=True, default=None)
start_worker: bool = attrs.field(kw_only=True, default=True)
logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_task_group: TaskGroup | None = attrs.field(init=False, default=None)
_wakeup_event: anyio.Event = attrs.field(init=False)
_wakeup_deadline: datetime | None = attrs.field(init=False, default=None)
_schedule_added_subscription: Subscription = attrs.field(init=False)
def __attrs_post_init__(self) -> None:
if not self.identity:
self.identity = f"{platform.node()}-{os.getpid()}-{id(self)}"
async def __aenter__(self):
self._task_group = create_task_group()
await self._task_group.__aenter__()
await self._task_group.start(self._run)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.stop()
await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
self._task_group = None
def _schedule_added_or_modified(self, event: Event) -> None:
event_ = cast("ScheduleAdded | ScheduleUpdated", event)
if not self._wakeup_deadline or (
event_.next_fire_time and event_.next_fire_time < self._wakeup_deadline
):
self.logger.debug(
"Detected a %s event – waking up the scheduler", type(event).__name__
)
self._wakeup_event.set()
@property
def state(self) -> RunState:
"""The current running state of the scheduler."""
return self._state
async def add_schedule(
self,
func_or_task_id: str | Callable,
trigger: Trigger,
*,
id: str | None = None,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
coalesce: CoalescePolicy = CoalescePolicy.latest,
misfire_grace_time: float | timedelta | None = None,
max_jitter: float | timedelta | None = None,
tags: Iterable[str] | None = None,
conflict_policy: ConflictPolicy = ConflictPolicy.do_nothing,
) -> str:
"""
Schedule a task to be run one or more times in the future.
:param func_or_task_id: either a callable or an ID of an existing task
definition
:param trigger: determines the times when the task should be run
:param id: an explicit identifier for the schedule (if omitted, a random, UUID
based ID will be assigned)
:param args: positional arguments to be passed to the task function
:param kwargs: keyword arguments to be passed to the task function
:param coalesce: determines what to do when processing the schedule if multiple
fire times have become due for this schedule since the last processing
:param misfire_grace_time: maximum number of seconds the scheduled job's actual
run time is allowed to be late, compared to the scheduled run time
:param max_jitter: maximum number of seconds to randomly add to the scheduled
time for each job created from this schedule
:param tags: strings that can be used to categorize and filter the schedule and
its derivative jobs
:param conflict_policy: determines what to do if a schedule with the same ID
already exists in the data store
:return: the ID of the newly added schedule
"""
id = id or str(uuid4())
args = tuple(args or ())
kwargs = dict(kwargs or {})
tags = frozenset(tags or ())
if isinstance(misfire_grace_time, (int, float)):
misfire_grace_time = timedelta(seconds=misfire_grace_time)
if callable(func_or_task_id):
task = Task(id=callable_to_ref(func_or_task_id), func=func_or_task_id)
await self.data_store.add_task(task)
else:
task = await self.data_store.get_task(func_or_task_id)
schedule = Schedule(
id=id,
task_id=task.id,
trigger=trigger,
args=args,
kwargs=kwargs,
coalesce=coalesce,
misfire_grace_time=misfire_grace_time,
max_jitter=max_jitter,
tags=tags,
)
schedule.next_fire_time = trigger.next()
await self.data_store.add_schedule(schedule, conflict_policy)
self.logger.info(
"Added new schedule (task=%r, trigger=%r); next run time at %s",
task,
trigger,
schedule.next_fire_time,
)
return schedule.id
async def get_schedule(self, id: str) -> Schedule:
"""
Retrieve a schedule from the data store.
:param id: the unique identifier of the schedule
:raises ScheduleLookupError: if the schedule could not be found
"""
schedules = await self.data_store.get_schedules({id})
if schedules:
return schedules[0]
else:
raise ScheduleLookupError(id)
async def get_schedules(self) -> list[Schedule]:
"""
Retrieve all schedules from the data store.
:return: a list of schedules, in an unspecified order
"""
return await self.data_store.get_schedules()
async def remove_schedule(self, id: str) -> None:
"""
Remove the given schedule from the data store.
:param id: the unique identifier of the schedule
"""
await self.data_store.remove_schedules({id})
async def add_job(
self,
func_or_task_id: str | Callable,
*,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
tags: Iterable[str] | None = None,
result_expiration_time: timedelta | float = 0,
) -> UUID:
"""
Add a job to the data store.
:param func_or_task_id:
:param args: positional arguments to call the target callable with
:param kwargs: keyword arguments to call the target callable with
:param tags: strings that can be used to categorize and filter the job
:param result_expiration_time: the minimum time (as seconds, or timedelta) to
keep the result of the job available for fetching (the result won't be
saved at all if that time is 0)
:return: the ID of the newly created job
"""
if callable(func_or_task_id):
task = Task(id=callable_to_ref(func_or_task_id), func=func_or_task_id)
await self.data_store.add_task(task)
else:
task = await self.data_store.get_task(func_or_task_id)
job = Job(
task_id=task.id,
args=args or (),
kwargs=kwargs or {},
tags=tags or frozenset(),
result_expiration_time=result_expiration_time,
)
await self.data_store.add_job(job)
return job.id
async def get_job_result(self, job_id: UUID, *, wait: bool = True) -> JobResult:
"""
Retrieve the result of a job.
:param job_id: the ID of the job
:param wait: if ``True``, wait until the job has ended (one way or another),
``False`` to raise an exception if the result is not yet available
:raises JobLookupError: if ``wait=False`` and the job result does not exist in
the data store
"""
wait_event = anyio.Event()
def listener(event: JobReleased) -> None:
if event.job_id == job_id:
wait_event.set()
with self.data_store.events.subscribe(listener, {JobReleased}):
result = await self.data_store.get_job_result(job_id)
if result:
return result
elif not wait:
raise JobLookupError(job_id)
await wait_event.wait()
return await self.data_store.get_job_result(job_id)
async def run_job(
self,
func_or_task_id: str | Callable,
*,
args: Iterable | None = None,
kwargs: Mapping[str, Any] | None = None,
tags: Iterable[str] | None = (),
) -> Any:
"""
Convenience method to add a job and then return its result.
If the job raised an exception, that exception will be reraised here.
:param func_or_task_id: either a callable or an ID of an existing task
definition
:param args: positional arguments to be passed to the task function
:param kwargs: keyword arguments to be passed to the task function
:param tags: strings that can be used to categorize and filter the job
:returns: the return value of the task function
"""
job_complete_event = anyio.Event()
def listener(event: JobReleased) -> None:
if event.job_id == job_id:
job_complete_event.set()
job_id: UUID | None = None
with self.data_store.events.subscribe(listener, {JobReleased}):
job_id = await self.add_job(
func_or_task_id,
args=args,
kwargs=kwargs,
tags=tags,
result_expiration_time=timedelta(minutes=15),
)
await job_complete_event.wait()
result = await self.get_job_result(job_id)
if result.outcome is JobOutcome.success:
return result.return_value
elif result.outcome is JobOutcome.error:
raise result.exception
elif result.outcome is JobOutcome.missed_start_deadline:
raise JobDeadlineMissed
elif result.outcome is JobOutcome.cancelled:
raise JobCancelled
else:
raise RuntimeError(f"Unknown job outcome: {result.outcome}")
async def stop(self) -> None:
"""
Signal the scheduler that it should stop processing schedules.
This method does not wait for the scheduler to actually stop.
For that, see :meth:`wait_until_stopped`.
"""
if self._state is RunState.started:
self._state = RunState.stopping
self._wakeup_event.set()
async def wait_until_stopped(self) -> None:
"""
Wait until the scheduler is in the "stopped" or "stopping" state.
If the scheduler is already stopped or in the process of stopping, this method
returns immediately. Otherwise, it waits until the scheduler posts the
``SchedulerStopped`` event.
"""
if self._state in (RunState.stopped, RunState.stopping):
return
event = anyio.Event()
with self.event_broker.subscribe(
lambda ev: event.set(), {SchedulerStopped}, one_shot=True
):
await event.wait()
async def _run(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None:
if self._state is not RunState.stopped:
raise RuntimeError(
f'Cannot start the scheduler when it is in the "{self._state}" '
f"state"
)
self._state = RunState.starting
async with AsyncExitStack() as exit_stack:
self._wakeup_event = anyio.Event()
# Initialize the event broker
await self.event_broker.start()
exit_stack.push_async_exit(
lambda *exc_info: self.event_broker.stop(force=exc_info[0] is not None)
)
# Initialize the data store
await self.data_store.start(self.event_broker)
exit_stack.push_async_exit(
lambda *exc_info: self.data_store.stop(force=exc_info[0] is not None)
)
# Wake up the scheduler if the data store emits a significant schedule event
exit_stack.enter_context(
self.event_broker.subscribe(
self._schedule_added_or_modified, {ScheduleAdded, ScheduleUpdated}
)
)
# Start the built-in worker, if configured to do so
if self.start_worker:
token = current_scheduler.set(self)
exit_stack.callback(current_scheduler.reset, token)
worker = AsyncWorker(
self.data_store, self.event_broker, is_internal=True
)
await exit_stack.enter_async_context(worker)
# Signal that the scheduler has started
self._state = RunState.started
task_status.started()
await self.event_broker.publish_local(SchedulerStarted())
exception: BaseException | None = None
try:
while self._state is RunState.started:
schedules = await self.data_store.acquire_schedules(
self.identity, 100
)
now = datetime.now(timezone.utc)
for schedule in schedules:
# Calculate a next fire time for the schedule, if possible
fire_times = [schedule.next_fire_time]
calculate_next = schedule.trigger.next
while True:
try:
fire_time = calculate_next()
except Exception:
self.logger.exception(
"Error computing next fire time for schedule %r of "
"task %r – removing schedule",
schedule.id,
schedule.task_id,
)
break
# Stop if the calculated fire time is in the future
if fire_time is None or fire_time > now:
schedule.next_fire_time = fire_time
break
# Only keep all the fire times if coalesce policy = "all"
if schedule.coalesce is CoalescePolicy.all:
fire_times.append(fire_time)
elif schedule.coalesce is CoalescePolicy.latest:
fire_times[0] = fire_time
# Add one or more jobs to the job queue
max_jitter = (
schedule.max_jitter.total_seconds()
if schedule.max_jitter
else 0
)
for i, fire_time in enumerate(fire_times):
# Calculate a jitter if max_jitter > 0
jitter = _zero_timedelta
if max_jitter:
if i + 1 < len(fire_times):
next_fire_time = fire_times[i + 1]
else:
next_fire_time = schedule.next_fire_time
if next_fire_time is not None:
# Jitter must never be so high that it would cause a
# fire time to equal or exceed the next fire time
jitter_s = min(
[
max_jitter,
(
next_fire_time
- fire_time
- _microsecond_delta
).total_seconds(),
]
)
jitter = timedelta(
seconds=random.uniform(0, jitter_s)
)
fire_time += jitter
schedule.last_fire_time = fire_time
job = Job(
task_id=schedule.task_id,
args=schedule.args,
kwargs=schedule.kwargs,
schedule_id=schedule.id,
scheduled_fire_time=fire_time,
jitter=jitter,
start_deadline=schedule.next_deadline,
tags=schedule.tags,
)
await self.data_store.add_job(job)
# Update the schedules (and release the scheduler's claim on them)
await self.data_store.release_schedules(self.identity, schedules)
# If we received fewer schedules than the maximum amount, sleep
# until the next schedule is due or the scheduler is explicitly
# woken up
wait_time = None
if len(schedules) < 100:
self._wakeup_deadline = (
await self.data_store.get_next_schedule_run_time()
)
if self._wakeup_deadline:
wait_time = (
self._wakeup_deadline - datetime.now(timezone.utc)
).total_seconds()
self.logger.debug(
"Sleeping %.3f seconds until the next fire time (%s)",
wait_time,
self._wakeup_deadline,
)
else:
self.logger.debug("Waiting for any due schedules to appear")
with move_on_after(wait_time):
await self._wakeup_event.wait()
self._wakeup_event = anyio.Event()
else:
self.logger.debug(
"Processing more schedules on the next iteration"
)
except BaseException as exc:
exception = exc
raise
finally:
self._state = RunState.stopped
if isinstance(exception, Exception):
self.logger.exception("Scheduler crashed")
elif exception:
self.logger.info(
f"Scheduler stopped due to {exception.__class__.__name__}"
)
else:
self.logger.info("Scheduler stopped")
with move_on_after(3, shield=True):
await self.event_broker.publish_local(
SchedulerStopped(exception=exception)
) | PypiClean |
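# Minimal usage sketch (illustrative; assumes an IntervalTrigger is available from
# apscheduler.triggers.interval and that tick() is an existing task function):
#
#   async def main():
#       async with AsyncScheduler() as scheduler:
#           await scheduler.add_schedule(tick, IntervalTrigger(seconds=1))
#           await scheduler.wait_until_stopped()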
/Assembly-1.3.0.tar.gz/Assembly-1.3.0/assembly/utils.py | from __future__ import division
import os
import re
import time
import json
import uuid
import arrow
import string
import random
import hashlib
import datetime
from slugify import slugify
from distutils.file_util import copy_file, move_file
from distutils.dir_util import copy_tree, remove_tree, mkpath
from inflection import (dasherize,
underscore,
camelize,
pluralize,
singularize,
titleize,
ordinalize,
ordinal)
"""
--- Reference ---
gen_md5
gen_uuid
gen_uuid_hex
to_json
chunk_list
in_any_list
dict_replace
list_replace
DotDict
is_valid_email
is_valid_password
is_valid_username
is_valid_url
#lib: slugify
slugify
#lib: inflection
camelize
titleize
dasherize
underscore
plurialize
singularize
ordinalize
ordinal
copy_file,
move_file,
copy_tree,
remove_tree,
mkpath
"""
def is_valid_email(email):
"""
Check if email is valid
"""
pattern = re.compile(r'[\w\.-]+@[\w\.-]+[.]\w+')
return bool(pattern.match(email))
def is_valid_password(password):
"""
- length between 6 and 25 characters
- at least one digit
- at least one uppercase and one lowercase letter
- at least one special character
:return bool:
"""
pattern = re.compile('^(?=\S{6,25}$)(?=.*?\d)(?=.*?[a-z])(?=.*?[A-Z])(?=.*?[^A-Za-z\s0-9])')
return bool(pattern.match(password))
def is_valid_username(username):
"""
Check if a valid username.
valid:
oracle
bill-gates
steve.jobs
micro_soft
not valid
Bill Gates - no space allowed
[email protected] - @ is not a valid character
:param username: string
:return:
"""
pattern = re.compile(r"^[a-zA-Z0-9_.-]+$")
return bool(pattern.match(username))
def is_valid_url(url):
"""
Check if url is valid
"""
pattern = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
#r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return bool(pattern.match(url))
def gen_md5(value):
"""
Generates MD5
:param value: string
:return: string
"""
return hashlib.md5(value.encode()).hexdigest()
def gen_uuid():
"""
Generates and returns a UUID 4 value
:return: string
"""
return str(uuid.uuid4())
def gen_uuid_hex():
"""
Creates and returns a UUID 4 hex value
:return: string
"""
return uuid.uuid4().hex
def chunk_list(items, size):
"""
Return a list of chunks
:param items: List
:param size: int The number of items per chunk
:return: List
"""
size = max(1, size)
return [items[i:i + size] for i in range(0, len(items), size)]
def in_any_list(list1, list2):
"""
Check if any items in list1 are in list2
:param list1: list
:param list2: list
:return:
"""
list1 = list1.split(" ") if isinstance(list1, str) else list1
list2 = list2.split(" ") if isinstance(list2, str) else list2
return any(i in list2 for i in list1)
def generate_random_string(length=8):
"""
Generate a random string
"""
char_set = string.ascii_uppercase + string.digits
return ''.join(random.sample(char_set * (length - 1), length))
def generate_random_hash(size=32):
"""
Return a random hash key
:param size: The max size of the hash
:return: string
"""
return os.urandom(size // 2).hex()
def list_replace(subject_list, replacement, string):
"""
To replace a list of items by a single replacement
:param subject_list: list
:param replacement: string
:param string: string
:return: string
"""
for s in subject_list:
string = string.replace(s, replacement)
return string
def dict_replace(subject_dict, string):
"""
Replace each key of the dict map with its value in the string
:param subject_dict: dict
:param string: string
:return: string
"""
for i, j in subject_dict.items():
string = string.replace(i, j)
return string
def to_json(d):
"""
Convert data to json. It formats datetime/arrow time
:param d: dict or list
:return: json data
"""
return json.dumps(d, cls=_JSONEncoder)
class _JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, arrow.Arrow):
return obj.for_json()
elif isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
class DotDict(dict):
"""
A dict extension that allows dot notation to access the data.
ie: dict.get('key.key2.0.keyx')
my_dict = {...}
d = DotDict(my_dict)
d.get("key1")
d.get("key1.key2")
d.get("key3.key4.0.keyX")
Still have the ability to access it as a normal dict
d[key1][key2]
"""
def get(self, key, default=None):
"""
Access data via
:param key:
:param default: the default value
:return:
"""
try:
val = self
if "." not in key:
return self[key]
for k in key.split('.'):
if k.isdigit():
k = int(k)
val = val[k]
return val
except (TypeError, KeyError, IndexError) as e:
return default
def flatten_config_property(key, config):
"""
To flatten a config property
This method is mutable
Having a flask config or an object:
class Conf(object):
AWS = {
"ACCESS_KEY_ID": "",
"SECRET_ACCESS_KEY": ""
}
app = Flask(__name__)
app.config.from_object(Conf())
flatten_config_property("AWS", app.config)
it will flatten the config to be:
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
If the key exists already, it will not modify it
:param key: string - the key to flatten
:param dict: app.config - the flask app.config or dict
"""
if key in config:
for k, v in config[key].items():
_ = "%s_%s" % (key, k.upper())
if _ not in config:
config[_] = v
# ------------
# internal usage
def prepare_view_response(data):
"""
Prepare a view response from a data returned
params data (dict, tuple): the data from the view response
return tuple: data:dict, status:int|None, headers:dict|None
"""
if isinstance(data, dict) or data is None:
data = {} if data is None else data
return data, 200, None
elif isinstance(data, tuple):
data, status, headers = prepare_view_response_set_from_tuple(data)
return data or {}, status, headers
return data, None, None
def prepare_view_response_set_from_tuple(tuple_):
"""
Helper function to normalize view return values.
It always returns (dict, status, headers). Missing values will be None.
For example in such cases when tuple_ is
(dict, status), (dict, headers), (dict, status, headers),
(dict, headers, status)
It assumes that status is an int, so this construction will not work:
(dict, None, headers) - it doesn't make sense anyway, because you can just use
(dict, headers) if you want to skip status.
"""
v = tuple_ + (None,) * (3 - len(tuple_))
return v if isinstance(v[1], int) else (v[0], v[2], v[1]) | PypiClean |
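# For example (illustrative): (data, 200) -> (data, 200, None),
# (data, headers) -> (data, None, headers), and
# (data, 200, headers) is returned unchanged.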
/AutoDiffCC-1.1.1.tar.gz/AutoDiffCC-1.1.1/autodiffcc/Equation/__init__.py | all = ['util']
try:
from Equation._info import *
except ImportError:
from autodiffcc.Equation._info import *
try:
from Equation.core import Expression
except ImportError:
from autodiffcc.Equation.core import Expression
def load():
import os
import os.path
import sys
import traceback
try:
import importlib
except ImportError:
# Python 2.6 doesn't have importlib; use a dummy object
# that wraps __import__
class importlib():
@staticmethod
def import_module(name):
__import__(name)
return sys.modules[name]
try:
from Equation.core import recalculateFMatch
except ImportError:
from autodiffcc.Equation.core import recalculateFMatch
if not hasattr(load, "loaded"):
load.loaded = False
if not load.loaded:
load.loaded = True
plugins_loaded = {}
if __name__ == "__main__":
dirname = os.path.abspath(os.getcwd())
prefix = ""
else:
dirname = os.path.dirname(os.path.abspath(__file__))
if __package__ != None:
prefix = __package__ + "."
else:
prefix = ""
for file in os.listdir(dirname):
plugin_file,extension = os.path.splitext(os.path.basename(file))
if not plugin_file.lower().startswith("equation_",0,9) or extension.lower() not in ['.py','.pyc']:
continue
if plugin_file not in plugins_loaded:
plugins_loaded[plugin_file] = 1
try:
plugin_script = importlib.import_module(prefix + plugin_file)
except:
errtype, errinfo, errtrace = sys.exc_info()
fulltrace = ''.join(traceback.format_exception(errtype, errinfo, errtrace)[1:])
sys.stderr.write("Was unable to load {0:s}: {1:s}\nTraceback:\n{2:s}\n".format(plugin_file, errinfo, fulltrace))
continue
if not hasattr(plugin_script,'equation_extend'):
sys.stderr.write("The plugin '{0:s}' from file '{1:s}' is invalid because its missing the attribute 'equation_extend'\n".format(plugin_file,(dirname.rstrip('/') + '/' + plugin_file + extension)))
continue
plugin_script.equation_extend()
recalculateFMatch()
load()
del load | PypiClean |
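# Plugin contract sketch (hypothetical file): load() imports any ``equation_*.py``
# module found next to this package and calls its ``equation_extend()`` hook, e.g.
# a file ``equation_example.py`` containing:
#
#   def equation_extend():
#       # register additional operators/functions with the Equation core here
#       pass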
/Kallithea-0.7.0.tar.gz/Kallithea-0.7.0/docs/administrator_guide/vcs_setup.rst | .. _vcs_setup:
=============================
Version control systems setup
=============================
Kallithea supports Git and Mercurial repositories out-of-the-box.
For Git, you do need the ``git`` command line client installed on the server.
You can always disable Git or Mercurial support by editing the
file ``kallithea/__init__.py`` and commenting out the backend. For example, to
disable Git but keep Mercurial enabled:
.. code-block:: python
BACKENDS = {
'hg': 'Mercurial repository',
#'git': 'Git repository',
}
Git-specific setup
------------------
Web server with chunked encoding
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Large Git pushes require an HTTP server with support for
chunked encoding for POST. The Python web servers waitress_ and
gunicorn_ (Linux only) can be used. By default, Kallithea uses
waitress_ for `gearbox serve` instead of the built-in `paste` WSGI
server.
The web server used by gearbox is controlled in the .ini file::
use = egg:waitress#main
or::
use = egg:gunicorn#main
Also make sure to comment out the following options::
threadpool_workers =
threadpool_max_requests =
use_threadpool =
Increasing Git HTTP POST buffer size
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If Git pushes fail with HTTP error code 411 (Length Required), you may need to
increase the Git HTTP POST buffer. Run the following command as the user that
runs Kallithea to set a global Git variable to this effect::
git config --global http.postBuffer 524288000
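You can check the configured value afterwards with::
git config --global --get http.postBuffer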
.. _waitress: http://pypi.python.org/pypi/waitress
.. _gunicorn: http://pypi.python.org/pypi/gunicorn
.. _subrepositories: http://mercurial.aragost.com/kick-start/en/subrepositories/
| PypiClean |
/LFPykernels-0.2.0.tar.gz/LFPykernels-0.2.0/examples/README_example.ipynb | ```
%matplotlib inline
import os
import matplotlib.pyplot as plt
import scipy.stats as st
import numpy as np
from lfpykernels import GaussCylinderPotential, KernelApprox
import neuron
# recompile mod files if needed
mech_loaded = neuron.load_mechanisms('mod')
if not mech_loaded:
os.system('cd mod && nrnivmodl && cd -')
mech_loaded = neuron.load_mechanisms('mod')
print(f'mechanisms loaded: {mech_loaded}')
# misc parameters
dt = 2**-4 # time resolution (ms)
t_X = 500 # time of synaptic activations (ms)
tau = 50 # duration of impulse response function after onset (ms)
Vrest = -65 # assumed average postsynaptic potential (mV)
X=['E', 'I'] # presynaptic population names
N_X = np.array([8192, 1024]) # presynpatic population sizes
Y = 'E' # postsynaptic population
N_Y = 8192 # postsynaptic population size
C_YX = np.array([0.05, 0.05]) # pairwise connection probability between populations X and Y
nu_X = {'E': 2.5, 'I': 5.0} # assumed spike rates of each population (spikes/s)
g_eff = True # account for changes in passive leak due to persistent synaptic activations
def set_passive(cell, Vrest):
"""Insert passive leak channel across all sections
Parameters
----------
cell: object
LFPy.NetworkCell like object
Vrest: float
Steady state potential
"""
for sec in cell.template.all:
sec.insert('pas')
sec.g_pas = 0.0003 # (S/cm2)
sec.e_pas = Vrest # (mV)
# parameters for LFPy.NetworkCell representative of postsynaptic population
cellParameters={
'templatefile': 'BallAndSticksTemplate.hoc',
'templatename': 'BallAndSticksTemplate',
'custom_fun': [set_passive],
'custom_fun_args': [{'Vrest': Vrest}],
'templateargs': None,
'delete_sections': False,
'morphology': 'BallAndSticks_E.hoc'}
populationParameters={
'radius': 150.0, # population radius (µm)
'loc': 0.0, # average depth of cell bodies (µm)
'scale': 75.0} # standard deviation (µm)
# Predictor for extracellular potentials across depth assuming planar disk source
# elements convolved with Gaussian along z-axis.
# See https://lfpykernels.readthedocs.io/en/latest/#class-gausscylinderpotential for details
probe = GaussCylinderPotential(
cell=None,
z=np.linspace(1000., -200., 13), # depth of contacts (µm)
sigma=0.3, # tissue conductivity (S/m)
    R=populationParameters['radius'],  # cylinder radius set to the population radius (µm)
sigma_z=populationParameters['scale'],
)
# Create KernelApprox object. See https://lfpykernels.readthedocs.io/en/latest/#class-kernelapprox for details
kernel = KernelApprox(
X=X,
Y=Y,
N_X=N_X,
N_Y=N_Y,
C_YX=C_YX,
cellParameters=cellParameters,
populationParameters=populationParameters,
# function and parameters used to estimate average multapse count:
multapseFunction=st.truncnorm,
multapseParameters=[
{'a': (1 - 2.) / .6, 'b': (10 - 2.) / .6, 'loc': 2.0, 'scale': 0.6},
{'a': (1 - 5.) / 1.1, 'b': (10 - 5.) / 1.1, 'loc': 5.0, 'scale': 1.1}],
# function and parameters for delay distribution from connections between a
# population in X onto population Y:
delayFunction=st.truncnorm,
delayParameters=[{'a': -2.2, 'b': np.inf, 'loc': 1.3, 'scale': 0.5},
{'a': -1.5, 'b': np.inf, 'loc': 1.2, 'scale': 0.6}],
# parameters for synapses from connections by populations X onto Y
synapseParameters=[
{'weight': 0.00012, 'syntype': 'Exp2Syn', 'tau1': 0.2, 'tau2': 1.8, 'e': 0.0},
{'weight': 0.002, 'syntype': 'Exp2Syn', 'tau1': 0.1, 'tau2': 9.0, 'e': -80.0}],
# parameters for spatial synaptic connectivity by populations X onto Y
synapsePositionArguments=[
{'section': ['apic', 'dend'],
'fun': [st.norm],
'funargs': [{'loc': 50.0, 'scale': 100.0}],
'funweights': [1.0]},
{'section': ['soma', 'apic', 'dend'],
'fun': [st.norm],
'funargs': [{'loc': -100.0, 'scale': 100.0}],
'funweights': [1.0]}],
# parameters for extrinsic synaptic input
extSynapseParameters={'syntype': 'Exp2Syn', 'weight': 0.0002, 'tau1': 0.2, 'tau2': 1.8, 'e': 0.0},
nu_ext=40., # external activation rate (spikes/s)
n_ext=450, # number of extrinsic synapses
nu_X=nu_X,
)
# make kernel predictions for connection from populations X='I' onto Y='E'
H = kernel.get_kernel(
probes=[probe],
Vrest=Vrest, dt=dt, X='I', t_X=t_X, tau=tau,
g_eff=g_eff)
# display kernel
fig = plt.figure(figsize=(16, 10))
data = H['GaussCylinderPotential']
plt.imshow(data,
extent=[-tau, tau, probe.z.min()-50, probe.z.max()+50],
vmin=-abs(data).max() / 2, vmax=abs(data).max() / 2,
interpolation='nearest')
plt.axis('tight')
plt.xlabel(r'$\tau$ (ms)')
plt.ylabel('depth (µm)')
cb = plt.colorbar()
cb.set_label('mV')
```
| PypiClean |
/Collectors-1.0-rc1.tar.gz/Collectors-1.0-rc1/docs/_build/html/searchindex.js | Search.setIndex({desctypes:{"0":"method","1":"function","2":"class"},terms:{all:[1,3,5,6,7,8],col_func:7,get_object:[3,7,8,2],earrai:6,prefix:[6,7,8],create_seri:5,per:[0,3],follow:[3,4,6,2],row:1,whose:3,deviat:3,under:6,exit:3,introduc:3,liter:[4,5,8],everi:[5,7],string:[6,7],variable_nam:[],veri:[3,6,7],list:[6,5,7],iter:[0,1],objid_attrnam:8,core:[0,5,2],spam_group:[],second:1,plt:3,pass:[3,6,5,7,8],download:6,further:6,append:[5,7],even:7,index:[0,5,7,1],what:[0,6,7,1],section:[3,6,7,1],abl:7,invok:5,current:[6,5,7,1],version:[3,6],"new":[3,6,5,7,1],method:[3,6,5,7,1],contrast:7,themselv:5,gener:[3,7],here:[3,6,5,7],let:[3,5],address:[],becom:[7,8],sinc:[7,1],valu:[1,3,5,6,7,8],search:0,hdf5:[0,4,7,6],amount:[3,6],base:[5,1],doctest:8,via:5,dtype:3,modul:[0,4,5],submodul:6,api:[0,2],instal:6,plot:3,from:[1,2,3,4,6,7,8],describ:[5,7,1],would:7,memori:3,two:[3,5,7],next:[0,6,7,1],call:[6,5,7,1,8],usr:6,type:[6,7,1],tell:1,tightli:[],more:[6,7],desir:5,appropi:5,hold:[3,7],must:[6,5,7,8],fly:7,retriev:5,alia:5,work:[0,1,3,5,7,8],uniqu:[6,5,8],dev:6,kwarg:5,can:[1,2,3,4,5,6,7,8],def:[3,7,8],overrid:5,quickstart:[0,1],give:3,process:[0,1,3],sudo:6,indic:0,sourc:6,want:[6,7,8],serial:6,gcc:6,cours:7,multipl:[0,6,8,3],anoth:[3,7],ordinari:1,write:6,how:[0,1,3,5,6,7],"__init__":[3,7,8],pure:7,instead:6,simpl:[3,6,5,1,8],sim:3,pypi:6,after:[3,5],duplic:5,befor:6,mac:6,plane:[],mai:5,end:7,underscor:8,varianc:3,data:[3,4,5,6],demonstr:1,element:5,inform:6,allow:[6,7],callabl:7,first:1,order:[5,7],help:[3,5,7,8],over:[0,1],becaus:6,coll:3,nbsp:[4,8],basestorag:5,hierarchi:[],paramet:[5,7],group:6,monitor:[0,1,3,5,7,8],window:6,mystorag:6,might:[3,6,7],easier:1,them:[3,6,5,7],within:[6,7],"return":[6,5,7],thei:[6,5,7,8],python:[6,7,1],auto:6,initi:[5,7],band:3,simpi:[0,6,1,3],now:[3,6,1,8],discuss:3,spam1_b:8,spam1_a:8,name:[6,5,7,1,8],anyth:7,revers:6,easili:[3,6,1,2],mode:6,each:[1,3,5,6,7,8],found:6,mean:[3,6],compil:6,colleagu:[],realli:1,yield:3,xlrd:6,collector:[0,1,2,3,4,5,6,7,8],variabl:[6,5,7,1,8],ftp:6,content:0,max:6,print:[3,6,1],spamgroup:6,forth:7,statist:3,advanc:[0,4,2],given:5,myproc2:3,ask:5,org:6,precompil:6,wai:[3,7],"_pid":3,thing:7,opend:[],workbook:6,my_data:[],assign:5,lambda:[3,5,7,8],origin:[6,1],rang:[3,6,7,8],directli:[2,3,4,6,7,8],onc:3,arrai:[3,6],number:3,yourself:6,instruct:6,alreadi:6,done:[6,1],"long":[],spam0_b:8,spam0_a:8,massiv:3,get_b:7,differ:3,sheet:[4,6],licens:6,sometim:7,sheed:[],averag:3,similarli:[7,8],conveni:[4,5,2],"final":[3,6],store:[4,5,7,1,6],option:7,cope:[],namespac:2,specifi:5,exactli:7,std:3,musst:6,keyword:[6,5,7],provid:7,"_1_a":7,were:[5,7],posit:[],seri:[6,5,7,1],pre:[4,5,8],np_mon:3,ani:[3,1],col_index:[],packag:6,seed:3,have:[3,6,7,1,8],tabl:[0,6,1],need:[3,6,5,7,1],dedic:1,imagin:1,koala:6,squar:3,randint:3,self:[3,5,7,8],snow:6,note:[3,5],mix:7,add_sheet:6,take:7,which:[1,3,5,6,7,8],instanci:5,singl:[],begin:8,normal:[3,7],previou:3,chart:3,id_attr:8,most:7,letter:8,"class":[0,1,2,3,4,5,6,7,8],homogen:8,don:1,obj_id:8,doc:6,doe:1,pyplot:3,show:[3,6],random:3,access:[5,7,1],onli:[3,6,7],execut:[3,6,5],explain:7,configur:[6,5],activ:3,getattr:7,should:[3,6,7,8],factor:7,"__call__":[5,7],local:6,xlwt:6,variou:7,get:[1,2,3,5,6,7,8],col_nam:[],h5file:6,requir:5,bar:7,enabl:3,aptitud:6,szip:6,common:5,contain:[4,5],grab:5,where:5,valid:7,set:[5,7],see:6,result:[3,6,7,8],arg:5,close:6,detect:6,state:1,won:3,"import":[1,2,3,4,6,7,8],awai:7,attribut:[0,1,3,5,7,8],a
ltern:6,kei:7,matplotlib:3,distinguish:7,myproc:3,both:6,float64:3,"_0_a":7,"_0_b":7,instanc:[1,3,5,6,7,8],mani:7,col:1,among:6,simpli:1,foobar:[],ubuntu:6,diff:3,guid:[0,1],backend:[0,4,6,2],quit:6,coupl:[],numpi:[3,6],empti:5,much:7,basic:1,valueerror:5,argument:[6,5,7],togeth:6,func:[5,7,8],allwai:[],repetit:7,"case":[5,7],multi:3,defaultstorag:5,plain:7,defin:[5,7,1],calcul:[3,6,7],abov:7,pytabl:[0,4,7,6],observ:[6,5],loop:1,anytim:[],karmic:6,itself:[5,1],against:6,tediou:7,sever:[7,8],develop:[6,1],welcom:0,make:[6,7,1],belong:3,libhdf5:6,same:[1,3,5,6,7,8],"while":[3,5,1],handl:[6,1],document:0,my_sheet:[],complet:6,finish:3,nest:5,leopard:6,p1_a:3,rais:5,freeli:7,task:1,entri:1,thu:[3,1,2],well:[3,5,1],inherit:[5,1],exampl:[3,6,5,7,1],thi:[1,3,4,5,6,7,8],dimension:3,my_h5fil:[],identifi:7,just:[6,5,1],less:7,when:[6,7,1],speed:3,easi:[6,1],also:[3,6,5,7,1],shortcut:[0,1,2,5,7,8],add:6,other:[3,6,7],p9_a:3,save:[3,6,7,1],build:6,format:[5,7],read:[3,6],know:1,like:[6,5,7,1,8],specif:3,docutil:[4,5,8],manual:[2,3,5,6,7,8],integ:7,collect:[1,3,5,6,7,8],either:[3,5,7,1],specifii:6,page:0,openfil:6,some:[3,4,5,7],somehow:6,understood:3,"export":6,flush:[],tmp:6,subclass:5,pem:3,larg:6,foo:7,proc:3,refer:[0,2],peek:3,object:[0,1,5,6,7,8],run:3,usag:1,broken:6,step:3,src:6,obj:[6,7,8],zip:[6,1],column:1,worksheet:[],simul:3,pylist_append:[],constructor:[6,7],collector_func:7,own:[6,7,1],effici:[],"float":6,automat:[5,8],ellipsi:8,observem:6,storag:[0,2,4,5,6,7],your:[1,2,3,4,6,7],manag:[],span:[4,5,8],val:7,spam:[6,5,7,1,8],group_nam:[],bundl:6,fast:6,excelseri:[],avail:6,includ:7,"var":3,"function":[0,1,2,5,6,7,8],hdfgroup:6,tupl:[5,7,1,8],brand:3,link:6,p0_a:3,"true":[3,5,7,1],attr:7,"default":[6,5,7],troubl:3,until:3,similar:7,creat:[1,3,5,6,7,8],"int":6,dure:5,pid:3,exist:5,file:[4,6],pip:6,probabl:7,excel:[0,4,7,6],detail:[6,1],book:[],bool:6,you:[1,3,4,5,6,7,8],stat:3,why:[3,7],docstr:3,consid:[],faster:7,descript:7,depth:[0,7],time:[3,5,7,8],value_a:6,value_b:6},titles:["Welcome to Collectors\u2019 documentation!","Quickstart Guide","API-Reference","How to use <em>Collectors</em> with <em>SimPy</em>","<tt class=\"docutils literal\"><span class=\"pre\">collectors.storage</span></tt> \u2014 Advanced storage backends","<tt class=\"docutils literal\"><span class=\"pre\">collectors.core</span></tt> \u2014 Core classes","How to use the storage backends","The Collector in-depth","<tt class=\"docutils literal docutils literal\"><span class=\"pre\">collectors.shortcuts</span></tt> \u2014 Useful shortcut functions"],modules:{"collectors.storage":4,"collectors.core":5,collectors:2,"collectors.shortcuts":8},descrefs:{"collectors.core.BaseStorage":{create_series:[5,0],append:[5,0]},"collectors.core.Collector":{"__call__":[5,0],collect:[5,0]},"collectors.shortcuts":{manual:[8,1],get_objects:[8,1],get:[8,1]},"collectors.core":{BaseStorage:[5,2],DefaultStorage:[5,2],Collector:[5,2]},"collectors.core.DefaultStorage":{create_series:[5,0],append:[5,0]}},filenames:["index","quickstart","ref/index","simpy","ref/storage","ref/core","storages","collector","ref/shortcuts"]}) | PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/code_generation/templates/CodeTemplatesConstants.py | template_constants_reading = r"""
#include "nuitka/prelude.h"
#include "structseq.h"
#include "build_definitions.h"
// Global constants storage
PyObject *global_constants[%(global_constants_count)d];
// Sentinel PyObject to be used for all our call iterator endings. It will
// become a PyCObject pointing to NULL. Its address is unique, and that's
// enough for us to use it as a sentinel value.
PyObject *_sentinel_value = NULL;
PyObject *Nuitka_dunder_compiled_value = NULL;
#ifdef _NUITKA_STANDALONE
extern PyObject *getStandaloneSysExecutablePath(PyObject *basename);
#endif
extern void setDistributionsMetadata(PyObject *metadata_values);
// We provide the sys.version info shortcut as a global value here for ease of use.
PyObject *Py_SysVersionInfo = NULL;
static void _createGlobalConstants(PyThreadState *tstate) {
// We provide the sys.version info shortcut as a global value here for ease of use.
Py_SysVersionInfo = Nuitka_SysGetObject("version_info");
// The empty name means global.
loadConstantsBlob(tstate, &global_constants[0], "");
#if _NUITKA_EXE
/* Set the "sys.executable" path to the original CPython executable or point to inside the
distribution for standalone. */
Nuitka_SysSetObject(
"executable",
#ifndef _NUITKA_STANDALONE
%(sys_executable)s
#else
getStandaloneSysExecutablePath(%(sys_executable)s)
#endif
);
#ifndef _NUITKA_STANDALONE
/* Set the "sys.prefix" path to the original one. */
Nuitka_SysSetObject(
"prefix",
%(sys_prefix)s
);
/* Set the "sys.prefix" path to the original one. */
Nuitka_SysSetObject(
"exec_prefix",
%(sys_exec_prefix)s
);
#if PYTHON_VERSION >= 0x300
/* Set the "sys.base_prefix" path to the original one. */
Nuitka_SysSetObject(
"base_prefix",
%(sys_base_prefix)s
);
/* Set the "sys.exec_base_prefix" path to the original one. */
Nuitka_SysSetObject(
"base_exec_prefix",
%(sys_base_exec_prefix)s
);
#endif
#endif
#endif
static PyTypeObject Nuitka_VersionInfoType;
// Same fields as "sys.version_info" except no serial number.
static PyStructSequence_Field Nuitka_VersionInfoFields[] = {
{(char *)"major", (char *)"Major release number"},
{(char *)"minor", (char *)"Minor release number"},
{(char *)"micro", (char *)"Micro release number"},
{(char *)"releaselevel", (char *)"'alpha', 'beta', 'candidate', or 'release'"},
{(char *)"standalone", (char *)"boolean indicating standalone mode usage"},
{(char *)"onefile", (char *)"boolean indicating standalone mode usage"},
{(char *)"no_asserts", (char *)"boolean indicating --python-flag=no_asserts usage"},
{(char *)"no_docstrings", (char *)"boolean indicating --python-flag=no_docstrings usage"},
{(char *)"no_annotations", (char *)"boolean indicating --python-flag=no_annotations usage"},
{0}
};
static PyStructSequence_Desc Nuitka_VersionInfoDesc = {
(char *)"__nuitka_version__", /* name */
(char *)"__compiled__\\n\\nVersion information as a named tuple.", /* doc */
Nuitka_VersionInfoFields, /* fields */
9
};
PyStructSequence_InitType(&Nuitka_VersionInfoType, &Nuitka_VersionInfoDesc);
Nuitka_dunder_compiled_value = PyStructSequence_New(&Nuitka_VersionInfoType);
assert(Nuitka_dunder_compiled_value != NULL);
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 0, PyInt_FromLong(%(nuitka_version_major)s));
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 1, PyInt_FromLong(%(nuitka_version_minor)s));
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 2, PyInt_FromLong(%(nuitka_version_micro)s));
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 3, Nuitka_String_FromString("%(nuitka_version_level)s"));
#ifdef _NUITKA_STANDALONE
PyObject *is_standalone_mode = Py_True;
#else
PyObject *is_standalone_mode = Py_False;
#endif
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 4, is_standalone_mode);
#ifdef _NUITKA_ONEFILE_MODE
PyObject *is_onefile_mode = Py_True;
#else
PyObject *is_onefile_mode = Py_False;
#endif
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 5, is_onefile_mode);
#if _NUITKA_NO_ASSERTS == 1
PyObject *is_no_asserts = Py_True;
#else
PyObject *is_no_asserts = Py_False;
#endif
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 6, is_no_asserts);
#if _NUITKA_NO_DOCSTRINGS == 1
PyObject *is_no_docstrings = Py_True;
#else
PyObject *is_no_docstrings = Py_False;
#endif
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 7, is_no_docstrings);
#if _NUITKA_NO_ANNOTATIONS == 1
PyObject *is_no_annotations = Py_True;
#else
PyObject *is_no_annotations = Py_False;
#endif
PyStructSequence_SET_ITEM(Nuitka_dunder_compiled_value, 8, is_no_annotations);
// Prevent users from creating the Nuitka version type object.
Nuitka_VersionInfoType.tp_init = NULL;
Nuitka_VersionInfoType.tp_new = NULL;
setDistributionsMetadata(%(metadata_values)s);
}
// In debug mode we can check that the constants were not tampered with in any
// given moment. We typically do it at program exit, but we can add extra calls
// for sanity.
#ifndef __NUITKA_NO_ASSERT__
void checkGlobalConstants(void) {
// TODO: Ask constant code to check values.
}
#endif
void createGlobalConstants(PyThreadState *tstate) {
if (_sentinel_value == NULL) {
#if PYTHON_VERSION < 0x300
_sentinel_value = PyCObject_FromVoidPtr(NULL, NULL);
#else
// The NULL value is not allowed for a capsule, so use something else.
_sentinel_value = PyCapsule_New((void *)27, "sentinel", NULL);
#endif
assert(_sentinel_value);
_createGlobalConstants(tstate);
}
}
"""
from . import TemplateDebugWrapper # isort:skip
TemplateDebugWrapper.checkDebug(globals()) | PypiClean |
/MozPhab-1.4.3-py3-none-any.whl/mozphab/diff.py |
import operator
import re
import concurrent.futures
from typing import (
Any,
Dict,
)
from .conduit import conduit
class Diff:
"""Representation of the Diff used to submit to the Phabricator."""
class Hunk:
def __init__(self, *, old_off, old_len, new_off, new_len, lines):
"""
Hunk object, encapsulates hunk metadata and diff lines.
For the following hunk:
@@ -23,6 +23,7 @@ jobs:
- run:
name: install dependencies
command: |
+ set -e
# install modern hg
sudo pip3 install --disable-pip-version-check mercurial hg-evolve
# configure hg
:param int old_off: old offset (eg. -23)
:param int old_len: old length/line count (eg. 6)
:param int new_off: new offset (eg. +23)
:param int new_len: new length (eg. 7)
:param list[str] lines: list of diff lines, starting with "+", "-", or " ",
including the trailing "\n". (eg. the 7 lines following the @@ line)
"""
self.old_off = old_off
self.old_len = old_len
self.new_off = new_off
self.new_len = new_len
self.corpus = "".join(lines)
self.old_eof_newline = True
self.new_eof_newline = True
self.added = 0
self.deleted = 0
prev_line = " "
for line in lines:
if line[0] == "+":
self.added += 1
elif line[0] == "-":
self.deleted += 1
if line.endswith("No newline at end of file\n"):
if prev_line[0] != "+":
self.old_eof_newline = False
if prev_line[0] != "-":
self.new_eof_newline = False
prev_line = line
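# Hedged illustration (not part of MozPhab; the literal lines below are assumed
# sample data, not output from a real diff run): building a Hunk directly from
# pre-split diff lines, mirroring the docstring example above.
#
#   lines = ["     - run:\n",
#            "         name: install dependencies\n",
#            "+        set -e\n"]
#   hunk = Diff.Hunk(old_off=23, old_len=2, new_off=23, new_len=3, lines=lines)
#   hunk.added, hunk.deleted   # -> (1, 0)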
class Change:
def __init__(self, path):
self.old_mode = None
self.cur_mode = None
self.old_path = None
self.cur_path = path
self.away_paths = []
self.kind = Diff.Kind("CHANGE")
self.binary = False
self.file_type = Diff.FileType("TEXT")
self.uploads = []
self.hunks = []
@property
def added(self):
return sum(hunk.added for hunk in self.hunks)
@property
def deleted(self):
return sum(hunk.deleted for hunk in self.hunks)
def from_git_diff(self, git_diff):
"""Generate hunks from the provided git_diff output."""
# Process each hunk
hunk = None
in_header = True
for line in git_diff.splitlines(keepends=True):
# Skip lines before the start of the first hunk header
if in_header:
if not line.startswith("@@"):
continue
in_header = False
# Start of hunk
if line.startswith("@@"):
# Store previously collected hunk
if hunk and hunk["lines"]:
self.hunks.append(Diff.Hunk(**hunk))
# Start a new collection
(old_off, new_off, old_len, new_len) = Diff.parse_git_diff(line)
hunk = dict(
old_off=old_off,
new_off=new_off,
old_len=old_len,
new_len=new_len,
lines=[],
)
else:
hunk["lines"].append(line)
if hunk and hunk["lines"]:
self.hunks.append(Diff.Hunk(**hunk))
def set_as_binary(self, *, a_body, a_mime, b_body, b_mime):
"""Updates Change contents to the provided binary data."""
self.binary = True
self.uploads = [
{"type": "old", "value": a_body, "mime": a_mime, "phid": None},
{"type": "new", "value": b_body, "mime": b_mime, "phid": None},
]
if a_mime.startswith("image/") or b_mime.startswith("image/"):
self.file_type = Diff.FileType("IMAGE")
else:
self.file_type = Diff.FileType("BINARY")
def to_conduit(self, node: str) -> Dict[str, Any]:
# Record upload information
metadata = {}
for upload in self.uploads:
metadata["%s:binary-phid" % upload["type"]] = upload["phid"]
metadata["%s:file:size" % upload["type"]] = len(upload["value"])
metadata["%s:file:mime-type" % upload["type"]] = upload["mime"]
# Translate hunks
hunks = [
{
"oldOffset": hunk.old_off,
"oldLength": hunk.old_len,
"newOffset": hunk.new_off,
"newLength": hunk.new_len,
"addLines": hunk.added,
"delLines": hunk.deleted,
"isMissingOldNewline": not hunk.old_eof_newline,
"isMissingNewNewline": not hunk.new_eof_newline,
"corpus": hunk.corpus,
}
for hunk in self.hunks
]
old_props = {"unix:filemode": self.old_mode} if self.old_mode else {}
cur_props = {"unix:filemode": self.cur_mode} if self.cur_mode else {}
return {
"metadata": metadata,
"oldPath": self.old_path,
"currentPath": self.cur_path,
"awayPaths": self.away_paths,
"oldProperties": old_props,
"newProperties": cur_props,
"commitHash": node,
"type": self.kind.value,
"fileType": self.file_type.value,
"hunks": hunks,
}
class Kind:
values = dict(
ADD=1,
CHANGE=2,
DELETE=3,
MOVE_AWAY=4,
COPY_AWAY=5,
MOVE_HERE=6,
COPY_HERE=7,
MULTICOPY=8,
)
def __init__(self, name):
self.value = self.values[name]
self.name = name
def short(self):
if self.name == "ADD":
return "A "
elif self.name == "CHANGE":
return "M "
elif self.name == "DELETE":
return "D "
elif self.name == "MOVE_AWAY":
return "R>"
elif self.name == "MOVE_HERE":
return ">R"
elif self.name == "COPY_AWAY":
return "C>"
elif self.name == "COPY_HERE":
return ">C"
elif self.name == "MULTICOPY":
return "C*"
class FileType:
values = dict(
TEXT=1,
IMAGE=2,
BINARY=3,
DIRECTORY=4, # Should never show up...
SYMLINK=5, # Support symlinks (do we care?)
DELETED=6,
NORMAL=7,
)
def __init__(self, name):
self.value = self.values[name]
self.name = name
def __init__(self):
self.changes = {}
self.phid = None
self.id = None
def change_for(self, path):
if path not in self.changes:
self.changes[path] = self.Change(path)
return self.changes[path]
def set_change_kind(self, change, kind, a_mode, b_mode, a_path, _b_path):
"""Determine the correct kind from the letter."""
if kind == "A":
change.kind = self.Kind("ADD")
change.cur_mode = b_mode
elif kind == "D":
change.kind = self.Kind("DELETE")
change.old_mode = a_mode
change.old_path = a_path
elif kind == "M":
change.kind = self.Kind("CHANGE")
if a_mode != b_mode:
change.old_mode = a_mode
change.cur_mode = b_mode
change.old_path = a_path
assert change.old_path == change.cur_path
elif kind == "R":
change.kind = self.Kind("MOVE_HERE")
if a_mode != b_mode:
change.old_mode = a_mode
change.cur_mode = b_mode
change.old_path = a_path
old = self.change_for(change.old_path)
if old.kind.name in {"MOVE_AWAY", "COPY_AWAY"}:
old.kind = self.Kind("MULTICOPY")
elif old.kind.name != "MULTICOPY":
old.kind = self.Kind("MOVE_AWAY")
old.away_paths.append(change.cur_path)
elif kind == "C":
change.kind = self.Kind("COPY_HERE")
if a_mode != b_mode:
change.old_mode = a_mode
change.cur_mode = b_mode
change.old_path = a_path
old = self.change_for(change.old_path)
if old.kind.name != "MULTICOPY":
old.kind = self.Kind("COPY_AWAY")
old.away_paths.append(change.cur_path)
else:
raise Exception(f"unsupported change type {kind} for {a_path}")
def _upload_file(self, change, upload):
path = change.cur_path if upload["type"] == "new" else change.old_path
upload["phid"] = conduit.file_upload(path, upload["value"])
def upload_files(self):
futures = []
# files are uploaded in parallel, using a pool of threads.
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
for change in list(self.changes.values()):
for upload in change.uploads:
futures.append(executor.submit(self._upload_file, change, upload))
# wait for all uploads to be finished
concurrent.futures.wait(futures)
# check that all went well. If not, propagate the first error here
# by calling the future's result() method
for upload in futures:
upload.result()
def submit(self, commit, message):
files_changed = sorted(
self.changes.values(), key=operator.attrgetter("cur_path")
)
changes = [
change.to_conduit(conduit.repo.get_public_node(commit["node"]))
for change in files_changed
]
diff = conduit.create_diff(
changes, conduit.repo.get_public_node(commit["parent"])
)
self.phid = diff["phid"]
self.id = diff["diffid"]
self.set_property(commit, message)
return diff["phid"]
def set_property(self, commit, message):
"""Add information about our local commit to the patch."""
conduit.set_diff_property(self.id, commit, message)
@staticmethod
def parse_git_diff(hdr):
m = re.match(
r"@@ -(?P<old_off>\d+)(?:,(?P<old_len>\d+))? "
r"\+(?P<new_off>\d+)(?:,(?P<new_len>\d+))? @@",
hdr,
)
old_off = int(m.group("old_off"))
old_len = int(m.group("old_len") or 1)
new_off = int(m.group("new_off"))
new_len = int(m.group("new_len") or 1)
return old_off, new_off, old_len, new_len | PypiClean |
/Flask-AppBuilder-redirect-2.1.13.tar.gz/Flask-AppBuilder-redirect-2.1.13/flask_appbuilder/urltools.py | import re
from flask import request
class Stack(object):
"""
Stack data structure will not insert
equal sequential data
"""
def __init__(self, list=None, size=5):
self.size = size
self.data = list or []
def push(self, item):
if self.data:
if item != self.data[len(self.data) - 1]:
self.data.append(item)
else:
self.data.append(item)
if len(self.data) > self.size:
self.data.pop(0)
def pop(self):
if len(self.data) == 0:
return None
return self.data.pop(len(self.data) - 1)
def to_json(self):
return self.data
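# Hedged usage sketch (added for illustration; the pushed values are assumed):
# the Stack keeps at most `size` items and skips consecutive duplicates.
#
#   s = Stack(size=3)
#   for item in ('a', 'a', 'b', 'c', 'd'):
#       s.push(item)
#   s.to_json()  # -> ['b', 'c', 'd']  (duplicate 'a' skipped, oldest item dropped)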
def get_group_by_args():
"""
Get page arguments for group by
"""
group_by = request.args.get("group_by")
if not group_by:
group_by = ""
return group_by
def get_page_args():
"""
Get page arguments, returns a dictionary
{ <VIEW_NAME>: PAGE_NUMBER }
Arguments are passed: page_<VIEW_NAME>=<PAGE_NUMBER>
"""
pages = {}
for arg in request.args:
re_match = re.findall("page_(.*)", arg)
if re_match:
pages[re_match[0]] = int(request.args.get(arg))
return pages
def get_page_size_args():
"""
Get page size arguments, returns a dictionary
{ <VIEW_NAME>: PAGE_SIZE }
Arguments are passed: psize_<VIEW_NAME>=<PAGE_SIZE>
"""
page_sizes = {}
for arg in request.args:
re_match = re.findall("psize_(.*)", arg)
if re_match:
page_sizes[re_match[0]] = int(request.args.get(arg))
return page_sizes
def get_order_args():
"""
Get order arguments, return a dictionary
{ <VIEW_NAME>: (ORDER_COL, ORDER_DIRECTION) }
Arguments are passed like: _oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
"""
orders = {}
for arg in request.args:
re_match = re.findall("_oc_(.*)", arg)
if re_match:
order_direction = request.args.get("_od_" + re_match[0])
if order_direction in ("asc", "desc"):
orders[re_match[0]] = (request.args.get(arg), order_direction)
return orders
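# Illustrative sketch (view and column names are assumed examples, not taken from
# the library docs): for a request such as
# /users/list?_oc_UserView=name&_od_UserView=asc&page_UserView=2
# the helpers above would return roughly:
#
#   get_order_args()  # -> {'UserView': ('name', 'asc')}
#   get_page_args()   # -> {'UserView': 2}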
def get_filter_args(filters):
filters.clear_filters()
for arg in request.args:
re_match = re.findall("_flt_(\d)_(.*)", arg)
if re_match:
filters.add_filter_index(
re_match[0][1], int(re_match[0][0]), request.args.get(arg)
) | PypiClean |
/NodeGraphQt_QuiltiX_fork-0.6.0.tar.gz/NodeGraphQt_QuiltiX_fork-0.6.0/NodeGraphQt/nodes/base_node.py | from collections import OrderedDict
from NodeGraphQt.base.commands import NodeVisibleCmd
from NodeGraphQt.base.node import NodeObject
from NodeGraphQt.base.port import Port
from NodeGraphQt.constants import NodePropWidgetEnum, PortTypeEnum
from NodeGraphQt.errors import (PortError,
PortRegistrationError,
NodeWidgetError)
from NodeGraphQt.qgraphics.node_base import NodeItem
from NodeGraphQt.widgets.node_widgets import (NodeBaseWidget,
NodeComboBox,
NodeLineEdit,
NodeCheckBox)
class BaseNode(NodeObject):
"""
The ``NodeGraphQt.BaseNode`` class is the base class for nodes that allows
port connections from one node to another.
**Inherited from:** :class:`NodeGraphQt.NodeObject`
.. image:: ../_images/node.png
:width: 250px
example snippet:
.. code-block:: python
:linenos:
from NodeGraphQt import BaseNode
class ExampleNode(BaseNode):
# unique node identifier domain.
__identifier__ = 'io.jchanvfx.github'
# initial default node name.
NODE_NAME = 'My Node'
def __init__(self):
super(ExampleNode, self).__init__()
# create an input port.
self.add_input('in')
# create an output port.
self.add_output('out')
"""
NODE_NAME = 'Node'
def __init__(self, qgraphics_item=None):
super(BaseNode, self).__init__(qgraphics_item or NodeItem)
self._inputs = []
self._outputs = []
def update_model(self):
"""
Update the node model from view.
"""
for name, val in self.view.properties.items():
if name in ['inputs', 'outputs']:
continue
self.model.set_property(name, val)
for name, widget in self.view.widgets.items():
self.model.set_property(name, widget.get_value())
def set_property(self, name, value, push_undo=True):
"""
Set the value on the node custom property.
Args:
name (str): name of the property.
value (object): property data (python built in types).
push_undo (bool): register the command to the undo stack. (default: True)
"""
# prevent signals from causing an infinite loop.
if self.get_property(name) == value:
return
if name == 'visible':
if self.graph:
undo_cmd = NodeVisibleCmd(self, value)
if push_undo:
undo_stack = self.graph.undo_stack()
undo_stack.push(undo_cmd)
else:
undo_cmd.redo()
return
super(BaseNode, self).set_property(name, value, push_undo)
def set_layout_direction(self, value=0):
"""
Sets the node layout direction to either horizontal or vertical on
the current node only.
`Implemented in` ``v0.3.0``
See Also:
:meth:`NodeGraph.set_layout_direction`,
:meth:`NodeObject.layout_direction`
Warnings:
This function does not register to the undo stack.
Args:
value (int): layout direction mode.
"""
# base logic to update the model and view attributes only.
super(BaseNode, self).set_layout_direction(value)
# redraw the node.
self._view.draw_node()
def set_icon(self, icon=None):
"""
Set the node icon.
Args:
icon (str): path to the icon image.
"""
self.set_property('icon', icon)
def icon(self):
"""
Node icon path.
Returns:
str: icon image file path.
"""
return self.model.icon
def widgets(self):
"""
Returns all embedded widgets from this node.
See Also:
:meth:`BaseNode.get_widget`
Returns:
dict: embedded node widgets. {``property_name``: ``node_widget``}
"""
return self.view.widgets
def get_widget(self, name):
"""
Returns the embedded widget associated with the property name.
See Also:
:meth:`BaseNode.add_combo_menu`,
:meth:`BaseNode.add_text_input`,
:meth:`BaseNode.add_checkbox`,
Args:
name (str): node property name.
Returns:
NodeBaseWidget: embedded node widget.
"""
return self.view.widgets.get(name)
def add_custom_widget(self, widget, widget_type=None, tab=None):
"""
Add a custom node widget into the node.
see example :ref:`Embedding Custom Widgets`.
Note:
The ``value_changed`` signal from the added node widget is wired
up to the :meth:`NodeObject.set_property` function.
Args:
widget (NodeBaseWidget): node widget class object.
widget_type: widget flag to display in the
:class:`NodeGraphQt.PropertiesBinWidget`
(default: :attr:`NodeGraphQt.constants.NodePropWidgetEnum.HIDDEN`).
tab (str): name of the widget tab to display in.
"""
if not isinstance(widget, NodeBaseWidget):
raise NodeWidgetError(
'\'widget\' must be an instance of a NodeBaseWidget')
widget_type = widget_type or NodePropWidgetEnum.HIDDEN.value
self.create_property(widget.get_name(),
widget.get_value(),
widget_type=widget_type,
tab=tab)
widget.value_changed.connect(lambda k, v: self.set_property(k, v))
widget._node = self
self.view.add_widget(widget)
#: redraw node to address calls outside the "__init__" func.
self.view.draw_node()
def add_combo_menu(self, name, label='', items=None, tab=None):
"""
Creates a custom property with the :meth:`NodeObject.create_property`
function and embeds a :class:`PySide2.QtWidgets.QComboBox` widget
into the node.
Note:
The ``value_changed`` signal from the added node widget is wired
up to the :meth:`NodeObject.set_property` function.
Args:
name (str): name for the custom property.
label (str): label to be displayed.
items (list[str]): items to be added into the menu.
tab (str): name of the widget tab to display in.
"""
self.create_property(
name,
value=items[0] if items else None,
items=items or [],
widget_type=NodePropWidgetEnum.QCOMBO_BOX.value,
tab=tab
)
widget = NodeComboBox(self.view, name, label, items)
widget.value_changed.connect(lambda k, v: self.set_property(k, v))
self.view.add_widget(widget)
#: redraw node to address calls outside the "__init__" func.
self.view.draw_node()
def add_text_input(self, name, label='', text='', tab=None):
"""
Creates a custom property with the :meth:`NodeObject.create_property`
function and embeds a :class:`PySide2.QtWidgets.QLineEdit` widget
into the node.
Note:
The ``value_changed`` signal from the added node widget is wired
up to the :meth:`NodeObject.set_property` function.
Args:
name (str): name for the custom property.
label (str): label to be displayed.
text (str): pre filled text.
tab (str): name of the widget tab to display in.
"""
self.create_property(
name,
value=text,
widget_type=NodePropWidgetEnum.QLINE_EDIT.value,
tab=tab
)
widget = NodeLineEdit(self.view, name, label, text)
widget.value_changed.connect(lambda k, v: self.set_property(k, v))
self.view.add_widget(widget)
#: redraw node to address calls outside the "__init__" func.
self.view.draw_node()
def add_checkbox(self, name, label='', text='', state=False, tab=None):
"""
Creates a custom property with the :meth:`NodeObject.create_property`
function and embeds a :class:`PySide2.QtWidgets.QCheckBox` widget
into the node.
Note:
The ``value_changed`` signal from the added node widget is wired
up to the :meth:`NodeObject.set_property` function.
Args:
name (str): name for the custom property.
label (str): label to be displayed.
text (str): checkbox text.
state (bool): pre-check.
tab (str): name of the widget tab to display in.
"""
self.create_property(
name,
value=state,
widget_type=NodePropWidgetEnum.QCHECK_BOX.value,
tab=tab
)
widget = NodeCheckBox(self.view, name, label, text, state)
widget.value_changed.connect(lambda k, v: self.set_property(k, v))
self.view.add_widget(widget)
#: redraw node to address calls outside the "__init__" func.
self.view.draw_node()
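# Hedged example (illustration only; the node and property names are assumed):
# the widget helpers above are typically called from a BaseNode subclass, e.g.
#
#   class MyNode(BaseNode):
#       def __init__(self):
#           super(MyNode, self).__init__()
#           self.add_combo_menu('mode', label='Mode', items=['a', 'b'])
#           self.add_text_input('label', label='Label', text='hello')
#           self.add_checkbox('enabled', label='Enabled', state=True)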
def add_input(self, name='input', multi_input=False, display_name=True,
color=None, locked=False, painter_func=None):
"""
Add input :class:`Port` to node.
Warnings:
Undo is NOT supported for this function.
Args:
name (str): name for the input port.
multi_input (bool): allow port to have more than one connection.
display_name (bool): display the port name on the node.
color (tuple): initial port color (r, g, b) ``0-255``.
locked (bool): locked state see :meth:`Port.set_locked`
painter_func (function or None): custom function to override the drawing
of the port shape see example: :ref:`Creating Custom Shapes`
Returns:
NodeGraphQt.Port: the created port object.
"""
if name in self.inputs().keys():
raise PortRegistrationError(
'port name "{}" already registered.'.format(name))
port_args = [name, multi_input, display_name, locked]
if painter_func and callable(painter_func):
port_args.append(painter_func)
view = self.view.add_input(*port_args)
if color:
view.color = color
view.border_color = [min([255, max([0, i + 80])]) for i in color]
port = Port(self, view)
port.model.type_ = PortTypeEnum.IN.value
port.model.name = name
port.model.display_name = display_name
port.model.multi_connection = multi_input
port.model.locked = locked
self._inputs.append(port)
self.model.inputs[port.name()] = port.model
return port
def add_output(self, name='output', multi_output=True, display_name=True,
color=None, locked=False, painter_func=None):
"""
Add output :class:`Port` to node.
Warnings:
Undo is NOT supported for this function.
Args:
name (str): name for the output port.
multi_output (bool): allow port to have more than one connection.
display_name (bool): display the port name on the node.
color (tuple): initial port color (r, g, b) ``0-255``.
locked (bool): locked state see :meth:`Port.set_locked`
painter_func (function or None): custom function to override the drawing
of the port shape see example: :ref:`Creating Custom Shapes`
Returns:
NodeGraphQt.Port: the created port object.
"""
if name in self.outputs().keys():
raise PortRegistrationError(
'port name "{}" already registered.'.format(name))
port_args = [name, multi_output, display_name, locked]
if painter_func and callable(painter_func):
port_args.append(painter_func)
view = self.view.add_output(*port_args)
if color:
view.color = color
view.border_color = [min([255, max([0, i + 80])]) for i in color]
port = Port(self, view)
port.model.type_ = PortTypeEnum.OUT.value
port.model.name = name
port.model.display_name = display_name
port.model.multi_connection = multi_output
port.model.locked = locked
self._outputs.append(port)
self.model.outputs[port.name()] = port.model
return port
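# Hedged sketch (names are assumed, not taken from the library docs): ports are
# usually declared in a subclass __init__ and wired together via Port objects, e.g.
#
#   node_a.add_output('out')
#   node_b.add_input('in', multi_input=False)
#   node_a.get_output('out').connect_to(node_b.get_input('in'))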
def get_input(self, port):
"""
Get input port by the name or index.
Args:
port (str or int): port name or index.
Returns:
NodeGraphQt.Port: node port.
"""
if type(port) is int:
if port < len(self._inputs):
return self._inputs[port]
elif type(port) is str:
return self.inputs().get(port, None)
def get_output(self, port):
"""
Get output port by the name or index.
Args:
port (str or int): port name or index.
Returns:
NodeGraphQt.Port: node port.
"""
if type(port) is int:
if port < len(self._outputs):
return self._outputs[port]
elif type(port) is str:
return self.outputs().get(port, None)
def delete_input(self, port):
"""
Delete input port.
Warnings:
Undo is NOT supported for this function.
You can only delete ports if :meth:`BaseNode.port_deletion_allowed`
returns ``True``, otherwise a port error is raised; see also
:meth:`BaseNode.set_port_deletion_allowed`.
Args:
port (str or int): port name or index.
"""
if type(port) in [int, str]:
port = self.get_input(port)
if port is None:
return
if not self.port_deletion_allowed():
raise PortError(
'Port "{}" can\'t be deleted on this node because '
'"ports_removable" is not enabled.'.format(port.name()))
if port.locked():
raise PortError('Error: Can\'t delete a port that is locked!')
self._inputs.remove(port)
self._model.inputs.pop(port.name())
self._view.delete_input(port.view)
port.model.node = None
self._view.draw_node()
def delete_output(self, port):
"""
Delete output port.
Warnings:
Undo is NOT supported for this function.
You can only delete ports if :meth:`BaseNode.port_deletion_allowed`
returns ``True``, otherwise a port error is raised; see also
:meth:`BaseNode.set_port_deletion_allowed`.
Args:
port (str or int): port name or index.
"""
if type(port) in [int, str]:
port = self.get_output(port)
if port is None:
return
if not self.port_deletion_allowed():
raise PortError(
'Port "{}" can\'t be deleted on this node because '
'"ports_removable" is not enabled.'.format(port.name()))
if port.locked():
raise PortError('Error: Can\'t delete a port that is locked!')
self._outputs.remove(port)
self._model.outputs.pop(port.name())
self._view.delete_output(port.view)
port.model.node = None
self._view.draw_node()
def set_port_deletion_allowed(self, mode=False):
"""
Allow ports to be removable on this node.
See Also:
:meth:`BaseNode.port_deletion_allowed` and
:meth:`BaseNode.set_ports`
Args:
mode (bool): true to allow.
"""
self.model.port_deletion_allowed = mode
def port_deletion_allowed(self):
"""
Return true if ports can be deleted on this node.
See Also:
:meth:`BaseNode.set_port_deletion_allowed`
Returns:
bool: true if ports can be deleted.
"""
return self.model.port_deletion_allowed
def set_ports(self, port_data):
"""
Create node input and output ports from serialized port data.
Warnings:
You can only use this function if the node has
:meth:`BaseNode.port_deletion_allowed` is `True`
see :meth:`BaseNode.set_port_deletion_allowed`
Hint:
example snippet of port data.
.. highlight:: python
.. code-block:: python
{
'input_ports':
[{
'name': 'input',
'multi_connection': True,
'display_name': 'Input',
'locked': False
}],
'output_ports':
[{
'name': 'output',
'multi_connection': True,
'display_name': 'Output',
'locked': False
}]
}
Args:
port_data(dict): port data.
"""
if not self.port_deletion_allowed():
raise PortError(
'Ports cannot be set on this node because '
'"set_port_deletion_allowed" is not enabled on this node.')
for port in self._inputs:
self._view.delete_input(port.view)
port.model.node = None
for port in self._outputs:
self._view.delete_output(port.view)
port.model.node = None
self._inputs = []
self._outputs = []
self._model.outputs = {}
self._model.inputs = {}
[self.add_input(name=port['name'],
multi_input=port['multi_connection'],
display_name=port['display_name'],
locked=port.get('locked') or False)
for port in port_data['input_ports']]
[self.add_output(name=port['name'],
multi_output=port['multi_connection'],
display_name=port['display_name'],
locked=port.get('locked') or False)
for port in port_data['output_ports']]
self._view.draw_node()
def inputs(self):
"""
Returns all the input ports from the node.
Returns:
dict: {<port_name>: <port_object>}
"""
return {p.name(): p for p in self._inputs}
def input_ports(self):
"""
Return all input ports.
Returns:
list[NodeGraphQt.Port]: node input ports.
"""
return self._inputs
def outputs(self):
"""
Returns all the output ports from the node.
Returns:
dict: {<port_name>: <port_object>}
"""
return {p.name(): p for p in self._outputs}
def output_ports(self):
"""
Return all output ports.
Returns:
list[NodeGraphQt.Port]: node output ports.
"""
return self._outputs
def input(self, index):
"""
Return the input port with the matching index.
Args:
index (int): index of the input port.
Returns:
NodeGraphQt.Port: port object.
"""
return self._inputs[index]
def set_input(self, index, port):
"""
Creates a connection pipe to the targeted output :class:`Port`.
Args:
index (int): index of the port.
port (NodeGraphQt.Port): port object.
"""
src_port = self.input(index)
src_port.connect_to(port)
def output(self, index):
"""
Return the output port with the matching index.
Args:
index (int): index of the output port.
Returns:
NodeGraphQt.Port: port object.
"""
return self._outputs[index]
def set_output(self, index, port):
"""
Creates a connection pipe to the targeted input :class:`Port`.
Args:
index (int): index of the port.
port (NodeGraphQt.Port): port object.
"""
src_port = self.output(index)
src_port.connect_to(port)
def connected_input_nodes(self):
"""
Returns all nodes connected from the input ports.
Returns:
dict: {<input_port>: <node_list>}
"""
nodes = OrderedDict()
for p in self.input_ports():
nodes[p] = [cp.node() for cp in p.connected_ports()]
return nodes
def connected_output_nodes(self):
"""
Returns all nodes connected from the output ports.
Returns:
dict: {<output_port>: <node_list>}
"""
nodes = OrderedDict()
for p in self.output_ports():
nodes[p] = [cp.node() for cp in p.connected_ports()]
return nodes
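# Illustrative sketch (assumed usage; `node` is any BaseNode instance from the
# current graph): walking downstream connections with the helper above.
#
#   for port, nodes in node.connected_output_nodes().items():
#       print(port.name(), [n.name() for n in nodes])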
def on_input_connected(self, in_port, out_port):
"""
Callback triggered when a new pipe connection is made.
*The default of this function does nothing re-implement if you require
logic to run for this event.*
Note:
to work with undo & redo for this method re-implement
:meth:`BaseNode.on_input_disconnected` with the reverse logic.
Args:
in_port (NodeGraphQt.Port): source input port from this node.
out_port (NodeGraphQt.Port): output port that connected to this node.
"""
return
def on_input_disconnected(self, in_port, out_port):
"""
Callback triggered when a pipe connection has been disconnected
from a INPUT port.
*The default of this function does nothing re-implement if you require
logic to run for this event.*
Note:
to work with undo & redo for this method re-implement
:meth:`BaseNode.on_input_connected` with the reverse logic.
Args:
in_port (NodeGraphQt.Port): source input port from this node.
out_port (NodeGraphQt.Port): output port that was disconnected.
"""
return
def on_output_connected(self, out_port, in_port):
"""
Callback triggered when a new pipe connection is made.
*The default of this function does nothing re-implement if you require
logic to run for this event.*
Note:
to work with undo & redo for this method re-implement
:meth:`BaseNode.on_output_disconnected` with the reverse logic.
"""
return
def on_output_disconnected(self, out_port, in_port):
"""
Callback triggered when a pipe connection has been disconnected
from a OUTPUT port.
*The default of this function does nothing re-implement if you require
logic to run for this event.*
Note:
to work with undo & redo for this method re-implement
:meth:`BaseNode.on_output_connected` with the reverse logic.
"""
return | PypiClean |
/FACe_lib-0.3.0.tar.gz/FACe_lib-0.3.0/face/codes/status.py | from marshmallow import ValidationError
"""
FACe available status
"""
def validator(code):
"""
Status code validator
"""
if code is None:
raise ValidationError("Code can't be empty")
if str(code) not in STATUS_CODES:
raise ValidationError("Code '{}' is unknown".format(code))
STATUS_CODES = {
# "Tramitación" status
"1200": {
"nombre": "Registrada",
"description": "La factura ha sido registrada en el registro electrónico REC",
"error": False,
"scope": "tramitacion",
},
"1300": {
"nombre": "Registrada en RCF",
"description": "La factura ha sido registrada en el RCF",
"error": False,
"scope": "tramitacion",
},
"2400": {
"nombre": "Contabilizada la obligación de pago",
"description": "La factura ha sido reconocida con obligación de pago",
"error": False,
"scope": "tramitacion",
},
"2500": {
"nombre": "Pagada",
"description": "Factura pagada",
"error": False,
"scope": "tramitacion",
},
"2600": {
"nombre": "Rechazada",
"description": "La Unidad rechaza la factura",
"error": True,
"scope": "tramitacion",
},
"3100": {
"nombre": "Anulada",
"description": "La Unidad aprueba la propuesta de anulación",
"error": False,
"scope": "tramitacion",
},
# Anulation status
"4100": {
"nombre": "No solicitada anulación",
"description": "No solicitada anulación",
"error": False,
"scope": "tramitacion",
},
"4200": {
"nombre": "Solicitada anulación",
"description": "Solicitada anulación",
"error": False,
"scope": "tramitacion",
},
"4300": {
"nombre": "Aceptada anulación",
"description": "Aceptada anulación",
"error": False,
"scope": "tramitacion",
},
"4400": {
"nombre": "Solicitud de anulación",
"description": "Rechazada anulación",
"error": True,
"scope": "tramitacion",
},
} | PypiClean |
/HPI-0.3.20230327.tar.gz/HPI-0.3.20230327/my/jawbone/plots.py | from pathlib import Path
# from kython.plotting import *
from csv import DictReader
from itertools import islice
from typing import Dict, Any, NamedTuple
# sleep = []
# with open('2017.csv', 'r') as fo:
# reader = DictReader(fo)
# for line in islice(reader, 0, 10):
# sleep
# print(line)
import matplotlib.pyplot as plt # type: ignore
from numpy import genfromtxt # type: ignore
import matplotlib.pylab as pylab # type: ignore
pylab.rcParams['figure.figsize'] = (32.0, 24.0)
pylab.rcParams['font.size'] = 10
jawboneDataFeatures = Path(__file__).parent / 'features.csv' # Data File Path
featureDesc: Dict[str, str] = {}
for x in genfromtxt(jawboneDataFeatures, dtype='unicode', delimiter=','):
featureDesc[x[0]] = x[1]
def _safe_float(s: str):
if len(s) == 0:
return None
return float(s)
def _safe_int(s: str):
if len(s) == 0:
return None
return int(float(s)) # TODO meh
def _safe_mins(s: float):
if s is None:
return None
return s / 60
class SleepData(NamedTuple):
date: str
asleep_time: float
awake_time: float
total: float
awake: float # 'awake for' from app, time awake during sleep (seconds)
awakenings: int
light: float # 'light sleep' from app (seconds)
deep: float # 'deep sleep' from app (sec)
quality: float # ???
@classmethod
def from_jawbone_dict(cls, d: Dict[str, Any]):
return cls(
date=d['DATE'],
asleep_time=_safe_mins(_safe_float(d['s_asleep_time'])),
awake_time=_safe_mins(_safe_float(d['s_awake_time'])),
total=_safe_mins(_safe_float(d['s_duration'])),
light=_safe_mins(_safe_float(d['s_light'])),
deep =_safe_mins(_safe_float(d['s_deep'])),
awake=_safe_mins(_safe_float(d['s_awake'])),
awakenings=_safe_int(d['s_awakenings']),
quality=_safe_float(d['s_quality']),
)
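# Hedged sketch (field values are assumed, not real export data): each CSV row is
# parsed into a SleepData tuple, with durations converted from seconds to minutes
# by _safe_mins, e.g.
#
#   row = {'DATE': '20170101', 's_asleep_time': '1380', 's_awake_time': '27000',
#          's_duration': '25620', 's_light': '12000', 's_deep': '9000',
#          's_awake': '4620', 's_awakenings': '2', 's_quality': '80'}
#   SleepData.from_jawbone_dict(row).total  # -> 427.0 (minutes)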
def is_bad(self):
return self.deep is None and self.light is None
# @property
# def total(self) -> float:
# return self.light + self.deep
def iter_useful(data_file: str):
with open(data_file) as fo:
reader = DictReader(fo)
for d in reader:
dt = SleepData.from_jawbone_dict(d)
if not dt.is_bad():
yield dt
# TODO <<< hmm. these files do contain deep and light sleep??
# also steps stats??
from my.config import jawbone as config # type: ignore[attr-defined]
p = config.export_dir / 'old_csv'
# TODO with_my?
files = [
p / "2015.csv",
p / "2016.csv",
p / "2017.csv",
]
from kython import concat, parse_date # type: ignore
useful = concat(*(list(iter_useful(str(f))) for f in files))
# for u in useful:
# print(f"{u.total} {u.asleep_time} {u.awake_time}")
# # pprint(u.total)
# pprint(u)
# pprint("---")
dates = [parse_date(u.date, yearfirst=True, dayfirst=False) for u in useful]
# TODO filter outliers?
# TODO don't need this anymore? it's gonna be in dashboards package
from kython.plotting import plot_timestamped # type: ignore
for attr, lims, mavg, fig in [ # type: ignore
('light', (0, 400), 5, None),
('deep', (0, 600), 5, None),
('total', (200, 600), 5, None),
('awake_time', (0, 1200), None, 1),
('asleep_time', (-100, 1000), None, 1),
# ('awakenings', (0, 5)),
]:
dates_wkd = [d for d in dates if d.weekday() < 5]
dates_wke = [d for d in dates if d.weekday() >= 5]
for dts, dn in [
(dates, 'total'),
(dates_wkd, 'weekday'),
(dates_wke, 'weekend')
]:
mavgs = []
if mavg is not None:
mavgs.append((mavg, 'green'))
fig = plot_timestamped(
dts, # type: ignore
[getattr(u, attr) for u in useful],
marker='.',
ratio=(16, 4),
mavgs=mavgs,
ylimits=lims,
ytick_size=60,
# figure=1,
)
plt.savefig(f'{attr}_{dn}.png')
# TODO use proper names?
# plt.savefig('res.png')
# fig.show() | PypiClean |
/DeploymentTool-1.5.zip/DeploymentTool-1.5/Deployment/Static/admin/js/timeparse.js | var timeParsePatterns = [
// 9
{ re: /^\d{1,2}$/i,
handler: function(bits) {
if (bits[0].length == 1) {
return '0' + bits[0] + ':00';
} else {
return bits[0] + ':00';
}
}
},
// 13:00
{ re: /^\d{2}[:.]\d{2}$/i,
handler: function(bits) {
return bits[0].replace('.', ':');
}
},
// 9:00
{ re: /^\d[:.]\d{2}$/i,
handler: function(bits) {
return '0' + bits[0].replace('.', ':');
}
},
// 3 am / 3 a.m. / 3am
{ re: /^(\d+)\s*([ap])(?:.?m.?)?$/i,
handler: function(bits) {
var hour = parseInt(bits[1]);
if (hour == 12) {
hour = 0;
}
if (bits[2].toLowerCase() == 'p') {
if (hour == 12) {
hour = 0;
}
return (hour + 12) + ':00';
} else {
if (hour < 10) {
return '0' + hour + ':00';
} else {
return hour + ':00';
}
}
}
},
// 3.30 am / 3:15 a.m. / 3.00am
{ re: /^(\d+)[.:](\d{2})\s*([ap]).?m.?$/i,
handler: function(bits) {
var hour = parseInt(bits[1]);
var mins = parseInt(bits[2]);
if (mins < 10) {
mins = '0' + mins;
}
if (hour == 12) {
hour = 0;
}
if (bits[3].toLowerCase() == 'p') {
if (hour == 12) {
hour = 0;
}
return (hour + 12) + ':' + mins;
} else {
if (hour < 10) {
return '0' + hour + ':' + mins;
} else {
return hour + ':' + mins;
}
}
}
},
// noon
{ re: /^no/i,
handler: function(bits) {
return '12:00';
}
},
// midnight
{ re: /^mid/i,
handler: function(bits) {
return '00:00';
}
}
];
function parseTimeString(s) {
for (var i = 0; i < timeParsePatterns.length; i++) {
var re = timeParsePatterns[i].re;
var handler = timeParsePatterns[i].handler;
var bits = re.exec(s);
if (bits) {
return handler(bits);
}
}
return s;
} | PypiClean |
/DjangoSCA-1.3e.tar.gz/DjangoSCA-1.3e/djangoSCA.py |
import sys
import os
import re
import datetime
import argparse
from djangoSCAclasses.ContentReader import ContentReader
from djangoSCAclasses.SettingsCheck import SettingsCheck
from djangoSCAclasses.MyParser import MyParser
class DjangoFileCheck(ContentReader):
"""
This class extends the base ContentReader class to
include the core 'parseme()' method. In turn, this will
parse a Python source file with .py extension using the
abstract syntax tree (AST). It will then sequentially
parse files ending with .py, .html, and .txt with a regular
expression parser as well as performing a crossdomain.xml
file check.
"""
def __init__(self, projdir, fullpath, rulesfile, filehandle):
try:
ContentReader.__init__(self, projdir, fullpath)
except:
raise
self.rulesfile = rulesfile
self.filehandle = filehandle
def parseme(self):
try:
parser = MyParser(self.rulesfile, self.filehandle)
except:
raise
if re.match(r'.+\.py$', self.shortname):
parser.ast_parse(self.shortname, self.content)
parser.nonast_parse(self.projdir, self.shortname, self.content)
return parser.print_warnings()
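# Hedged usage sketch (paths are assumed examples): checking a single project file
# and writing warnings to stdout, mirroring how the main loop below drives it.
#
#   dfc = DjangoFileCheck('/path/to/proj', '/path/to/proj/app/views.py',
#                         '/usr/local/etc/djangoSCA.rules', sys.stdout)
#   warning_count = dfc.parseme()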
def spin_thing(outFH, i):
# prime number controls speed of spinny thing
prime = 23
mystr = '/-\\|'
if outFH != sys.stdout and not (i % prime):
sys.stdout.write('%s\x08'
% (mystr[i % len(mystr):i % len(mystr) + 1]))
sys.stdout.flush()
return i + 1
def show_summary(outFH, fext, fwarn):
out = '\n[*] Stage 2: File Analysis Summary\n'
for k in sorted(fext.iterkeys()):
out += ' [-] Extension [.%-4s]: %6d files, %4d warnings\n' % \
(k, fext[k], fwarn[k])
out += """\
[+] template files are identified by regular expression match.
[+] many xml files may exist, but only crossdomain.xml is analyzed.
[+] all python scripts will be analyzed."""
if outFH != sys.stdout:
outFH.write(out)
sys.stdout.write(out)
def get_settings_path(base_dir):
for root, dirs, files in os.walk(base_dir):
for f in files:
if f.endswith("settings.py"):
return os.path.dirname(os.path.join(root, f))
# start of main code
if __name__ == "__main__":
TITLE = 'DjangoSCA'
VERSION = '1.3'
# program description
desc = """\
DjangoSCA is a static security code analysis tool for Django project analysis.
It performs sanity checks on 'settings.py' files with recommendations for
improving security, and also performs a recursive directory search analysis
across all of the source code of a project. Python files are parsed using
the native python abstract syntax tree (AST) class. All file extensions
specified are also analyzed using regular expression checks.
Where possible, Django context specific analysis is performed within the model,
view, controller (MVC) paradigm."""
# parse arguments
ap = argparse.ArgumentParser(
usage="""\
djangoSCA.py -r <rules file> -o <output file> <Django Project Dir>
Version %s, Author: Joff Thyer, (c) 2013"""
% (VERSION), description=desc)
ap.add_argument('DjangoProjectDir', help='Django Project Directory')
ap.add_argument('-s', '--settings', default='settings.py',
help='Django settings.py ("settings.py" is the default)')
ap.add_argument('-i', '--ignore', action='append',
help='Ignore directories. eg, --ignore foo --ignore bar')
ap.add_argument('-r', '--rules', default='/usr/local/etc/djangoSCA.rules',
help='DjangoSCA Rules File (default is "djangoSCA.rules")')
ap.add_argument('-o', '--output',
help='Output Text File (default output to screen)')
args = ap.parse_args()
if not os.path.isdir(args.DjangoProjectDir):
sys.stderr.write('project directory does not exist')
sys.exit(1)
if args.output:
try:
outFH = open(args.output, 'w')
except:
sys.stderr.write('failed to open output file')
sys.exit(1)
else:
outFH = sys.stdout
outFH.write("""
[*]___________________________________________________________
[*]
[*] %s Version %s
[*] Author: Joff Thyer (c) 2013
[*] Project Dir/Name..: %s
[*] Date of Test......: %s
[*]___________________________________________________________
[*]---------------------------------
[*] STAGE 1: Project Settings Tests
[*]---------------------------------
""" % (TITLE, VERSION, args.DjangoProjectDir,
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
if outFH != sys.stdout:
print """[*] %s Version %s
[*] Author: Joff Thyer, (c) 2013
[*] Processing Stage 1: [settings.py]""" % (TITLE, VERSION)
try:
SettingsCheck(get_settings_path(args.DjangoProjectDir) + "/"
+ args.settings, args.rules, outFH)
except:
raise
outFH.write("""
[*]---------------------------------------------
[*] STAGE 2: Testing ALL directories and files
[*] .... Warning - This may take some time ....
[*]---------------------------------------------
""")
if outFH != sys.stdout:
sys.stdout.write('[*] Processing Stage 2: '
+ 'Full project directory recursion: [ ]\x08\x08')
sys.stdout.flush()
spincount = 0
rxp = re.compile(r'^[a-zA-Z0-9]+.+\.(py|html|txt|xml)$')
file_ext = {}
file_ext_warnings = {}
for root, dirs, files in os.walk(args.DjangoProjectDir, topdown=True):
if args.ignore:
exclude = set(args.ignore)
dirs[:] = [d for d in dirs if d not in exclude]
for f in files:
fullpath = root + '/' + f
m = rxp.match(f)
if not m:
continue
spincount = spin_thing(outFH, spincount)
if m.group(1) not in file_ext:
file_ext[m.group(1)] = 0
file_ext_warnings[m.group(1)] = 0
file_ext[m.group(1)] += 1
try:
dfc = DjangoFileCheck(args.DjangoProjectDir,
fullpath, args.rules, outFH)
file_ext_warnings[m.group(1)] += dfc.parseme()
except:
raise
show_summary(outFH, file_ext, file_ext_warnings)
print '\n[*] Test Complete'
if all(v == 0 for v in file_ext_warnings.values()):
sys.exit(0)
else:
sys.exit(1) | PypiClean |
/Avpy-0.1.3.tar.gz/Avpy-0.1.3/examples/outputPygame/outputPygame2.py | import sys
import ctypes
import copy
import pygame
from avpy import Media
if __name__ == '__main__':
# cmdline
from optparse import OptionParser
usage = "usage: %prog -m foo.avi"
parser = OptionParser(usage=usage)
parser.add_option('-m', '--media',
help='play media')
parser.add_option('--copyPacket',
action='store_true',
help='copy packet (debug only)')
parser.add_option('--scaleWidth',
type='float', default=1.0,
help='width scale (default: %default)')
parser.add_option('--scaleHeight',
type='float', default=1.0,
help='height scale (default: %default)')
parser.add_option('-f', '--fullscreen',
action='store_true',
help='turn on full screen mode')
#parser.add_option('--scaling',
#default='bilinear',
#help='scaling algorithm')
(options, args) = parser.parse_args()
try:
media = Media(options.media)
except IOError as e:
print('Unable to open %s: %s' % (options.media, e))
sys.exit(1)
# dump info
mediaInfo = media.info()
# select first video stream
vstreams = [i for i, s in enumerate(mediaInfo['stream']) if s['type'] == 'video']
if vstreams:
vstream = vstreams[0]
else:
print('No video stream in %s' % mediaInfo['name'])
sys.exit(2)
# retrieve video width and height
streamInfo = mediaInfo['stream'][vstream]
size = streamInfo['width'], streamInfo['height']
print('video stream index: %d' % vstream)
print('video stream resolution: %dx%d' % (size[0], size[1]))
size = ( int(round(size[0]*options.scaleWidth)),
int(round(size[1]*options.scaleHeight)) )
print('output resolution: %dx%d' % (size))
# setup pygame
pygame.init()
if options.fullscreen:
screen = pygame.display.set_mode(size, pygame.DOUBLEBUF|pygame.HWSURFACE|pygame.FULLSCREEN)
else:
screen = pygame.display.set_mode(size)
useYuv = False
if streamInfo['pixelFormat'] == 'yuv420p':
overlay = pygame.Overlay(pygame.YV12_OVERLAY, size)
overlay.set_location(0, 0, size[0], size[1])
useYuv = True
if overlay.get_hardware():
print('render: Hardware accelerated yuv overlay (fast)')
else:
print('render: Software yuv overlay (slow)')
else:
print('render: software rgb (very slow)')
# add scaler to convert to rgb
media.addScaler(vstream, *size)
#media.addScaler(vstream, *size, scaling='gauss')
decodedCount = 0
mainLoop = True
print('Press Esc to quit...')
while mainLoop:
try:
pkt = media.next()
except StopIteration:
mainLoop = False
continue
if pkt.streamIndex() == vstream:
if options.copyPacket:
pkt2 = copy.copy(pkt)
else:
pkt2 = pkt
pkt2.decode()
if pkt2.decoded:
decodedCount += 1
if useYuv:
# upload yuv data
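                    # linesize is the decoder's per-plane row stride and may include padding,
                    # so plane sizes are computed from linesize rather than the frame width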
size0 = pkt2.frame.contents.linesize[0] * pkt2.frame.contents.height
size1 = pkt2.frame.contents.linesize[1] * (pkt2.frame.contents.height//2)
size2 = pkt2.frame.contents.linesize[2] * (pkt2.frame.contents.height//2)
yuv = (ctypes.string_at(pkt2.frame.contents.data[0], size0),
ctypes.string_at(pkt2.frame.contents.data[1], size1),
ctypes.string_at(pkt2.frame.contents.data[2], size2))
overlay.display(yuv)
# add a small delay otherwise pygame will crash
pygame.time.wait(20)
else:
# upload rgb data
buf = pkt2.swsFrame.contents.data[0]
bufLen = size[0]*size[1]*3
surfaceStr = ctypes.string_at(buf, bufLen)
cSurface = pygame.image.fromstring(surfaceStr, size, 'RGB')
pygame.display.set_caption('Press Esc to quit...')
screen.blit(cSurface, (0, 0))
pygame.display.flip()
# event processing
for event in pygame.event.get():
if event.type == pygame.QUIT:
mainLoop = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
mainLoop = False | PypiClean |
/CellDetection-0.4.3-py3-none-any.whl/celldetection/models/commons.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor, tanh, sigmoid
from torchvision import transforms as trans
from ..util.util import lookup_nn, tensor_to, ensure_num_tuple, get_nd_conv
from ..ops.commons import split_spatially, minibatch_std_layer
from typing import Type
from functools import partial
__all__ = []
def register(obj):
__all__.append(obj.__name__)
return obj
def _ni_3d(nd):
if nd != 2:
raise NotImplementedError('The `nd` option is not yet available for this model.')
@register
class ConvNorm(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1, norm_layer=nn.BatchNorm2d,
nd=2, **kwargs):
"""ConvNorm.
Just a convolution and a normalization layer.
Args:
in_channels: Number of input channels.
out_channels: Number of output channels.
kernel_size: Kernel size.
padding: Padding.
stride: Stride.
norm_layer: Normalization layer (e.g. ``nn.BatchNorm2d``).
**kwargs: Additional keyword arguments.
"""
Conv = get_nd_conv(nd)
Norm = lookup_nn(norm_layer, nd=nd, call=False)
super().__init__(
Conv(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, **kwargs),
Norm(out_channels),
)
@register
class ConvNormRelu(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1, norm_layer=nn.BatchNorm2d,
activation='relu', nd=2, **kwargs):
"""ConvNormReLU.
Just a convolution, normalization layer and an activation.
Args:
in_channels: Number of input channels.
out_channels: Number of output channels.
kernel_size: Kernel size.
padding: Padding.
stride: Stride.
norm_layer: Normalization layer (e.g. ``nn.BatchNorm2d``).
activation: Activation function. (e.g. ``nn.ReLU``, ``'relu'``)
**kwargs: Additional keyword arguments.
"""
Conv = get_nd_conv(nd)
Norm = lookup_nn(norm_layer, nd=nd, call=False)
super().__init__(
Conv(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, **kwargs),
Norm(out_channels),
lookup_nn(activation)
)
@register
class TwoConvNormRelu(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1, mid_channels=None,
norm_layer=nn.BatchNorm2d, activation='relu', nd=2, **kwargs):
"""TwoConvNormReLU.
A sequence of conv, norm, activation, conv, norm, activation.
Args:
in_channels: Number of input channels.
out_channels: Number of output channels.
kernel_size: Kernel size.
padding: Padding.
stride: Stride.
mid_channels: Mid-channels. Default: Same as ``out_channels``.
norm_layer: Normalization layer (e.g. ``nn.BatchNorm2d``).
activation: Activation function. (e.g. ``nn.ReLU``, ``'relu'``)
**kwargs: Additional keyword arguments.
"""
Conv = get_nd_conv(nd)
Norm = lookup_nn(norm_layer, nd=nd, call=False)
if mid_channels is None:
mid_channels = out_channels
super().__init__(
Conv(in_channels, mid_channels, kernel_size=kernel_size, padding=padding, stride=stride, **kwargs),
Norm(mid_channels),
lookup_nn(activation),
Conv(mid_channels, out_channels, kernel_size=kernel_size, padding=padding, **kwargs),
Norm(out_channels),
lookup_nn(activation)
)
@register
class TwoConvNormLeaky(TwoConvNormRelu):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1, mid_channels=None,
norm_layer=nn.BatchNorm2d, nd=2, **kwargs):
super().__init__(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride,
mid_channels=mid_channels, norm_layer=norm_layer, activation='leakyrelu', nd=nd, **kwargs)
class ScaledX(nn.Module):
def __init__(self, fn, factor, shift=0.):
super().__init__()
self.factor = factor
self.shift = shift
self.fn = fn
def forward(self, inputs: Tensor) -> Tensor:
return self.fn(inputs) * self.factor + self.shift
def extra_repr(self) -> str:
return 'factor={}, shift={}'.format(self.factor, self.shift)
@register
class ScaledTanh(ScaledX):
def __init__(self, factor, shift=0.):
"""Scaled Tanh.
Computes the scaled and shifted hyperbolic tangent:
.. math:: tanh(x) * factor + shift
Args:
factor: Scaling factor.
shift: Shifting constant.
"""
super().__init__(tanh, factor, shift)
@register
class ScaledSigmoid(ScaledX):
def __init__(self, factor, shift=0.):
"""Scaled Sigmoid.
Computes the scaled and shifted sigmoid:
.. math:: sigmoid(x) * factor + shift
Args:
factor: Scaling factor.
shift: Shifting constant.
"""
super().__init__(sigmoid, factor, shift)
@register
class ReplayCache:
def __init__(self, size=128):
"""Replay Cache.
Typical cache that can be used for experience replay in GAN training.
Notes:
- Items remain on their current device.
Args:
size: Number of batch items that fit in cache.
"""
self.cache = []
self.size = size
def __len__(self):
return len(self.cache)
def is_empty(self):
return len(self) <= 0
def add(self, x, fraction=.5):
"""Add.
Add a ``fraction`` of batch ``x`` to cache.
Drop random items if cache is full.
Args:
x: Batch Tensor[n, ...].
fraction: Fraction in 0..1.
"""
lx = len(x)
for i in np.random.choice(np.arange(lx), int(lx * fraction), replace=False):
self.cache.append(x[i].detach())
while len(self) > self.size:
del self.cache[np.random.randint(0, len(self))]
def __call__(self, num):
"""Call.
Args:
num: Batch size / number of returned items.
Returns:
Tensor[num, ...]
"""
if self.is_empty():
return None
return torch.stack([self.cache[i] for i in np.random.randint(0, len(self), num)], 0)
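# Hedged usage sketch (illustrative only, not part of the original file):
# >>> cache = ReplayCache(size=128)
# >>> cache.add(torch.randn(16, 3, 64, 64), fraction=.5)  # keeps 8 of the 16 items
# >>> replay = cache(8)  # Tensor[8, 3, 64, 64], or None while the cache is empty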
class _ResBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
block,
activation='ReLU',
stride=1,
downsample=None,
norm_layer='BatchNorm2d',
nd=2,
) -> None:
"""ResBlock.
Typical ResBlock with variable kernel size and an included mapping of the identity to correct dimensions.
References:
https://arxiv.org/abs/1512.03385
Args:
in_channels: Input channels.
out_channels: Output channels.
kernel_size: Kernel size.
padding: Padding.
norm_layer: Norm layer.
activation: Activation.
stride: Stride.
downsample: Downsample module that maps identity to correct dimensions. Default is an optionally strided
1x1 Conv2d with BatchNorm2d, as per He et al. (2015) (`3.3. Network Architectures`, `Residual Network`,
"option (B)").
nd: Number of spatial dimensions.
"""
super().__init__()
downsample = downsample or partial(ConvNorm, nd=nd, norm_layer=norm_layer)
if in_channels != out_channels or stride != 1:
self.downsample = downsample(in_channels, out_channels, 1, stride=stride, bias=False, padding=0)
else:
self.downsample = nn.Identity()
self.block = block
self.activation = lookup_nn(activation)
def forward(self, x: Tensor) -> Tensor:
identity = self.downsample(x)
out = self.block(x)
out += identity
return self.activation(out)
@register
class ResBlock(_ResBlock):
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
padding=1,
norm_layer='BatchNorm2d',
activation='ReLU',
stride=1,
downsample=None,
nd=2,
**kwargs
) -> None:
"""ResBlock.
Typical ResBlock with variable kernel size and an included mapping of the identity to correct dimensions.
References:
- https://doi.org/10.1109/CVPR.2016.90
Notes:
- Similar to ``torchvision.models.resnet.BasicBlock``, with different interface and defaults.
- Consistent with standard signature ``in_channels, out_channels, kernel_size, ...``.
Args:
in_channels: Input channels.
out_channels: Output channels.
kernel_size: Kernel size.
padding: Padding.
norm_layer: Norm layer.
activation: Activation.
stride: Stride.
downsample: Downsample module that maps identity to correct dimensions. Default is an optionally strided
1x1 Conv2d with BatchNorm2d, as per He et al. (2015) (`3.3. Network Architectures`, `Residual Network`,
"option (B)").
**kwargs: Keyword arguments for Conv2d layers.
"""
Conv = get_nd_conv(nd)
Norm = lookup_nn(norm_layer, nd=nd, call=False)
super().__init__(
in_channels, out_channels,
block=nn.Sequential(
Conv(in_channels, out_channels, kernel_size=kernel_size, padding=padding, bias=False, stride=stride,
**kwargs),
Norm(out_channels),
lookup_nn(activation),
Conv(out_channels, out_channels, kernel_size=kernel_size, padding=padding, bias=False, **kwargs),
Norm(out_channels),
),
activation=activation, stride=stride, downsample=downsample, nd=nd, norm_layer=norm_layer
)
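# Hedged usage sketch (illustrative only, not part of the original file):
# >>> block = ResBlock(32, 64, stride=2)
# >>> block(torch.randn(1, 32, 64, 64)).shape
# torch.Size([1, 64, 32, 32])  # identity is mapped to the new shape by the strided 1x1 downsample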
@register
class BottleneckBlock(_ResBlock):
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
padding=1,
mid_channels=None,
compression=4,
base_channels=64,
norm_layer='BatchNorm2d',
activation='ReLU',
stride=1,
downsample=None,
nd=2,
**kwargs
) -> None:
"""Bottleneck Block.
Typical Bottleneck Block with variable kernel size and an included mapping of the identity to correct
dimensions.
References:
- https://doi.org/10.1109/CVPR.2016.90
- https://catalog.ngc.nvidia.com/orgs/nvidia/resources/resnet_50_v1_5_for_pytorch
Notes:
- Similar to ``torchvision.models.resnet.Bottleneck``, with different interface and defaults.
- Consistent with standard signature ``in_channels, out_channels, kernel_size, ...``.
- Stride handled in bottleneck.
Args:
in_channels: Input channels.
out_channels: Output channels.
kernel_size: Kernel size.
padding: Padding.
mid_channels:
compression: Compression rate of the bottleneck. The default 4 compresses 256 channels to 64=256/4.
base_channels: Minimum number of ``mid_channels``.
norm_layer: Norm layer.
activation: Activation.
stride: Stride.
downsample: Downsample module that maps identity to correct dimensions. Default is an optionally strided
1x1 Conv2d with BatchNorm2d, as per He et al. (2015) (`3.3. Network Architectures`, `Residual Network`,
"option (B)").
**kwargs: Keyword arguments for Conv2d layers.
"""
Conv = get_nd_conv(nd)
Norm = lookup_nn(norm_layer, nd=nd, call=False)
mid_channels = mid_channels or np.max([base_channels, out_channels // compression, in_channels // compression])
super().__init__(
in_channels, out_channels,
block=nn.Sequential(
Conv(in_channels, mid_channels, kernel_size=1, padding=0, bias=False, **kwargs),
Norm(mid_channels),
lookup_nn(activation),
Conv(mid_channels, mid_channels, kernel_size=kernel_size, padding=padding, bias=False, stride=stride,
**kwargs),
Norm(mid_channels),
lookup_nn(activation),
Conv(mid_channels, out_channels, kernel_size=1, padding=0, bias=False, **kwargs),
Norm(out_channels)
),
            activation=activation, stride=stride, downsample=downsample, nd=nd, norm_layer=norm_layer
)
@register
class NoAmp(nn.Module):
def __init__(self, module: Type[nn.Module]):
"""No AMP.
Wrap a ``Module`` object and disable ``torch.cuda.amp.autocast`` during forward pass if it is enabled.
Examples:
>>> import celldetection as cd
... model = cd.models.CpnU22(1)
... # Wrap all ReadOut modules in model with NoAmp, thus disabling autocast for those modules
... cd.wrap_module_(model, cd.models.ReadOut, cd.models.NoAmp)
Args:
module: Module.
"""
super().__init__()
self.module = module
def forward(self, *args, **kwargs):
if torch.is_autocast_enabled():
with torch.cuda.amp.autocast(enabled=False):
result = self.module(*tensor_to(args, torch.float32), **tensor_to(kwargs, torch.float32))
else:
result = self.module(*args, **kwargs)
return result
@register
class ReadOut(nn.Module):
def __init__(
self,
channels_in,
channels_out,
kernel_size=3,
padding=1,
activation='relu',
norm='batchnorm2d',
final_activation=None,
dropout=0.1,
channels_mid=None,
stride=1,
nd=2,
):
super().__init__()
Conv = get_nd_conv(nd)
Norm = lookup_nn(norm, nd=nd, call=False)
Dropout = lookup_nn(nn.Dropout2d, nd=nd, call=False)
self.channels_out = channels_out
if channels_mid is None:
channels_mid = channels_in
self.block = nn.Sequential(
Conv(channels_in, channels_mid, kernel_size, padding=padding, stride=stride),
Norm(channels_mid),
lookup_nn(activation),
Dropout(p=dropout) if dropout else nn.Identity(),
Conv(channels_mid, channels_out, 1),
)
if final_activation is ...:
self.activation = lookup_nn(activation)
else:
self.activation = lookup_nn(final_activation)
def forward(self, x):
out = self.block(x)
return self.activation(out)
@register
class SpatialSplit(nn.Module):
def __init__(self, height, width=None):
"""Spatial split.
Splits spatial dimensions of input Tensor into patches of size ``(height, width)`` and adds the patches
to the batch dimension.
Args:
height: Patch height.
width: Patch width.
"""
super().__init__()
self.height = height
self.width = width or height
def forward(self, x):
return split_spatially(x, self.height, self.width)
@register
class MinibatchStdLayer(torch.nn.Module):
def __init__(self, channels=1, group_channels=None, epsilon=1e-8):
"""Minibatch standard deviation layer.
The minibatch standard deviation layer first splits the batch dimension into slices of size ``group_channels``.
The channel dimension is split into ``channels`` slices. For the groups the standard deviation is calculated and
averaged over spatial dimensions and channel slice depth. The result is broadcasted to the spatial dimensions,
repeated for the batch dimension and then concatenated to the channel dimension of ``x``.
References:
- https://arxiv.org/pdf/1710.10196.pdf
Args:
channels: Number of averaged standard deviation channels.
group_channels: Number of channels per group. Default: batch size.
epsilon: Epsilon.
"""
super().__init__()
self.channels = channels
self.group_channels = group_channels
self.epsilon = epsilon
def forward(self, x):
return minibatch_std_layer(x, self.channels, self.group_channels, epsilon=self.epsilon)
def extra_repr(self) -> str:
return f'channels={self.channels}, group_channels={self.group_channels}'
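# Hedged usage sketch (illustrative only, not part of the original file):
# >>> layer = MinibatchStdLayer(channels=1)
# >>> layer(torch.randn(8, 64, 32, 32)).shape
# torch.Size([8, 65, 32, 32])  # per the docstring, one averaged std channel is appended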
class _AdditiveNoise(nn.Module):
def __init__(self, in_channels, noise_channels=1, mean=0., std=1., weighted=False, nd=2):
super().__init__()
self.noise_channels = noise_channels
self.in_channels = in_channels
self.reps = (1, self.in_channels // self.noise_channels) + (1,) * nd
self.weighted = weighted
self.weight = nn.Parameter(torch.zeros((1, in_channels) + (1,) * nd)) if weighted else 1.
self.constant = False
self.mean = mean
self.std = std
self._constant = None
def sample_noise(self, shape, device, dtype):
return torch.randn(shape, device=device, dtype=dtype) * self.std + self.mean
def forward(self, x):
shape = x.shape
constant = getattr(self, 'constant', False)
_constant = getattr(self, '_constant', None)
if (constant and _constant is None) or not constant:
noise = self.sample_noise((shape[0], self.noise_channels) + shape[2:], x.device, x.dtype)
if constant and _constant is None:
self._constant = noise
else:
noise = _constant
return x + noise.repeat(self.reps) * self.weight
def extra_repr(self):
s = f"in_channels={self.in_channels}, noise_channels={self.noise_channels}, mean={self.mean}, " \
f"std={self.std}, weighted={self.weighted}"
if getattr(self, 'constant', False):
s += ', constant=True'
return s
@register
class AdditiveNoise2d(_AdditiveNoise):
def __init__(self, in_channels, noise_channels=1, weighted=True):
super().__init__(in_channels=in_channels, noise_channels=noise_channels, weighted=weighted, nd=2)
@register
class AdditiveNoise3d(_AdditiveNoise):
def __init__(self, in_channels, noise_channels=1, weighted=True):
super().__init__(in_channels=in_channels, noise_channels=noise_channels, weighted=weighted, nd=3)
class _Stride(nn.Module):
def __init__(self, stride, start=0, nd=2):
super().__init__()
self.stride = ensure_num_tuple(stride, nd)
self.start = start
def forward(self, x):
return x[(...,) + tuple((slice(self.start, None, s) for s in self.stride))]
@register
class Stride1d(_Stride):
def __init__(self, stride, start=0):
super().__init__(stride, start, 1)
@register
class Stride2d(_Stride):
def __init__(self, stride, start=0):
super().__init__(stride, start, 2)
@register
class Stride3d(_Stride):
def __init__(self, stride, start=0):
super().__init__(stride, start, 3)
class _Fuse(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, padding=0, activation='relu', norm_layer='batchnorm2d',
nd=2, dim=1, **kwargs):
super().__init__()
modules = [get_nd_conv(nd)(in_channels, out_channels, kernel_size, padding=padding, **kwargs)]
if norm_layer is not None:
modules.append(lookup_nn(norm_layer, out_channels, nd=nd))
if activation is not None:
modules.append(lookup_nn(activation, inplace=False))
self.block = nn.Sequential(*modules)
self.nd = nd
self.dim = dim
def forward(self, x: tuple):
x = tuple(x)
target_size = x[0].shape[-self.nd:]
x = torch.cat([(F.interpolate(x_, target_size) if x_.shape[-self.nd:] != target_size else x_) for x_ in x],
dim=self.dim)
return self.block(x)
@register
class Fuse1d(_Fuse):
def __init__(self, in_channels, out_channels, kernel_size=1, padding=0, activation='relu', norm_layer='batchnorm1d',
**kwargs):
super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding,
activation=activation, norm_layer=norm_layer, nd=1, **kwargs)
@register
class Fuse2d(_Fuse):
def __init__(self, in_channels, out_channels, kernel_size=1, padding=0, activation='relu', norm_layer='batchnorm2d',
**kwargs):
super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding,
activation=activation, norm_layer=norm_layer, nd=2, **kwargs)
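# Hedged usage sketch (illustrative only, not part of the original file):
# >>> fuse = Fuse2d(64 + 128, 64)  # in_channels is the sum of all concatenated inputs
# >>> a, b = torch.randn(1, 64, 32, 32), torch.randn(1, 128, 16, 16)
# >>> fuse((a, b)).shape  # b is resized to a's spatial size before concatenation
# torch.Size([1, 64, 32, 32])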
@register
class Fuse3d(_Fuse):
def __init__(self, in_channels, out_channels, kernel_size=1, padding=0, activation='relu', norm_layer='batchnorm3d',
**kwargs):
super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, padding=padding,
activation=activation, norm_layer=norm_layer, nd=3, **kwargs)
@register
class Normalize(nn.Module):
def __init__(self, mean=0., std=1., assert_range=(0., 1.)):
super().__init__()
self.assert_range = assert_range
self.transform = trans.Compose([
trans.Normalize(mean=mean, std=std)
])
def forward(self, inputs: Tensor):
if self.assert_range is not None:
assert torch.all(inputs >= self.assert_range[0]) and torch.all(
inputs <= self.assert_range[1]), f'Inputs should be in interval {self.assert_range}'
if self.transform is not None:
inputs = self.transform(inputs)
return inputs
def extra_repr(self) -> str:
s = ''
if self.assert_range is not None:
s += f'(assert_range): {self.assert_range}\n'
s += f'(norm): {repr(self.transform)}'
return s
@register
class SqueezeExcitation(nn.Sequential):
def __init__(self, in_channels, squeeze_channels=None, compression=16, activation='relu',
scale_activation='sigmoid', residual=True, nd=2):
Pool = lookup_nn('AdaptiveAvgPool2d', nd=nd, call=False)
Conv = lookup_nn('Conv2d', nd=nd, call=False)
self.residual = residual
if squeeze_channels is None:
squeeze_channels = max(in_channels // compression, 1)
super().__init__(
Pool(1),
Conv(in_channels, squeeze_channels, 1),
lookup_nn(activation),
Conv(squeeze_channels, in_channels, 1),
lookup_nn(scale_activation)
)
def forward(self, inputs):
scale = super().forward(inputs)
scaled = inputs * scale
if self.residual:
return inputs + scaled
return scaled
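# Hedged usage sketch (illustrative only, not part of the original file):
# >>> se = SqueezeExcitation(64, compression=16)  # squeezes to 4 channels internally
# >>> se(torch.randn(2, 64, 32, 32)).shape  # shape-preserving channel re-weighting
# torch.Size([2, 64, 32, 32])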
def channels_last_permute(nd):
return (0,) + tuple(range(2, nd + 2)) + (1,)
def channels_first_permute(nd):
return (0, nd + 1,) + tuple(range(1, nd + 1))
class LayerNormNd(nn.LayerNorm): # Generalized version of torchvision.models.convnext.LayerNorm2d
def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, nd=2,
device=None, dtype=None) -> None:
super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine,
device=device, dtype=dtype)
self._perm0 = channels_last_permute(nd)
self._perm1 = channels_first_permute(nd)
def forward(self, x: Tensor) -> Tensor:
x = x.permute(*self._perm0)
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
x = x.permute(*self._perm1)
return x
@register
class LayerNorm1d(LayerNormNd):
def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None,
dtype=None) -> None:
"""Layer Norm.
By default, ``LayerNorm1d(channels)`` operates on feature vectors, i.e. the channel dimension.
Args:
normalized_shape: Input shape from an expected input of size
eps: A value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: A boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
device: Device.
dtype: Data type.
"""
super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine,
device=device, dtype=dtype, nd=1)
@register
class LayerNorm2d(LayerNormNd):
def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None,
dtype=None) -> None:
"""Layer Norm.
By default, ``LayerNorm2d(channels)`` operates on feature vectors, i.e. the channel dimension.
Args:
normalized_shape: Input shape from an expected input of size
eps: A value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: A boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
device: Device.
dtype: Data type.
"""
super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine,
device=device, dtype=dtype, nd=2)
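# Hedged usage sketch (illustrative only, not part of the original file):
# >>> norm = LayerNorm2d(64)  # normalizes each feature vector along the channel dimension
# >>> norm(torch.randn(2, 64, 32, 32)).shape
# torch.Size([2, 64, 32, 32])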
@register
class LayerNorm3d(LayerNormNd):
def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None,
dtype=None) -> None:
"""Layer Norm.
By default, ``LayerNorm3d(channels)`` operates on feature vectors, i.e. the channel dimension.
Args:
normalized_shape: Input shape from an expected input of size
eps: A value added to the denominator for numerical stability. Default: 1e-5
elementwise_affine: A boolean value that when set to ``True``, this module
has learnable per-element affine parameters initialized to ones (for weights)
and zeros (for biases). Default: ``True``.
device: Device.
dtype: Data type.
"""
super().__init__(normalized_shape=normalized_shape, eps=eps, elementwise_affine=elementwise_affine,
device=device, dtype=dtype, nd=3) | PypiClean |
/Chips-0.1.2.tar.gz/Chips-0.1.2/docs/build/html/searchindex.js | /* machine-generated Sphinx search index (Search.setIndex payload omitted) */
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/docs/aoa/sensors.rst | .. _sensors:
Sensors
=======
*Availability: Linux*
.. image:: ../_static/sensors.png
Glances can display sensor information using ``psutil``,
``hddtemp`` and ``batinfo``:
- motherboard and CPU temperatures
- hard disk temperature
- battery capacity
There is no alert on this information.
.. note 1::
    Limit values and sensor alias names can be defined in the
configuration file under the ``[sensors]`` section.
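    For example, alarm thresholds can be set like this (the key names and
    values below are illustrative; check the default ``glances.conf`` shipped
    with your version for the exact options)::
        [sensors]
        temperature_core_careful=60
        temperature_core_warning=70
        temperature_core_critical=80
        temperature_hdd_careful=45
        temperature_hdd_warning=52
        temperature_hdd_critical=60
        battery_careful=80
        battery_warning=90
        battery_critical=95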
.. note 2::
    Support for multiple batteries is only available if you have the batinfo
    Python lib installed on your system, because for the moment PSUtil only
    supports one battery.
.. note 3::
    If a sensor has a temperature and a fan speed with the same unit name,
    it is possible to alias them using:
unitname_temperature_core_alias=Alias for temp
unitname_fan_speed_alias=Alias for fan speed
.. note 4::
    If a sensor has multiple identical feature names (see #2280), then
Glances will add a suffix to the feature name.
For example, if you have one sensor with two Composite features, the
second one will be named Composite_1.
.. note 5::
    The plugin can crash on some operating systems (e.g. FreeBSD) when the
    TCP or UDP blackhole option is > 0 (see issue #2106). In this case, you
    should disable the sensors plugin (``--disable-plugin sensors`` or from
    the configuration file).
/Chandler-debugPlugin-1.0.tar.gz/Chandler-debugPlugin-1.0/debug/generate_dialog/ItemGenerator.py |
import wx
from wx import xrc
import application.Globals as Globals
import os
import pkg_resources
import logging
from debug import createItems as createItems
import cPickle as pickle
import itemGenHelp_xrc as itemGenHelp_xrc
from datetime import date
if bool(wx.GetApp()):
import tools.QAUITestAppLib as QAUITestAppLib #import only if we really have a Chandler window
else:
from chandlerdb.persistence.RepositoryView import NullRepositoryView #import for tests
logger = logging.getLogger(__name__)
__res = None
def get_resources():
""" This function provides access to the XML resources in this module."""
global __res
if __res == None:
__init_resources()
return __res
class xrcFRAME1(wx.Dialog):
def PreCreate(self, pre):
""" This function is called during the class's initialization.
Override it for custom setup before the window is created usually to
set additional window styles using SetWindowStyle() and SetExtraStyle()."""
pass
def __init__(self, parent):
# Two stage creation (see http://wiki.wxpython.org/index.cgi/TwoStageCreation)
pre = wx.PreDialog()
self.PreCreate(pre)
get_resources().LoadOnDialog(pre, parent, "FRAME1")
self.PostCreate(pre)
#more manually added stuff
self.InitButtons()
# create attributes for the named items in this container
self.noteBookContainer = xrc.XRCCTRL(self, "noteBookContainer")
self.pageGeneral = xrc.XRCCTRL(self, "pageGeneral")
self.labelGeneralPage = xrc.XRCCTRL(self, "labelGeneralPage")
self.labelTotalItems = xrc.XRCCTRL(self, "labelTotalItems")
self.textCtrlTotalItems = xrc.XRCCTRL(self, "textCtrlTotalItems")
self.labelNumOfItemsValid = xrc.XRCCTRL(self, "labelNumOfItemsValid")
self.labelStamps = xrc.XRCCTRL(self, "labelStamps")
self.labelPercentTask = xrc.XRCCTRL(self, "labelPercentTask")
self.choicePercentTask = xrc.XRCCTRL(self, "choicePercentTask")
self.labelPercentMail = xrc.XRCCTRL(self, "labelPercentMail")
self.choicePercentMail = xrc.XRCCTRL(self, "choicePercentMail")
self.labelPercentEvent = xrc.XRCCTRL(self, "labelPercentEvent")
self.choicePercentEvent = xrc.XRCCTRL(self, "choicePercentEvent")
self.labelStampsValid = xrc.XRCCTRL(self, "labelStampsValid")
self.labelTitle = xrc.XRCCTRL(self, "labelTitle")
self.textCtrlTitleSourceFile = xrc.XRCCTRL(self, "textCtrlTitleSourceFile")
self.labelTitleSourceValid = xrc.XRCCTRL(self, "labelTitleSourceValid")
self.labelNoteField = xrc.XRCCTRL(self, "labelNoteField")
self.textCtrlNoteSourceFilePath = xrc.XRCCTRL(self, "textCtrlNoteSourceFilePath")
self.labelNoteSourceValid = xrc.XRCCTRL(self, "labelNoteSourceValid")
self.labelCollectionSource = xrc.XRCCTRL(self, "labelCollectionSource")
self.textCtrlCollectionFileName = xrc.XRCCTRL(self, "textCtrlCollectionFileName")
self.labelCollectionSourceValid = xrc.XRCCTRL(self, "labelCollectionSourceValid")
self.labelNumberOfCollections = xrc.XRCCTRL(self, "labelNumberOfCollections")
self.textCtrlCollectionCount = xrc.XRCCTRL(self, "textCtrlCollectionCount")
self.labelCollectionCountValid = xrc.XRCCTRL(self, "labelCollectionCountValid")
self.labelCollectionMembership = xrc.XRCCTRL(self, "labelCollectionMembership")
self.textCtrlCollectionMembership = xrc.XRCCTRL(self, "textCtrlCollectionMembership")
self.labelCollectionMembershipValid = xrc.XRCCTRL(self, "labelCollectionMembershipValid")
self.labelNoteField = xrc.XRCCTRL(self, "labelNoteField")
self.textCtrlLocationSourceFilePath = xrc.XRCCTRL(self, "textCtrlLocationSourceFilePath")
self.labelLocationSourceValid = xrc.XRCCTRL(self, "labelLocationSourceValid")
self.labelTriageStatus = xrc.XRCCTRL(self, "labelTriageStatus")
self.labelPercentUnassignedStatus = xrc.XRCCTRL(self, "labelPercentUnassignedStatus")
self.choicePercentUnassignedStatus = xrc.XRCCTRL(self, "choicePercentUnassignedStatus")
self.labelPercentNow = xrc.XRCCTRL(self, "labelPercentNow")
self.choicePercentNow = xrc.XRCCTRL(self, "choicePercentNow")
self.labelPercentLater = xrc.XRCCTRL(self, "labelPercentLater")
self.choicePercentLater = xrc.XRCCTRL(self, "choicePercentLater")
self.labelPercentDone = xrc.XRCCTRL(self, "labelPercentDone")
self.choicePercentDone = xrc.XRCCTRL(self, "choicePercentDone")
self.labelTriageValid = xrc.XRCCTRL(self, "labelTriageValid")
self.pageEvent = xrc.XRCCTRL(self, "pageEvent")
self.labelEventPage = xrc.XRCCTRL(self, "labelEventPage")
self.labelTimePeriod = xrc.XRCCTRL(self, "labelTimePeriod")
self.labelStartDate = xrc.XRCCTRL(self, "labelStartDate")
self.textCtrlStartDate = xrc.XRCCTRL(self, "textCtrlStartDate")
self.labelEndDate = xrc.XRCCTRL(self, "labelEndDate")
self.textCtrlEndDate = xrc.XRCCTRL(self, "textCtrlEndDate")
self.labelDateRangeValid = xrc.XRCCTRL(self, "labelDateRangeValid")
self.labelTimeOfDay = xrc.XRCCTRL(self, "labelTimeOfDay")
self.textCtrlTimeOfDay = xrc.XRCCTRL(self, "textCtrlTimeOfDay")
self.labelTimeOfDaySpecValid = xrc.XRCCTRL(self, "labelTimeOfDaySpecValid")
self.labelDuration = xrc.XRCCTRL(self, "labelDuration")
self.textCtrlDuration = xrc.XRCCTRL(self, "textCtrlDuration")
self.labelDurationSpecValid = xrc.XRCCTRL(self, "labelDurationSpecValid")
self.labelDurationTypes = xrc.XRCCTRL(self, "labelDurationTypes")
self.labelPercentAllDay = xrc.XRCCTRL(self, "labelPercentAllDay")
self.choicePercentAllDay = xrc.XRCCTRL(self, "choicePercentAllDay")
self.labelPercentAtTime = xrc.XRCCTRL(self, "labelPercentAtTime")
self.choicePercentAtTime = xrc.XRCCTRL(self, "choicePercentAtTime")
self.labelPercentAnyTime = xrc.XRCCTRL(self, "labelPercentAnyTime")
self.choicePercentAnyTime = xrc.XRCCTRL(self, "choicePercentAnyTime")
self.labelPercentDuration = xrc.XRCCTRL(self, "labelPercentDuration")
self.choicePercentDuration = xrc.XRCCTRL(self, "choicePercentDuration")
self.labelDurationTypesValid = xrc.XRCCTRL(self, "labelDurationTypesValid")
self.labelStatus = xrc.XRCCTRL(self, "labelStatus")
self.labelPercentConfirmed = xrc.XRCCTRL(self, "labelPercentConfirmed")
self.choicePercentConfirmed = xrc.XRCCTRL(self, "choicePercentConfirmed")
self.labelPercentTentative = xrc.XRCCTRL(self, "labelPercentTentative")
self.choicePercentTentative = xrc.XRCCTRL(self, "choicePercentTentative")
self.labelPercentFYI = xrc.XRCCTRL(self, "labelPercentFYI")
self.choicePercentFYI = xrc.XRCCTRL(self, "choicePercentFYI")
self.labelStatusValid = xrc.XRCCTRL(self, "labelStatusValid")
self.labelRecurrence = xrc.XRCCTRL(self, "labelRecurrence")
self.labelPercentNonRecurring = xrc.XRCCTRL(self, "labelPercentNonRecurring")
self.choicePercentNonRecurring = xrc.XRCCTRL(self, "choicePercentNonRecurring")
self.labelPercentDaily = xrc.XRCCTRL(self, "labelPercentDaily")
self.choicePercentDaily = xrc.XRCCTRL(self, "choicePercentDaily")
self.labelPercentWeekly = xrc.XRCCTRL(self, "labelPercentWeekly")
self.choicePercentWeekly = xrc.XRCCTRL(self, "choicePercentWeekly")
self.labelPercentBiWeekly = xrc.XRCCTRL(self, "labelPercentBiWeekly")
self.choicePercentBiWeekly = xrc.XRCCTRL(self, "choicePercentBiWeekly")
self.labelPercentMonthly = xrc.XRCCTRL(self, "labelPercentMonthly")
self.choicePercentMonthly = xrc.XRCCTRL(self, "choicePercentMonthly")
self.labelPercentYearly = xrc.XRCCTRL(self, "labelPercentYearly")
self.choicePercentYearly = xrc.XRCCTRL(self, "choicePercentYearly")
self.labelRecurrenceValid = xrc.XRCCTRL(self, "labelRecurrenceValid")
self.labelRecurrenceEndDate = xrc.XRCCTRL(self, "labelRecurrenceEndDate")
self.textCtrlRecurrenceEndDates = xrc.XRCCTRL(self, "textCtrlRecurrenceEndDates")
self.labelRecurrenceEndDateValid = xrc.XRCCTRL(self, "labelRecurrenceEndDateValid")
self.labelAlarmSpecification = xrc.XRCCTRL(self, "labelAlarmSpecification")
self.textCtrlAlarmSpec = xrc.XRCCTRL(self, "textCtrlAlarmSpec")
self.labelAlarmTypeValid = xrc.XRCCTRL(self, "labelAlarmTypeValid")
self.pageMessage = xrc.XRCCTRL(self, "pageMessage")
self.labelMsgPage = xrc.XRCCTRL(self, "labelMsgPage")
self.labelTo = xrc.XRCCTRL(self, "labelTo")
self.labelToFile = xrc.XRCCTRL(self, "labelToFile")
self.textCtrlToFile = xrc.XRCCTRL(self, "textCtrlToFile")
self.labelToSourceValid = xrc.XRCCTRL(self, "labelToSourceValid")
self.labelToSpec = xrc.XRCCTRL(self, "labelToSpec")
self.textCtrlToSpec = xrc.XRCCTRL(self, "textCtrlToSpec")
self.labelToSpecValid = xrc.XRCCTRL(self, "labelToSpecValid")
self.labelCC = xrc.XRCCTRL(self, "labelCC")
self.labelCCFileName = xrc.XRCCTRL(self, "labelCCFileName")
self.textCtrlCCFileName = xrc.XRCCTRL(self, "textCtrlCCFileName")
self.labelCCSourceValid = xrc.XRCCTRL(self, "labelCCSourceValid")
self.labelCCSpec = xrc.XRCCTRL(self, "labelCCSpec")
self.textCtrlCCSpec = xrc.XRCCTRL(self, "textCtrlCCSpec")
self.labelCCSpecValid = xrc.XRCCTRL(self, "labelCCSpecValid")
self.labelBCC = xrc.XRCCTRL(self, "labelBCC")
self.labelCtrlBCCFileName = xrc.XRCCTRL(self, "labelCtrlBCCFileName")
self.textCtrlBCCFileName = xrc.XRCCTRL(self, "textCtrlBCCFileName")
self.labelBCCSourceValid = xrc.XRCCTRL(self, "labelBCCSourceValid")
self.labelNumBCCSpec = xrc.XRCCTRL(self, "labelNumBCCSpec")
self.textCtrlBCCSpec = xrc.XRCCTRL(self, "textCtrlBCCSpec")
self.labelBCCSpecValid = xrc.XRCCTRL(self, "labelBCCSpecValid")
self.addressFieldNote = xrc.XRCCTRL(self, "addressFieldNote")
self.buttonGenerate = xrc.XRCCTRL(self, "buttonGenerate")
self.buttonCancel = xrc.XRCCTRL(self, "buttonCancel")
self.buttonSave = xrc.XRCCTRL(self, "buttonSave")
self.buttonRestore = xrc.XRCCTRL(self, "buttonRestore")
self.buttonHelp = xrc.XRCCTRL(self, "buttonHelp")
# ------------------------ end auto generated code ----------------------
def OnCancel(self, evt):
self.Show(show=False)
self.Close()
    def OnSave(self, evt):
        fname = 'itemGeneratorSettings.pickle'
        try:
            f = open(fname, 'wb')
        except IOError:
            print "\n\nUnable to open %s\n\n" % fname
            return
        try:
            pickle.dump(self.valuesToDict(), f)
        finally:
            f.close()
    def OnRestore(self, evt):
        fname = 'itemGeneratorSettings.pickle'
        try:
            f = open(fname, 'rb')
        except IOError:
            print "\n\nUnable to open %s\n\n" % fname
            return
        try:
            valueDict = pickle.load(f)
        finally:
            f.close()
        self.restoreDialogValues(valueDict)
def OnHelp(self, evt):
self.helpWin = itemGenHelp_xrc.xrcHelpWin(self)
self.helpWin.Show()
def valuesToDict(self):
""" returns a dict containing all dialog values as ctrlName:ctrlValue """
dialogValues = {}
for ctrl in [ctrl for ctrl in dir(self)]:
if 'textCtrl' in ctrl:
dialogValues[ctrl] = self.__dict__[ctrl].GetValue()
elif 'choice' in ctrl:
dialogValues[ctrl] = self.__dict__[ctrl].GetStringSelection()
return dialogValues
def restoreDialogValues(self, valueDict):
"""given a dict of ctrlName:ctrlValue pairs repopulates dialog with values"""
for ctrl in valueDict.iterkeys():
if 'textCtrl' in ctrl:
self.__dict__[ctrl].SetValue(valueDict[ctrl])
if 'choice' in ctrl:
self.__dict__[ctrl].SetStringSelection(valueDict[ctrl])
def OnGenerate(self, evt):
if self.validate():
self.Close()
createItems.createItems(self.valuesToDict())
else:
msg ="Some input is invalid. Please correct input with 'Error' to the right of it and try again"
wx.MessageDialog(self, msg, caption='Input error', style=wx.OK, pos=wx.DefaultPosition).ShowModal()
def InitButtons(self):
wx.EVT_BUTTON(self, xrc.XRCID("buttonCancel"), self.OnCancel)
wx.EVT_BUTTON(self, xrc.XRCID("buttonGenerate"), self.OnGenerate)
wx.EVT_BUTTON(self, xrc.XRCID("buttonSave"), self.OnSave)
wx.EVT_BUTTON(self, xrc.XRCID("buttonRestore"), self.OnRestore)
wx.EVT_BUTTON(self, xrc.XRCID("buttonHelp"), self.OnHelp)
## validate dialog inputs
def isNumericAndNonZero(self, ctrl):
"""Test that a control contains a non zero numeric value"""
val = ctrl.GetValue()
return val.isdigit() and int(val) > 0
def isValidPath(self, path):
"""Test that data file exists"""
if os.path.lexists(path):
return True
elif os.path.lexists(os.path.join(Globals.chandlerDirectory, 'projects/Chandler-debugPlugin/debug', path)):
return True
return False
def collectionCountNotExceeded(self, spec, totalCollectionCount):
"""Test that no membership spec requires more collections than the total
number of collections created (totalCollectionCount).
"""
collectionCounts = [int(x.strip()[0]) for x in spec.split(',')]
collectionCounts.sort()
return collectionCounts.pop() <= totalCollectionCount
def sumValueIs100(self, *controls):
"""Returns true if the sum of all _choice_ control values is == 100"""
sum = 0
for ctrl in controls:
sum += int(ctrl.GetStringSelection())
return sum == 100
def datesCorrect(self, start, end):
"""Test that start and end are valid dates and start is before end"""
try:
y,m,d = start.split(',')
startDate = date(int(y),int(m),int(d))
y,m,d = end.split(',')
endDate = date(int(y),int(m),int(d))
diff = endDate - startDate
return diff.days > 0
except:
return False
def tryToProcess(self, func, *args):
"""Try to process dialog textValue with its associated function.
Return True if no errors"""
try:
func(*args)
except:
return False
return True
def validate(self):
"""Attempts to check that all settings in dialog are valid."""
if bool(wx.GetApp()):
view = QAUITestAppLib.App_ns.itsView
else: # when running unit tests there is no app
view = NullRepositoryView(verify=True)
tzinfo = view.tzinfo.getDefault()
self.output = True
def _markValid(isValid, label):
"""mark control as 'Valid' or 'Error'"""
if isValid:
label.ForegroundColour='black'
label.SetLabel('Valid')
else:
label.ForegroundColour='red'
label.SetLabel('Error')
self.output = False
# test total items
result = self.isNumericAndNonZero(self.textCtrlTotalItems)
_markValid(result, self.labelNumOfItemsValid)
# mark stamp percentages valid (all possible input is valid)
_markValid(True, self.labelStampsValid)
# test title source
result = self.isValidPath(self.textCtrlTitleSourceFile.GetValue())
_markValid(result, self.labelTitleSourceValid)
# test note source
result = self.isValidPath(self.textCtrlNoteSourceFilePath.GetValue())
_markValid(result, self.labelNoteSourceValid)
# test collection source
result = self.isValidPath(self.textCtrlCollectionFileName.GetValue())
_markValid(result, self.labelCollectionSourceValid)
# test number of collections
result = self.isNumericAndNonZero(self.textCtrlCollectionCount)
_markValid(result, self.labelCollectionCountValid)
# test collection membership
membershipSpec = self.textCtrlCollectionMembership.GetValue()
totalCollectionCount = int(self.textCtrlCollectionCount.GetValue())
result = self.tryToProcess(createItems.createMembershipIndex, membershipSpec, totalCollectionCount) \
and \
self.collectionCountNotExceeded(membershipSpec, totalCollectionCount)
_markValid(result, self.labelCollectionMembershipValid)
# test location source
result = self.isValidPath(self.textCtrlLocationSourceFilePath.GetValue())
_markValid(result, self.labelLocationSourceValid)
# test triage percentaqes
result = self.sumValueIs100(self.choicePercentUnassignedStatus,
self.choicePercentNow,
self.choicePercentLater,
self.choicePercentDone )
_markValid(result, self.labelTriageValid)
# test start/ end dates
result = self.datesCorrect(self.textCtrlStartDate.GetValue(), self.textCtrlEndDate.GetValue())
_markValid(result, self.labelDateRangeValid)
# test time of day spec
result = self.tryToProcess(createItems.createStartTimeRange,self.textCtrlTimeOfDay.GetValue(), [1,2,3])
_markValid(result, self.labelTimeOfDaySpecValid)
# test duration spec
result = self.tryToProcess(createItems.createDurationIndex, self.textCtrlDuration.GetValue(), [1,2,3])
_markValid(result, self.labelDurationSpecValid)
# test duration type percentages
result = self.sumValueIs100(self.choicePercentAllDay,
self.choicePercentAtTime,
self.choicePercentAnyTime,
self.choicePercentDuration)
_markValid(result, self.labelDurationTypesValid)
# test status percentages
result = self.sumValueIs100(self.choicePercentConfirmed,
self.choicePercentTentative,
self.choicePercentFYI)
_markValid(result, self.labelStatusValid)
# test recurrence percentages
result = self.sumValueIs100(self.choicePercentNonRecurring,
self.choicePercentDaily,
self.choicePercentWeekly,
self.choicePercentBiWeekly,
self.choicePercentMonthly,
self.choicePercentYearly)
_markValid(result, self.labelRecurrenceValid)
# test recurrence end date spec
result = self.tryToProcess(createItems.createEndDateIndex, self.textCtrlRecurrenceEndDates.GetValue(), [1,2,3])
_markValid(result, self.labelRecurrenceEndDateValid)
# test alarm spec
result = self.tryToProcess(createItems.createAlarmIndex, self.textCtrlAlarmSpec.GetValue(), [1,2,3], [1,2,3,4], tzinfo)
_markValid(result, self.labelAlarmTypeValid)
# test To source file
result = self.isValidPath(self.textCtrlToFile.GetValue())
_markValid(result, self.labelToSourceValid)
# test To spec
result = self.tryToProcess(createItems.createAddressIndex, [1,2,3], self.textCtrlToSpec.GetValue(), [1,2,3])
_markValid(result, self.labelToSpecValid)
# test CC source file
result = self.isValidPath(self.textCtrlCCFileName.GetValue())
_markValid(result, self.labelCCSourceValid)
# test CC spec
result = self.tryToProcess(createItems.createAddressIndex, [1,2,3], self.textCtrlCCSpec.GetValue(), [1,2,3])
_markValid(result, self.labelCCSpecValid)
# test BCC source file
result = self.isValidPath(self.textCtrlBCCFileName.GetValue())
_markValid(result, self.labelBCCSourceValid)
# test To spec
result = self.tryToProcess(createItems.createAddressIndex, [1,2,3], self.textCtrlBCCSpec.GetValue(), [1,2,3])
_markValid(result, self.labelBCCSpecValid)
return self.output
def __init_resources():
global __res
xml = pkg_resources.resource_string(__name__, 'ItemGenerator.xrc')
__res = xrc.EmptyXmlResource()
__res.LoadFromString(xml)
def show():
dialogWin = xrcFRAME1(None)
dialogWin.Show()
def showStandAlone():
    app = wx.PySimpleApp()
    dialog = xrcFRAME1(None)
    dialog.Show()
    app.MainLoop()
if __name__ == '__main__':
showStandAlone() | PypiClean |
/Bubot_Helpers-0.0.14-py3-none-any.whl/Bubot/Helpers/Action.py | import json
from time import time
class Action:
def __init__(self, name=None, begin=True, *, group='other'):
self.name = name
self.param = {}
# self.error = None
self.group = group
self.result = None
self.begin = None
self.end = None
self.time = 0
self.total_time = 0
self.stat = {}
if begin:
self.set_begin()
def set_begin(self):
self.begin = time()
def set_end(self, result=None):
if self.end:
return self
self.end = time()
if not self.begin:
self.begin = self.end
self.total_time = round(self.end - self.begin, 3)
if result is not None:
self.result = result
if self.name:
self.update_stat(self.name, [self.total_time - self.time, 1], self.group)
return self
def add_stat(self, action):
if not isinstance(action, Action):
return action
if hasattr(action, 'group'):
for group in action.stat:
for elem in action.stat[group]:
self.update_stat(elem, action.stat[group][elem], group)
else:
for elem in action.stat:
self.update_stat(elem, action.stat[elem])
return action.result
def update_stat(self, name, stat, group='other'):
self.time += stat[0]
if group not in self.stat:
self.stat[group] = {}
if name not in self.stat[group]:
self.stat[group][name] = stat
else:
self.stat[group][name][1] += stat[1]
self.stat[group][name][0] += stat[0]
pass
# def __bool__(self):
# return False if self.error else True
# def __str__(self):
# pass
def to_dict(self):
return {
'result': self.result,
'stat': {
'action': self.name,
'time': self.total_time,
'detail': self.stat
}
}
def dump(self):
return json.dumps(self.to_dict(), ensure_ascii=False)
pass
@classmethod
def loads(cls, json_string):
_tmp = json.loads(json_string)
self = cls(_tmp.get('name'), _tmp.get('begin', None))
self.result = _tmp.get('result', None)
self.end = _tmp.get('end', None)
self.time = _tmp.get('time', 0)
self.stat = _tmp.get('stat', {})
return self | PypiClean |
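# Hedged usage sketch (illustrative only, not part of the original module):
# outer = Action('handle_request', group='web')
# inner = Action('db_query', group='db')
# ...  # timed work happens here
# inner.set_end(result=[1, 2, 3])
# rows = outer.add_stat(inner)  # merges inner's timing stats and returns its result
# outer.set_end(result={'rows': rows})
# print(outer.dump())  # JSON with the result plus per-group timing detail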
/BittyTax-0.5.1.tar.gz/BittyTax-0.5.1/src/bittytax/transactions.py |
import copy
import sys
from colorama import Fore, Style
from tqdm import tqdm
from .config import config
from .record import TransactionRecord
class TransactionHistory(object):
def __init__(self, transaction_records, value_asset):
self.value_asset = value_asset
self.transactions = []
if config.debug:
print("%ssplit transaction records" % Fore.CYAN)
for tr in tqdm(
transaction_records,
unit="tr",
desc="%ssplit transaction records%s" % (Fore.CYAN, Fore.GREEN),
disable=bool(config.debug or not sys.stdout.isatty()),
):
if config.debug:
print("%ssplit: TR %s" % (Fore.MAGENTA, tr))
self.get_all_values(tr)
# Attribute the fee value (allowable cost) to the buy, the sell or both
if tr.fee and tr.fee.disposal and tr.fee.proceeds:
if tr.buy and tr.buy.acquisition and tr.sell and tr.sell.disposal:
if tr.buy.asset in config.fiat_list:
tr.sell.fee_value = tr.fee.proceeds
tr.sell.fee_fixed = tr.fee.proceeds_fixed
elif tr.sell.asset in config.fiat_list:
tr.buy.fee_value = tr.fee.proceeds
tr.buy.fee_fixed = tr.fee.proceeds_fixed
else:
# Crypto-to-crypto trades
if config.trade_allowable_cost_type == config.TRADE_ALLOWABLE_COST_BUY:
tr.buy.fee_value = tr.fee.proceeds
tr.buy.fee_fixed = tr.fee.proceeds_fixed
elif config.trade_allowable_cost_type == config.TRADE_ALLOWABLE_COST_SELL:
tr.sell.fee_value = tr.fee.proceeds
tr.sell.fee_fixed = tr.fee.proceeds_fixed
else:
# Split fee between both
tr.buy.fee_value = tr.fee.proceeds / 2
tr.buy.fee_fixed = tr.fee.proceeds_fixed
tr.sell.fee_value = tr.fee.proceeds - tr.buy.fee_value
tr.sell.fee_fixed = tr.fee.proceeds_fixed
elif tr.buy and tr.buy.acquisition:
tr.buy.fee_value = tr.fee.proceeds
tr.buy.fee_fixed = tr.fee.proceeds_fixed
elif tr.sell and tr.sell.disposal:
tr.sell.fee_value = tr.fee.proceeds
tr.sell.fee_fixed = tr.fee.proceeds_fixed
else:
# Special case for transfer fees
if config.transfer_fee_allowable_cost:
tr.fee.fee_value = tr.fee.proceeds
tr.fee.fee_fixed = tr.fee.proceeds_fixed
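            # Worked example (illustrative): a crypto-to-crypto trade whose fee is valued
            # at 10 GBP, with neither the buy-only nor the sell-only allowable-cost option
            # configured, attributes 5 GBP to the buy and 5 GBP to the sell as allowable costs.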
if tr.t_type != TransactionRecord.TYPE_LOST:
if tr.buy and (tr.buy.quantity or tr.buy.fee_value):
tr.buy.set_tid()
self.transactions.append(tr.buy)
if config.debug:
print("%ssplit: %s" % (Fore.GREEN, tr.buy))
if tr.sell and (tr.sell.quantity or tr.sell.fee_value):
tr.sell.set_tid()
self.transactions.append(tr.sell)
if config.debug:
print("%ssplit: %s" % (Fore.GREEN, tr.sell))
else:
                # Special case for LOST: the sell must come before the buy-back
if tr.sell and (tr.sell.quantity or tr.sell.fee_value):
tr.sell.set_tid()
self.transactions.append(tr.sell)
if config.debug:
print("%ssplit: %s" % (Fore.GREEN, tr.sell))
if tr.buy and (tr.buy.quantity or tr.buy.fee_value):
tr.buy.set_tid()
self.transactions.append(tr.buy)
if config.debug:
print("%ssplit: %s" % (Fore.GREEN, tr.buy))
if tr.fee and tr.fee.quantity:
tr.fee.set_tid()
self.transactions.append(tr.fee)
if config.debug:
print("%ssplit: %s" % (Fore.GREEN, tr.fee))
if config.debug:
print("%ssplit: total transactions=%d" % (Fore.CYAN, len(self.transactions)))
def get_all_values(self, tr):
if tr.buy and tr.buy.acquisition and tr.buy.cost is None:
if tr.sell:
(tr.buy.cost, tr.buy.cost_fixed) = self.which_asset_value(tr)
else:
(tr.buy.cost, tr.buy.cost_fixed) = self.value_asset.get_value(
tr.buy.asset, tr.buy.timestamp, tr.buy.quantity
)
if tr.sell and tr.sell.disposal and tr.sell.proceeds is None:
if tr.buy:
tr.sell.proceeds = tr.buy.cost
tr.sell.proceeds_fixed = tr.buy.cost_fixed
else:
(tr.sell.proceeds, tr.sell.proceeds_fixed) = self.value_asset.get_value(
tr.sell.asset, tr.sell.timestamp, tr.sell.quantity
)
if tr.fee and tr.fee.disposal and tr.fee.proceeds is None:
if tr.fee.asset not in config.fiat_list:
if tr.buy and tr.buy.asset == tr.fee.asset:
if tr.buy.cost and tr.buy.quantity:
price = tr.buy.cost / tr.buy.quantity
tr.fee.proceeds = tr.fee.quantity * price
tr.fee.proceeds_fixed = tr.buy.cost_fixed
else:
(
tr.fee.proceeds,
tr.fee.proceeds_fixed,
) = self.value_asset.get_value(
tr.fee.asset, tr.fee.timestamp, tr.fee.quantity
)
elif tr.sell and tr.sell.asset == tr.fee.asset:
if tr.sell.proceeds and tr.sell.quantity:
price = tr.sell.proceeds / tr.sell.quantity
tr.fee.proceeds = tr.fee.quantity * price
tr.fee.proceeds_fixed = tr.sell.proceeds_fixed
else:
(
tr.fee.proceeds,
tr.fee.proceeds_fixed,
) = self.value_asset.get_value(
tr.fee.asset, tr.fee.timestamp, tr.fee.quantity
)
else:
# Must be a 3rd cryptoasset
(
tr.fee.proceeds,
tr.fee.proceeds_fixed,
) = self.value_asset.get_value(tr.fee.asset, tr.fee.timestamp, tr.fee.quantity)
else:
# Fee paid in fiat
(tr.fee.proceeds, tr.fee.proceeds_fixed) = self.value_asset.get_value(
tr.fee.asset, tr.fee.timestamp, tr.fee.quantity
)
def which_asset_value(self, tr):
if config.trade_asset_type == config.TRADE_ASSET_TYPE_BUY:
if tr.buy.cost is None:
value, fixed = self.value_asset.get_value(
tr.buy.asset, tr.buy.timestamp, tr.buy.quantity
)
else:
value, fixed = tr.buy.cost, tr.buy.cost_fixed
elif config.trade_asset_type == config.TRADE_ASSET_TYPE_SELL:
if tr.sell.proceeds is None:
value, fixed = self.value_asset.get_value(
tr.sell.asset, tr.sell.timestamp, tr.sell.quantity
)
else:
value, fixed = tr.sell.proceeds, tr.sell.proceeds_fixed
else:
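            # Neither side forced: value using whichever asset appears
            # earlier in config.asset_priority (unlisted assets rank last,
            # ties favour the sell side).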
pos_sell_asset = pos_buy_asset = len(config.asset_priority) + 1
if tr.sell.asset in config.asset_priority:
pos_sell_asset = config.asset_priority.index(tr.sell.asset)
if tr.buy.asset in config.asset_priority:
pos_buy_asset = config.asset_priority.index(tr.buy.asset)
if pos_sell_asset <= pos_buy_asset:
if tr.sell.proceeds is None:
value, fixed = self.value_asset.get_value(
tr.sell.asset, tr.sell.timestamp, tr.sell.quantity
)
else:
value, fixed = tr.sell.proceeds, tr.sell.proceeds_fixed
else:
if tr.buy.cost is None:
value, fixed = self.value_asset.get_value(
tr.buy.asset, tr.buy.timestamp, tr.buy.quantity
)
else:
value, fixed = tr.buy.cost, tr.buy.cost_fixed
return value, fixed
class TransactionBase(object): # pylint: disable=too-many-instance-attributes
def __init__(self, t_type, asset, quantity):
self.tid = None
self.t_record = None
self.t_type = t_type
self.asset = asset
self.quantity = quantity
self.fee_value = None
self.fee_fixed = True
self.wallet = None
self.timestamp = None
self.note = None
self.matched = False
self.pooled = []
def set_tid(self):
self.tid = self.t_record.set_tid()
def is_crypto(self):
return bool(self.asset not in config.fiat_list)
def _format_tid(self):
return "%s.%s" % (self.tid[0], self.tid[1])
def _format_quantity(self):
if self.quantity is None:
return ""
return "{:0,f}".format(self.quantity.normalize())
def _format_asset(self):
if sys.version_info[0] < 3:
return self.asset.decode("utf8")
return self.asset
def _format_wallet(self):
if sys.version_info[0] < 3:
return self.wallet.decode("utf8")
return self.wallet
def _format_note(self):
if self.note:
if sys.version_info[0] < 3:
return "'%s' " % self.note.decode("utf8")
return "'%s' " % self.note
return ""
def _format_pooled(self, bold=False):
if self.pooled:
return " %s(%s)%s" % (
Style.BRIGHT if bold else "",
len(self.pooled),
Style.NORMAL if bold else "",
)
return ""
def _format_fee(self):
if self.fee_value is not None:
return " + fee=%s%s %s" % (
"" if self.fee_fixed else "~",
config.sym() + "{:0,.2f}".format(self.fee_value),
config.ccy,
)
return ""
def _format_timestamp(self):
if self.timestamp.microsecond:
return self.timestamp.strftime("%Y-%m-%dT%H:%M:%S.%f %Z")
return self.timestamp.strftime("%Y-%m-%dT%H:%M:%S %Z")
def __eq__(self, other):
return (self.asset, self.timestamp) == (other.asset, other.timestamp)
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return (self.asset, self.timestamp) < (other.asset, other.timestamp)
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k == "t_record":
# Keep reference to the transaction record
setattr(result, k, v)
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
class Buy(TransactionBase): # pylint: disable=too-many-instance-attributes
TYPE_DEPOSIT = TransactionRecord.TYPE_DEPOSIT
TYPE_MINING = TransactionRecord.TYPE_MINING
TYPE_STAKING = TransactionRecord.TYPE_STAKING
TYPE_INTEREST = TransactionRecord.TYPE_INTEREST
TYPE_DIVIDEND = TransactionRecord.TYPE_DIVIDEND
TYPE_INCOME = TransactionRecord.TYPE_INCOME
TYPE_GIFT_RECEIVED = TransactionRecord.TYPE_GIFT_RECEIVED
TYPE_AIRDROP = TransactionRecord.TYPE_AIRDROP
TYPE_TRADE = TransactionRecord.TYPE_TRADE
ACQUISITION_TYPES = {
TYPE_MINING,
TYPE_STAKING,
TYPE_INTEREST,
TYPE_DIVIDEND,
TYPE_INCOME,
TYPE_GIFT_RECEIVED,
TYPE_AIRDROP,
TYPE_TRADE,
}
def __init__(self, t_type, buy_quantity, buy_asset, buy_value):
super(Buy, self).__init__(t_type, buy_asset, buy_quantity)
self.acquisition = bool(self.t_type in self.ACQUISITION_TYPES)
self.cost = None
self.cost_fixed = False
if self.acquisition and buy_value is not None:
self.cost = buy_value
self.cost_fixed = True
def __iadd__(self, other):
if not self.pooled:
self.pooled.append(copy.deepcopy(self))
# Pool buys
if self.asset != other.asset:
raise ValueError("Assets do not match")
self.quantity += other.quantity
self.cost += other.cost
if self.fee_value is not None and other.fee_value is not None:
self.fee_value += other.fee_value
elif self.fee_value is None and other.fee_value is not None:
self.fee_value = other.fee_value
if other.timestamp < self.timestamp:
# Keep timestamp of earliest transaction
self.timestamp = other.timestamp
if other.wallet != self.wallet:
self.wallet = "<pooled>"
if other.cost_fixed != self.cost_fixed:
self.cost_fixed = False
if other.fee_fixed != self.fee_fixed:
self.fee_fixed = False
if other.note != self.note:
self.note = "<pooled>"
self.pooled.append(other)
return self
def split_buy(self, sell_quantity):
remainder = copy.deepcopy(self)
self.cost = self.cost * (sell_quantity / self.quantity)
if self.fee_value:
self.fee_value = self.fee_value * (sell_quantity / self.quantity)
self.quantity = sell_quantity
self.set_tid()
# pylint: disable=attribute-defined-outside-init
remainder.cost = remainder.cost - self.cost
# pylint: enable=attribute-defined-outside-init
if self.fee_value:
remainder.fee_value = remainder.fee_value - self.fee_value
remainder.quantity = remainder.quantity - sell_quantity
remainder.set_tid()
return remainder
def _format_cost(self):
if self.cost is not None:
return " (%s%s %s)" % (
"=" if self.cost_fixed else "~",
config.sym() + "{:0,.2f}".format(self.cost),
config.ccy,
)
return ""
def __str__(self, pooled_bold=False, quantity_bold=False):
return "%s%s %s%s %s %s%s%s%s '%s' %s %s[TID:%s]%s" % (
type(self).__name__.upper(),
"*" if not self.acquisition else "",
self.t_type,
Style.BRIGHT if quantity_bold else "",
self._format_quantity(),
self._format_asset(),
Style.NORMAL if quantity_bold else "",
self._format_cost(),
self._format_fee(),
self._format_wallet(),
self._format_timestamp(),
self._format_note(),
self._format_tid(),
self._format_pooled(pooled_bold),
)
class Sell(TransactionBase): # pylint: disable=too-many-instance-attributes
TYPE_WITHDRAWAL = TransactionRecord.TYPE_WITHDRAWAL
TYPE_SPEND = TransactionRecord.TYPE_SPEND
TYPE_GIFT_SENT = TransactionRecord.TYPE_GIFT_SENT
TYPE_GIFT_SPOUSE = TransactionRecord.TYPE_GIFT_SPOUSE
TYPE_CHARITY_SENT = TransactionRecord.TYPE_CHARITY_SENT
TYPE_LOST = TransactionRecord.TYPE_LOST
TYPE_TRADE = TransactionRecord.TYPE_TRADE
DISPOSAL_TYPES = {
TYPE_SPEND,
TYPE_GIFT_SENT,
TYPE_GIFT_SPOUSE,
TYPE_CHARITY_SENT,
TYPE_LOST,
TYPE_TRADE,
}
def __init__(self, t_type, sell_quantity, sell_asset, sell_value):
super(Sell, self).__init__(t_type, sell_asset, sell_quantity)
self.disposal = bool(self.t_type in self.DISPOSAL_TYPES)
self.proceeds = None
self.proceeds_fixed = False
if self.disposal and sell_value is not None:
self.proceeds = sell_value
self.proceeds_fixed = True
def __iadd__(self, other):
if not self.pooled:
self.pooled.append(copy.deepcopy(self))
# Pool sells
if self.asset != other.asset:
raise ValueError("Assets do not match")
self.quantity += other.quantity
self.proceeds += other.proceeds
if self.fee_value is not None and other.fee_value is not None:
self.fee_value += other.fee_value
elif self.fee_value is None and other.fee_value is not None:
self.fee_value = other.fee_value
if other.timestamp > self.timestamp:
# Keep timestamp of latest transaction
self.timestamp = other.timestamp
if other.wallet != self.wallet:
self.wallet = "<pooled>"
if other.proceeds_fixed != self.proceeds_fixed:
self.proceeds_fixed = False
if other.fee_fixed != self.fee_fixed:
self.fee_fixed = False
if other.note != self.note:
self.note = "<pooled>"
self.pooled.append(other)
return self
def split_sell(self, buy_quantity):
remainder = copy.deepcopy(self)
self.proceeds = self.proceeds * (buy_quantity / self.quantity)
if self.fee_value:
self.fee_value = self.fee_value * (buy_quantity / self.quantity)
self.quantity = buy_quantity
self.set_tid()
# pylint: disable=attribute-defined-outside-init
remainder.proceeds = remainder.proceeds - self.proceeds
# pylint: enable=attribute-defined-outside-init
if self.fee_value:
remainder.fee_value = remainder.fee_value - self.fee_value
remainder.quantity = remainder.quantity - buy_quantity
remainder.set_tid()
return remainder
def _format_proceeds(self):
if self.proceeds is not None:
return " (%s%s %s)" % (
"=" if self.proceeds_fixed else "~",
config.sym() + "{:0,.2f}".format(self.proceeds),
config.ccy,
)
return ""
def __str__(self, pooled_bold=False, quantity_bold=False):
return "%s%s %s%s %s %s%s%s%s '%s' %s %s[TID:%s]%s" % (
type(self).__name__.upper(),
"*" if not self.disposal else "",
self.t_type,
Style.BRIGHT if quantity_bold else "",
self._format_quantity(),
self._format_asset(),
Style.NORMAL if quantity_bold else "",
self._format_proceeds(),
self._format_fee(),
self._format_wallet(),
self._format_timestamp(),
self._format_note(),
self._format_tid(),
self._format_pooled(pooled_bold),
) | PypiClean |
/Hooover-0.0.1.tar.gz/Hooover-0.0.1/hooover/handlers.py |
'''Loggly handlers for the Python logging library.'''
import logging
try:
from simplejson import dumps
except ImportError:
from json import dumps
from logging.handlers import SysLogHandler
from hooover.session import LogglySession
from hooover.utils import async_post_to_endpoint
class LogglyHttpHandler(logging.Handler):
def __init__(self, session=None, token='', inputname='', input=None,
announce=False, json_class=None, secure=True, proxy=None):
logging.Handler.__init__(self)
if inputname:
# TODO: raise something appropriate if session is None
input = session.get_input_by_name(inputname)
if input:
self.inputobj = input
try:
token = input.input_token
self.inputname = input.name
except AttributeError:
raise ValueError('This is not an HTTP input')
session = session or LogglySession
self.token = token
protocol = secure and 'https' or 'http'
proxy = proxy or session.proxy
self.endpoint = "%s://%s/inputs/%s" % (protocol, proxy, token)
self.json_class = json_class
# TODO: verify we can write to the input
if announce:
# TODO: grab this boxes' IP, and announce logging to the input
pass
def emit(self, record):
if isinstance(record.msg, (list, dict)):
record.msg = dumps(record.msg, cls=self.json_class, default=str)
msg = self.format(record)
async_post_to_endpoint(self.endpoint, msg)
class LogglySyslogHandler(SysLogHandler):
def __init__(self, session=None, port=None, inputname='', input=None,
announce=False, authorize=True, **kwargs):
#TODO: avoid duplication with __init__ above
if inputname:
# raise if no session
input = session.get_input_by_name(inputname)
if input:
self.inputobj = input
try:
port = input.port
self.inputname = input.name
except AttributeError:
raise ValueError("This doesn't look like a syslog input")
if authorize:
if port == 514:
# raise if no session
session._api_help('api/inputs/%s/add514' % input.id)
else:
session._api_help('api/inputs/%s/adddevice' % input.id,
method='POST')
self.port = port
session = session or LogglySession
SysLogHandler.__init__(self, address=(session.proxy, port),
**kwargs) | PypiClean |
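# Illustrative usage sketch (not part of the original module); the account
# credentials, input name and LogglySession signature shown here are
# placeholders/assumptions:
#
#   import logging
#   from hooover.session import LogglySession
#   from hooover.handlers import LogglyHttpHandler
#
#   session = LogglySession('subdomain', 'user', 'password')  # assumed signature
#   handler = LogglyHttpHandler(session=session, inputname='my-http-input')
#   log = logging.getLogger('demo')
#   log.addHandler(handler)
#   log.info({'event': 'startup'})  # dicts/lists are JSON-encoded by emit()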
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/drinks_wages.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def drinks_wages(path):
"""Elderton and Pearson's (1910) data on drinking and wages
In 1910, Karl Pearson weighed in on the debate, fostered by the
temperance movement, on the evils done by alcohol not only to drinkers,
but to their families. The report "A first study of the influence of
  parental alcoholism on the physique and ability of their offspring" was
  an ambitious attempt to bring the new methods of statistics to bear on an
important question of social policy, to see if the hypothesis that
children were damaged by parental alcoholism would stand up to
statistical scrutiny.
Working with his assistant, Ethel M. Elderton, Pearson collected
voluminous data in Edinburgh and Manchester on many aspects of health,
stature, intelligence, etc. of children classified according to the
  drinking habits of their parents. His conclusions were almost
invariably negative: the tendency of parents to drink appeared unrelated
  to anything he had measured.
The firestorm that this report set off is well described by Stigler
  (1999), Chapter 1. The data set `DrinksWages` is just one of Pearson's
  many tables, which he published in a letter to *The Times*, August 10,
1910.
A data frame with 70 observations on the following 6 variables, giving
the number of non-drinkers (`sober`) and drinkers (`drinks`) in
various occupational categories (`trade`).
`class`
wage class: a factor with levels `A` `B` `C`
`trade`
a factor with levels `baker` `barman` `billposter` ...
`wellsinker` `wireworker`
`sober`
the number of non-drinkers, a numeric vector
`drinks`
the number of drinkers, a numeric vector
`wage`
weekly wage (in shillings), a numeric vector
`n`
total number, a numeric vector
Pearson, K. (1910). *The Times*, August 10, 1910.
Stigler, S. M. (1999). *Statistics on the Table: The History of
Statistical Concepts and Methods*. Harvard University Press, Table 1.1
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `drinks_wages.csv`.
Returns:
Tuple of np.ndarray `x_train` with 70 rows and 6 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'drinks_wages.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HistData/DrinksWages.csv'
maybe_download_and_extract(path, url,
save_file_name='drinks_wages.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | PypiClean |
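# Illustrative usage sketch (not part of the original module); the data
# directory is a placeholder and the CSV is downloaded there on first use:
#
#   x_train, metadata = drinks_wages('~/observations_data')
#   x_train.shape              # (70, 6)
#   list(metadata['columns'])  # class, trade, sober, drinks, wage, n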
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/address/sl_SI/__init__.py |
from .. import Provider as AddressProvider
class Provider(AddressProvider):
city_formats = ("{{city_name}}",)
street_name_formats = ("{{street_name}}",)
street_address_formats = ("{{street_name}} {{building_number}}",)
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("###", "##", "#", "#a", "#b", "#c")
postcode_formats = ("####",)
cities = (
"Ajdovščina",
"Bled",
"Bovec",
"Brežice",
"Celje",
"Cerknica",
"Črnomelj",
"Domžale",
"Dravograd",
"Gornja Radgona",
"Gornji Grad",
"Grosuplje",
"Hrastnik",
"Idrija",
"Ilirska Bistrica",
"Izola",
"Jesenice",
"Kamnik",
"Kobarid",
"Kočevje",
"Koper",
"Kostanjevica na Krki",
"Kranj",
"Krško",
"Laško",
"Lenart v Slovenskih goricah",
"Lendava",
"Litija",
"Ljubljana",
"Ljutomer",
"Logatec",
"Maribor",
"Medvode",
"Mengeš",
"Metlika",
"Mežica",
"Murska Sobota",
"Nova Gorica",
"Novo mesto",
"Ormož",
"Piran",
"Postojna",
"Prevalje",
"Ptuj",
"Radeče",
"Radovljica",
"Ravne na Koroškem",
"Ribnica",
"Rogaška Slatina",
"Ruše",
"Sevnica",
"Sežana",
"Slovenj Gradec",
"Slovenska Bistrica",
"Slovenske Konjice",
"Šempeter pri Gorici",
"Šentjur",
"Škofja Loka",
"Šoštanj",
"Tolmin",
"Trbovlje",
"Trebnje",
"Tržič",
"Turnišče",
"Velenje",
"Vipava",
"Vipavski Križ",
"Višnja Gora",
"Vrhnika",
"Zagorje ob Savi",
"Žalec",
"Železniki",
"Žiri",
)
streets = (
"Abramova ulica",
"Adamičeva ulica",
"Adamič-Lundrovo nabrežje",
"Ajdovščina",
"Aleševa ulica",
"Alešovčeva ulica",
"Aljaževa ulica",
"Ambrožev trg",
"Ameriška ulica",
"Andrićeva ulica",
"Anžurjeva ulica",
"Apihova ulica",
"Argentinska ulica",
"Arharjeva cesta",
"Arkova ulica",
"Artačeva ulica",
"Aškerčeva cesta",
"Avčinova ulica",
"Avsečeva ulica",
"Avstrijska ulica",
"Avšičeva cesta",
"Ažmanova ulica",
"Babičeva ulica",
"Badjurova ulica",
"Balinarska pot",
"Baragova ulica",
"Barjanska cesta",
"Bavdkova ulica",
"Baznikova ulica",
"Bazoviška ulica",
"Beethovnova ulica",
"Belačeva ulica",
"Beljaška ulica",
"Berčičeva ulica",
"Berčonova pot",
"Berdajsova ulica",
"Bernekerjeva ulica",
"Bernikova ulica",
"Betettova cesta",
"Bezenškova ulica",
"Bežigrad",
"Bičevje",
"Bilečanska ulica",
"Bitenčeva ulica",
"Bizjakova ulica",
"Bizjanova ulica",
"Bizovški štradon",
"Blasnikova ulica",
"Blasov breg",
"Bleiweisova cesta",
"Bobenčkova ulica",
"Bobrova ulica",
"Bognarjeva pot",
"Bohinjčeva ulica",
"Bohoričeva ulica",
"Boletova ulica",
"Bolgarska ulica",
"Borovniška ulica",
"Borštnikov trg",
"Borutova ulica",
"Božičeva ulica",
"Brankova ulica",
"Bratinova ulica",
"Bratislavska cesta",
"Bratov Jakopičev ulica",
"Bratov Kunovarjev ulica",
"Bravničarjeva ulica",
"Brdnikova ulica",
"Breg",
"Bregarjeva ulica",
"Breznikova ulica",
"Brglezov štradon",
"Brilejeva ulica",
"Brodarjev trg",
"Brodska cesta",
"Burnikova ulica",
"Cankarjev vrh",
"Cankarjevo nabrežje",
"Carja Dušana ulica",
"Celarčeva ulica",
"Celjska ulica",
"Celovška cesta",
"Cerkniška ulica",
"Cerutova ulica",
"Cesta Andreja Bitenca",
"Cesta Ceneta Štuparja",
"Cesta Dolomitskega odreda",
"Cesta II. grupe odredov",
"Cesta Ljubljanske brigade",
"Cesta na Bellevue",
"Cesta na Bokalce",
"Cesta na Brinovec",
"Cesta na Brod",
"Cesta na Ježah",
"Cesta na Kope",
"Cesta na Laze",
"Cesta na Loko",
"Cesta na Mesarico",
"Cesta na Ozare",
"Cesta na Poljane",
"Cesta na Prevoje",
"Cesta na Urh",
"Cesta na Vrhovce",
"Cesta slov. kmečkih uporov",
"Cesta Urške Zatlerjeve",
"Cesta v Dvor",
"Cesta v Gameljne",
"Cesta v Hrastje",
"Cesta v hrib",
"Cesta v Kleče",
"Cesta v Kostanj",
"Cesta v Legarico",
"Cesta v Mestni log",
"Cesta v Pečale",
"Cesta v Prod",
"Cesta v Rožno dolino",
"Cesta v Šmartno",
"Cesta v Zeleni log",
"Cesta v Zgornji log",
"Cesta vstaje",
"Cesta 24. junija",
"Cesta 25 talcev",
"Cesta 27. aprila",
"Chengdujska cesta",
"Chopinov prehod",
"Cigaletova ulica",
"Cilenškova ulica",
"Cimermanova ulica",
"Cimpermanova ulica",
"Cizejeva ulica",
"Clevelandska ulica",
"Colnarjeva ulica",
"Cvetlična pot",
"Čampova ulica",
"Čanžekova ulica",
"Čargova ulica",
"Čebelarska ulica",
"Čehova ulica",
"Čepelnikova ulica",
"Čepovanska ulica",
"Čerinova ulica",
"Černigojeva ulica",
"Černivčeva ulica",
"Červanova ulica",
"Čevljarska ulica",
"Čižmanova ulica",
"Čopova ulica",
"Črna pot",
"Črnuška cesta",
"Črtomirova ulica",
"Čučkova ulica",
"Dajnkova ulica",
"Dalmatinova ulica",
"Danile Kumarjeve ulica",
"Dečkova ulica",
"Dečmanova ulica",
"Delakova ulica",
"Demšarjeva cesta",
"Derčeva ulica",
"Dergančeva ulica",
"Dermotova ulica",
"Detelova ulica",
"Devinska ulica",
"Devova ulica",
"Divjakova ulica",
"Do proge",
"Dobrajčeva ulica",
"Dobrdobska ulica",
"Dolenjska cesta",
"Dolgi breg",
"Dolgi most",
"Dolharjeva ulica",
"Dolinarjeva ulica",
"Dolinškova ulica",
"Dolničarjeva ulica",
"Dolomitska ulica",
"Drabosnjakova ulica",
"Draga",
"Draveljska ulica",
"Dražgoška ulica",
"Drenikov vrh",
"Drenikova ulica",
"Dunajska cesta",
"Dvojna ulica",
"Dvorakova ulica",
"Dvorni trg",
"Eipprova ulica",
"Ellerjeva ulica",
"Emonska cesta",
"Erbežnikova ulica",
"Erjavčeva cesta",
"Fabianijeva ulica",
"Fani Grumove ulica",
"Ferberjeva ulica",
"Filipičeva ulica",
"Flajšmanova ulica",
"Flandrova ulica",
"Forsterjeva ulica",
"Franketova ulica",
"Frankopanska ulica",
"Frenkova pot",
"Friškovec",
"Funtkova ulica",
"Fužinska cesta",
"Gabrov trg",
"Gača",
"Galičeva ulica",
"Galjevica",
"Gallusovo nabrežje",
"Gasilska cesta",
"Gasparijeva ulica",
"Gašperšičeva ulica",
"Gerbičeva ulica",
"Gestrinova ulica",
"Glavarjeva ulica",
"Gledališka stolba",
"Glinška ulica",
"Glinškova ploščad",
"Glonarjeva ulica",
"Gmajnice",
"Gobarska pot",
"Godeževa ulica",
"Gola Loka",
"Golarjeva ulica",
"Goljarjeva pot",
"Golouhova ulica",
"Goriška ulica",
"Gorjančeva ulica",
"Gorjupova ulica",
"Gornji Rudnik I",
"Gornji Rudnik II",
"Gornji Rudnik III",
"Gornji trg",
"Goropečnikova ulica",
"Gortanova ulica",
"Gospodinjska ulica",
"Gosposka ulica",
"Gosposvetska cesta",
"Govekarjeva ulica",
"Gozdna pot",
"Grablovičeva ulica",
"Gradišče",
"Gradnikova ulica",
"Grafenauerjeva ulica",
"Grajski drevored",
"Grajzerjeva ulica",
"Gramozna pot",
"Grassellijeva ulica",
"Gregorčičeva ulica",
"Gregorinova ulica",
"Grintovška ulica",
"Grobeljca",
"Grobeljska pot",
"Groharjeva cesta",
"Groznikova ulica",
"Grška ulica",
"Grško",
"Gruberjevo nabrežje",
"Grudnovo nabrežje",
"Gubčeva ulica",
"Gunceljska cesta",
"Gustinčarjeva ulica",
"Gustinčičeva ulica",
"Hacetova ulica",
"Hafnerjeva ulica",
"Hajdrihova ulica",
"Hauptmanca",
"Hladilniška pot",
"Hladnikova cesta",
"Hlebčeva ulica",
"Hotimirova ulica",
"Hradeckega cesta",
"Hranilniška ulica",
"Hribarjevo nabrežje",
"Hribernikova ulica",
"Hribovska pot",
"Hrvaška ulica",
"Hrvatski trg",
"Hubadova ulica",
"Hudourniška pot",
"Idrijska ulica",
"Igriška ulica",
"Ilešičeva ulica",
"Ilovški štradon",
"Industrijska cesta",
"Ingličeva ulica",
"Italijanska ulica",
"Izletniška ulica",
"Ižanska cesta",
"Jakčeva ulica",
"Jakhljeva ulica",
"Jakopičev drevored",
"Jakopičevo sprehajališče",
"Jakšičeva ulica",
"Jalnova ulica",
"Jamova cesta",
"Janežičeva cesta",
"Janova ulica",
"Janševa ulica",
"Jarčeva ulica",
"Jarnikova ulica",
"Jarše",
"Jarška cesta",
"Javorškova ulica",
"Jazbečeva pot",
"Jelinčičeva ulica",
"Jenkova ulica",
"Jensenova ulica",
"Jerajeva ulica",
"Jeranova ulica",
"Jesenkova ulica",
"Jesihov štradon",
"Jezerska ulica",
"Ježa",
"Ježica",
"Joškov štradon",
"Jurčičev trg",
"Jurčkova cesta",
"Juričeva ulica",
"Juvanova ulica",
"K reaktorju",
"Kadilnikova ulica",
"Kajuhova ulica",
"Kalingerjeva ulica",
"Kalinova ulica",
"Kaminova ulica",
"Kamniška ulica",
"Kamnogoriška cesta",
"Kančeva ulica",
"Kanonijeva cesta",
"Kantetova ulica",
"Kapusova ulica",
"Kardeljeva ploščad",
"Karingerjeva ulica",
"Karunova ulica",
"Kastelčeva ulica",
"Kašeljska cesta",
"Kavadarska cesta",
"Kavčičeva ulica",
"Kavškova ulica",
"Kekčeva ulica",
"Kermaunerjeva ulica",
"Kernova cesta",
"Kerševanova ulica",
"Keržičeva ulica",
"Kettejeva ulica",
"Kladezna ulica",
"Klančarjeva ulica",
"Kleče",
"Klemenova ulica",
"Kleparska steza",
"Ključavničarska ulica",
"Klunova ulica",
"Kmečka pot",
"Knafljev prehod",
"Knezov štradon",
"Knezova ulica",
"Knobleharjeva ulica",
"Koblarjeva ulica",
"Kocbekova ulica",
"Kocenova ulica",
"Kocjanova ulica",
"Kočenska ulica",
"Kodrova ulica",
"Kogojeva ulica",
"Kogovškova ulica",
"Kokaljeva ulica",
"Kolarjeva ulica",
"Kolesarska pot",
"Koleševa ulica",
"Kolinska ulica",
"Kolmanova ulica",
"Kolodvorska ulica",
"Komanova ulica",
"Komenskega ulica",
"Kongresni trg",
"Kopališka ulica",
"Kopitarjeva ulica",
"Kopna pot",
"Koprska ulica",
"Koreninova ulica",
"Koroška ulica",
"Korotanska ulica",
"Kosančeva ulica",
"Koseskega ulica",
"Koseška cesta",
"Kosmačeva ulica",
"Kosova ulica",
"Kosovelova ulica",
"Koširjeva ulica",
"Kotnikova ulica",
"Kovačeva ulica",
"Kovaška ulica",
"Kovinarska ulica",
"Kozakova ulica",
"Kozinova ulica",
"Kozlarjeva pot",
"Koželjeva ulica",
"Krakovski nasip",
"Kraljeva ulica",
"Kranerjeva ulica",
"Kraška ulica",
"Kratka pot",
"Kratka steza",
"Kregarjeva ulica",
"Kreljeva ulica",
"Kremžarjeva ulica",
"Krimska ulica",
"Krištofova ulica",
"Kriva pot",
"Krivec",
"Križevniška soteska",
"Križna ulica",
"Krmčeva ulica",
"Krmeljeva ulica",
"Kropova ulica",
"Krošljeva ulica",
"Krovska ulica",
"Krožna pot",
"Kržičeva ulica",
"Kudrova ulica",
"Kuhljeva cesta",
"Kumerdejeva ulica",
"Kumerjeve ulica",
"Kumrovška ulica",
"Kurilniška ulica",
"Kurirska ulica",
"Kusoldova ulica",
"Kuštrinova ulica",
"Kuzeletova ulica",
"Kuzmičeva ulica",
"Lahova pot",
"Lajovčeva ulica",
"Laknerjeva ulica",
"Lakotence",
"Lampetova ulica",
"Lamutova ulica",
"Langusova ulica",
"Latinski trg",
"Lavrinova ulica",
"Layerjeva ulica",
"Lazarjeva ulica",
"Legatova ulica",
"Lemeževa ulica",
"Lepi pot",
"Lepodvorska ulica",
"Leskovičeva ulica",
"Letališka cesta",
"Levarjeva ulica",
"Levičnikova ulica",
"Levstikov trg",
"Levstikova ulica",
"Linhartov podhod",
"Linhartova cesta",
"Lipahova ulica",
"Litijska cesta",
"Litostrojska cesta",
"Livada",
"Livarska ulica",
"Ločnikarjeva ulica",
"Lončarska steza",
"Lorenzova cesta",
"Lovrenčičeva ulica",
"Lovska ulica",
"Lovšetova ulica",
"Lubejeva ulica",
"Luize Pesjakove ulica",
"Lunačkova ulica",
"Mačja steza",
"Mačkov kot",
"Mačkova ulica",
"Madžarska ulica",
"Magistrova ulica",
"Maistrova ulica",
"Majaronova ulica",
"Majde Vrhovnikove ulica",
"Majorja Lavriča ulica",
"Makucova ulica",
"Mala ulica",
"Mala vas",
"Malejeva ulica",
"Malenškova ulica",
"Malgajeva ulica",
"Mali štradon",
"Mali trg",
"Malnarjeva ulica",
"Marčenkova ulica",
"Marentičeva ulica",
"Mareška pot",
"Marice Kovačeve ulica",
"Marincljeva ulica",
"Marinovševa cesta",
"Maroltova ulica",
"Martina Krpana ulica",
"Martinčeva ulica",
"Martinova ulica",
"Marušičeva ulica",
"Masarykova cesta",
"Matjanova pot",
"Matjaževa ulica",
"Maurerjeva ulica",
"Mazovčeva pot",
"Med hmeljniki",
"Medarska ulica",
"Medenska cesta",
"Medveščkova ulica",
"Mekinčeva ulica",
"Melikova ulica",
"Mencingerjeva ulica",
"Merčnikova ulica",
"Merosodna ulica",
"Mesesnelova ulica",
"Mestni trg",
"Meškova ulica",
"Metelkova ulica",
"Miheličeva cesta",
"Mihov štradon",
"Miklavčeva ulica",
"Miklošičeva cesta",
"Mikuževa ulica",
"Milčetova pot",
"Mire Lenardičeve ulica",
"Mirje",
"Mirna pot",
"Mislejeva ulica",
"Mizarska pot",
"Mladinska ulica",
"Mlake",
"Mlinska pot",
"Močnikova ulica",
"Mokrška ulica",
"Molekova ulica",
"Moškričeva ulica",
"Mrharjeva ulica",
"Mrzelova ulica",
"Murkova ulica",
"Murnikova ulica",
"Murnova ulica",
"Muzejska ulica",
"Na cvetači",
"Na delih",
"Na dolih",
"Na gaju",
"Na gmajni",
"Na Herši",
"Na jami",
"Na klančku",
"Na Korošci",
"Na Palcah",
"Na požaru",
"Na produ",
"Na Rojah",
"Na Stolbi",
"Na Straški vrh",
"Na Trati",
"Na Žalah",
"Nade Ovčakove ulica",
"Nadgoriška cesta",
"Nahlikova ulica",
"Nahtigalova ulica",
"Nanoška ulica",
"Nazorjeva ulica",
"Nebotičnikov prehod",
"Nedohova ulica",
"Njegoševa cesta",
"Nova ulica",
"Novakova pot",
"Novakova ulica",
"Novi trg",
"Novinarska ulica",
"Novo naselje",
"Novo Polje, cesta I",
"Novo Polje, cesta III",
"Novo Polje, cesta IV",
"Novo Polje, cesta V",
"Novo Polje, cesta VI",
"Novo Polje, cesta VII",
"Novo Polje, cesta X",
"Novo Polje, cesta XI",
"Novo Polje, cesta XII",
"Novo Polje, cesta XIV",
"Novo Polje, cesta XIX",
"Novo Polje, cesta XVI",
"Novo Polje, cesta XVII",
"Novo Polje, cesta XXI",
"Novo Polje, cesta XXIII",
"Novosadska ulica",
"Ob daljnovodu",
"Ob dolenjski železnici",
"Ob Farjevcu",
"Ob Ljubljanici",
"Ob Mejašu",
"Ob potoku",
"Ob pristanu",
"Ob Savi",
"Ob studencu",
"Ob zdravstvenem domu",
"Ob zeleni jami",
"Ob zelenici",
"Ob žici",
"Obirska ulica",
"Obrežna steza",
"Obrije",
"Ocvirkova ulica",
"Ogrinčeva ulica",
"Okiškega ulica",
"Omahnova ulica",
"Omejčeva ulica",
"Omersova ulica",
"Oražnova ulica",
"Orlova ulica",
"Osenjakova ulica",
"Osojna pot",
"Osojna steza",
"Osterčeva ulica",
"Ovčakova ulica",
"Pahorjeva ulica",
"Palmejeva ulica",
"Papirniška pot",
"Park Ajdovščina",
"Park Arturo Toscanini",
"Parmova ulica",
"Parmska cesta",
"Partizanska ulica",
"Pavlovčeva ulica",
"Pavšičeva ulica",
"Pečarjeva ulica",
"Pečnik",
"Pečnikova ulica",
"Pegamova ulica",
"Perčeva ulica",
"Periška cesta",
"Perkova ulica",
"Peršinova cesta",
"Pesarska cesta",
"Pestotnikova ulica",
"Peščena pot",
"Petkova ulica",
"Petkovškovo nabrežje",
"Petrčeva ulica",
"Pilonova ulica",
"Pionirska pot",
"Pipanova pot",
"Pirnatova ulica",
"Planinska cesta",
"Planinškova ulica",
"Plečnikov podhod",
"Plemljeva ulica",
"Plešičeva ulica",
"Pleteršnikova ulica",
"Pločanska ulica",
"Pod akacijami",
"Pod bregom",
"Pod bresti",
"Pod bukvami",
"Pod Debnim vrhom",
"Pod gabri",
"Pod gozdom",
"Pod hrasti",
"Pod hribom",
"Pod hruško",
"Pod jelšami",
"Pod jezom",
"Pod ježami",
"Pod Kamno gorico",
"Pod klancem",
"Pod lipami",
"Pod topoli",
"Pod Trančo",
"Pod turnom",
"Pod vrbami",
"Podgornikova ulica",
"Podgorska cesta",
"Podgrajska cesta",
"Podjunska ulica",
"Podlimbarskega ulica",
"Podmilščakova ulica",
"Podrožniška pot",
"Podsmreška cesta",
"Podutiška cesta",
"Pogačarjev trg",
"Pohlinova ulica",
"Poklukarjeva ulica",
"Polakova ulica",
"Polanškova ulica",
"Poljanska cesta",
"Polje",
"Polje, cesta I",
"Polje, cesta II",
"Polje, cesta III",
"Polje, cesta VI",
"Polje, cesta VIII",
"Polje, cesta X",
"Polje, cesta XIV",
"Polje, cesta XL",
"Polje, cesta XLII",
"Polje, cesta XLVI",
"Polje, cesta XVI",
"Polje, cesta XVIII",
"Polje, cesta XXII",
"Polje, cesta XXIV",
"Polje, cesta XXVI",
"Polje, cesta XXX",
"Polje, cesta XXXII",
"Polje, cesta XXXIV",
"Polje, cesta XXXVIII",
"Poljedelska ulica",
"Poljska pot",
"Porentova ulica",
"Posavskega ulica",
"Postojnska ulica",
"Pot do šole",
"Pot Draga Jakopiča",
"Pot heroja Trtnika",
"Pot k igrišču",
"Pot k ribniku",
"Pot k Savi",
"Pot k sejmišču",
"Pot k studencu",
"Pot na Breje",
"Pot na Drenikov vrh",
"Pot na Golovec",
"Pot na goro",
"Pot na Gradišče",
"Pot na Grič",
"Pot na Labar",
"Pot na mah",
"Pot na most",
"Pot na Orle",
"Pot na Visoko",
"Pot na Zduše",
"Pot Rdečega križa",
"Pot v boršt",
"Pot v Čeželj",
"Pot v dolino",
"Pot v Goričico",
"Pot v hribec",
"Pot v mejah",
"Pot v Mlake",
"Pot v Podgorje",
"Pot v Zeleni gaj",
"Pot za Brdom",
"Pot za razori",
"Potokarjeva ulica",
"Potrčeva ulica",
"Povšetova ulica",
"Prašnikarjeva ulica",
"Praznikova ulica",
"Pražakova ulica",
"Pred Savljami",
"Predjamska cesta",
"Predor pod Gradom",
"Preglov trg",
"Prekmurska ulica",
"Prelčeva ulica",
"Preloge",
"Premrlova ulica",
"Preradovićeva ulica",
"Preserska ulica",
"Prešernov trg",
"Prešernova cesta",
"Pretnarjeva ulica",
"Pri borštu",
"Pri brvi",
"Pri malem kamnu",
"Pri mostiščarjih",
"Pribinova ulica",
"Prijateljeva ulica",
"Primorska ulica",
"Prinčičeva ulica",
"Prisojna ulica",
"Prištinska ulica",
"Privoz",
"Proletarska cesta",
"Prule",
"Prušnikova ulica",
"Prvomajska ulica",
"Pšatnik",
"Pšatska pot",
"Ptujska ulica",
"Pučnikova ulica",
"Puharjeva ulica",
"Puhova ulica",
"Puhtejeva ulica",
"Puterlejeva ulica",
"Putrihova ulica",
"Raičeva ulica",
"Rakovniška ulica",
"Rakuševa ulica",
"Ramovševa ulica",
"Ravbarjeva ulica",
"Ravna pot",
"Ravnikova ulica",
"Razgledna steza",
"Reber",
"Reboljeva ulica",
"Rečna ulica",
"Regentova cesta",
"Resljeva cesta",
"Reška ulica",
"Ribičičeva ulica",
"Ribji trg",
"Ribniška ulica",
"Rimska cesta",
"Rjava cesta",
"Robbova ulica",
"Robičeva ulica",
"Rodičeva ulica",
"Rojčeva ulica",
"Romavhova ulica",
"Rosna pot",
"Rotarjeva ulica",
"Rovšnikova ulica",
"Rozmanova ulica",
"Rožanska ulica",
"Rožičeva ulica",
"Rožna dolina, cesta I",
"Rožna dolina, cesta III",
"Rožna dolina, cesta IV",
"Rožna dolina, cesta V",
"Rožna dolina, cesta VI",
"Rožna dolina, cesta VIII",
"Rožna dolina, cesta X",
"Rožna dolina, cesta XII",
"Rožna dolina, cesta XIII",
"Rožna dolina, cesta XV",
"Rožna dolina, cesta XVII",
"Rožna ulica",
"Rudnik I",
"Rudnik II",
"Rudnik III",
"Runkova ulica",
"Ruska ulica",
"Rutarjeva ulica",
"Sadinja vas",
"Sajovčeva ulica",
"Samova ulica",
"Saškova ulica",
"Sattnerjeva ulica",
"Savinova ulica",
"Savinškova ulica",
"Savlje",
"Savska cesta",
"Sedejeva ulica",
"Selanov trg",
"Selanova ulica",
"Setnikarjeva ulica",
"Seunigova ulica",
"Simončičeva ulica",
"Siva pot",
"Skapinova ulica",
"Sketova ulica",
"Skopčeva ulica",
"Skrbinškova ulica",
"Slape",
"Slapnikova ulica",
"Slavčja ulica",
"Slomškova ulica",
"Slovenčeva ulica",
"Slovenska cesta",
"Smoletova ulica",
"Smrekarjeva ulica",
"Smrtnikova ulica",
"Snebersko nabrežje",
"Snežniška ulica",
"Snojeva ulica",
"Sojerjeva ulica",
"Sončna pot",
"Sostrska cesta",
"Soška ulica",
"Soteška pot",
"Soussenska ulica",
"Sovretova ulica",
"Spodnji Rudnik I",
"Spodnji Rudnik II",
"Spodnji Rudnik III",
"Spodnji Rudnik V",
"Spomeniška pot",
"Srebrničeva ulica",
"Srednja pot",
"Stadionska ulica",
"Staničeva ulica",
"Stara Ježica",
"Stara slovenska ulica",
"Stare Črnuče",
"Stari trg",
"Stegne",
"Steletova ulica",
"Sternadova ulica",
"Stiška ulica",
"Stolpniška ulica",
"Stoženska ulica",
"Stožice",
"Stražarjeva ulica",
"Streliška ulica",
"Stritarjeva ulica",
"Strmeckijeva ulica",
"Strmi pot",
"Strniševa cesta",
"Strossmayerjeva ulica",
"Strugarska ulica",
"Strupijevo nabrežje",
"Suhadolčanova ulica",
"Sulčja ulica",
"Svetčeva ulica",
"Šarhova ulica",
"Šentjakob",
"Šentviška ulica",
"Šerkova ulica",
"Šestova ulica",
"Šibeniška ulica",
"Šinkov štradon",
"Šišenska cesta",
"Šivičeva ulica",
"Škerljeva ulica",
"Škofova ulica",
"Škrabčeva ulica",
"Šlandrova ulica",
"Šlosarjeva ulica",
"Šmarna gora",
"Šmartinska cesta",
"Šmartno",
"Španova pot",
"Španska ulica",
"Štajerska cesta",
"Štebijeva cesta",
"Štefančeva ulica",
"Štembalova ulica",
"Štepanjska cesta",
"Štepanjsko nabrežje",
"Štirnova ulica",
"Štradon čez Prošco",
"Štrekljeva ulica",
"Študentovska ulica",
"Štukljeva cesta",
"Štula",
"Šturmova ulica",
"Šubičeva ulica",
"Šumarjeva ulica",
"Švabićeva ulica",
"Švarova ulica",
"Švegljeva cesta",
"Tabor",
"Tacenska cesta",
"Tavčarjeva ulica",
"Tbilisijska ulica",
"Tesarska ulica",
"Teslova ulica",
"Tesna ulica",
"Tesovnikova ulica",
"Tiha ulica",
"Tiranova ulica",
"Tischlerjeva ulica",
"Tivolska cesta",
"Tkalska ulica",
"Tobačna ulica",
"Tolminska ulica",
"Tomačevo",
"Tomačevska cesta",
"Tomažičeva ulica",
"Tometova ulica",
"Tominškova ulica",
"Tomišeljska ulica",
"Toplarniška ulica",
"Topniška ulica",
"Torkarjeva ulica",
"Tratnikova ulica",
"Travniška ulica",
"Trbeže",
"Trdinova ulica",
"Trebušakova ulica",
"Trg francoske revolucije",
"Trg mladih",
"Trg mladinskih delov. brigad",
"Trg narodnih herojev",
"Trg prekomorskih brigad",
"Trg republike",
"Trg 9. maja",
"Trinkova ulica",
"Trnovčeva ulica",
"Trnovska ulica",
"Trpinčeva ulica",
"Trstenjakova ulica",
"Trtnikova ulica",
"Tržaška cesta",
"Tržna ulica",
"Tugomerjeva ulica",
"Turnerjeva ulica",
"Turnsko nabrežje",
"Udvančeva ulica",
"Ulica aktivistov",
"Ulica Alme Sodnik",
"Ulica Andreja Kumarja",
"Ulica Angelce Ocepkove",
"Ulica Angele Ljubičeve",
"Ulica borca Petra",
"Ulica borcev za severno mejo",
"Ulica bratov Bezlajev",
"Ulica bratov Blanč",
"Ulica bratov Jančar",
"Ulica bratov Komel",
"Ulica bratov Kraljič",
"Ulica bratov Martinec",
"Ulica bratov Novak",
"Ulica bratov Rozmanov",
"Ulica bratov Škofov",
"Ulica bratov Učakar",
"Ulica bratov Židan",
"Ulica Dušana Kraigherja",
"Ulica Ernesta Kramerja",
"Ulica Franca Nebca",
"Ulica Francke Jerasove",
"Ulica Franja Novaka",
"Ulica gledališča BTC",
"Ulica Goce Delčeva",
"Ulica Gubčeve brigade",
"Ulica Hermana Potočnika",
"Ulica Ivana Roba",
"Ulica Ivanke Kožuh",
"Ulica Ivice Pirjevčeve",
"Ulica Janeza Pavla II.",
"Ulica Janeza Rožiča",
"Ulica Jožeta Jame",
"Ulica Jožeta Japlja",
"Ulica Jožeta Mirtiča",
"Ulica Konrada Babnika",
"Ulica Koroškega bataljona",
"Ulica Lizike Jančarjeve",
"Ulica Lojzeta Spacala",
"Ulica Lovre Klemenčiča",
"Ulica Malči Beličeve",
"Ulica Marije Drakslerjeve",
"Ulica Marije Hvaličeve",
"Ulica Marje Boršnikove",
"Ulica Marka Šlajmerja",
"Ulica Milana Majcna",
"Ulica Milke Kerinove",
"Ulica Minke Bobnar",
"Ulica Mirka Jurce",
"Ulica Mirka Tomšiča",
"Ulica Miroslava Turka",
"Ulica Molniške čete",
"Ulica na Grad",
"Ulica Nade Čamernikove",
"Ulica Olge Mohorjeve",
"Ulica padlih borcev",
"Ulica Pariške komune",
"Ulica Pohorskega bataljona",
"Ulica Polonce Čude",
"Ulica prvoborcev",
"Ulica Rezke Dragarjeve",
"Ulica Rezke Klopčič",
"Ulica Rudolfa Janežiča",
"Ulica Staneta Severja",
"Ulica Štefke Zbašnikove",
"Ulica talcev",
"Ulica Tončke Čečeve",
"Ulica v Kokovšek",
"Ulica Vide Pregarčeve",
"Ulica Vladimirja Trampuža",
"Ulica Zore Ragancinove",
"Ulica Žanke Erjavec",
"Ulica 15. aprila",
"Ulica 15. maja",
"Ulica 24. avgusta",
"Ulica 4. julija",
"Ulica 7. septembra",
"Ulica 9. junija",
"Uršičev štradon",
"Usnjarska ulica",
"V Češnjico",
"V dolini",
"V Karlovce",
"V Karlovce",
"V Kladeh",
"V Murglah",
"V Sige",
"V Varde",
"V Zalar",
"Vagajeva ulica",
"Valjavčeva ulica",
"Valvasorjeva ulica",
"Vandotova ulica",
"Vaška pot",
"Večna pot",
"Vegova ulica",
"Velebitska ulica",
"Veliki štradon",
"Velikovška ulica",
"Velnarjeva ulica",
"Verovškova ulica",
"Veršičeva ulica",
"Veselova ulica",
"Videmska ulica",
"Vidergarjeva ulica",
"Vidičeva ulica",
"Vidovdanska cesta",
"Vilharjev podhod",
"Vilharjeva cesta",
"Vinterca",
"Vipavska ulica",
"Vipotnikova ulica",
"Viška cesta",
"Vižmarska pot",
"Vodmatska ulica",
"Vodmatski trg",
"Vodna steza",
"Vodnikova cesta",
"Vodnikovo naselje",
"Vodovodna cesta",
"Vogelna ulica",
"Vojkova cesta",
"Volaričeva ulica",
"Vošnjakova ulica",
"Vozna pot na Grad",
"Vožarski pot",
"Vrazov trg",
"Vrbovec",
"Vrbska ulica",
"Vregova ulica",
"Vrhovci, cesta I",
"Vrhovci, cesta II",
"Vrhovci, cesta III",
"Vrhovci, cesta IX",
"Vrhovci, cesta V",
"Vrhovci, cesta VI",
"Vrhovci, cesta X",
"Vrhovci, cesta XI",
"Vrhovci, cesta XII",
"Vrhovci, cesta XIV",
"Vrhovci, cesta XIX",
"Vrhovci, cesta XV",
"Vrhovci, cesta XVII",
"Vrhovci, cesta XVIII",
"Vrhovci, cesta XX",
"Vrhovci, cesta XXII",
"Vrhovci, cesta XXVI",
"Vrhovci, cesta XXVIII",
"Vrhovci, cesta XXXII",
"Vrhovčeva ulica",
"Vrhovnikova ulica",
"Vrtača",
"Vrtna ulica",
"Vrtnarska cesta",
"Vulčeva ulica",
"Vzajemna ulica",
"Windischerjeva ulica",
"Wolfova ulica",
"Za Garažami",
"Za gasilskim domom",
"Za Gradom",
"Za krajem",
"Za opekarno",
"Za partizanskim domom",
"Za progo",
"Za vasjo",
"Zadnikarjeva ulica",
"Zadobrovška cesta",
"Zadružna ulica",
"Zajčeva pot",
"Zajčevi dvori",
"Zakotnikova ulica",
"Zalaznikova ulica",
"Zaletelova ulica",
"Zaloška cesta",
"Zarnikova ulica",
"Zasavska cesta",
"Zatišje",
"Zavetiška ulica",
"Završje",
"Zbašnikova ulica",
"Zdešarjeva cesta",
"Zelena pot",
"Zelenova ulica",
"Zeljarska ulica",
"Zevnikova ulica",
"Zidarjev štradon",
"Ziherlova ulica",
"Zlatek",
"Znamenjska ulica",
"Zofke Kvedrove ulica",
"Zoisova cesta",
"Zupanova ulica",
"Zvezda",
"Zvezdarska ulica",
"Zvezna ulica",
"Žabarjeva ulica",
"Žabjak",
"Žalska ulica",
"Žaucerjeva ulica",
"Žeje",
"Železna cesta",
"Železnikarjeva ulica",
"Žerjalova ulica",
"Židankova ulica",
"Židovska steza",
"Židovska ulica",
"Živaličeva ulica",
"Živinozdravska ulica",
"Žolgerjeva ulica",
)
states = (
"Pomurksa",
"Podravska",
"Koroška",
"Savinjska",
"Zasavska",
"Spodnjeposavska",
"Jugovzhodna Slovenija",
"Osrednjeslovenska",
"Gorenjska",
"Notranjsko - kraška",
"Goriška",
"Obalno - kraška",
)
countries = (
"Afganistan",
"Islamska republika Afganistan",
"Albanija",
"Alžirija",
"Ljudska demokratična republika Alžirija",
"Andora",
"Angola",
"Republika Angola",
"Antigva in Barbuda",
"Argentina",
"Armenija",
"Republika Armenija",
"Avstralija",
"Avstrija",
"Azerbajdžan",
"Azerbajdžanska republika",
"Bahami",
"Zveza Bahami",
"Država Bahrajn",
"Bangladeš",
"Ljudska republika Bangladeš",
"Belgija",
"Kraljevina Belgija",
"Belize",
"Belorusija",
"Benin",
"Republika Benin",
"Bocvana",
"Republika Bocvana",
"Republika Bolgarija",
"Bolivija",
"Republika Bolivija",
"Brazilija",
"Federativna republika Brazilija",
"Brunej",
"Burkina Faso",
"Burundi",
"Republika Burundi",
"Butan",
"Ciper",
"Republika Ciper",
"Čad",
"Republika Čad",
"Češka",
"Čile",
"Republika Čile",
"Črna gora",
"Republika Črna gora",
"Kraljevina Danska",
"Dominika",
"Zveza Dominika",
"Džibuti",
"Republika Džibuti",
"Egipt",
"Arabska republika Egipt",
"Republika Ekvador",
"Ekvatorialna Gvineja",
"Eritreja",
"Estonija",
"Republika Estonija",
"Etiopija",
"Fidži",
"Filipini",
"Republika Filipini",
"Finska",
"Republika Finska",
"Francoska republika",
"Gabon",
"Gabonska republika",
"Gambija",
"Gana",
"Republika Gana",
"Grčija",
"Helenska republika",
"Grenada",
"Gvajana",
"Republika Gvajana",
"Gvatemala",
"Republika Gvatemala",
"Republika Gvineja",
"Gvineja Bissau",
"Republika Gvineja Bissau",
"Republika Haiti",
"Honduras",
"Republika Honduras",
"Hrvaška",
"Indija",
"Republika Indija",
"Indonezija",
"Republika Indonezija",
"Republika Irak",
"Iran",
"Islamska republika Iran",
"Irska",
"Republika Islandija",
"Italija",
"Italijanska republika",
"Izrael",
"Jamajka",
"Japonska",
"Jemen",
"Republika Jemen",
"Jordanija",
"Južna Afrika",
"Republika Južna Afrika",
"Južna Koreja",
"Kambodža",
"Kraljevina Kambodža",
"Kamerun",
"Republika Kamerun",
"Katar",
"Država Katar",
"Kazahstan",
"Republika Kazahstan",
"Kenija",
"Kirgizistan",
"Kirgiška republika",
"Kiribati",
"Kitajska",
"Kolumbija",
"Republika Kolumbija",
"Komori",
"Kongo",
"Republika Kongo",
"Demokratična republika Kongo",
"Republika Kostarika",
"Kuba",
"Republika Kuba",
"Kuvajt",
"Laos",
"Laoška ljudska demokratična republika",
"Latvija",
"Lesoto",
"Kraljevina Lesoto",
"Libanon",
"Libanonska republika",
"Republika Liberija",
"Libija",
"Libijska arabska džamahirija",
"Lihtenštajn",
"Kneževina Lihtenštajn",
"Litva",
"Republika Litva",
"Veliko vojvodstvo Luksemburg",
"Madagaskar",
"Republika Madagaskar",
"Republika Madžarska",
"Republika Severna Makedonija",
"Malavi",
"Maldivi",
"Republika Maldivi",
"Malezija",
"Mali",
"Republika Mali",
"Republika Malta",
"Maroko",
"Kraljevina Maroko",
"Marshallovi otoki",
"Mauritius",
"Republika Mauritius",
"Mavretanija",
"Mehika",
"Združene mehiške države",
"Mikronezija",
"Mjanmar",
"Zveza Mjanmar",
"Moldavija",
"Moldavija, Republika",
"Kneževina Monako",
"Mongolija",
"Mozambik",
"Republika Mozambik",
"Republika Namibija",
"Nauru",
"Republika Nauru",
"Nemčija",
"Nepal",
"Kraljevina Nepal",
"Niger",
"Republika Niger",
"Nigerija",
"Nikaragva",
"Republika Nikaragva",
"Nizozemska",
"Norveška",
"Kraljevina Norveška",
"Nova Zelandija",
"Oman",
"Pakistan",
"Islamska republika Pakistan",
"Palau",
"Republika Palau",
"Republika Panama",
"Papua Nova Gvineja",
"Paragvaj",
"Peru",
"Republika Peru",
"Poljska",
"Republika Poljska",
"Portugalska republika",
"Romunija",
"Ruanda",
"Republika Ruanda",
"Ruska federacija",
"Saint Kitts in Nevis",
"Saint Lucia",
"Salomonovi otoki",
"Salvador",
"Republika Salvador",
"San Marino",
"Sao Tome in Principe",
"Demokratična republika Sao Tome in Principe",
"Kraljevina Saudova Arabija",
"Sejšeli",
"Republika Sejšeli",
"Republika Senegal",
"Severna Koreja",
"Severna Makedonija",
"Sierra Leone",
"Republika Sierra Leone",
"Singapur",
"Sirija",
"Sirska arabska republika",
"Slonokoščena obala",
"Slovaška",
"Slovaška republika",
"Slovenija",
"Republika Slovenija",
"Somalska demokratična republika",
"Srbija",
"Republika Srbija",
"Sudan",
"Republika Sudan",
"Surinam",
"Republika Surinam",
"Svazi",
"Španija",
"Kraljevina Španija",
"Šrilanka",
"Švedska",
"Kraljevina Švedska",
"Švica",
"Tadžikistan",
"Republika Tadžikistan",
"Tajska",
"Tajvan",
"Tajvan, Provinca Kitajske",
"Tanzanija",
"Togo",
"Togoška republika",
"Tonga",
"Kraljevina Tonga",
"Republika Trinidad in Tobago",
"Tunizija",
"Republika Tunizija",
"Republika Turčija",
"Turkmenistan",
"Tuvalu",
"Uganda",
"Ukrajina",
"Urugvaj",
"Vzhodna republika Urugvaj",
"Uzbekistan",
"Vanuatu",
"Republika Vanuatu",
"Vatikan",
"Velika Britanija",
"Združeno kraljestvo",
"Venezuela",
"Republika Venezuela",
"Vietnam",
"Vzhodni Timor",
"Demokratična republika Vzhodni Timor",
"Samoa",
"Neodvisna država Zahodna Samoa",
"Zambija",
"Združene države Amerike",
"Združene države",
"Združeni arabski emirati",
"Zelenortski otoki",
)
def city_name(self) -> str:
return self.random_element(self.cities)
def street_name(self) -> str:
return self.random_element(self.streets)
def administrative_unit(self) -> str:
return self.random_element(self.states)
state = administrative_unit | PypiClean |
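# Illustrative usage sketch (not part of the provider); generated values are
# random, the literals below are only examples:
#
#   from faker import Faker
#   fake = Faker('sl_SI')
#   fake.address()              # e.g. 'Slovenska cesta 3\n4260 Bled'
#   fake.city_name(), fake.street_name(), fake.administrative_unit()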
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/chokidar/node_modules/glob-parent/CHANGELOG.md |
### [5.1.2](https://github.com/gulpjs/glob-parent/compare/v5.1.1...v5.1.2) (2021-03-06)
### Bug Fixes
* eliminate ReDoS ([#36](https://github.com/gulpjs/glob-parent/issues/36)) ([f923116](https://github.com/gulpjs/glob-parent/commit/f9231168b0041fea3f8f954b3cceb56269fc6366))
### [5.1.1](https://github.com/gulpjs/glob-parent/compare/v5.1.0...v5.1.1) (2021-01-27)
### Bug Fixes
* unescape exclamation mark ([#26](https://github.com/gulpjs/glob-parent/issues/26)) ([a98874f](https://github.com/gulpjs/glob-parent/commit/a98874f1a59e407f4fb1beb0db4efa8392da60bb))
## [5.1.0](https://github.com/gulpjs/glob-parent/compare/v5.0.0...v5.1.0) (2021-01-27)
### Features
* add `flipBackslashes` option to disable auto conversion of slashes (closes [#24](https://github.com/gulpjs/glob-parent/issues/24)) ([#25](https://github.com/gulpjs/glob-parent/issues/25)) ([eecf91d](https://github.com/gulpjs/glob-parent/commit/eecf91d5e3834ed78aee39c4eaaae654d76b87b3))
## [5.0.0](https://github.com/gulpjs/glob-parent/compare/v4.0.0...v5.0.0) (2021-01-27)
### ⚠ BREAKING CHANGES
* Drop support for node <6 & bump dependencies
### Miscellaneous Chores
* Drop support for node <6 & bump dependencies ([896c0c0](https://github.com/gulpjs/glob-parent/commit/896c0c00b4e7362f60b96e7fc295ae929245255a))
## [4.0.0](https://github.com/gulpjs/glob-parent/compare/v3.1.0...v4.0.0) (2021-01-27)
### ⚠ BREAKING CHANGES
* question marks are valid path characters on Windows so avoid flagging as a glob when alone
* Update is-glob dependency
### Features
* hoist regexps and strings for performance gains ([4a80667](https://github.com/gulpjs/glob-parent/commit/4a80667c69355c76a572a5892b0f133c8e1f457e))
* question marks are valid path characters on Windows so avoid flagging as a glob when alone ([2a551dd](https://github.com/gulpjs/glob-parent/commit/2a551dd0dc3235e78bf3c94843d4107072d17841))
* Update is-glob dependency ([e41fcd8](https://github.com/gulpjs/glob-parent/commit/e41fcd895d1f7bc617dba45c9d935a7949b9c281))
## [3.1.0](https://github.com/gulpjs/glob-parent/compare/v3.0.1...v3.1.0) (2021-01-27)
### Features
* allow basic win32 backslash use ([272afa5](https://github.com/gulpjs/glob-parent/commit/272afa5fd070fc0f796386a5993d4ee4a846988b))
* handle extglobs (parentheses) containing separators ([7db1bdb](https://github.com/gulpjs/glob-parent/commit/7db1bdb0756e55fd14619e8ce31aa31b17b117fd))
* new approach to braces/brackets handling ([8269bd8](https://github.com/gulpjs/glob-parent/commit/8269bd89290d99fac9395a354fb56fdcdb80f0be))
* pre-process braces/brackets sections ([9ef8a87](https://github.com/gulpjs/glob-parent/commit/9ef8a87f66b1a43d0591e7a8e4fc5a18415ee388))
* preserve escaped brace/bracket at end of string ([8cfb0ba](https://github.com/gulpjs/glob-parent/commit/8cfb0ba84202d51571340dcbaf61b79d16a26c76))
### Bug Fixes
* trailing escaped square brackets ([99ec9fe](https://github.com/gulpjs/glob-parent/commit/99ec9fecc60ee488ded20a94dd4f18b4f55c4ccf))
### [3.0.1](https://github.com/gulpjs/glob-parent/compare/v3.0.0...v3.0.1) (2021-01-27)
### Features
* use path-dirname ponyfill ([cdbea5f](https://github.com/gulpjs/glob-parent/commit/cdbea5f32a58a54e001a75ddd7c0fccd4776aacc))
### Bug Fixes
* unescape glob-escaped dirnames on output ([598c533](https://github.com/gulpjs/glob-parent/commit/598c533bdf49c1428bc063aa9b8db40c5a86b030))
## [3.0.0](https://github.com/gulpjs/glob-parent/compare/v2.0.0...v3.0.0) (2021-01-27)
### ⚠ BREAKING CHANGES
* update is-glob dependency
### Features
* update is-glob dependency ([5c5f8ef](https://github.com/gulpjs/glob-parent/commit/5c5f8efcee362a8e7638cf8220666acd8784f6bd))
## [2.0.0](https://github.com/gulpjs/glob-parent/compare/v1.3.0...v2.0.0) (2021-01-27)
### Features
* move up to dirname regardless of glob characters ([f97fb83](https://github.com/gulpjs/glob-parent/commit/f97fb83be2e0a9fc8d3b760e789d2ecadd6aa0c2))
## [1.3.0](https://github.com/gulpjs/glob-parent/compare/v1.2.0...v1.3.0) (2021-01-27)
## [1.2.0](https://github.com/gulpjs/glob-parent/compare/v1.1.0...v1.2.0) (2021-01-27)
### Reverts
* feat: make regex test strings smaller ([dc80fa9](https://github.com/gulpjs/glob-parent/commit/dc80fa9658dca20549cfeba44bbd37d5246fcce0))
## [1.1.0](https://github.com/gulpjs/glob-parent/compare/v1.0.0...v1.1.0) (2021-01-27)
### Features
* make regex test strings smaller ([cd83220](https://github.com/gulpjs/glob-parent/commit/cd832208638f45169f986d80fcf66e401f35d233))
## 1.0.0 (2021-01-27)
| PypiClean |
/MakeSens_API-0.5.1-py3-none-any.whl/MakeSens/MakeSens.py |
class MakeSens:
import pandas
import requests
import json
from typing import Union
from datetime import datetime
def __init__(self, proyecto:str,token:str):
self.headers = {'content-type': 'application/json',
'Authorization':f'Bearer {token}'}
self.project=self.json.loads(self.requests.get(f'https://makesens.aws.thinger.io/v1/users/MakeSens/devices?project={proyecto}',headers=self.headers).content)
self.devices_dfs=[]
self.devices=[]
for p in self.project:
device=self.json.loads(self.requests.get(f'https://makesens.aws.thinger.io/v1/users/MakeSens/devices/{p["device"]}',headers=self.headers).content)['connection']['location']
lat,lon=device['lat'],device['lon']
self.devices.append(p['device'])
device_df={'device':p['device'],'name':p['name'],'active':p['connection']['active'],'description':p['description'],'last_timestamp':
self.datetime.utcfromtimestamp(p['connection']['ts']/1000).strftime('%Y-%m-%d %H:%M:%S'),
'lat':lat,'lon':lon}
self.devices_dfs.append(device_df)
self.devices_dfs=self.pandas.DataFrame(self.devices_dfs).set_index('device')
self.data_total={}
def request_data(self,id_devices:Union[list,tuple]=[],start:str='2015-01-01',end:str='2023-01-01',items:int=1000)->dict:
if not id_devices:
id_devices=self.devices
start=int((self.datetime.strptime(start,"%Y-%m-%d") - self.datetime(1970, 1, 1)).total_seconds())*1000
end=int((self.datetime.strptime(end,"%Y-%m-%d") - self.datetime(1970, 1, 1)).total_seconds())*1000
self.data_total={}
for id in id_devices:
data=[]
min_ts=start
while min_ts-1<end:
d=self.json.loads(self.requests.get(f'https://makesens.aws.thinger.io/v1/users/MakeSens/buckets/{"B"+id}/data?items={items}&min_ts={min_ts}&sort=asc',headers=self.headers).content)
if not d: break
data+=d
min_ts=d[-1]['ts']+1
self.data_total[id]=self.pandas.DataFrame([i['val'] for i in data],index=[self.datetime.utcfromtimestamp(i['ts']/1000).strftime('%Y-%m-%d %H:%M:%S') for i in data])
return self.data_total
    def request_device_status(self, id_devices: Union[list, tuple] = []) -> dict:
        if not id_devices:
            id_devices = self.devices
return {id:self.json.loads(self.requests.get(f'https://makesens.aws.thinger.io/v1/users/MakeSens/devices/{id}',headers=self.headers).content) for id in id_devices}
def data_types(self,data:dict={})->dict:
if not data:
data=self.data_total
return {id:df.dtypes for id,df in data.items()}
def plot_data(self):
for id,df in self.data_total.items():
n=len(df.columns)
ax=df.plot(subplots=True,sharex=False,figsize=(20,4*n),title=[id]*n)
    def save_data(self):
        for id, df in self.data_total.items():
            df.to_csv(id + '.csv')
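# Illustrative usage sketch (not part of the original module); the project
# name and token are placeholders for real MakeSens/Thinger.io credentials:
#
#   api = MakeSens('mi_proyecto', 'TOKEN')
#   api.devices_dfs                                # device metadata table
#   data = api.request_data(start='2022-01-01', end='2022-02-01')
#   api.plot_data()                                # subplots per variable and device
#   api.save_data()                                # writes one <device_id>.csv each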
/ImmuneDB-0.29.11.tar.gz/ImmuneDB-0.29.11/immunedb/identification/genes.py |
from collections import OrderedDict
import dnautils
import re
from Bio import SeqIO
from Bio.Seq import Seq
from immunedb.common.models import CDR3_OFFSET
from immunedb.util.hyper import hypergeom
from immunedb.identification import AlignmentException, get_common_seq
class GermlineException(Exception):
pass
class GeneName(object):
def __init__(self, name):
self.name = name
try:
parts = re.search(r'((([A-Z]+)(\d+)([^\*]+)?)(\*(\d+))?)',
self.name).groups()
except AttributeError:
raise AlignmentException('Invalid gene name {}'.format(name))
self.name = parts[0]
self.base = parts[1]
self.prefix = parts[2]
self.family = parts[3]
self.allele = parts[6] if parts[6] else None
def __str__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return hash(self) == hash(other)
def __repr__(self):
return ('<GeneName={}, base={}, prefix={}, family={}, '
'allele={}>').format(str(self), self.base, self.prefix,
self.family, self.allele)
def __lt__(self, other):
return self.name < other.name
class GeneTies(dict):
TIES_PROB_THRESHOLD = 0.01
def __init__(self, genes, remove_gaps=True, ties=False):
        # Keep the on/off switch separate from the tie cache so that
        # enabling ties does not overwrite the cache dictionary.
        self.use_ties = ties
        self.ties = {}
        self.hypers = {}
        self.remove_gaps = remove_gaps
        self.update(genes)
self.allele_lookup = {}
for name in self.keys():
self.allele_lookup[name] = set([])
for name2 in self.keys():
if name2.base == name.base:
self.allele_lookup[name].add(name2)
def all_ties(self, length, mutation, cutoff=True):
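        # Group each germline with its ties at this (length, mutation) and
        # collapse every group to a consensus sequence with get_common_seq.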
ties = {}
for name in self:
tie_name = tuple(sorted(self.get_ties([name], length, mutation)))
if tie_name not in ties:
ties[tie_name] = get_common_seq(
[self[n] for n in tie_name], cutoff=cutoff
)
return ties
def get_ties(self, genes, length, mutation):
ties = set([])
for gene in genes:
ties.update(self.get_single_tie(gene, length, mutation))
return ties
def get_single_tie(self, gene, length, mutation):
# Used to disable gene ties for genotyping
        if not self.use_ties:
return set([gene])
length = int(length)
mutation = round(mutation, 3)
mutation = self.mut_bucket(mutation)
key = (length, mutation)
if key not in self.ties:
self.ties[key] = {}
if gene not in self:
return set([gene])
if gene not in self.ties[key]:
s_1 = (
self[gene].replace('-', '') if self.remove_gaps else self[gene]
)
self.ties[key][gene] = set([gene])
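            # Tie `gene` to every germline whose trailing `length` bases
            # differ by K positions, accepted when hypergeom(length,
            # mutation, K) reaches TIES_PROB_THRESHOLD.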
for name, v in sorted(self.items()):
s_2 = v.replace('-', '') if self.remove_gaps else v
K = dnautils.hamming(s_1[-length:], s_2[-length:])
p = self._hypergeom(length, mutation, K)
if p >= self.TIES_PROB_THRESHOLD:
self.ties[key][gene].add(name)
self.ties[key][gene] = self.all_alleles(self.ties[key][gene])
return self.ties[key][gene]
def _hypergeom(self, length, mutation, K):
key = (length, mutation, K)
if key not in self.hypers:
self.hypers[key] = hypergeom(length, mutation, K)
return self.hypers[key]
def mut_bucket(self, mut):
if 0 <= mut <= .05:
return .05
if mut <= .15:
return .15
return .30
def all_alleles(self, genes):
all_genes = set([])
for gene in genes:
all_genes.update(self.allele_lookup[gene])
return all_genes
class VGermlines(GeneTies):
def __init__(self, path_to_germlines, **kwargs):
self._min_length = None
self.alignments = OrderedDict()
with open(path_to_germlines) as fh:
for record in SeqIO.parse(fh, 'fasta'):
seq = str(record.seq).replace('.', '-')
if seq.startswith('-'):
continue
try:
v = VGene(seq)
name = GeneName(record.id)
self.alignments[name] = v
self[name] = seq
if (self._min_length is None or
self._min_length > len(v.sequence_ungapped)):
self._min_length = len(v.sequence_ungapped)
except Exception:
continue
super(VGermlines, self).__init__({k: v for k, v in self.items()},
**kwargs)
def get_single_tie(self, gene, length, mutation):
return super(VGermlines, self).get_single_tie(
gene, min(self.length_bucket(length), self._min_length), mutation
)
def length_bucket(self, length):
if 0 < length <= 100:
return 100
if 100 < length <= 150:
return 150
if 150 < length <= 200:
return 200
return 300
class VGene(object):
def __init__(self, gapped_sequence):
self.sequence = str(gapped_sequence).upper()
self.sequence_ungapped = self.sequence.replace('-', '')
if self.sequence[CDR3_OFFSET:].count('-') > 0:
raise AlignmentException('Cannot have gaps after CDR3 start '
'(position {})'.format(CDR3_OFFSET))
try:
self.ungapped_anchor_pos = next(find_v_position(
self.sequence_ungapped))
except StopIteration:
raise AlignmentException('Unable to find anchor')
def align(self, other_v):
diff = abs(self.ungapped_anchor_pos - other_v.ungapped_anchor_pos)
this_seq = self.sequence_ungapped
other_seq = other_v.sequence_ungapped
# Trim the sequence which has the maximal anchor position, and
# determine the CDR3 start position without gaps
if self.ungapped_anchor_pos > other_v.ungapped_anchor_pos:
this_seq = this_seq[diff:]
cdr3_start = self.ungapped_anchor_pos - diff
else:
other_seq = other_seq[diff:]
cdr3_start = other_v.ungapped_anchor_pos - diff
return {
'base': this_seq,
'seq': other_seq,
'diff': diff,
'cdr3_start': cdr3_start
}
def compare(self, other_v, max_extent, max_streak):
alignment = self.align(other_v)
this_seq = alignment['base'][:max_extent]
other_seq = alignment['seq'][:max_extent]
cdr3_offset = alignment['cdr3_start']
# Determine the CDR3 in the germline and sequence
this_cdr3 = this_seq[cdr3_offset:]
other_cdr3 = other_seq[cdr3_offset:]
length = min(len(this_cdr3), len(other_cdr3))
this_cdr3 = this_cdr3[:length]
other_cdr3 = other_cdr3[:length]
if len(this_cdr3) == 0 or len(other_cdr3) == 0:
raise AlignmentException('Empty CDR3 found after alignment')
# Find the extent of the sequence's V into the CDR3
streak = dnautils.find_streak_position(
this_cdr3, other_cdr3, max_streak)
if streak is not None:
# If there is a streak of mismatches, cut after the streak
max_index = cdr3_offset + (streak - max_streak)
else:
# Unlikely: the CDR3 in the sequence exactly matches the
# germline. Use the smaller sequence length (full match)
max_index = cdr3_offset + min(len(this_cdr3), len(other_cdr3))
# Compare to the end of V
this_seq = this_seq[:max_index]
other_seq = other_seq[:max_index]
if len(this_seq) != len(other_seq) or len(this_seq) == 0:
raise AlignmentException('Unequal sequences after alignment')
# Determine the distance between the germline and sequence
dist = dnautils.hamming(this_seq, other_seq)
return dist, len(other_seq)
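# Hedged example (the input sequences are assumed): ``align`` trims whichever
# sequence has the larger ungapped anchor position so both share a common
# CDR3 start, and ``compare`` then counts mismatches up to the first long
# mismatch streak inside the CDR3.
#     germline = VGene(imgt_gapped_germline)      # assumed gapped sequence
#     query = VGene(ungapped_query_sequence)      # assumed query sequence
#     dist, compared_length = germline.compare(query, max_extent=310,
#                                              max_streak=3)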
def find_v_position(sequence):
    if isinstance(sequence, str):
sequence = Seq(sequence)
frames = []
for shift in [2, 1, 0]:
seq = sequence[shift:]
seq = seq[:len(seq) - len(seq) % 3]
frames.append((shift, str(seq.translate())))
patterns = [
'D(.{3}((YY)|(YC)|(YH)))C',
'Y([YHC])C',
'D(.{5})C',
'Y..A',
'Y.C',
]
for pattern in patterns:
for found in _find_with_frameshifts(frames, pattern):
yield found
def _find_with_frameshifts(frames, regex):
for (shift, aas) in frames:
res = re.search(regex, aas)
if res is not None:
yield (res.end() - 1) * 3 + shift
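# Hedged sketch of the anchor search above: the nucleotide sequence is
# translated in all three reading frames and the amino-acid patterns are
# tried from most to least specific, yielding nucleotide positions of the
# conserved cysteine that starts the CDR3.
#     anchor_positions = list(find_v_position(v_sequence))  # assumed input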
class JGermlines(GeneTies):
defaults = {
'upstream_of_cdr3': 31,
'anchor_len': 18,
'min_anchor_len': 12,
}
def __init__(self, path_to_germlines,
upstream_of_cdr3=defaults['upstream_of_cdr3'],
anchor_len=defaults['anchor_len'],
min_anchor_len=defaults['min_anchor_len'],
**kwargs):
self._upstream_of_cdr3 = upstream_of_cdr3
self._anchor_len = anchor_len
self._min_anchor_len = min_anchor_len
self._min_length = None
with open(path_to_germlines) as fh:
for record in SeqIO.parse(fh, 'fasta'):
name = GeneName(record.id)
if all([c in 'ATCGN' for c in record.seq.upper()]):
self[name] = str(record.seq).upper()
if (self._min_length is None or
len(self[name]) < self._min_length):
self._min_length = len(self[name])
self._anchors = {name: seq[-anchor_len:] for name, seq in
self.items()}
super(JGermlines, self).__init__({k: v for k, v in self.items()},
**kwargs)
@property
def upstream_of_cdr3(self):
return self._upstream_of_cdr3
@property
def anchor_len(self):
return self._anchor_len
@property
def full_anchors(self):
return self._anchors
def get_j_in_cdr3(self, gene):
return self[gene][:-self._upstream_of_cdr3]
def get_all_anchors(self, allowed_genes=None):
if allowed_genes is None:
allowed_genes = self
else:
allowed_genes = {k: v for k, v in self.items() if k.name in
allowed_genes}
max_len = max(map(len, allowed_genes.values()))
for trim_len in range(0, max_len, 3):
for j, seq in allowed_genes.items():
                # With trim_len == 0 a slice ending at ``-0`` would be empty,
                # so fall back to ``None`` to keep the whole anchor.
                trimmed_seq = seq[-self.anchor_len:-trim_len or None]
if len(trimmed_seq) >= self._min_anchor_len:
yield trimmed_seq, j
def get_single_tie(self, gene, length, mutation):
# Used to disable gene ties for genotyping
if not self.ties:
return set([gene])
seq = self[gene][-self.anchor_len:]
tied = self.all_alleles(set([gene]))
for j, other_seq in sorted(self.items()):
other_seq = other_seq[-self.anchor_len:][:len(seq)]
if other_seq == seq:
tied.add(j)
elif dnautils.hamming(other_seq, seq) == 0:
tied.add(j)
return tied
def all_ties(self, length, mutation):
ties = {}
for name in self:
tie_name = tuple(sorted(self.get_ties([name], length, mutation)))
if tie_name not in ties:
ties[tie_name] = get_common_seq(
[self[n] for n in tie_name], right=True
)
return ties | PypiClean |
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/integrations/handlers/openai_handler/openai_handler.py | import os
import re
import math
import json
import shutil
import tempfile
import datetime
import textwrap
import subprocess
import concurrent.futures
from typing import Optional, Dict
import openai
import numpy as np
import pandas as pd
from mindsdb.utilities.hooks import before_openai_query, after_openai_query
from mindsdb.utilities import log
from mindsdb.integrations.libs.base import BaseMLEngine
from mindsdb.integrations.handlers.openai_handler.helpers import retry_with_exponential_backoff, \
truncate_msgs_for_token_limit
from mindsdb.integrations.utilities.handler_utils import get_api_key
CHAT_MODELS = ('gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k')
class OpenAIHandler(BaseMLEngine):
name = 'openai'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generative = True
self.default_model = 'gpt-3.5-turbo'
self.default_mode = 'default' # can also be 'conversational' or 'conversational-full'
self.supported_modes = ['default', 'conversational', 'conversational-full', 'image', 'embedding']
self.rate_limit = 60 # requests per minute
self.max_batch_size = 20
self.default_max_tokens = 100
self.chat_completion_models = CHAT_MODELS
self.supported_ft_models = ('davinci', 'curie', 'babbage', 'ada') # base models compatible with finetuning
@staticmethod
def create_validation(target, args=None, **kwargs):
if 'using' not in args:
raise Exception("OpenAI engine requires a USING clause! Refer to its documentation for more details.")
else:
args = args['using']
if len(set(args.keys()) & {'question_column', 'prompt_template', 'json_struct', 'prompt'}) == 0:
            raise Exception('One of `question_column`, `prompt_template`, `json_struct` or `prompt` is required for this engine.')
keys_collection = [
['prompt_template'],
['question_column', 'context_column'],
['prompt', 'user_column', 'assistant_column'],
['json_struct']
]
for keys in keys_collection:
if keys[0] in args and any(x[0] in args for x in keys_collection if x != keys):
raise Exception(textwrap.dedent('''\
Please provide one of
1) a `prompt_template`
2) a `question_column` and an optional `context_column`
3) a `json_struct`
                    4) a `prompt`, `user_column` and `assistant_column`
'''))
# for all args that are not expected, raise an error
known_args = set()
# flatten of keys_collection
for keys in keys_collection:
known_args = known_args.union(set(keys))
# TODO: need a systematic way to maintain a list of known args
known_args = known_args.union(
{
"target",
"model_name",
"mode",
"predict_params",
"input_text",
"ft_api_info",
"ft_result_stats",
"runtime",
"max_tokens",
"temperature",
"api_key",
"openai_api_key",
}
)
unknown_args = set(args.keys()) - known_args
if unknown_args:
# return a list of unknown args as a string
raise Exception(
f"Unknown arguments: {', '.join(unknown_args)}.\n Known arguments are: {', '.join(known_args)}"
)
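    # Hedged example of arguments accepted by the validation above (the
    # values are illustrative, not handler defaults):
    #     OpenAIHandler.create_validation('answer', args={'using': {
    #         'prompt_template': 'Answer briefly: {{question}}',
    #         'model_name': 'gpt-3.5-turbo',
    #         'max_tokens': 200,
    #     }})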
def create(self, target, args=None, **kwargs):
args = args['using']
args['target'] = target
api_key = get_api_key('openai', args, self.engine_storage)
available_models = [m.openai_id for m in openai.Model.list(api_key=api_key).data]
if not args.get('model_name'):
args['model_name'] = self.default_model
elif args['model_name'] not in available_models:
raise Exception(f"Invalid model name. Please use one of {available_models}")
if not args.get('mode'):
args['mode'] = self.default_mode
elif args['mode'] not in self.supported_modes:
raise Exception(f"Invalid operation mode. Please use one of {self.supported_modes}")
self.model_storage.json_set('args', args)
def predict(self, df, args=None):
"""
If there is a prompt template, we use it. Otherwise, we use the concatenation of `context_column` (optional) and `question_column` to ask for a completion.
""" # noqa
# TODO: support for edits, embeddings and moderation
pred_args = args['predict_params'] if args else {}
args = self.model_storage.json_get('args')
df = df.reset_index(drop=True)
if pred_args.get('mode'):
if pred_args['mode'] in self.supported_modes:
args['mode'] = pred_args['mode']
else:
raise Exception(f"Invalid operation mode. Please use one of {self.supported_modes}.") # noqa
if pred_args.get('prompt_template', False):
base_template = pred_args['prompt_template'] # override with predict-time template if available
elif args.get('prompt_template', False):
base_template = args['prompt_template']
else:
base_template = None
# Embedding Mode
if args.get('mode', self.default_mode) == 'embedding':
api_args = {
'question_column': pred_args.get('question_column', None),
'model': pred_args.get('model_name', 'text-embedding-ada-002')
}
model_name = 'embedding'
if args.get('question_column'):
prompts = list(df[args['question_column']].apply(lambda x: str(x)))
empty_prompt_ids = np.where(df[[args['question_column']]].isna().all(axis=1).values)[0]
else:
raise Exception('Embedding mode needs a question_column')
# Image mode
elif args.get('mode', self.default_mode) == 'image':
api_args = {
'n': pred_args.get('n', None),
'size': pred_args.get('size', None),
'response_format': pred_args.get('response_format', None),
}
api_args = {k: v for k, v in api_args.items() if v is not None} # filter out non-specified api args
model_name = 'image'
if args.get('question_column'):
prompts = list(df[args['question_column']].apply(lambda x: str(x)))
empty_prompt_ids = np.where(df[[args['question_column']]].isna().all(axis=1).values)[0]
elif args.get('prompt_template'):
prompts, empty_prompt_ids = self._get_completed_prompts(base_template, df)
else:
raise Exception('Image mode needs either `prompt_template` or `question_column`.')
# Chat or normal completion mode
else:
if args.get('question_column', False) and args['question_column'] not in df.columns:
raise Exception(f"This model expects a question to answer in the '{args['question_column']}' column.")
if args.get('context_column', False) and args['context_column'] not in df.columns:
raise Exception(f"This model expects context in the '{args['context_column']}' column.")
# api argument validation
model_name = args.get('model_name', self.default_model)
api_args = {
'max_tokens': pred_args.get('max_tokens', args.get('max_tokens', self.default_max_tokens)),
'temperature': min(1.0, max(0.0, pred_args.get('temperature', args.get('temperature', 0.0)))),
'top_p': pred_args.get('top_p', None),
'n': pred_args.get('n', None),
'stop': pred_args.get('stop', None),
'presence_penalty': pred_args.get('presence_penalty', None),
'frequency_penalty': pred_args.get('frequency_penalty', None),
'best_of': pred_args.get('best_of', None),
'logit_bias': pred_args.get('logit_bias', None),
'user': pred_args.get('user', None),
}
if args.get('mode', self.default_mode) != 'default' and model_name not in self.chat_completion_models:
raise Exception(f"Conversational modes are only available for the following models: {', '.join(self.chat_completion_models)}") # noqa
if args.get('prompt_template', False):
prompts, empty_prompt_ids = self._get_completed_prompts(base_template, df)
elif args.get('context_column', False):
empty_prompt_ids = np.where(df[[args['context_column'],
args['question_column']]].isna().all(axis=1).values)[0]
contexts = list(df[args['context_column']].apply(lambda x: str(x)))
questions = list(df[args['question_column']].apply(lambda x: str(x)))
prompts = [f'Context: {c}\nQuestion: {q}\nAnswer: ' for c, q in zip(contexts, questions)]
elif args.get('json_struct', False):
empty_prompt_ids = np.where(df[[args['input_text']]].isna().all(axis=1).values)[0]
prompts = []
for i in df.index:
if 'json_struct' in df.columns:
if isinstance(df['json_struct'][i], str):
df['json_struct'][i] = json.loads(df['json_struct'][i])
json_struct = ''
for ind, val in enumerate(df['json_struct'][i].values()):
                            json_struct = json_struct + f'{ind + 1}. {val}\n'
else:
json_struct = ''
for ind, val in enumerate(args['json_struct'].values()):
json_struct = json_struct + f'{ind + 1}. {val}\n'
p = textwrap.dedent(f'''\
Using text starting after 'The text is:', give exactly {len(args['json_struct'])} answers to the questions:
{{{{json_struct}}}}
Answers should be in the same order as the questions.
Each answer should start with a question number.
Each answer must end with new line.
If there is no answer to the question in the text, put a -.
Answers should be as short as possible, ideally 1-2 words (unless otherwise specified).
The text is:
{{{{{args['input_text']}}}}}
''')
p = p.replace('{{json_struct}}', json_struct)
for column in df.columns:
if column == 'json_struct':
continue
p = p.replace(f'{{{{{column}}}}}', str(df[column][i]))
prompts.append(p)
elif 'prompt' in args:
empty_prompt_ids = []
prompts = list(df[args['user_column']])
else:
empty_prompt_ids = np.where(df[[args['question_column']]].isna().all(axis=1).values)[0]
prompts = list(df[args['question_column']].apply(lambda x: str(x)))
# remove prompts without signal from completion queue
prompts = [j for i, j in enumerate(prompts) if i not in empty_prompt_ids]
api_key = get_api_key('openai', args, self.engine_storage)
api_args = {k: v for k, v in api_args.items() if v is not None} # filter out non-specified api args
completion = self._completion(model_name, prompts, api_key, api_args, args, df)
# add null completion for empty prompts
for i in sorted(empty_prompt_ids):
completion.insert(i, None)
pred_df = pd.DataFrame(completion, columns=[args['target']])
# restore json struct
if args.get('json_struct', False):
for i in pred_df.index:
try:
if 'json_struct' in df.columns:
json_keys = df['json_struct'][i].keys()
else:
json_keys = args['json_struct'].keys()
responses = pred_df[args['target']][i].split('\n')
                    responses = [x[3:] for x in responses]  # strip the 'N. ' question-number prefix
pred_df[args['target']][i] = {
key: val for key, val in zip(
json_keys,
responses
)
}
except Exception:
pred_df[args['target']][i] = None
return pred_df
def _completion(self, model_name, prompts, api_key, api_args, args, df, parallel=True):
"""
Handles completion for an arbitrary amount of rows.
There are a couple checks that should be done when calling OpenAI's API:
- account max batch size, to maximize batch size first
- account rate limit, to maximize parallel calls second
Additionally, single completion calls are done with exponential backoff to guarantee all prompts are processed,
because even with previous checks the tokens-per-minute limit may apply.
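        For example (figures are assumed): 95 prompts with a server-reported
        maximum batch size of 20 are split into ceil(95 / 20) = 5 batches,
        submitted sequentially or through the thread pool.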
"""
@retry_with_exponential_backoff()
def _submit_completion(model_name, prompts, api_key, api_args, args, df):
kwargs = {
'model': model_name,
'api_key': api_key,
'organization': args.get('api_organization'),
}
if model_name == 'image':
return _submit_image_completion(kwargs, prompts, api_args)
elif model_name == 'embedding':
return _submit_embedding_completion(kwargs, prompts, api_args)
elif model_name in self.chat_completion_models:
return _submit_chat_completion(kwargs, prompts, api_args, df, mode=args.get('mode', 'conversational'))
else:
return _submit_normal_completion(kwargs, prompts, api_args)
def _log_api_call(params, response):
after_openai_query(params, response)
params2 = params.copy()
params2.pop('api_key', None)
params2.pop('user', None)
log.logger.debug(f'>>>openai call: {params2}:\n{response}')
def _submit_normal_completion(kwargs, prompts, api_args):
def _tidy(comp):
tidy_comps = []
for c in comp['choices']:
if 'text' in c:
tidy_comps.append(c['text'].strip('\n').strip(''))
return tidy_comps
kwargs['prompt'] = prompts
kwargs = {**kwargs, **api_args}
before_openai_query(kwargs)
resp = _tidy(openai.Completion.create(**kwargs))
_log_api_call(kwargs, resp)
return resp
def _submit_embedding_completion(kwargs, prompts, api_args):
def _tidy(comp):
tidy_comps = []
for c in comp['data']:
if 'embedding' in c:
tidy_comps.append([c['embedding']])
return tidy_comps
kwargs['input'] = prompts
kwargs = {**kwargs, **api_args}
before_openai_query(kwargs)
resp = _tidy(openai.Embedding.create(**kwargs))
_log_api_call(kwargs, resp)
return resp
def _submit_chat_completion(kwargs, prompts, api_args, df, mode='conversational'):
def _tidy(comp):
tidy_comps = []
for c in comp['choices']:
if 'message' in c:
tidy_comps.append(c['message']['content'].strip('\n').strip(''))
return tidy_comps
completions = []
if mode != 'conversational':
initial_prompt = {"role": "system", "content": "You are a helpful assistant. Your task is to continue the chat."} # noqa
else:
# get prompt from model
initial_prompt = {"role": "system", "content": args['prompt']} # noqa
kwargs['messages'] = [initial_prompt]
last_completion_content = None
for pidx in range(len(prompts)):
if mode != 'conversational':
kwargs['messages'].append({'role': 'user', 'content': prompts[pidx]})
else:
question = prompts[pidx]
if question:
kwargs['messages'].append({'role': 'user', 'content': question})
answer = df.iloc[pidx][args.get('assistant_column')]
if answer:
kwargs['messages'].append({'role': 'assistant', 'content': answer})
if mode == 'conversational-full' or (mode == 'conversational' and pidx == len(prompts) - 1):
kwargs['messages'] = truncate_msgs_for_token_limit(kwargs['messages'],
kwargs['model'],
api_args['max_tokens'])
pkwargs = {**kwargs, **api_args}
before_openai_query(kwargs)
resp = _tidy(openai.ChatCompletion.create(**pkwargs))
_log_api_call(pkwargs, resp)
completions.extend(resp)
elif mode == 'default':
kwargs['messages'] = [initial_prompt] + [kwargs['messages'][-1]]
pkwargs = {**kwargs, **api_args}
before_openai_query(kwargs)
resp = _tidy(openai.ChatCompletion.create(**pkwargs))
_log_api_call(pkwargs, resp)
completions.extend(resp)
else:
# in "normal" conversational mode, we request completions only for the last row
last_completion_content = None
if args.get('answer_column') in df.columns:
# insert completion if provided, which saves redundant API calls
completions.extend([df.iloc[pidx][args.get('answer_column')]])
else:
completions.extend([''])
if args.get('answer_column') in df.columns:
kwargs['messages'].append({'role': 'assistant',
'content': df.iloc[pidx][args.get('answer_column')]})
elif last_completion_content:
# interleave assistant responses with user input
kwargs['messages'].append({'role': 'assistant', 'content': last_completion_content[0]})
return completions
def _submit_image_completion(kwargs, prompts, api_args):
def _tidy(comp):
return [c[0]['url'] if 'url' in c[0].keys() else c[0]['b64_json'] for c in comp]
kwargs.pop('model')
completions = [openai.Image.create(**{'prompt': p, **kwargs, **api_args})['data'] for p in prompts]
return _tidy(completions)
try:
# check if simple completion works
completion = _submit_completion(
model_name,
prompts,
api_key,
api_args,
args,
df
)
return completion
except openai.error.InvalidRequestError as e:
# else, we get the max batch size
e = e.user_message
if 'you can currently request up to at most a total of' in e:
pattern = 'a total of'
max_batch_size = int(e[e.find(pattern) + len(pattern):].split(').')[0])
else:
max_batch_size = self.max_batch_size # guards against changes in the API message
if not parallel:
completion = None
for i in range(math.ceil(len(prompts) / max_batch_size)):
partial = _submit_completion(model_name,
prompts[i * max_batch_size:(i + 1) * max_batch_size],
api_key,
api_args,
args,
df)
                    if not completion:
                        completion = partial
                    else:
                        # `_submit_completion` returns an already-tidied list
                        # of completions, so batches are merged by extending
                        # the list (mirroring the parallel branch below).
                        completion.extend(partial)
else:
promises = []
with concurrent.futures.ThreadPoolExecutor() as executor:
for i in range(math.ceil(len(prompts) / max_batch_size)):
print(f'{i * max_batch_size}:{(i+1) * max_batch_size}/{len(prompts)}')
future = executor.submit(_submit_completion,
model_name,
prompts[i * max_batch_size:(i + 1) * max_batch_size],
api_key,
api_args,
args,
df)
promises.append({"choices": future})
completion = None
for p in promises:
if not completion:
completion = p['choices'].result()
else:
completion.extend(p['choices'].result())
return completion
def describe(self, attribute: Optional[str] = None) -> pd.DataFrame:
# TODO: Update to use update() artifacts
args = self.model_storage.json_get('args')
if attribute == 'args':
return pd.DataFrame(args.items(), columns=['key', 'value'])
elif attribute == 'metadata':
api_key = get_api_key('openai', args, self.engine_storage)
model_name = args.get('model_name', self.default_model)
meta = openai.Model.retrieve(model_name, api_key=api_key)
return pd.DataFrame(meta.items(), columns=['key', 'value'])
else:
tables = ['args', 'metadata']
return pd.DataFrame(tables, columns=['tables'])
def finetune(self, df: Optional[pd.DataFrame] = None, args: Optional[Dict] = None) -> None:
"""
Fine-tune OpenAI GPT models. Steps are roughly:
- Analyze input data and modify it according to suggestions made by the OpenAI utility tool
- Get a training and validation file
- Determine base model to use
- Submit a fine-tuning job via the OpenAI API
- Monitor progress with exponential backoff (which has been modified for greater control given a time budget in hours),
- Gather stats once fine-tuning finishes
- Modify model metadata so that the new version triggers the fine-tuned version of the model (stored in the user's OpenAI account)
Caveats:
- As base fine-tuning models, OpenAI only supports the original GPT ones: `ada`, `babbage`, `curie`, `davinci`. This means if you fine-tune successively more than once, any fine-tuning other than the most recent one is lost.
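        Example (hedged; column names and values are assumed):
            handler.finetune(df=pd.DataFrame({'prompt': [...],
                                              'completion': [...]}),
                             args={'using': {'n_epochs': 2}})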
""" # noqa
args = args if args else {}
using_args = args.pop('using') if 'using' in args else {}
prompt_col = using_args.get('prompt_column', 'prompt')
completion_col = using_args.get('completion_column', 'completion')
for col in [prompt_col, completion_col]:
if col not in set(df.columns):
raise Exception(f"To fine-tune this OpenAI model, please format your select data query to have a `{prompt_col}` column and a `{completion_col}` column first.") # noqa
args = {**using_args, **args}
prev_model_name = self.base_model_storage.json_get('args').get('model_name', '')
if prev_model_name not in self.supported_ft_models:
raise Exception(f"This model cannot be finetuned. Supported base models are {self.supported_ft_models}")
openai.api_key = get_api_key('openai', args, self.engine_storage)
finetune_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
temp_storage_path = tempfile.mkdtemp()
temp_file_name = f"ft_{finetune_time}"
temp_model_storage_path = f"{temp_storage_path}/{temp_file_name}.jsonl"
df.to_json(temp_model_storage_path, orient='records', lines=True)
# TODO avoid subprocess usage once OpenAI enables non-CLI access
subprocess.run(
[
"openai", "tools", "fine_tunes.prepare_data",
"-f", temp_model_storage_path, # from file
'-q' # quiet mode (accepts all suggestions)
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
file_names = {'original': f'{temp_file_name}.jsonl',
'base': f'{temp_file_name}_prepared.jsonl',
'train': f'{temp_file_name}_prepared_train.jsonl',
'val': f'{temp_file_name}_prepared_valid.jsonl'}
jsons = {k: None for k in file_names.keys()}
for split, file_name in file_names.items():
if os.path.isfile(os.path.join(temp_storage_path, file_name)):
jsons[split] = openai.File.create(
file=open(f"{temp_storage_path}/{file_name}", "rb"),
purpose='fine-tune')
train_file_id = jsons['train'].id if isinstance(jsons['train'], openai.File) else jsons['base'].id
val_file_id = jsons['val'].id if isinstance(jsons['val'], openai.File) else None
def _get_model_type(model_name: str):
for model_type in ['ada', 'curie', 'babbage', 'davinci']:
if model_type in model_name.lower():
return model_type
return 'ada'
# `None` values are internally imputed by OpenAI to `null` or default values
ft_params = {
'training_file': train_file_id,
'validation_file': val_file_id,
'model': _get_model_type(prev_model_name),
'suffix': 'mindsdb',
'n_epochs': using_args.get('n_epochs', None),
'batch_size': using_args.get('batch_size', None),
'learning_rate_multiplier': using_args.get('learning_rate_multiplier', None),
'prompt_loss_weight': using_args.get('prompt_loss_weight', None),
'compute_classification_metrics': using_args.get('compute_classification_metrics', None),
'classification_n_classes': using_args.get('classification_n_classes', None),
'classification_positive_class': using_args.get('classification_positive_class', None),
'classification_betas': using_args.get('classification_betas', None),
}
start_time = datetime.datetime.now()
ft_result = openai.FineTune.create(**{k: v for k, v in ft_params.items() if v is not None})
@retry_with_exponential_backoff(
hour_budget=args.get('hour_budget', 8),
errors=(openai.error.RateLimitError, openai.error.OpenAIError))
def _check_ft_status(model_id):
ft_retrieved = openai.FineTune.retrieve(id=model_id)
if ft_retrieved['status'] in ('succeeded', 'failed'):
return ft_retrieved
else:
raise openai.error.OpenAIError('Fine-tuning still pending!')
ft_stats = _check_ft_status(ft_result.id)
ft_model_name = ft_stats['fine_tuned_model']
if ft_stats['status'] != 'succeeded':
raise Exception(f"Fine-tuning did not complete successfully (status: {ft_stats['status']}). Error message: {ft_stats['events'][-1]['message']}") # noqa
end_time = datetime.datetime.now()
runtime = end_time - start_time
result_file_id = openai.FineTune.retrieve(id=ft_result.id)['result_files'][0].id
name_extension = openai.File.retrieve(id=result_file_id).filename
result_path = f'{temp_storage_path}/ft_{finetune_time}_result_{name_extension}'
with open(result_path, 'wb') as f:
f.write(openai.File.download(id=result_file_id))
train_stats = pd.read_csv(result_path)
if 'validation_token_accuracy' in train_stats.columns:
train_stats = train_stats[train_stats['validation_token_accuracy'].notnull()]
args['model_name'] = ft_model_name
args['ft_api_info'] = ft_stats.to_dict_recursive()
args['ft_result_stats'] = train_stats.to_dict()
args['runtime'] = runtime.total_seconds()
args['mode'] = self.base_model_storage.json_get('args').get('mode', self.default_mode)
self.model_storage.json_set('args', args)
shutil.rmtree(temp_storage_path)
@staticmethod
def _get_completed_prompts(base_template, df):
columns = []
spans = []
matches = list(re.finditer("{{(.*?)}}", base_template))
assert len(matches) > 0, 'No placeholders found in the prompt, please provide a valid prompt template.'
first_span = matches[0].start()
last_span = matches[-1].end()
for m in matches:
columns.append(m[0].replace('{', '').replace('}', ''))
spans.extend((m.start(), m.end()))
spans = spans[1:-1] # omit first and last, they are added separately
template = [base_template[s:e] for s, e in list(zip(spans, spans[1:]))[::2]] # take every other to skip placeholders # noqa
template.insert(0, base_template[0:first_span]) # add prompt start
template.append(base_template[last_span:]) # add prompt end
empty_prompt_ids = np.where(df[columns].isna().all(axis=1).values)[0]
df['__mdb_prompt'] = ''
for i in range(len(template)):
atom = template[i]
if i < len(columns):
col = df[columns[i]].replace(to_replace=[None], value='') # add empty quote if data is missing
df['__mdb_prompt'] = df['__mdb_prompt'].apply(lambda x: x + atom) + col.astype("string")
else:
df['__mdb_prompt'] = df['__mdb_prompt'].apply(lambda x: x + atom)
prompts = list(df['__mdb_prompt'])
return prompts, empty_prompt_ids | PypiClean |
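# Hedged illustration of _get_completed_prompts (column names are assumed):
# the template is split around its {{...}} placeholders and each row's
# column values are spliced in, returning one completed prompt per row plus
# the indices of rows whose placeholder columns were all empty.
#     prompts, empty_ids = OpenAIHandler._get_completed_prompts(
#         'Context: {{context}} Question: {{question}}',
#         pd.DataFrame({'context': ['a'], 'question': ['b']}))
#     # prompts == ['Context: a Question: b']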
/BicycleParameters-1.0.0.tar.gz/BicycleParameters-1.0.0/bicycleparameters/rider.py |
import os
import numpy as np
from numpy import sin, cos, sqrt
from scipy.optimize import fsolve
import yeadon
from .io import remove_uncertainties
from .inertia import combine_bike_rider
def yeadon_vec_to_bicycle_vec(vector, measured_bicycle_par,
benchmark_bicycle_par):
"""
Parameters
----------
vector : np.matrix, shape(3, 1)
A vector from the Yeadon origin to a point expressed in the Yeadon
reference frame.
measured_bicycle_par : dictionary
The raw bicycle measurements.
benchmark_bicycle_par : dictionary
The Meijaard 2007 et. al parameters for this bicycle.
Returns
-------
vector_wrt_bike : np.matrix, shape(3, 1)
The vector from the bicycle origin to the same point above expressed
in the bicycle reference frame.
"""
# This is the rotation matrix that relates Yeadon's reference frame
# to the bicycle reference frame.
    # vector_expressed_in_bike = rot_mat * vector_expressed_in_yeadon
rot_mat = np.matrix([[0.0, -1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0]])
# The relevant bicycle parameters:
measuredPar = remove_uncertainties(measured_bicycle_par)
benchmarkPar = remove_uncertainties(benchmark_bicycle_par)
# bottom bracket height
hbb = measuredPar['hbb']
# chain stay length
lcs = measuredPar['lcs']
# rear wheel radius
rR = benchmarkPar['rR']
# seat post length
lsp = measuredPar['lsp']
# seat tube length
lst = measuredPar['lst']
# seat tube angle
lambdast = measuredPar['lamst']
# bicycle origin to yeadon origin expressed in bicycle frame
yeadon_origin_in_bike_frame = \
np.matrix([[np.sqrt(lcs**2 - (-hbb + rR)**2) + (-lsp - lst) * np.cos(lambdast)], # bx
[0.0],
[-hbb + (-lsp - lst) * np.sin(lambdast)]]) # bz
vector_wrt_bike = yeadon_origin_in_bike_frame + rot_mat * vector
return vector_wrt_bike
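# Hedged usage sketch (the point and parameter dictionaries are assumed): a
# point measured in Yeadon's frame is rotated into the bicycle frame and
# offset by the Yeadon-origin vector computed above.
#     point_in_bike = yeadon_vec_to_bicycle_vec(
#         np.matrix([[0.1], [0.0], [0.4]]), measured_par, benchmark_par)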
def configure_rider(pathToRider, bicycle, bicyclePar, measuredPar, draw):
"""
    Returns the rider parameters, the bicycle parameters with a rider, and a
    human object configured to sit on the bicycle.
Parameters
----------
pathToRider : string
Path to the rider's data folder.
bicycle : string
The short name of the bicycle.
bicyclePar : dictionary
Contains the benchmark bicycle parameters for a bicycle.
measuredPar : dictionary
Contains the measured values of the bicycle.
draw : boolean, optional
If true, visual python will be used to draw a three dimensional
image of the rider.
Returns
-------
    riderPar : dictionary
The inertial parameters of the rider with reference to the
benchmark coordinate system.
    human : yeadon.Human
The human object that represents the rider seated on the
bicycle.
bicycleRiderPar : dictionary
The benchmark parameters of the bicycle with the rider added to
the rear frame.
"""
try:
# get the rider name
rider = os.path.split(pathToRider)[1]
# get the paths to the yeadon data files
pathToYeadon = os.path.join(pathToRider, 'RawData',
rider + 'YeadonMeas.txt')
pathToCFG = os.path.join(pathToRider, 'RawData',
rider + bicycle + 'YeadonCFG.txt')
# generate the human that has been configured to sit on the bicycle
# the human's inertial parameters are expressed in the Yeadon
# reference frame about the Yeadon origin.
human = rider_on_bike(bicyclePar, measuredPar,
pathToYeadon, pathToCFG, draw)
# This is the rotation matrix that relates Yeadon's reference frame
# to the bicycle reference frame.
rot_mat = np.array([[0.0, -1.0, 0.0],
[-1.0, 0.0, 0.0],
[0.0, 0.0, -1.0]])
# This is the human's inertia expressed in the bicycle reference
# frame about the human's center of mass.
human_inertia_in_bike_frame = \
human.inertia_transformed(rotmat=rot_mat)
human_com_in_bike_frame = \
yeadon_vec_to_bicycle_vec(human.center_of_mass, measuredPar,
bicyclePar)
# build a dictionary to store the inertial data
riderPar = {'IBxx': human_inertia_in_bike_frame[0, 0],
'IByy': human_inertia_in_bike_frame[1, 1],
'IBzz': human_inertia_in_bike_frame[2, 2],
'IBxz': human_inertia_in_bike_frame[2, 0],
'mB': human.mass,
'xB': human_com_in_bike_frame[0, 0],
'yB': human_com_in_bike_frame[1, 0],
'zB': human_com_in_bike_frame[2, 0]}
    except Exception:  # if the yeadon calculations fail
# no rider was added
print('Calculations in yeadon failed. No rider added.')
# raise the error that caused things to fail
raise
else:
bicycleRiderPar = combine_bike_rider(bicyclePar, riderPar)
return riderPar, human, bicycleRiderPar
def rider_on_bike(benchmarkPar, measuredPar, yeadonMeas, yeadonCFG,
drawrider):
"""
Returns a yeadon human configured to sit on a bicycle.
Parameters
----------
benchmarkPar : dictionary
A dictionary containing the benchmark bicycle parameters.
measuredPar : dictionary
A dictionary containing the raw geometric measurements of the bicycle.
yeadonMeas : str
Path to a text file that holds the 95 yeadon measurements. See
`yeadon documentation`_.
yeadonCFG : str
Path to a text file that holds configuration variables. See `yeadon
        documentation`_. As of now, only the 'somersault' angle can be set as an
input. The remaining variables are either zero or calculated in this
method.
drawrider : bool
Switch to draw the rider, with vectors pointing to the desired
position of the hands and feet of the rider (at the handles and
bottom bracket). Requires python-visual.
Returns
-------
human : yeadon.Human
Human object is returned with an updated configuration.
        The configuration dictionary, taken from ``human.CFG``, has the following keys'
updated:
'PJ1extension'
'J1J2flexion'
'CA1extension'
'CA1adduction'
'CA1rotation'
'A1A2extension'
'somersault'
'PK1extension'
'K1K2flexion'
'CB1extension'
'CB1abduction'
'CB1rotation'
'B1B2extension'
Notes
-----
Requires that the bike object has a raw data text input file that contains
the measurements necessary to situate a rider on the bike (i.e.
``<pathToData>/bicycles/<short name>/RawData/<short name>Measurements.txt``).
.. _yeadon documentation : http://packages.python.org/yeadon
"""
# create human using input measurements and configuration files
human = yeadon.Human(yeadonMeas, yeadonCFG)
    # The relevant human measurements:
L_j3L = human.meas['Lj3L']
L_j5L = human.meas['Lj5L']
L_j6L = human.meas['Lj6L']
L_s4L = human.meas['Ls4L']
L_s4w = human.meas['Ls4w']
L_a2L = human.meas['La2L']
L_a4L = human.meas['La4L']
L_a5L = human.meas['La5L']
somersault = human.CFG['somersault']
# The relevant bicycle parameters:
measuredPar = remove_uncertainties(measuredPar)
benchmarkPar = remove_uncertainties(benchmarkPar)
# bottom bracket height
h_bb = measuredPar['hbb']
# chain stay length
l_cs = measuredPar['lcs']
# rear wheel radius
r_R = benchmarkPar['rR']
# front wheel radius
r_F = benchmarkPar['rF']
# seat post length
l_sp = measuredPar['lsp']
# seat tube length
l_st = measuredPar['lst']
# seat tube angle
lambda_st = measuredPar['lamst']
# handlebar width
w_hb = measuredPar['whb']
# distance from rear wheel hub to hand
L_hbR = measuredPar['LhbR']
# distance from front wheel hub to hand
L_hbF = measuredPar['LhbF']
# wheelbase
w = benchmarkPar['w']
def zero(unknowns):
"""For the derivation of these equations see:
http://nbviewer.ipython.org/github/chrisdembia/yeadon/blob/v1.2.0/examples/bicyclerider/bicycle_example.ipynb
"""
PJ1extension = unknowns[0]
J1J2flexion = unknowns[1]
CA1extension = unknowns[2]
CA1adduction = unknowns[3]
CA1rotation = unknowns[4]
A1A2extension = unknowns[5]
alpha_y = unknowns[6]
alpha_z = unknowns[7]
beta_y = unknowns[8]
beta_z = unknowns[9]
phi_J1 = PJ1extension
phi_J2 = J1J2flexion
phi_A1 = CA1extension
theta_A1 = CA1adduction
psi_A = CA1rotation
phi_A2 = A1A2extension
phi_P = somersault
zero = np.zeros(10)
zero[0] = (L_j3L*(-sin(phi_J1)*cos(phi_P) - sin(phi_P)*cos(phi_J1))
+ (-l_sp - l_st)*cos(lambda_st) + (-(-sin(phi_J1)*
sin(phi_P) + cos(phi_J1)*cos(phi_P))*sin(phi_J2) +
(-sin(phi_J1)*cos(phi_P) - sin(phi_P)*cos(phi_J1))*
cos(phi_J2))*(-L_j3L + L_j5L + L_j6L))
zero[1] = (L_j3L*(-sin(phi_J1)*sin(phi_P) + cos(phi_J1)*cos(phi_P))
+ (-l_sp - l_st)*sin(lambda_st) + ((-sin(phi_J1)*
sin(phi_P) + cos(phi_J1)*cos(phi_P))*cos(phi_J2) -
(sin(phi_J1)*cos(phi_P) + sin(phi_P)*cos(phi_J1))*
sin(phi_J2))*(-L_j3L + L_j5L + L_j6L))
zero[2] = -L_hbF + sqrt(alpha_y**2 + alpha_z**2 + 0.25*w_hb**2)
zero[3] = -L_hbR + sqrt(beta_y**2 + beta_z**2 + 0.25*w_hb**2)
zero[4] = alpha_y - beta_y - w
zero[5] = alpha_z - beta_z + r_F - r_R
zero[6] = (-L_a2L*sin(theta_A1) + L_s4w/2 - 0.5*w_hb + (sin(phi_A2)*
sin(psi_A)*cos(theta_A1) + sin(theta_A1)*cos(phi_A2))*
(L_a2L - L_a4L - L_a5L))
zero[7] = (-L_a2L*(-sin(phi_A1)*cos(phi_P)*cos(theta_A1) -
sin(phi_P)*cos(phi_A1)*cos(theta_A1)) - L_s4L*sin(phi_P)
- beta_y - sqrt(l_cs**2 - (-h_bb + r_R)**2) - (-l_sp -
l_st)*cos(lambda_st) + (-(-(sin(phi_A1)*cos(psi_A) +
sin(psi_A)*sin(theta_A1)*cos(phi_A1))*sin(phi_P) +
(-sin(phi_A1)*sin(psi_A)*sin(theta_A1) + cos(phi_A1)*
cos(psi_A))*cos(phi_P))*sin(phi_A2) + (-sin(phi_A1)*
cos(phi_P)*cos(theta_A1) - sin(phi_P)*cos(phi_A1)*
cos(theta_A1))*cos(phi_A2))*(L_a2L - L_a4L - L_a5L))
zero[8] = (-L_a2L*(-sin(phi_A1)*sin(phi_P)*cos(theta_A1) +
cos(phi_A1)*cos(phi_P)*cos(theta_A1)) + L_s4L*cos(phi_P)
- beta_z + h_bb - r_R - (-l_sp - l_st)*sin(lambda_st) +
(-((sin(phi_A1)*cos(psi_A) + sin(psi_A)*sin(theta_A1)*
cos(phi_A1))*cos(phi_P) + (-sin(phi_A1)*sin(psi_A)*
sin(theta_A1) + cos(phi_A1)*cos(psi_A))*sin(phi_P))*
sin(phi_A2) + (-sin(phi_A1)*sin(phi_P)*cos(theta_A1) +
cos(phi_A1)*cos(phi_P)*cos(theta_A1))*cos(phi_A2))*(L_a2L
- L_a4L - L_a5L))
zero[9] = ((sin(phi_A1)*sin(psi_A) - sin(theta_A1)*cos(phi_A1)*
cos(psi_A))*cos(phi_P) + (sin(phi_A1)*sin(theta_A1)*
cos(psi_A) + sin(psi_A)*cos(phi_A1))*sin(phi_P))
return zero
g_PJ1extension = -np.deg2rad(90.0)
g_J1J2flexion = np.deg2rad(75.0)
g_CA1extension = -np.deg2rad(15.0)
g_CA1adduction = np.deg2rad(2.0)
g_CA1rotation = np.deg2rad(2.0)
g_A1A2extension = -np.deg2rad(40.0)
g_alpha_y = L_hbF * np.cos(np.deg2rad(45.0))
g_alpha_z = L_hbF * np.sin(np.deg2rad(45.0))
g_beta_y = -L_hbR * np.cos(np.deg2rad(30.0))
g_beta_z = L_hbR * np.sin(np.deg2rad(30.0))
guess = [g_PJ1extension, g_J1J2flexion, g_CA1extension, g_CA1adduction,
g_CA1rotation, g_A1A2extension, g_alpha_y, g_alpha_z, g_beta_y,
g_beta_z]
solution = fsolve(zero, guess)
cfg_dict = human.CFG.copy()
cfg_dict['PJ1extension'] = solution[0]
cfg_dict['J1J2flexion'] = solution[1]
cfg_dict['CA1extension'] = solution[2]
cfg_dict['CA1adduction'] = solution[3]
cfg_dict['CA1rotation'] = solution[4]
cfg_dict['A1A2extension'] = solution[5]
cfg_dict['somersault'] = somersault
cfg_dict['PK1extension'] = cfg_dict['PJ1extension']
cfg_dict['K1K2flexion'] = cfg_dict['J1J2flexion']
cfg_dict['CB1extension'] = cfg_dict['CA1extension']
cfg_dict['CB1abduction'] = -cfg_dict['CA1adduction']
cfg_dict['CB1rotation'] = -cfg_dict['CA1rotation']
cfg_dict['B1B2extension'] = cfg_dict['A1A2extension']
# assign configuration to human and check that the solution worked
human.set_CFG_dict(cfg_dict)
# draw rider for fun, but possibly to check results aren't crazy
if drawrider:
human.draw()
return human | PypiClean |
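# Hedged usage sketch (paths, short names and parameter dictionaries are
# assumed): configure_rider loads the rider's Yeadon measurement and
# configuration files, solves the joint angles placing the hands on the
# handlebars and the feet at the bottom bracket, and returns the rider
# inertia, the configured yeadon.Human and the combined parameters.
#     riderPar, human, bicycleRiderPar = configure_rider(
#         'riders/Jason', 'Benchmark', bicyclePar, measuredPar, False)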
/HAP-python-4.7.1.tar.gz/HAP-python-4.7.1/pyhap/accessory_driver.py | import asyncio
import base64
from concurrent.futures import ThreadPoolExecutor
import hashlib
import logging
import os
import re
import sys
import tempfile
import threading
import time
from typing import Optional
from zeroconf import ServiceInfo
from zeroconf.asyncio import AsyncZeroconf
from pyhap import util
from pyhap.accessory import Accessory, get_topic
from pyhap.characteristic import CharacteristicError
from pyhap.const import (
HAP_PERMISSION_NOTIFY,
HAP_PROTOCOL_SHORT_VERSION,
HAP_REPR_ACCS,
HAP_REPR_AID,
HAP_REPR_CHARS,
HAP_REPR_IID,
HAP_REPR_PID,
HAP_REPR_STATUS,
HAP_REPR_TTL,
HAP_REPR_VALUE,
STANDALONE_AID,
)
from pyhap.encoder import AccessoryEncoder
from pyhap.hap_server import HAPServer
from pyhap.hsrp import Server as SrpServer
from pyhap.loader import Loader
from pyhap.params import get_srp_context
from pyhap.state import State
from .const import HAP_SERVER_STATUS
from .util import callback
logger = logging.getLogger(__name__)
SERVICE_CALLBACK = "callback"
SERVICE_CHARS = "chars"
SERVICE_IIDS = "iids"
HAP_SERVICE_TYPE = "_hap._tcp.local."
VALID_MDNS_REGEX = re.compile(r"[^A-Za-z0-9\-]+")
LEADING_TRAILING_SPACE_DASH = re.compile(r"^[ -]+|[ -]+$")
DASH_REGEX = re.compile(r"[-]+")
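# Illustrative sanitization example (the accessory name is assumed):
# characters outside [A-Za-z0-9-] become spaces and leading/trailing
# spaces or dashes are stripped when building the advertised name.
#     re.sub(LEADING_TRAILING_SPACE_DASH, "",
#            re.sub(VALID_MDNS_REGEX, " ", "My Sensor #1!"))  # -> 'My Sensor 1'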
def _wrap_char_setter(char, value, client_addr):
"""Process an characteristic setter callback trapping and logging all exceptions."""
try:
char.client_update_value(value, client_addr)
except Exception: # pylint: disable=broad-except
logger.exception(
"%s: Error while setting characteristic %s to %s",
client_addr,
char.display_name,
value,
)
return HAP_SERVER_STATUS.SERVICE_COMMUNICATION_FAILURE
return HAP_SERVER_STATUS.SUCCESS
def _wrap_acc_setter(acc, updates_by_service, client_addr):
"""Process an accessory setter callback trapping and logging all exceptions."""
try:
acc.setter_callback(updates_by_service)
except Exception: # pylint: disable=broad-except
logger.exception(
"%s: Error while setting characteristics to %s for the %s accessory",
updates_by_service,
client_addr,
acc,
)
return HAP_SERVER_STATUS.SERVICE_COMMUNICATION_FAILURE
return HAP_SERVER_STATUS.SUCCESS
def _wrap_service_setter(service, chars, client_addr):
"""Process a service setter callback trapping and logging all exceptions."""
# Ideally this would pass the chars as is without converting
# them to the display_name, but that would break existing
# consumers of the data.
service_chars = {char.display_name: value for char, value in chars.items()}
try:
service.setter_callback(service_chars)
except Exception: # pylint: disable=broad-except
logger.exception(
"%s: Error while setting characteristics to %s for the %s service",
service_chars,
client_addr,
service.display_name,
)
return HAP_SERVER_STATUS.SERVICE_COMMUNICATION_FAILURE
return HAP_SERVER_STATUS.SUCCESS
class AccessoryMDNSServiceInfo(ServiceInfo):
"""A mDNS service info representation of an accessory."""
def __init__(self, accessory, state, zeroconf_server=None):
self.accessory = accessory
self.state: State = state
adv_data = self._get_advert_data()
valid_name = self._valid_name()
short_mac = self.state.mac[-8:].replace(":", "")
# Append part of MAC address to prevent name conflicts
name = f"{valid_name} {short_mac}.{HAP_SERVICE_TYPE}"
valid_host_name = self._valid_host_name()
server = zeroconf_server or f"{valid_host_name}-{short_mac}.local."
super().__init__(
HAP_SERVICE_TYPE,
name=name,
server=server,
port=self.state.port,
weight=0,
priority=0,
properties=adv_data,
parsed_addresses=self.state.addresses,
)
def _valid_name(self):
return re.sub(
LEADING_TRAILING_SPACE_DASH,
"",
re.sub(VALID_MDNS_REGEX, " ", self.accessory.display_name),
)
def _valid_host_name(self):
return re.sub(
DASH_REGEX,
"-",
re.sub(VALID_MDNS_REGEX, " ", self.accessory.display_name)
.strip()
.replace(" ", "-")
.strip("-"),
)
def _setup_hash(self):
setup_hash_material = self.state.setup_id + self.state.mac
temp_hash = hashlib.sha512()
temp_hash.update(setup_hash_material.encode())
return base64.b64encode(temp_hash.digest()[:4]).decode()
def _get_advert_data(self):
"""Generate advertisement data from the accessory."""
return {
"md": self._valid_name(),
"pv": HAP_PROTOCOL_SHORT_VERSION,
"id": self.state.mac,
# represents the 'configuration version' of an Accessory.
# Increasing this 'version number' signals iOS devices to
# re-fetch accessories data.
"c#": str(self.state.config_version),
"s#": "1", # 'accessory state'
"ff": "0",
"ci": str(self.accessory.category),
# 'sf == 1' means "discoverable by HomeKit iOS clients"
"sf": "0" if self.state.paired else "1",
"sh": self._setup_hash(),
}
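# Hedged illustration (accessory name and MAC are assumed): an unpaired
# accessory 'My Sensor' with MAC '02:AB:CD:EF:12:34' is advertised roughly as
# 'My Sensor EF1234._hap._tcp.local.' with TXT records such as
# {'md': 'My Sensor', 'ci': ..., 'sf': '1', ...}; after pairing, 'sf' becomes
# '0' so controllers no longer offer it for pairing.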
class AccessoryDriver:
"""
An AccessoryDriver mediates between incoming requests from the HAPServer and
the Accessory.
The driver starts and stops the HAPServer, the mDNS advertisements and responds
to events from the HAPServer.
"""
def __init__(
self,
*,
address=None,
port=51234,
persist_file="accessory.state",
pincode=None,
encoder=None,
loader=None,
loop=None,
mac=None,
listen_address=None,
advertised_address=None,
interface_choice=None,
async_zeroconf_instance=None,
zeroconf_server=None
):
"""
Initialize a new AccessoryDriver object.
:param pincode: The pincode that HAP clients must prove they know in order
to pair with this `Accessory`. Defaults to None, in which case a random
pincode is generated. The pincode has the format "xxx-xx-xxx", where x is
a digit.
:type pincode: bytearray
:param port: The local port on which the accessory will be accessible.
In other words, this is the port of the HAPServer.
:type port: int
:param address: The local address on which the accessory will be accessible.
In other words, this is the address of the HAPServer. If not given, the
driver will try to select an address.
:type address: str
:param persist_file: The file name in which the state of the accessory
will be persisted. This uses `expandvars`, so may contain `~` to
refer to the user's home directory.
:type persist_file: str
:param encoder: The encoder to use when persisting/loading the Accessory state.
:type encoder: AccessoryEncoder
:param mac: The MAC address which will be used to identify the accessory.
If not given, the driver will try to select a MAC address.
:type mac: str
:param listen_address: The local address on the HAPServer will listen.
If not given, the value of the address parameter will be used.
:type listen_address: str
:param advertised_address: The addresses of the HAPServer announced via mDNS.
This can be used to announce an external address from behind a NAT.
If not given, the value of the address parameter will be used.
:type advertised_address: str | list[str]
:param interface_choice: The zeroconf interfaces to listen on.
        :type interface_choice: InterfaceChoice.Default or InterfaceChoice.All
:param async_zeroconf_instance: An AsyncZeroconf instance. When running multiple accessories or
bridges a single zeroconf instance can be shared to avoid the overhead
of processing the same data multiple times.
:param zeroconf_server: The server name that will be used for the zeroconf
ServiceInfo.
:type zeroconf_server: str
"""
if loop is None:
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
executor_opts = {"max_workers": None}
if sys.version_info >= (3, 6):
executor_opts["thread_name_prefix"] = "SyncWorker"
self.executor = ThreadPoolExecutor(**executor_opts)
loop.set_default_executor(self.executor)
self.tid = threading.current_thread()
else:
self.tid = threading.main_thread()
self.executor = None
self.loop = loop
self.accessory: Optional[Accessory] = None
self.advertiser = async_zeroconf_instance
self.zeroconf_server = zeroconf_server
self.interface_choice = interface_choice
self.persist_file = os.path.expanduser(persist_file)
self.encoder = encoder or AccessoryEncoder()
self.topics = {} # topic: set of (address, port) of subscribed clients
self.loader = loader or Loader()
self.aio_stop_event = None
self.stop_event = threading.Event()
self.safe_mode = False
self.mdns_service_info = None
self.srp_verifier = None
address = address or util.get_local_address()
advertised_address = advertised_address or address
self.state = State(
address=advertised_address, mac=mac, pincode=pincode, port=port
)
listen_address = listen_address or address
network_tuple = (listen_address, self.state.port)
self.http_server = HAPServer(network_tuple, self)
self.prepared_writes = {}
def start(self):
"""Start the event loop and call `start_service`.
        Pyhap will be stopped gracefully on a KeyboardInterrupt.
"""
try:
logger.info("Starting the event loop")
if (
threading.current_thread() is threading.main_thread()
and os.name != "nt"
):
logger.debug("Setting child watcher")
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
else:
logger.debug(
"Not setting a child watcher. Set one if "
"subprocesses will be started outside the main thread."
)
self.add_job(self.async_start())
self.loop.run_forever()
except KeyboardInterrupt:
logger.debug("Got a KeyboardInterrupt, stopping driver")
self.loop.call_soon_threadsafe(self.loop.create_task, self.async_stop())
self.loop.run_forever()
finally:
self.loop.close()
logger.info("Closed the event loop")
def start_service(self):
"""Start the service."""
self._validate_start()
self.add_job(self.async_start)
def _validate_start(self):
"""Validate we can start."""
if self.accessory is None:
raise ValueError(
"You must assign an accessory to the driver, "
"before you can start it."
)
async def async_start(self):
"""Starts the accessory.
- Call the accessory's run method.
- Start handling accessory events.
- Start the HAP server.
- Publish a mDNS advertisement.
- Print the setup QR code if the accessory is not paired.
All of the above are started in separate threads. Accessory thread is set as
daemon.
"""
self._validate_start()
self.aio_stop_event = asyncio.Event()
logger.info(
"Starting accessory %s on addresses %s, port %s.",
self.accessory.display_name,
self.state.addresses,
self.state.port,
)
# Start listening for requests
logger.debug("Starting server.")
await self.http_server.async_start(self.loop)
# Update the hash of the accessories
# in case the config version needs to be
# incremented to tell iOS to drop the cache
# for /accessories
if self.state.set_accessories_hash(self.accessories_hash):
self.async_persist()
# Advertise the accessory as a mDNS service.
logger.debug("Starting mDNS.")
self.mdns_service_info = AccessoryMDNSServiceInfo(
self.accessory, self.state, self.zeroconf_server
)
if not self.advertiser:
zc_args = {}
if self.interface_choice is not None:
zc_args["interfaces"] = self.interface_choice
self.advertiser = AsyncZeroconf(**zc_args)
await self.advertiser.async_register_service(
self.mdns_service_info, cooperating_responders=True
)
# Print accessory setup message
if not self.state.paired:
self.accessory.setup_message()
# Start the accessory so it can do stuff.
logger.debug("Starting accessory %s", self.accessory.display_name)
self.add_job(self.accessory.run)
logger.debug(
"AccessoryDriver for %s started successfully", self.accessory.display_name
)
def stop(self):
"""Method to stop pyhap."""
self.add_job(self.async_stop)
async def async_stop(self):
"""Stops the AccessoryDriver and shutdown all remaining tasks."""
self.stop_event.set()
logger.debug("Stopping HAP server and event sending")
logger.debug("Stopping mDNS advertising for %s", self.accessory.display_name)
await self.advertiser.async_unregister_service(self.mdns_service_info)
await self.advertiser.async_close()
self.aio_stop_event.set()
self.http_server.async_stop()
logger.info(
"Stopping accessory %s on address %s, port %s.",
self.accessory.display_name,
self.state.addresses,
self.state.port,
)
await self.async_add_job(self.accessory.stop)
logger.debug(
"AccessoryDriver for %s stopped successfully", self.accessory.display_name
)
# Executor=None means a loop wasn't passed in
if self.executor is not None:
logger.debug("Shutdown executors")
self.executor.shutdown()
self.loop.stop()
logger.debug("Stop completed")
def add_job(self, target, *args):
"""Add job to executor pool."""
if target is None:
raise ValueError("Don't call add_job with None.")
self.loop.call_soon_threadsafe(self.async_add_job, target, *args)
@util.callback
def async_add_job(self, target, *args):
"""Add job from within the event loop."""
task = None
if asyncio.iscoroutine(target):
task = self.loop.create_task(target)
elif util.is_callback(target):
self.loop.call_soon(target, *args)
elif util.iscoro(target):
task = self.loop.create_task(target(*args))
else:
task = self.loop.run_in_executor(None, target, *args)
return task
def add_accessory(self, accessory):
"""Add top level accessory to driver."""
self.accessory = accessory
if accessory.aid is None:
accessory.aid = STANDALONE_AID
elif accessory.aid != STANDALONE_AID:
raise ValueError("Top-level accessory must have the AID == 1.")
if os.path.exists(self.persist_file):
logger.info("Loading Accessory state from `%s`", self.persist_file)
self.load()
else:
logger.info("Storing Accessory state in `%s`", self.persist_file)
self.persist()
@util.callback
def async_subscribe_client_topic(self, client, topic, subscribe=True):
"""(Un)Subscribe the given client from the given topic.
This method must be run in the event loop.
:param client: A client (address, port) tuple that should be subscribed.
:type client: tuple <str, int>
:param topic: The topic to which to subscribe.
:type topic: str
:param subscribe: Whether to subscribe or unsubscribe the client. Both subscribing
an already subscribed client and unsubscribing a client that is not subscribed
do nothing.
:type subscribe: bool
"""
if subscribe:
subscribed_clients = self.topics.get(topic)
if subscribed_clients is None:
subscribed_clients = set()
self.topics[topic] = subscribed_clients
subscribed_clients.add(client)
return
if topic not in self.topics:
return
subscribed_clients = self.topics[topic]
subscribed_clients.discard(client)
if not subscribed_clients:
del self.topics[topic]
def connection_lost(self, client):
"""Called when a connection is lost to a client.
This method must be run in the event loop.
:param client: A client (address, port) tuple that should be unsubscribed.
:type client: tuple <str, int>
"""
client_topics = []
for topic, subscribed_clients in self.topics.items():
if client in subscribed_clients:
# Make a copy to avoid changing
# self.topics during iteration
client_topics.append(topic)
for topic in client_topics:
self.async_subscribe_client_topic(client, topic, subscribe=False)
self.prepared_writes.pop(client, None)
def publish(self, data, sender_client_addr=None, immediate=False):
"""Publishes an event to the client.
The publishing occurs only if the current client is subscribed to the topic for
the aid and iid contained in the data.
:param data: The data to publish. It must at least contain the keys "aid" and
"iid".
:type data: dict
"""
topic = get_topic(data[HAP_REPR_AID], data[HAP_REPR_IID])
if topic not in self.topics:
return
if threading.current_thread() == self.tid:
self.async_send_event(topic, data, sender_client_addr, immediate)
return
self.loop.call_soon_threadsafe(
self.async_send_event, topic, data, sender_client_addr, immediate
)
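    # Hedged example of the payload accessories pass to publish() above (aid,
    # iid and value are assumed): a change on accessory 1, characteristic 8
    # is delivered to every client subscribed to that characteristic's topic.
    #     driver.publish({HAP_REPR_AID: 1, HAP_REPR_IID: 8,
    #                     HAP_REPR_VALUE: 22.5})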
def async_send_event(self, topic, data, sender_client_addr, immediate):
"""Send an event to a client.
Must be called in the event loop
"""
if self.aio_stop_event.is_set():
return
subscribed_clients = self.topics.get(topic, [])
logger.debug(
"Send event: topic(%s), data(%s), sender_client_addr(%s)",
topic,
data,
sender_client_addr,
)
unsubs = []
for client_addr in subscribed_clients:
if sender_client_addr and sender_client_addr == client_addr:
logger.debug(
"Skip sending event to client since "
"its the client that made the characteristic change: %s",
client_addr,
)
continue
logger.debug(
"Sending event to client: %s, immediate: %s", client_addr, immediate
)
pushed = self.http_server.push_event(data, client_addr, immediate)
if not pushed:
logger.debug(
"Could not send event to %s, probably stale socket.", client_addr
)
unsubs.append(client_addr)
# Maybe consider removing the client_addr from every topic?
for client_addr in unsubs:
self.async_subscribe_client_topic(client_addr, topic, False)
def config_changed(self):
"""Notify the driver that the accessory's configuration has changed.
Persists the accessory, so that the new configuration is available on
restart. Also, updates the mDNS advertisement, so that iOS clients know they need
to fetch new data.
"""
self.state.increment_config_version()
self.persist()
self.update_advertisement()
def update_advertisement(self):
"""Updates the mDNS service info for the accessory."""
self.loop.call_soon_threadsafe(self.async_update_advertisement)
@callback
def async_update_advertisement(self):
"""Updates the mDNS service info for the accessory from the event loop."""
logger.debug("Updating mDNS advertisement")
self.mdns_service_info = AccessoryMDNSServiceInfo(
self.accessory, self.state, self.zeroconf_server
)
asyncio.ensure_future(
self.advertiser.async_update_service(self.mdns_service_info)
)
@callback
def async_persist(self):
"""Saves the state of the accessory.
Must be run in the event loop.
"""
loop = asyncio.get_event_loop()
logger.debug("Scheduling write of accessory state to disk")
asyncio.ensure_future(loop.run_in_executor(None, self.persist))
def persist(self):
"""Saves the state of the accessory.
Must run in executor.
"""
logger.debug("Writing of accessory state to disk")
tmp_filename = None
try:
temp_dir = os.path.dirname(self.persist_file)
with tempfile.NamedTemporaryFile(
mode="w", dir=temp_dir, delete=False
) as file_handle:
tmp_filename = file_handle.name
self.encoder.persist(file_handle, self.state)
if (
os.name == "nt"
): # Or `[WinError 5] Access Denied` will be raised on Windows
os.chmod(tmp_filename, 0o644)
os.chmod(self.persist_file, 0o644)
os.replace(tmp_filename, self.persist_file)
except Exception: # pylint: disable=broad-except
logger.exception("Failed to persist accessory state")
raise
finally:
if tmp_filename and os.path.exists(tmp_filename):
os.remove(tmp_filename)
def load(self):
"""Load the persist file.
Must run in executor.
"""
with open(self.persist_file, "r", encoding="utf8") as file_handle:
self.encoder.load_into(file_handle, self.state)
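    # Illustrative sketch (not part of the original source): load() does blocking
    # file I/O, so from async code it would be dispatched to an executor,
    # mirroring how async_persist() schedules persist(). "driver" is an assumption.
    #
    #   await asyncio.get_event_loop().run_in_executor(None, driver.load)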
@callback
def pair(
self,
client_username_bytes: bytes,
client_public: bytes,
client_permissions: bytes,
) -> bool:
"""Called when a client has paired with the accessory.
Persist the new accessory state.
:param client_username_bytes: The client username bytes.
:type client_username_bytes: bytes
:param client_public: The client's public key.
:type client_public: bytes
:param client_permissions: The client's permissions.
:type client_permissions: bytes (int)
:return: Whether the pairing is successful.
:rtype: bool
"""
logger.info(
"Paired with %s with permissions %s.",
client_username_bytes,
client_permissions,
)
self.state.add_paired_client(
client_username_bytes, client_public, client_permissions
)
self.async_persist()
return True
@callback
def unpair(self, client_uuid):
"""Removes the paired client from the accessory.
Persist the new accessory state.
:param client_uuid: The client uuid.
:type client_uuid: uuid.UUID
"""
logger.info("Unpairing client %s.", client_uuid)
self.state.remove_paired_client(client_uuid)
self.async_persist()
def finish_pair(self):
"""Finishing pairing or unpairing.
Updates the accessory and updates the mDNS service.
The mDNS announcement must not be updated until AFTER
        the final pairing response is sent, or HomeKit will
see that the accessory is already paired and assume
it should stop pairing.
"""
# Safe mode added to avoid error during pairing, see
# https://github.com/home-assistant/home-assistant/issues/14567
#
# This may no longer be needed now that we defer
# updating the advertisement until after the final
# pairing response is sent.
#
if not self.safe_mode:
self.update_advertisement()
def setup_srp_verifier(self):
"""Create an SRP verifier for the accessory's info."""
# TODO: Move the below hard-coded values somewhere nice.
ctx = get_srp_context(3072, hashlib.sha512, 16)
verifier = SrpServer(ctx, b"Pair-Setup", self.state.pincode)
self.srp_verifier = verifier
@property
def accessories_hash(self):
"""Hash the get_accessories response to track configuration changes."""
return hashlib.sha512(
util.to_sorted_hap_json(self.get_accessories())
).hexdigest()
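    # Illustrative sketch (not part of the original source): the hash can be
    # compared across calls to detect a changed accessory database, e.g. to decide
    # whether config_changed() needs to be called. "driver" and "old_hash" are
    # assumptions for the example.
    #
    #   new_hash = driver.accessories_hash
    #   if new_hash != old_hash:
    #       driver.config_changed()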
def get_accessories(self):
"""Returns the accessory in HAP format.
:return: An example HAP representation is:
        .. code-block:: python

            {
                "accessories": [{
                    "aid": 1,
                    "services": [{
                        "iid": 1,
                        "type": ...,
                        "characteristics": [{
                            "iid": 2,
                            "type": ...,
                            "description": "CurrentTemperature",
                            ...
                        }]
                    }]
                }]
            }
:rtype: dict
"""
hap_rep = self.accessory.to_HAP()
if not isinstance(hap_rep, list):
hap_rep = [
hap_rep,
]
logger.debug("Get accessories response: %s", hap_rep)
return {HAP_REPR_ACCS: hap_rep}
def get_characteristics(self, char_ids):
"""Returns values for the required characteristics.
:param char_ids: A list of characteristic "paths", e.g. "1.2" is aid 1, iid 2.
:type char_ids: list<str>
        :return: Status of each requested characteristic. For example:

        .. code-block:: python

            {
                "characteristics": [{
                    "aid": 1,
                    "iid": 2,
                    "status": 0
                }]
            }
:rtype: dict
"""
chars = []
for aid_iid in char_ids:
aid, iid = (int(i) for i in aid_iid.split("."))
rep = {
HAP_REPR_AID: aid,
HAP_REPR_IID: iid,
HAP_REPR_STATUS: HAP_SERVER_STATUS.SERVICE_COMMUNICATION_FAILURE,
}
try:
if aid == STANDALONE_AID:
char = self.accessory.iid_manager.get_obj(iid)
available = True
else:
acc = self.accessory.accessories.get(aid)
if acc is None:
continue
available = acc.available
char = acc.iid_manager.get_obj(iid)
if available:
rep[HAP_REPR_VALUE] = char.get_value()
rep[HAP_REPR_STATUS] = HAP_SERVER_STATUS.SUCCESS
except CharacteristicError:
logger.error(
"%s: Error getting value for characteristic %s.",
self.accessory.display_name,
(aid, iid),
)
except Exception: # pylint: disable=broad-except
logger.exception(
"%s: Unexpected error getting value for characteristic %s.",
self.accessory.display_name,
(aid, iid),
)
chars.append(rep)
logger.debug("Get chars response: %s", chars)
return {HAP_REPR_CHARS: chars}
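    # Illustrative sketch (not part of the original source): characteristic ids are
    # "aid.iid" strings, so reading iid 9 of the standalone accessory (aid 1) and
    # iid 11 of a bridged accessory (aid 2) could look like this ("driver" and the
    # ids are assumptions).
    #
    #   response = driver.get_characteristics(["1.9", "2.11"])
    #   # -> {"characteristics": [{"aid": 1, "iid": 9, "value": ..., "status": 0}, ...]}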
def set_characteristics(self, chars_query, client_addr):
"""Called from ``HAPServerHandler`` when iOS configures the characteristics.
:param chars_query: A configuration query. For example:
        .. code-block:: python

            {
                "characteristics": [{
                    "aid": 1,
                    "iid": 2,
                    "value": False,  # Value to set
                    "ev": True  # (Un)subscribe for events from this characteristic.
                }]
            }
:type chars_query: dict
"""
        # TODO: Add support for chars that do not support notifications.
updates = {}
setter_results = {}
had_error = False
expired = False
if HAP_REPR_PID in chars_query:
pid = chars_query[HAP_REPR_PID]
expire_time = self.prepared_writes.get(client_addr, {}).pop(pid, None)
if expire_time is None or time.time() > expire_time:
expired = True
for cq in chars_query[HAP_REPR_CHARS]:
aid, iid = cq[HAP_REPR_AID], cq[HAP_REPR_IID]
setter_results.setdefault(aid, {})
if expired:
setter_results[aid][iid] = HAP_SERVER_STATUS.INVALID_VALUE_IN_REQUEST
had_error = True
continue
if HAP_PERMISSION_NOTIFY in cq:
char_topic = get_topic(aid, iid)
action = "Subscribed" if cq[HAP_PERMISSION_NOTIFY] else "Unsubscribed"
logger.debug(
"%s client %s to topic %s", action, client_addr, char_topic
)
self.async_subscribe_client_topic(
client_addr, char_topic, cq[HAP_PERMISSION_NOTIFY]
)
if HAP_REPR_VALUE not in cq:
continue
updates.setdefault(aid, {})[iid] = cq[HAP_REPR_VALUE]
for aid, new_iid_values in updates.items():
if self.accessory.aid == aid:
acc = self.accessory
else:
acc = self.accessory.accessories.get(aid)
updates_by_service = {}
char_to_iid = {}
for iid, value in new_iid_values.items():
# Characteristic level setter callbacks
char = acc.get_characteristic(aid, iid)
set_result = _wrap_char_setter(char, value, client_addr)
if set_result != HAP_SERVER_STATUS.SUCCESS:
had_error = True
setter_results[aid][iid] = set_result
if not char.service or (
not acc.setter_callback and not char.service.setter_callback
):
continue
char_to_iid[char] = iid
updates_by_service.setdefault(char.service, {}).update({char: value})
# Accessory level setter callbacks
if acc.setter_callback:
set_result = _wrap_acc_setter(acc, updates_by_service, client_addr)
if set_result != HAP_SERVER_STATUS.SUCCESS:
had_error = True
for iid in updates[aid]:
setter_results[aid][iid] = set_result
# Service level setter callbacks
for service, chars in updates_by_service.items():
if not service.setter_callback:
continue
set_result = _wrap_service_setter(service, chars, client_addr)
if set_result != HAP_SERVER_STATUS.SUCCESS:
had_error = True
for char in chars:
setter_results[aid][char_to_iid[char]] = set_result
if not had_error:
return None
return {
HAP_REPR_CHARS: [
{
HAP_REPR_AID: aid,
HAP_REPR_IID: iid,
HAP_REPR_STATUS: status,
}
for aid, iid_status in setter_results.items()
for iid, status in iid_status.items()
]
}
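    # Illustrative sketch (not part of the original source): a write request as the
    # HAP server would pass it in, setting a value and subscribing to notifications
    # for the same characteristic. "driver" and the client address are assumptions.
    #
    #   driver.set_characteristics(
    #       {"characteristics": [{"aid": 1, "iid": 8, "value": True, "ev": True}]},
    #       ("192.168.1.20", 54321),
    #   )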
def prepare(self, prepare_query, client_addr):
"""Called from ``HAPServerHandler`` when iOS wants to prepare a write.
:param prepare_query: A prepare query. For example:
.. code-block:: python
{
"ttl": 10000, # in milliseconds
"pid": 12345678,
}
:type prepare_query: dict
"""
try:
ttl = prepare_query[HAP_REPR_TTL]
pid = prepare_query[HAP_REPR_PID]
self.prepared_writes.setdefault(client_addr, {})[pid] = time.time() + (
ttl / 1000
)
except (KeyError, ValueError):
return {HAP_REPR_STATUS: HAP_SERVER_STATUS.INVALID_VALUE_IN_REQUEST}
return {HAP_REPR_STATUS: HAP_SERVER_STATUS.SUCCESS}
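    # Illustrative sketch (not part of the original source): a HomeKit "timed write"
    # first registers a pid with a ttl via prepare(); the follow-up
    # set_characteristics() call carries the same pid and is rejected if the ttl
    # has expired. The pid/ttl values and the client address are assumptions.
    #
    #   driver.prepare({"ttl": 2500, "pid": 11223344}, ("192.168.1.20", 54321))
    #   driver.set_characteristics(
    #       {"pid": 11223344,
    #        "characteristics": [{"aid": 1, "iid": 8, "value": True}]},
    #       ("192.168.1.20", 54321),
    #   )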
def signal_handler(self, _signal, _frame):
"""Stops the AccessoryDriver for a given signal.
An AccessoryDriver can be registered as a signal handler with this method. For
example, you can register it for a KeyboardInterrupt as follows:
>>> import signal
>>> signal.signal(signal.SIGINT, anAccDriver.signal_handler)
Now, when the user hits Ctrl+C, the driver will stop its accessory, the HAP server
and everything else that needs stopping and will exit gracefully.
"""
try:
self.stop()
except Exception as e:
logger.error("Could not stop AccessoryDriver because of error: %s", e)
            raise