# /Crwy-1.7.1.tar.gz/Crwy-1.7.1/crwy/utils/scrapy_plugs/dupefilters.py
import logging
import time
import datetime
import hashlib
from crwy.utils.filter.RedisSet import RedisSet
from crwy.utils.filter.RedisSortedSet import RedisSortedSet
from scrapy.dupefilters import BaseDupeFilter
from scrapy.exceptions import NotConfigured
from scrapy_redis.connection import get_redis_from_settings
logger = logging.getLogger(__name__)
class RedisRFPDupeFilter(BaseDupeFilter):
"""
dupefilter by redis, redis connect base on scrapy-redis connect
warning:
config SPIDER_NAME in settings before use
default:
DUPEFILTER_DEBUG = False
DUPEFILTER_DELAY_DAY = 0
"""
logger = logger
    def __init__(self, debug=False,
                 server=None,
                 bot_name=None,
                 spider_name=None,
                 dupefilter_delay_day=None,
                 do_hash=None):
        self.debug = debug
        self.logdupes = True
        self.server = server
        self.bot_name = bot_name
        self.spider_name = spider_name
        self.dupefilter_delay_day = dupefilter_delay_day
        self.do_hash = do_hash
@classmethod
def from_settings(cls, settings):
server = get_redis_from_settings(settings)
debug = settings.getbool('DUPEFILTER_DEBUG')
bot_name = settings.get('BOT_NAME')
spider_name = settings.get('SPIDER_NAME')
        dupefilter_delay_day = settings.getint('DUPEFILTER_DELAY_DAY', 0)
do_hash = settings.getbool('DUPEFILTER_DO_HASH', True)
if not spider_name:
raise NotConfigured('%s - "SPIDER_NAME" is not found.' %
cls.__name__)
        return cls(debug=debug, server=server, bot_name=bot_name,
                   spider_name=spider_name,
                   dupefilter_delay_day=dupefilter_delay_day,
                   do_hash=do_hash)
def request_seen(self, request):
if not request.meta.get('dupefilter_key', None):
return False
if len(request.meta.get('redirect_urls', [])) > 0:
# skip url from redirect
return False
dupefilter_key = request.meta.get('dupefilter_key')
        dupefilter_key = hashlib.sha1(
            dupefilter_key.encode('utf-8')).hexdigest() if \
            self.do_hash else dupefilter_key
# SPIDER_NAME for dupefilter
key = '{bot_name}:{spider_name}'.format(
bot_name=self.bot_name,
spider_name=self.spider_name)
        if request.meta.get('dupefilter_delay_day', ''):
            self.dupefilter_delay_day = int(request.meta.get(
                'dupefilter_delay_day'))
        if self.dupefilter_delay_day == 0:
s = RedisSet(key, server=self.server)
if s.sadd(dupefilter_key) is True:
return False
self.logger.info('Filtered dupefilter_key: %s' %
dupefilter_key)
return True
else:
z = RedisSortedSet(key, server=self.server)
now = time.time()
last_time = z.zscore(dupefilter_key)
if not last_time:
z.zadd(now, dupefilter_key)
return False
            if (datetime.datetime.utcfromtimestamp(now) -
                    datetime.datetime.utcfromtimestamp(last_time)).days >= \
                    self.dupefilter_delay_day:
                z.zadd(now, dupefilter_key)
                return False
            self.logger.info('Filtered dupefilter_key within %s day(s): %s' %
                             (self.dupefilter_delay_day,
                              request.meta.get('dupefilter_key')))
return True
def log(self, request, spider): # log that a request has been filtered
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {
'request': request.meta.get('dupefilter_key')}, extra={
'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request: %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request},
extra={'spider': spider})
self.logdupes = False
spider.crawler.stats.inc_value('dupefilter/filtered', spider=spider)
class ReleaseDupefilterKey(object):
"""
rm dupefilter_key from redis, when call response
"""
def call(self, spider, dupefilter_key):
if not dupefilter_key:
return
        obj = RedisRFPDupeFilter.from_settings(spider.settings)
        dupefilter_key = hashlib.sha1(
            dupefilter_key.encode('utf-8')).hexdigest() if \
            obj.do_hash else dupefilter_key
# SPIDER_NAME for dupefilter
key = '{bot_name}:{spider_name}'.format(
bot_name=obj.bot_name,
spider_name=obj.spider_name)
        if obj.dupefilter_delay_day == 0:
s = RedisSet(key, server=obj.server)
s.srem(dupefilter_key)
else:
z = RedisSortedSet(key, server=obj.server)
z.zrem(dupefilter_key)
obj.logger.info('dupefilter_key: {} released.'.format(
dupefilter_key))
release_dupefilter_key = ReleaseDupefilterKey()
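
# Usage sketch (illustrative): from a spider callback, release a key so the
# same request is no longer considered a duplicate:
#
#   release_dupefilter_key.call(spider, dupefilter_key='https://example.com/item/1')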

# /Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/nodes/ConstantRefNodes.py
import sys
from abc import abstractmethod
from nuitka import Options
from nuitka.__past__ import (
GenericAlias,
UnionType,
iterItems,
long,
unicode,
xrange,
)
from nuitka.Builtins import (
builtin_anon_values,
builtin_exception_values_list,
builtin_named_values,
)
from nuitka.Constants import (
getUnhashableConstant,
isConstant,
isHashable,
isMutable,
the_empty_dict,
the_empty_frozenset,
the_empty_list,
the_empty_set,
the_empty_tuple,
the_empty_unicode,
)
from nuitka.PythonVersions import python_version
from nuitka.Tracing import optimization_logger
from .ExpressionBases import CompileTimeConstantExpressionBase
from .ExpressionShapeMixins import (
ExpressionBoolShapeExactMixin,
ExpressionBytearrayShapeExactMixin,
ExpressionBytesShapeExactMixin,
ExpressionComplexShapeExactMixin,
ExpressionDictShapeExactMixin,
ExpressionEllipsisShapeExactMixin,
ExpressionFloatShapeExactMixin,
ExpressionFrozensetShapeExactMixin,
ExpressionIntShapeExactMixin,
ExpressionListShapeExactMixin,
ExpressionLongShapeExactMixin,
ExpressionNoneShapeExactMixin,
ExpressionSetShapeExactMixin,
ExpressionSliceShapeExactMixin,
ExpressionStrShapeExactMixin,
ExpressionTupleShapeExactMixin,
ExpressionUnicodeShapeExactMixin,
)
from .IterationHandles import (
ConstantBytearrayIterationHandle,
ConstantBytesIterationHandle,
ConstantDictIterationHandle,
ConstantFrozensetIterationHandle,
ConstantListIterationHandle,
ConstantRangeIterationHandle,
ConstantSetIterationHandle,
ConstantStrIterationHandle,
ConstantTupleIterationHandle,
ConstantUnicodeIterationHandle,
)
from .NodeMakingHelpers import (
makeRaiseExceptionReplacementExpression,
wrapExpressionWithSideEffects,
)
from .shapes.BuiltinTypeShapes import (
tshape_namedtuple,
tshape_type,
tshape_xrange,
)
class ExpressionConstantUntrackedRefBase(CompileTimeConstantExpressionBase):
__slots__ = ("constant",)
def __init__(self, constant, source_ref):
CompileTimeConstantExpressionBase.__init__(self, source_ref=source_ref)
self.constant = constant
def finalize(self):
del self.parent
del self.constant
def __repr__(self):
return "<Node %s value %r at %s>" % (
self.kind,
self.constant,
self.source_ref.getAsString(),
)
def getDetails(self):
return {"constant": self.constant}
def getDetailsForDisplay(self):
result = self.getDetails()
if "constant" in result:
result["constant"] = repr(result["constant"])
return result
@staticmethod
def isExpressionConstantRef():
return True
def computeExpressionRaw(self, trace_collection):
# Cannot compute any further, this is already the best.
return self, None, None
# Note: For computedExpressionResult to work, TODO: needed more generally?
def computeExpression(self, trace_collection):
# Cannot compute any further, this is already the best.
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
trace_collection.onExceptionRaiseExit(TypeError)
        # The arguments don't matter. All constant values cannot be called, and
        # we just need to make an error out of that.
new_node = wrapExpressionWithSideEffects(
new_node=makeRaiseExceptionReplacementExpression(
expression=self,
exception_type="TypeError",
exception_value="'%s' object is not callable"
% type(self.constant).__name__,
),
old_node=call_node,
side_effects=call_node.extractSideEffectsPreCall(),
)
return (
new_node,
"new_raise",
"Predicted call of constant %s value to exception raise."
% type(self.constant),
)
def computeExpressionCallViaVariable(
self, call_node, variable_ref_node, call_args, call_kw, trace_collection
):
return self.computeExpressionCall(
call_node=call_node,
call_args=call_args,
call_kw=call_kw,
trace_collection=trace_collection,
)
def getCompileTimeConstant(self):
return self.constant
# TODO: Push this to singletons for being static functions
def getComparisonValue(self):
return True, self.constant
@staticmethod
def getIterationHandle():
return None
def isMutable(self):
# This is expected to be overloaded by child classes.
assert False, self
def isKnownToBeHashable(self):
# This is expected to be overloaded by child classes.
assert False, self
def extractUnhashableNodeType(self):
value = getUnhashableConstant(self.constant)
if value is not None:
return makeConstantRefNode(constant=type(value), source_ref=self.source_ref)
@staticmethod
def isNumberConstant():
        # This is expected to be overloaded by child classes that disagree: bool, int, long and float.
return False
@staticmethod
def isIndexConstant():
        # This is expected to be overloaded by child classes that disagree: bool, int, long and float.
return False
def isIndexable(self):
# TODO: Suspiciously this doesn't use isIndexConstant, which includes float, bug?
return self.constant is None or self.isNumberConstant()
def isKnownToBeIterable(self, count):
if self.isIterableConstant():
return count is None or len(self.constant) == count
else:
return False
def isKnownToBeIterableAtMin(self, count):
length = self.getIterationLength()
return length is not None and length >= count
def canPredictIterationValues(self):
return self.isKnownToBeIterable(None)
def getIterationValue(self, count):
assert count < len(self.constant)
return makeConstantRefNode(
constant=self.constant[count], source_ref=self.source_ref
)
def getIterationValueRange(self, start, stop):
return [
makeConstantRefNode(constant=value, source_ref=self.source_ref)
for value in self.constant[start:stop]
]
def getIterationValues(self):
source_ref = self.source_ref
return tuple(
makeConstantRefNode(
constant=value, source_ref=source_ref, user_provided=self.user_provided
)
for value in self.constant
)
def getIntegerValue(self):
if self.isNumberConstant():
return int(self.constant)
else:
return None
@abstractmethod
def isIterableConstant(self):
"""Is the constant type iterable."""
        # This is expected to be overloaded by child classes, but it's actually
        # wasteful to use it; the methods using it should have overloads too.
def getIterationLength(self):
# This is expected to be overloaded by child classes if they are iterable
assert not self.isIterableConstant(), self
return None
def getStrValue(self):
return makeConstantRefNode(
constant=str(self.constant),
user_provided=False,
source_ref=self.source_ref,
)
def computeExpressionIter1(self, iter_node, trace_collection):
# Note, this is overloaded for all the others.
assert not self.isIterableConstant()
# TODO: Raise static exception.
return iter_node, None, None
class ExpressionConstantRefBase(ExpressionConstantUntrackedRefBase):
"""Constants reference base class.
Use this for cases, for which it makes sense to track origin, e.g.
large lists are from computation or from user literals.
"""
# Base classes can be abstract, pylint: disable=I0021,abstract-method
__slots__ = ("user_provided",)
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=constant, source_ref=source_ref
)
self.user_provided = user_provided
if not user_provided and Options.is_debug:
try:
if type(constant) in (str, unicode, bytes):
max_size = 1000
elif type(constant) is xrange:
max_size = None
else:
max_size = 256
if max_size is not None and len(constant) > max_size:
optimization_logger.warning(
"Too large constant (%s %d) encountered at %s."
% (
type(constant),
len(constant),
source_ref.getAsString(),
)
)
except TypeError:
pass
def getDetails(self):
return {"constant": self.constant, "user_provided": self.user_provided}
def __repr__(self):
return "<Node %s value %r at %s %s>" % (
self.kind,
self.constant,
self.source_ref.getAsString(),
self.user_provided,
)
def getStrValue(self):
try:
return makeConstantRefNode(
constant=str(self.constant),
user_provided=self.user_provided,
source_ref=self.source_ref,
)
except UnicodeEncodeError:
# Unicode constants may not be possible to encode.
return None
class ExpressionConstantNoneRef(
ExpressionNoneShapeExactMixin, ExpressionConstantUntrackedRefBase
):
kind = "EXPRESSION_CONSTANT_NONE_REF"
__slots__ = ()
def __init__(self, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=None, source_ref=source_ref
)
@staticmethod
def getDetails():
return {}
@staticmethod
def isMutable():
return False
@staticmethod
def isIterableConstant():
return False
class ExpressionConstantBoolRefBase(
ExpressionBoolShapeExactMixin, ExpressionConstantUntrackedRefBase
):
@staticmethod
def isExpressionConstantBoolRef():
return True
@staticmethod
def computeExpressionBool(trace_collection):
        # Best case already, None indicates no action.
return None, None, None
@staticmethod
def getDetails():
return {}
@staticmethod
def isMutable():
return False
@staticmethod
def isKnownToBeHashable():
return True
@staticmethod
def isNumberConstant():
return True
@staticmethod
def isIndexConstant():
return True
@staticmethod
def isIterableConstant():
return False
class ExpressionConstantTrueRef(ExpressionConstantBoolRefBase):
kind = "EXPRESSION_CONSTANT_TRUE_REF"
__slots__ = ()
def __init__(self, source_ref):
ExpressionConstantBoolRefBase.__init__(
self, constant=True, source_ref=source_ref
)
@staticmethod
def getTruthValue():
"""Return known truth value."""
return True
@staticmethod
def getIndexValue():
return 1
class ExpressionConstantFalseRef(ExpressionConstantBoolRefBase):
kind = "EXPRESSION_CONSTANT_FALSE_REF"
__slots__ = ()
def __init__(self, source_ref):
ExpressionConstantBoolRefBase.__init__(
self, constant=False, source_ref=source_ref
)
@staticmethod
def getTruthValue():
"""Return known truth value."""
return False
@staticmethod
def getIndexValue():
return 0
class ExpressionConstantEllipsisRef(
ExpressionEllipsisShapeExactMixin, ExpressionConstantUntrackedRefBase
):
kind = "EXPRESSION_CONSTANT_ELLIPSIS_REF"
__slots__ = ()
def __init__(self, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=Ellipsis, source_ref=source_ref
)
@staticmethod
def getDetails():
return {}
@staticmethod
def isMutable():
return False
@staticmethod
def isIterableConstant():
return False
class ExpressionConstantDictRef(
ExpressionDictShapeExactMixin, ExpressionConstantRefBase
):
kind = "EXPRESSION_CONSTANT_DICT_REF"
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantDictRef():
return True
@staticmethod
def isMutable():
return True
@staticmethod
def isKnownToBeHashable():
return False
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantDictIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
result = makeConstantRefNode(
constant=tuple(self.constant),
user_provided=self.user_provided,
source_ref=self.source_ref,
)
self.parent.replaceChild(self, result)
self.finalize()
return (
iter_node,
"new_constant",
"""Iteration over constant dict lowered to tuple.""",
)
def isMappingWithConstantStringKeys(self):
return all(type(key) in (str, unicode) for key in self.constant)
def getMappingStringKeyPairs(self):
pairs = []
for key, value in iterItems(self.constant):
pairs.append(
(
key,
makeConstantRefNode(
constant=value,
user_provided=self.user_provided,
source_ref=self.source_ref,
),
)
)
return pairs
@staticmethod
def getTruthValue():
"""Return known truth value.
The empty dict is not allowed here, so we can hardcode it.
"""
return True
class EmptyContainerMixin(object):
__slots__ = ()
def getDetails(self):
return {"user_provided": self.user_provided}
@staticmethod
def getIterationLength():
return 0
@staticmethod
def getTruthValue():
"""Return known truth value.
The empty container is false, so we can hardcode it.
"""
return False
class ExpressionConstantDictEmptyRef(EmptyContainerMixin, ExpressionConstantDictRef):
kind = "EXPRESSION_CONSTANT_DICT_EMPTY_REF"
__slots__ = ()
def __init__(self, user_provided, source_ref):
ExpressionConstantDictRef.__init__(
self,
constant=the_empty_dict,
user_provided=user_provided,
source_ref=source_ref,
)
class ExpressionConstantTupleRef(
ExpressionTupleShapeExactMixin, ExpressionConstantRefBase
):
kind = "EXPRESSION_CONSTANT_TUPLE_REF"
__slots__ = ()
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantTupleRef():
return True
@staticmethod
def isMutable():
return False
def isKnownToBeHashable(self):
# There are a few exceptions, where non-mutable can be non-hashable, e.g. slice.
return isHashable(self.constant)
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantTupleIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
# Note: Tuples are as good as it gets.
return iter_node, None, None
@staticmethod
def getTruthValue():
"""Return known truth value.
        The empty tuple is not allowed here, so we can hardcode it.
"""
return True
class ExpressionConstantTupleMutableRef(ExpressionConstantTupleRef):
kind = "EXPRESSION_CONSTANT_TUPLE_MUTABLE_REF"
__slots__ = ()
@staticmethod
def isMutable():
return True
@staticmethod
def isKnownToBeHashable():
return False
class ExpressionConstantTupleEmptyRef(EmptyContainerMixin, ExpressionConstantTupleRef):
kind = "EXPRESSION_CONSTANT_TUPLE_EMPTY_REF"
__slots__ = ()
def __init__(self, user_provided, source_ref):
ExpressionConstantTupleRef.__init__(
self,
constant=the_empty_tuple,
user_provided=user_provided,
source_ref=source_ref,
)
class ExpressionConstantListRef(
ExpressionListShapeExactMixin, ExpressionConstantRefBase
):
kind = "EXPRESSION_CONSTANT_LIST_REF"
__slots__ = ()
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantListRef():
return True
@staticmethod
def isMutable():
return True
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantListIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
result = makeConstantRefNode(
constant=tuple(self.constant),
user_provided=self.user_provided,
source_ref=self.source_ref,
)
self.parent.replaceChild(self, result)
self.finalize()
return (
iter_node,
"new_constant",
"""Iteration over constant list lowered to tuple.""",
)
class ExpressionConstantListEmptyRef(EmptyContainerMixin, ExpressionConstantListRef):
kind = "EXPRESSION_CONSTANT_LIST_EMPTY_REF"
__slots__ = ()
def __init__(self, user_provided, source_ref):
ExpressionConstantListRef.__init__(
self,
constant=the_empty_list,
user_provided=user_provided,
source_ref=source_ref,
)
class ExpressionConstantSetRef(ExpressionSetShapeExactMixin, ExpressionConstantRefBase):
kind = "EXPRESSION_CONSTANT_SET_REF"
__slots__ = ()
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantSetRef():
return True
@staticmethod
def isMutable():
return True
@staticmethod
def isKnownToBeHashable():
return False
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantSetIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
result = makeConstantRefNode(
constant=tuple(self.constant),
user_provided=self.user_provided,
source_ref=self.source_ref,
)
self.parent.replaceChild(self, result)
self.finalize()
return (
iter_node,
"new_constant",
"""Iteration over constant set lowered to tuple.""",
)
class ExpressionConstantSetEmptyRef(EmptyContainerMixin, ExpressionConstantSetRef):
kind = "EXPRESSION_CONSTANT_SET_EMPTY_REF"
__slots__ = ()
def __init__(self, user_provided, source_ref):
ExpressionConstantSetRef.__init__(
self,
constant=the_empty_set,
user_provided=user_provided,
source_ref=source_ref,
)
class ExpressionConstantFrozensetRef(
ExpressionFrozensetShapeExactMixin, ExpressionConstantRefBase
):
kind = "EXPRESSION_CONSTANT_FROZENSET_REF"
__slots__ = ()
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantFrozensetRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantFrozensetIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
result = makeConstantRefNode(
constant=tuple(self.constant),
user_provided=self.user_provided,
source_ref=self.source_ref,
)
self.parent.replaceChild(self, result)
self.finalize()
return (
iter_node,
"new_constant",
"""Iteration over constant frozenset lowered to tuple.""",
)
class ExpressionConstantFrozensetEmptyRef(
EmptyContainerMixin, ExpressionConstantFrozensetRef
):
kind = "EXPRESSION_CONSTANT_FROZENSET_EMPTY_REF"
__slots__ = ()
def __init__(self, user_provided, source_ref):
ExpressionConstantFrozensetRef.__init__(
self,
constant=the_empty_frozenset,
user_provided=user_provided,
source_ref=source_ref,
)
class ExpressionConstantIntRef(
ExpressionIntShapeExactMixin, ExpressionConstantUntrackedRefBase
):
kind = "EXPRESSION_CONSTANT_INT_REF"
__slots__ = ()
def __init__(self, constant, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=constant, source_ref=source_ref
)
@staticmethod
def isExpressionConstantIntRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isNumberConstant():
return True
@staticmethod
def isIndexConstant():
return True
def getIndexValue(self):
return self.constant
@staticmethod
def isIterableConstant():
return False
class ExpressionConstantLongRef(
ExpressionLongShapeExactMixin, ExpressionConstantRefBase
):
kind = "EXPRESSION_CONSTANT_LONG_REF"
__slots__ = ()
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantLongRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isNumberConstant():
return True
@staticmethod
def isIndexConstant():
return True
def getIndexValue(self):
# Use the int value if possible, otherwise that remains a long, which is
# also OK, but often unnecessary.
return int(self.constant)
@staticmethod
def isIterableConstant():
return False
class ExpressionConstantStrRef(ExpressionStrShapeExactMixin, ExpressionConstantRefBase):
kind = "EXPRESSION_CONSTANT_STR_REF"
__slots__ = ()
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantStrRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantStrIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def getStrValue(self):
return self
def getStringValue(self):
return self.constant
def computeExpressionIter1(self, iter_node, trace_collection):
        # Note: str is as good as it gets.
return iter_node, None, None
class ExpressionConstantStrEmptyRef(EmptyContainerMixin, ExpressionConstantStrRef):
kind = "EXPRESSION_CONSTANT_STR_EMPTY_REF"
__slots__ = ()
def __init__(self, user_provided, source_ref):
ExpressionConstantStrRef.__init__(
self,
constant="",
user_provided=user_provided,
source_ref=source_ref,
)
class ExpressionConstantUnicodeRef(
ExpressionUnicodeShapeExactMixin, ExpressionConstantRefBase
):
kind = "EXPRESSION_CONSTANT_UNICODE_REF"
__slots__ = ()
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantUnicodeRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantUnicodeIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
        # Note: unicode is as good as it gets.
return iter_node, None, None
class ExpressionConstantUnicodeEmptyRef(
EmptyContainerMixin, ExpressionConstantUnicodeRef
):
kind = "EXPRESSION_CONSTANT_UNICODE_EMPTY_REF"
__slots__ = ()
def __init__(self, user_provided, source_ref):
ExpressionConstantUnicodeRef.__init__(
self,
constant=the_empty_unicode,
user_provided=user_provided,
source_ref=source_ref,
)
class ExpressionConstantBytesRef(
ExpressionBytesShapeExactMixin, ExpressionConstantRefBase
):
kind = "EXPRESSION_CONSTANT_BYTES_REF"
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantBytesRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantBytesIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
# Note: bytes are as good as it gets
return iter_node, None, None
class ExpressionConstantBytesEmptyRef(EmptyContainerMixin, ExpressionConstantBytesRef):
kind = "EXPRESSION_CONSTANT_BYTES_EMPTY_REF"
__slots__ = ()
def __init__(self, user_provided, source_ref):
ExpressionConstantBytesRef.__init__(
self,
constant=b"",
user_provided=user_provided,
source_ref=source_ref,
)
class ExpressionConstantBytearrayRef(
ExpressionBytearrayShapeExactMixin, ExpressionConstantRefBase
):
kind = "EXPRESSION_CONSTANT_BYTEARRAY_REF"
def __init__(self, constant, user_provided, source_ref):
ExpressionConstantRefBase.__init__(
self, constant=constant, user_provided=user_provided, source_ref=source_ref
)
@staticmethod
def isExpressionConstantBytearrayRef():
return True
@staticmethod
def isMutable():
return True
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantBytearrayIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
result = makeConstantRefNode(
constant=bytes(self.constant),
user_provided=self.user_provided,
source_ref=self.source_ref,
)
self.parent.replaceChild(self, result)
self.finalize()
return (
iter_node,
"new_constant",
"""Iteration over constant bytearray lowered to bytes.""",
)
class ExpressionConstantFloatRef(
ExpressionFloatShapeExactMixin, ExpressionConstantUntrackedRefBase
):
kind = "EXPRESSION_CONSTANT_FLOAT_REF"
__slots__ = ()
def __init__(self, constant, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=constant, source_ref=source_ref
)
@staticmethod
def isExpressionConstantFloatRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isNumberConstant():
return True
@staticmethod
def isIterableConstant():
return False
class ExpressionConstantComplexRef(
ExpressionComplexShapeExactMixin, ExpressionConstantUntrackedRefBase
):
kind = "EXPRESSION_CONSTANT_COMPLEX_REF"
__slots__ = ()
def __init__(self, constant, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=constant, source_ref=source_ref
)
@staticmethod
def isExpressionConstantComplexRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isIterableConstant():
return False
# Overload what ExpressionComplexShapeExactMixin says, for a given instance we know all.
@staticmethod
def isKnownToHaveAttribute(attribute_name):
return hasattr(0j, attribute_name)
class ExpressionConstantSliceRef(
ExpressionSliceShapeExactMixin, ExpressionConstantUntrackedRefBase
):
kind = "EXPRESSION_CONSTANT_SLICE_REF"
__slots__ = ()
def __init__(self, constant, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=constant, source_ref=source_ref
)
@staticmethod
def isExpressionConstantSliceRef():
return True
@staticmethod
def isMutable():
return False
@staticmethod
def isIterableConstant():
return False
class ExpressionConstantXrangeRef(ExpressionConstantUntrackedRefBase):
kind = "EXPRESSION_CONSTANT_XRANGE_REF"
__slots__ = ()
def __init__(self, constant, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=constant, source_ref=source_ref
)
@staticmethod
def isExpressionConstantXrangeRef():
return True
@staticmethod
def getTypeShape():
return tshape_xrange
@staticmethod
def isMutable():
return False
@staticmethod
def isKnownToBeHashable():
return True
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantRangeIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
        # Note: xrange is as good as it gets.
return iter_node, None, None
class ExpressionConstantTypeRef(ExpressionConstantUntrackedRefBase):
kind = "EXPRESSION_CONSTANT_TYPE_REF"
__slots__ = ()
@staticmethod
def isExpressionConstantTypeRef():
return True
@staticmethod
def getTypeShape():
return tshape_type
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
from nuitka.optimizations.OptimizeBuiltinCalls import (
computeBuiltinCall,
)
# Anything may happen. On next pass, if replaced, we might be better
# but not now.
trace_collection.onExceptionRaiseExit(BaseException)
new_node, tags, message = computeBuiltinCall(
builtin_name=self.constant.__name__, call_node=call_node
)
return new_node, tags, message
def computeExpressionCallViaVariable(
self, call_node, variable_ref_node, call_args, call_kw, trace_collection
):
return self.computeExpressionCall(
call_node=call_node,
call_args=call_args,
call_kw=call_kw,
trace_collection=trace_collection,
)
@staticmethod
def isMutable():
return False
@staticmethod
def isKnownToBeHashable():
return True
@staticmethod
def isIterableConstant():
return False
@staticmethod
def getTruthValue():
return True
class ExpressionConstantTypeSubscriptableRef(ExpressionConstantTypeRef):
kind = "EXPRESSION_CONSTANT_TYPE_SUBSCRIPTABLE_REF"
__slots__ = ()
def computeExpressionSubscript(self, lookup_node, subscript, trace_collection):
if subscript.isCompileTimeConstant():
return trace_collection.getCompileTimeComputationResult(
node=lookup_node,
computation=lambda: self.getCompileTimeConstant()[
subscript.getCompileTimeConstant()
],
description="Subscript of subscriptable type with constant value.",
)
# TODO: Not true, in fact these should become GenericAlias always.
trace_collection.onExceptionRaiseExit(BaseException)
return lookup_node, None, None
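
# Illustrative note: with the dispatch in makeConstantRefNode below, a constant
# like ``dict`` becomes a subscriptable type ref on 3.9+, so a constant
# subscript such as ``dict[str, int]`` can be folded at compile time.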
def makeConstantRefNode(constant, source_ref, user_provided=False):
    # This dispatches per constant value and type; every case
    # is a return statement, pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
# Dispatch based on constants first.
if constant is None:
return ExpressionConstantNoneRef(source_ref=source_ref)
elif constant is True:
return ExpressionConstantTrueRef(source_ref=source_ref)
elif constant is False:
return ExpressionConstantFalseRef(source_ref=source_ref)
elif constant is Ellipsis:
return ExpressionConstantEllipsisRef(source_ref=source_ref)
# Next, dispatch based on type.
constant_type = type(constant)
if constant_type is int:
return ExpressionConstantIntRef(constant=constant, source_ref=source_ref)
elif constant_type is str:
if constant:
return ExpressionConstantStrRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantStrEmptyRef(
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is float:
return ExpressionConstantFloatRef(constant=constant, source_ref=source_ref)
elif constant_type is long:
return ExpressionConstantLongRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is unicode:
if constant:
return ExpressionConstantUnicodeRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantUnicodeEmptyRef(
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is bytes:
if constant:
return ExpressionConstantBytesRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantBytesEmptyRef(
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is dict:
if constant:
assert isConstant(constant), repr(constant)
return ExpressionConstantDictRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantDictEmptyRef(
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is tuple:
if constant:
assert isConstant(constant), repr(constant)
if isMutable(constant):
return ExpressionConstantTupleMutableRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantTupleRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantTupleEmptyRef(
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is list:
if constant:
assert isConstant(constant), repr(constant)
return ExpressionConstantListRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantListEmptyRef(
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is set:
if constant:
assert isConstant(constant), repr(constant)
return ExpressionConstantSetRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantSetEmptyRef(
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is frozenset:
if constant:
assert isConstant(constant), repr(constant)
return ExpressionConstantFrozensetRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
else:
return ExpressionConstantFrozensetEmptyRef(
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is complex:
return ExpressionConstantComplexRef(
constant=constant,
source_ref=source_ref,
)
elif constant_type is slice:
return ExpressionConstantSliceRef(
constant=constant,
source_ref=source_ref,
)
elif constant_type is type:
if python_version >= 0x390 and constant in (
set,
frozenset,
tuple,
list,
dict,
):
return ExpressionConstantTypeSubscriptableRef(
constant=constant, source_ref=source_ref
)
return ExpressionConstantTypeRef(constant=constant, source_ref=source_ref)
elif constant_type is xrange:
return ExpressionConstantXrangeRef(
constant=constant,
source_ref=source_ref,
)
elif constant_type is bytearray:
return ExpressionConstantBytearrayRef(
constant=constant,
user_provided=user_provided,
source_ref=source_ref,
)
elif constant_type is GenericAlias:
from .BuiltinTypeNodes import ExpressionConstantGenericAlias
return ExpressionConstantGenericAlias(
generic_alias=constant, source_ref=source_ref
)
elif constant_type is UnionType:
from .BuiltinTypeNodes import ExpressionConstantUnionType
return ExpressionConstantUnionType(union_type=constant, source_ref=source_ref)
elif constant is sys.version_info:
return ExpressionConstantSysVersionInfoRef(source_ref=source_ref)
elif constant in builtin_anon_values:
from .BuiltinRefNodes import ExpressionBuiltinAnonymousRef
return ExpressionBuiltinAnonymousRef(
builtin_name=builtin_anon_values[constant],
source_ref=source_ref,
)
elif constant in builtin_named_values:
from .BuiltinRefNodes import ExpressionBuiltinRef
return ExpressionBuiltinRef(
builtin_name=builtin_named_values[constant], source_ref=source_ref
)
elif constant in builtin_exception_values_list:
from .BuiltinRefNodes import ExpressionBuiltinExceptionRef
if constant is NotImplemented:
exception_name = "NotImplemented"
else:
exception_name = constant.__name__
return ExpressionBuiltinExceptionRef(
exception_name=exception_name, source_ref=source_ref
)
else:
# Missing constant type, ought to not happen, please report.
assert False, (constant, constant_type)
class ExpressionConstantSysVersionInfoRef(ExpressionConstantUntrackedRefBase):
kind = "EXPRESSION_CONSTANT_SYS_VERSION_INFO_REF"
__slots__ = ()
def __init__(self, source_ref):
ExpressionConstantUntrackedRefBase.__init__(
self, constant=sys.version_info, source_ref=source_ref
)
@staticmethod
def getDetails():
return {}
@staticmethod
def getTypeShape():
return tshape_namedtuple
@staticmethod
def isMutable():
return False
@staticmethod
def isKnownToBeHashable():
return True
@staticmethod
def isIterableConstant():
return True
def getIterationHandle(self):
return ConstantTupleIterationHandle(self)
def getIterationLength(self):
return len(self.constant)
def computeExpressionIter1(self, iter_node, trace_collection):
# For iteration, we are just a normal tuple.
result = makeConstantRefNode(
constant=tuple(self.constant),
user_provided=True,
source_ref=self.source_ref,
)
self.parent.replaceChild(self, result)
self.finalize()
return (
iter_node,
"new_constant",
"""Iteration over constant 'sys.version_info' lowered to tuple.""",
)
@staticmethod
def getTruthValue():
        return True
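
# Dispatch sketch (illustrative, assuming a valid ``source_ref``):
#
#   node = makeConstantRefNode(constant=(1, 2), source_ref=source_ref)
#   assert node.kind == "EXPRESSION_CONSTANT_TUPLE_REF"
#   assert not node.isMutable() and node.getIterationLength() == 2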

# /MindustryCompiler-2.1-py3-none-any.whl/compiler/yacc/grammar/function.py
from compiler import CompilationException
from ._start import grammar, YaccProduction, context
from .. import importsHandling
from ..classes import AsmInst, FunDef, ReturnStm, FunCall
def getModuleAndFunName(dotted):
module = None
if len(dotted) == 2:
module, funName = dotted
else:
assert len(dotted) == 1
funName = dotted[0]
return module, funName
@grammar
def runFuncReturnArgs(p: YaccProduction):
'''ligne : affectation dottedID OpenParenthesis arguments CloseParenthesis'''
returnTo = p[1]
dotted = p[2]
module, funName = getModuleAndFunName(dotted)
callArgs = p[4]
p[0] = FunCall(module, funName, callArgs, p.lineno(3), returnTo)
@grammar
def runFunc(p: YaccProduction):
'''ligne : dottedID OpenParenthesis arguments CloseParenthesis'''
dotted = p[1]
module, funName = getModuleAndFunName(dotted)
callArgs = p[3]
p[0] = FunCall(module, funName, callArgs, p.lineno(2))
@grammar
def defFun(p: YaccProduction):
'''noLine : dottedID OpenParenthesis arguments CloseParenthesis OpenCurlyBracket funDefContext lines CloseCurlyBracket''' # noqa
if len(p[1]) != 1:
raise CompilationException(
"line {}, function definition incorrect: {} is not accepted"
.format(p.lineno(2), p[1]))
name = p[1][0]
args = p[3]
content = p[7]
fundef = FunDef(context, name, args, content)
importsHandling.imports.addFunToModule(fundef)
context.inFunDefinition = False
@grammar
def funDefContext(p: YaccProduction):
'''funDefContext : '''
context.inFunDefinition = True
@grammar
def handleReturn(p: YaccProduction):
'''ligne : Return arguments'''
if not context.inFunDefinition:
raise CompilationException(
"line {}, return statement must be in function definition".format(p.lineno(1)))
p[0] = ReturnStm(p[2])
# emit "set {target} {value}" instructions, pairing liSet targets with liVar values
def setters(liSet, liVar):
    return [AsmInst('set', [s, v]) for s, v in zip(liSet, liVar)]
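
# For example (illustrative): setters(['x', 'y'], ['1', '2']) yields the
# instructions "set x 1" and "set y 2".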

# /Intrst_algrms-0.7.2-py3-none-any.whl/unpacking_flatten_lists/performance.py
import timeit
import json
import time
from collections import defaultdict
from typing import List, Callable
from numbers import Integral
from contextlib import contextmanager
from unpacking_flatten_lists import BASEDIR
from unpacking_flatten_lists.data import (
generate_data,
create_data_decreasing_depth,
create_data_increasing_depth)
from unpacking_flatten_lists.funcs import (
outer_flatten_1,
outer_flatten_2,
niccolum_flatten,
tishka_flatten,
zart_flatten,
recursive_flatten_iterator,
tishka_flatten_with_stack,
recursive_flatten_generator)
RETRY_NUM = 10
TOO_LONG = 60 * 5 // 100  # in seconds: five minutes divided by 100, i.e. 3
INCREMENT_MODE_NAME = 'increase'
DECREMENT_MODE_NAME = 'decrease'
SETUP_IMPORT_TEMPLATE = '''
from typing import Iterator
import json
from __main__ import {func_name} as flatten
data = json.loads("{data}")
'''
RUNNING_TEMPLATE = '''
result = flatten(data)
if isinstance(result, Iterator):
result = list(result)
'''
funcs = [
outer_flatten_1,
outer_flatten_2,
niccolum_flatten,
tishka_flatten,
zart_flatten,
recursive_flatten_iterator,
tishka_flatten_with_stack,
recursive_flatten_generator
]
result = defaultdict(lambda: defaultdict(lambda: defaultdict(Integral)))
def mean(numbers: List[Integral]) -> float:
    # timeit.repeat returns the total time of RETRY_NUM executions per repeat,
    # so divide by RETRY_NUM as well to get the mean time of a single call.
    return sum(numbers) / len(numbers) / RETRY_NUM
@contextmanager
def time_time(msg: str) -> None:
start = time.monotonic()
yield start
print('{} done: '.format(msg), time.monotonic() - start)
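
# Usage sketch (illustrative):
#
#   with time_time('flatten'):
#       flatten(data)   # prints "flatten done: <elapsed seconds>" on exit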
def increase_part():
print('*' * 10, 'Increase', '*' * 10)
common_part(data_create_func=create_data_increasing_depth, mode=INCREMENT_MODE_NAME)
def decrease_part():
print('*' * 10, 'Decrease', '*' * 10)
common_part(data_create_func=create_data_decreasing_depth, mode=DECREMENT_MODE_NAME)
def common_part(*, data_create_func: Callable, mode: str):
for func in funcs:
func_name = func.__name__
print('\n', func_name, '\n')
for data_example in generate_data():
data = data_create_func(**data_example[1])
            data = json.dumps(data)  # workaround: timeit fails with "s_push: parser stack overflow" on list literals nested 100 deep
data_struct_name = data_example[0]
with time_time(data_struct_name) as start_time:
result[func_name][mode][data_struct_name] = mean(
timeit.repeat(
RUNNING_TEMPLATE,
setup=SETUP_IMPORT_TEMPLATE.format(
func_name=func_name,
data=data),
number=RETRY_NUM
)
)
if time.monotonic() - start_time > TOO_LONG:
break
def main():
increase_part()
decrease_part()
print('Done testing. Writes...')
with open(BASEDIR / 'performance.json', 'w') as outfile:
json.dump(result, outfile, indent=4)
print('Done')
if __name__ == '__main__':
    main()

# /OMEMO-1.0.2.tar.gz/OMEMO-1.0.2/omemo/storage.py
from __future__ import annotations  # pylint: disable=unused-variable
from abc import ABC, abstractmethod
import base64
import copy
from typing import Any, Callable, Dict, Generic, List, Optional, Type, TypeVar, Union, cast
from .types import JSONType, OMEMOException
__all__ = [ # pylint: disable=unused-variable
"Just",
"Maybe",
"Nothing",
"NothingException",
"Storage",
"StorageException"
]
class StorageException(OMEMOException):
"""
Parent type for all exceptions specifically raised by methods of :class:`Storage`.
"""
ValueTypeT = TypeVar("ValueTypeT")
DefaultTypeT = TypeVar("DefaultTypeT")
MappedValueTypeT = TypeVar("MappedValueTypeT")
class Maybe(ABC, Generic[ValueTypeT]):
"""
    typing's `Optional[A]` is just an alias for `Union[None, A]`, which means that if `A` is itself a
    union that allows `None`, then `Optional[A]` doesn't add anything. E.g.
    `Optional[Optional[X]] = Optional[X]` is true for any type `X`. This Maybe class actually
    differentiates whether a value is set or not.

    All incoming and outgoing values are cloned using :func:`copy.deepcopy`, such that values stored in a
    Maybe instance are not affected by outside application logic.
"""
@property
@abstractmethod
def is_just(self) -> bool:
"""
Returns:
Whether this is a :class:`Just`.
"""
@property
@abstractmethod
def is_nothing(self) -> bool:
"""
Returns:
Whether this is a :class:`Nothing`.
"""
@abstractmethod
def from_just(self) -> ValueTypeT:
"""
Returns:
The value if this is a :class:`Just`.
Raises:
NothingException: if this is a :class:`Nothing`.
"""
@abstractmethod
def maybe(self, default: DefaultTypeT) -> Union[ValueTypeT, DefaultTypeT]:
"""
Args:
default: The value to return if this is in instance of :class:`Nothing`.
Returns:
The value if this is a :class:`Just`, or the default value if this is a :class:`Nothing`. The
default is returned by reference in that case.
"""
@abstractmethod
def fmap(self, function: Callable[[ValueTypeT], MappedValueTypeT]) -> "Maybe[MappedValueTypeT]":
"""
Apply a mapping function.
Args:
function: The mapping function.
Returns:
A new :class:`Just` containing the mapped value if this is a :class:`Just`. A new :class:`Nothing`
if this is a :class:`Nothing`.
"""
class NothingException(Exception):
"""
Raised by :meth:`Maybe.from_just`, in case the :class:`Maybe` is a :class:`Nothing`.
"""
class Nothing(Maybe[ValueTypeT]):
"""
A :class:`Maybe` that does not hold a value.
"""
def __init__(self) -> None:
"""
Initialize a :class:`Nothing`, representing an empty :class:`Maybe`.
"""
@property
def is_just(self) -> bool:
return False
@property
def is_nothing(self) -> bool:
return True
def from_just(self) -> ValueTypeT:
raise NothingException("Maybe.fromJust: Nothing") # -- yuck
def maybe(self, default: DefaultTypeT) -> DefaultTypeT:
return default
def fmap(self, function: Callable[[ValueTypeT], MappedValueTypeT]) -> "Nothing[MappedValueTypeT]":
return Nothing()
class Just(Maybe[ValueTypeT]):
"""
A :class:`Maybe` that does hold a value.
"""
def __init__(self, value: ValueTypeT) -> None:
"""
Initialize a :class:`Just`, representing a :class:`Maybe` that holds a value.
Args:
value: The value to store in this :class:`Just`.
"""
self.__value = copy.deepcopy(value)
@property
def is_just(self) -> bool:
return True
@property
def is_nothing(self) -> bool:
return False
def from_just(self) -> ValueTypeT:
return copy.deepcopy(self.__value)
def maybe(self, default: DefaultTypeT) -> ValueTypeT:
return copy.deepcopy(self.__value)
def fmap(self, function: Callable[[ValueTypeT], MappedValueTypeT]) -> "Just[MappedValueTypeT]":
return Just(function(copy.deepcopy(self.__value)))
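
# Composition sketch (illustrative):
#
#   assert Just(2).fmap(lambda x: x * 2).from_just() == 4
#   assert Nothing().fmap(lambda x: x * 2).maybe("default") == "default"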
PrimitiveTypeT = TypeVar("PrimitiveTypeT", None, float, int, str, bool)
class Storage(ABC):
"""
A simple key/value storage class with optional caching (on by default). Keys can be any Python string,
values any JSON-serializable structure.
Warning:
Writing (and deletion) operations must be performed right away, before returning from the method. Such
operations must not be cached or otherwise deferred.
Warning:
All parameters must be treated as immutable unless explicitly noted otherwise.
Note:
        The :class:`Maybe` type performs the additional job of cloning stored and returned values, which
        is essential to decouple the cached values from the application logic.
"""
def __init__(self, disable_cache: bool = False):
"""
Configure caching behaviour of the storage.
Args:
disable_cache: Whether to disable the cache, which is on by default. Use this parameter if your
storage implementation handles caching itself, to avoid pointless double caching.
"""
self.__cache: Optional[Dict[str, Maybe[JSONType]]] = None if disable_cache else {}
@abstractmethod
async def _load(self, key: str) -> Maybe[JSONType]:
"""
Load a value.
Args:
key: The key identifying the value.
Returns:
The loaded value, if it exists.
Raises:
StorageException: if any kind of storage operation failed. Feel free to raise a subclass instead.
"""
@abstractmethod
async def _store(self, key: str, value: JSONType) -> Any:
"""
Store a value.
Args:
key: The key identifying the value.
value: The value to store under the given key.
Returns:
Anything, the return value is ignored.
Raises:
StorageException: if any kind of storage operation failed. Feel free to raise a subclass instead.
"""
@abstractmethod
async def _delete(self, key: str) -> Any:
"""
Delete a value, if it exists.
Args:
key: The key identifying the value to delete.
Returns:
Anything, the return value is ignored.
Raises:
StorageException: if any kind of storage operation failed. Feel free to raise a subclass instead.
Do not raise if the key doesn't exist.
"""
async def load(self, key: str) -> Maybe[JSONType]:
"""
Load a value.
Args:
key: The key identifying the value.
Returns:
The loaded value, if it exists.
Raises:
StorageException: if any kind of storage operation failed. Forwarded from :meth:`_load`.
"""
if self.__cache is not None and key in self.__cache:
return self.__cache[key]
value = await self._load(key)
if self.__cache is not None:
self.__cache[key] = value
return value
async def store(self, key: str, value: JSONType) -> None:
"""
Store a value.
Args:
key: The key identifying the value.
value: The value to store under the given key.
Raises:
StorageException: if any kind of storage operation failed. Forwarded from :meth:`_store`.
"""
await self._store(key, value)
if self.__cache is not None:
self.__cache[key] = Just(value)
async def delete(self, key: str) -> None:
"""
Delete a value, if it exists.
Args:
key: The key identifying the value to delete.
Raises:
StorageException: if any kind of storage operation failed. Does not raise if the key doesn't
exist. Forwarded from :meth:`_delete`.
"""
await self._delete(key)
if self.__cache is not None:
self.__cache[key] = Nothing()
async def store_bytes(self, key: str, value: bytes) -> None:
"""
Variation of :meth:`store` for storing specifically bytes values.
Args:
key: The key identifying the value.
value: The value to store under the given key.
Raises:
StorageException: if any kind of storage operation failed. Forwarded from :meth:`_store`.
"""
await self.store(key, base64.urlsafe_b64encode(value).decode("ASCII"))
async def load_primitive(self, key: str, primitive: Type[PrimitiveTypeT]) -> Maybe[PrimitiveTypeT]:
"""
Variation of :meth:`load` for loading specifically primitive values.
Args:
key: The key identifying the value.
primitive: The primitive type of the value.
Returns:
The loaded and type-checked value, if it exists.
Raises:
StorageException: if any kind of storage operation failed. Forwarded from :meth:`_load`.
"""
def check_type(value: JSONType) -> PrimitiveTypeT:
if isinstance(value, primitive):
return value
raise TypeError(f"The value stored for key {key} is not a {primitive}: {value}")
return (await self.load(key)).fmap(check_type)
async def load_bytes(self, key: str) -> Maybe[bytes]:
"""
Variation of :meth:`load` for loading specifically bytes values.
Args:
key: The key identifying the value.
Returns:
The loaded and type-checked value, if it exists.
Raises:
StorageException: if any kind of storage operation failed. Forwarded from :meth:`_load`.
"""
def check_type(value: JSONType) -> bytes:
if isinstance(value, str):
return base64.urlsafe_b64decode(value.encode("ASCII"))
raise TypeError(f"The value stored for key {key} is not a str/bytes: {value}")
return (await self.load(key)).fmap(check_type)
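
    # Round-trip sketch (illustrative): bytes values are stored base64-encoded,
    # so the following holds for any Storage implementation:
    #
    #   await storage.store_bytes('k', b'\x00\x01')
    #   assert (await storage.load_bytes('k')).from_just() == b'\x00\x01'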
async def load_optional(
self,
key: str,
primitive: Type[PrimitiveTypeT]
) -> Maybe[Optional[PrimitiveTypeT]]:
"""
Variation of :meth:`load` for loading specifically optional primitive values.
Args:
key: The key identifying the value.
primitive: The primitive type of the optional value.
Returns:
The loaded and type-checked value, if it exists.
Raises:
StorageException: if any kind of storage operation failed. Forwarded from :meth:`_load`.
"""
def check_type(value: JSONType) -> Optional[PrimitiveTypeT]:
if value is None or isinstance(value, primitive):
return value
raise TypeError(f"The value stored for key {key} is not an optional {primitive}: {value}")
return (await self.load(key)).fmap(check_type)
async def load_list(self, key: str, primitive: Type[PrimitiveTypeT]) -> Maybe[List[PrimitiveTypeT]]:
"""
Variation of :meth:`load` for loading specifically lists of primitive values.
Args:
key: The key identifying the value.
primitive: The primitive type of the list elements.
Returns:
The loaded and type-checked value, if it exists.
Raises:
StorageException: if any kind of storage operation failed. Forwarded from :meth:`_load`.
"""
def check_type(value: JSONType) -> List[PrimitiveTypeT]:
if isinstance(value, list) and all(isinstance(element, primitive) for element in value):
return cast(List[PrimitiveTypeT], value)
raise TypeError(f"The value stored for key {key} is not a list of {primitive}: {value}")
return (await self.load(key)).fmap(check_type)
async def load_dict(
self,
key: str,
primitive: Type[PrimitiveTypeT]
) -> Maybe[Dict[str, PrimitiveTypeT]]:
"""
Variation of :meth:`load` for loading specifically dictionaries of primitive values.
Args:
key: The key identifying the value.
primitive: The primitive type of the dictionary values.
Returns:
The loaded and type-checked value, if it exists.
Raises:
StorageException: if any kind of storage operation failed. Forwarded from :meth:`_load`.
"""
def check_type(value: JSONType) -> Dict[str, PrimitiveTypeT]:
if isinstance(value, dict) and all(isinstance(v, primitive) for v in value.values()):
return cast(Dict[str, PrimitiveTypeT], value)
raise TypeError(f"The value stored for key {key} is not a dict of {primitive}: {value}")
        return (await self.load(key)).fmap(check_type)
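
# A minimal in-memory subclass sketch (illustrative, not part of the library),
# showing how the three abstract hooks fit together:
#
#   class InMemoryStorage(Storage):
#       def __init__(self) -> None:
#           super().__init__()
#           self.__data: Dict[str, JSONType] = {}
#
#       async def _load(self, key: str) -> Maybe[JSONType]:
#           return Just(self.__data[key]) if key in self.__data else Nothing()
#
#       async def _store(self, key: str, value: JSONType) -> None:
#           self.__data[key] = value
#
#       async def _delete(self, key: str) -> None:
#           self.__data.pop(key, None)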

# /GeoNode-3.2.0-py3-none-any.whl/geonode/geoserver/helpers.py
import os
import re
import sys
import time
import uuid
import json
import errno
import logging
import datetime
import tempfile
import traceback
from shutil import copyfile
from itertools import cycle
from collections import namedtuple, defaultdict
from os.path import basename, splitext, isfile
from threading import local
from urllib.parse import urlparse, urlencode, urlsplit, urljoin
from pinax.ratings.models import OverallRating
from bs4 import BeautifulSoup
from dialogos.models import Comment
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.db.models.signals import pre_delete
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext as _
from geoserver.catalog import Catalog, FailedRequestError
from geoserver.resource import FeatureType, Coverage
from geoserver.store import CoverageStore, DataStore, datastore_from_index, \
coveragestore_from_index, wmsstore_from_index
from geoserver.support import DimensionInfo
from geoserver.workspace import Workspace
from gsimporter import Client
from lxml import etree
from defusedxml import lxml as dlxml
from owslib.wcs import WebCoverageService
from owslib.wms import WebMapService
from geonode import GeoNodeException
from geonode.utils import http_client
from geonode.layers.models import Layer, Attribute, Style
from geonode.layers.enumerations import LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES
from geonode.security.views import _perms_info_json
from geonode.security.utils import set_geowebcache_invalidate_cache
import xml.etree.ElementTree as ET
from django.utils.module_loading import import_string
logger = logging.getLogger(__name__)
temp_style_name_regex = r'[a-zA-Z0-9]{8}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{4}-[a-zA-Z0-9]{12}_ms_.*'
if not hasattr(settings, 'OGC_SERVER'):
msg = (
'Please configure OGC_SERVER when enabling geonode.geoserver.'
' More info can be found at '
'http://docs.geonode.org/en/2.10.x/basic/settings/index.html#ogc-server')
raise ImproperlyConfigured(msg)
def check_geoserver_is_up():
"""Verifies all geoserver is running,
this is needed to be able to upload.
"""
url = f"{ogc_server_settings.LOCATION}"
req, content = http_client.get(url, user=_user)
msg = f'Cannot connect to the GeoServer at {url}\nPlease make sure you have started it.'
logger.debug(req)
assert req.status_code == 200, msg
def _add_sld_boilerplate(symbolizer):
"""
Wrap an XML snippet representing a single symbolizer in the appropriate
elements to make it a valid SLD which applies that symbolizer to all features,
including format strings to allow interpolating a "name" variable in.
"""
return """
<StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<NamedLayer>
<Name>%(name)s</Name>
<UserStyle>
<Name>%(name)s</Name>
<Title>%(name)s</Title>
<FeatureTypeStyle>
<Rule>
""" + symbolizer + """
</Rule>
</FeatureTypeStyle>
</UserStyle>
</NamedLayer>
</StyledLayerDescriptor>
"""
_raster_template = """
<RasterSymbolizer>
<Opacity>1.0</Opacity>
</RasterSymbolizer>
"""
_polygon_template = """
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(bg)s</CssParameter>
</Fill>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
<CssParameter name="stroke-width">0.7</CssParameter>
</Stroke>
</PolygonSymbolizer>
"""
_line_template = """
<LineSymbolizer>
<Stroke>
<CssParameter name="stroke">%(bg)s</CssParameter>
<CssParameter name="stroke-width">3</CssParameter>
</Stroke>
</LineSymbolizer>
</Rule>
</FeatureTypeStyle>
<FeatureTypeStyle>
<Rule>
<LineSymbolizer>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
</Stroke>
</LineSymbolizer>
"""
_point_template = """
<PointSymbolizer>
<Graphic>
<Mark>
<WellKnownName>%(mark)s</WellKnownName>
<Fill>
<CssParameter name="fill">%(bg)s</CssParameter>
</Fill>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
</Stroke>
</Mark>
<Size>10</Size>
</Graphic>
</PointSymbolizer>
"""
_style_templates = dict(
raster=_add_sld_boilerplate(_raster_template),
polygon=_add_sld_boilerplate(_polygon_template),
line=_add_sld_boilerplate(_line_template),
point=_add_sld_boilerplate(_point_template)
)
def _style_name(resource):
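    # e.g. (illustrative) a resource 'roads.2020' in workspace 'geonode'
    # yields 'geonode_roads_2020' (restconfig chokes on '.' and ':')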
return _punc.sub("_", f"{resource.store.workspace.name}:{resource.name}")
def extract_name_from_sld(gs_catalog, sld, sld_file=None):
try:
if sld:
if isfile(sld):
with open(sld, "rb") as sld_file:
sld = sld_file.read()
if isinstance(sld, str):
sld = sld.encode('utf-8')
dom = etree.XML(sld)
elif sld_file and isfile(sld_file):
with open(sld_file, "rb") as sld_file:
sld = sld_file.read()
if isinstance(sld, str):
sld = sld.encode('utf-8')
            # dlxml.parse() expects a path or file object, not bytes, so parse the bytes directly
            dom = dlxml.fromstring(sld)
except Exception:
logger.exception("The uploaded SLD file is not valid XML")
raise Exception(
"The uploaded SLD file is not valid XML")
named_layer = dom.findall(
"{http://www.opengis.net/sld}NamedLayer")
user_layer = dom.findall(
"{http://www.opengis.net/sld}UserLayer")
el = None
if named_layer and len(named_layer) > 0:
user_style = named_layer[0].findall("{http://www.opengis.net/sld}UserStyle")
if user_style and len(user_style) > 0:
el = user_style[0].findall("{http://www.opengis.net/sld}Name")
if len(el) == 0:
el = user_style[0].findall("{http://www.opengis.net/se}Name")
if len(el) == 0:
el = named_layer[0].findall("{http://www.opengis.net/sld}Name")
if len(el) == 0:
el = named_layer[0].findall("{http://www.opengis.net/se}Name")
if not el or len(el) == 0:
if user_layer and len(user_layer) > 0:
user_style = user_layer[0].findall("{http://www.opengis.net/sld}UserStyle")
if user_style and len(user_style) > 0:
el = user_style[0].findall("{http://www.opengis.net/sld}Name")
if len(el) == 0:
el = user_style[0].findall("{http://www.opengis.net/se}Name")
if len(el) == 0:
el = user_layer[0].findall("{http://www.opengis.net/sld}Name")
if len(el) == 0:
el = user_layer[0].findall("{http://www.opengis.net/se}Name")
if not el or len(el) == 0:
if sld_file:
return splitext(basename(sld_file))[0]
else:
raise Exception(
"Please provide a name, unable to extract one from the SLD.")
return el[0].text
def get_sld_for(gs_catalog, layer):
name = None
gs_layer = None
gs_style = None
_default_style = None
_max_retries, _tries = getattr(ogc_server_settings, "MAX_RETRIES", 2), 0
try:
gs_layer = gs_catalog.get_layer(layer.name)
if gs_layer.default_style:
gs_style = gs_layer.default_style.sld_body
set_layer_style(layer,
layer.alternate,
gs_style)
name = gs_layer.default_style.name
_default_style = gs_layer.default_style
except Exception as e:
logger.debug(e)
name = None
while not name and _tries < _max_retries:
try:
gs_layer = gs_catalog.get_layer(layer.name)
if gs_layer:
if gs_layer.default_style:
gs_style = gs_layer.default_style.sld_body
set_layer_style(layer,
layer.alternate,
gs_style)
name = gs_layer.default_style.name
if name:
break
except Exception as e:
logger.exception(e)
name = None
_tries += 1
time.sleep(3)
if not _default_style:
_default_style = layer.default_style if layer else None
name = _default_style.name if _default_style else None
gs_style = _default_style.sld_body if _default_style else None
if not name:
msg = """
GeoServer didn't return a default style for this layer.
            Consider increasing the OGC_SERVER MAX_RETRIES value.
"""
raise GeoNodeException(msg)
# Detect geometry type if it is a FeatureType
res = gs_layer.resource if gs_layer else None
if res and res.resource_type == 'featureType':
res.fetch()
ft = res.store.get_resources(name=res.name)
ft.fetch()
for attr in ft.dom.find("attributes").getchildren():
attr_binding = attr.find("binding")
if "jts.geom" in attr_binding.text:
if "Polygon" in attr_binding.text:
name = "polygon"
elif "Line" in attr_binding.text:
name = "line"
else:
name = "point"
# FIXME: When gsconfig.py exposes the default geometry type for vector
# layers we should use that rather than guessing based on the auto-detected
# style.
if name in _style_templates:
fg, bg, mark = next(_style_contexts)
return _style_templates[name] % dict(
name=layer.name,
fg=fg,
bg=bg,
mark=mark)
else:
return gs_style
def set_layer_style(saved_layer, title, sld, base_file=None):
# Check SLD is valid
try:
if sld:
if isfile(sld):
with open(sld, "rb") as sld_file:
sld = sld_file.read()
elif isinstance(sld, str):
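                # strip the textual "b'...'" wrapper and escaped newlines
                # sometimes left over when the SLD body was serialized as a string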
sld = sld.strip('b\'\n')
sld = re.sub(r'(\\r)|(\\n)', '', sld).encode("UTF-8")
etree.XML(sld)
elif base_file and isfile(base_file):
with open(base_file, "rb") as sld_file:
sld = sld_file.read()
dlxml.parse(base_file)
except Exception:
logger.exception("The uploaded SLD file is not valid XML")
raise Exception("The uploaded SLD file is not valid XML")
# Check Layer's available styles
match = None
styles = list(saved_layer.styles.all()) + [
saved_layer.default_style]
for style in styles:
if style and style.name == saved_layer.name:
match = style
break
layer = gs_catalog.get_layer(title)
style = None
if match is None:
try:
style = gs_catalog.get_style(saved_layer.name, workspace=saved_layer.workspace) or \
gs_catalog.get_style(saved_layer.name)
if not style:
style = gs_catalog.create_style(
saved_layer.name, sld, overwrite=False, raw=True, workspace=saved_layer.workspace)
except Exception as e:
logger.exception(e)
else:
style = gs_catalog.get_style(saved_layer.name, workspace=saved_layer.workspace) or \
gs_catalog.get_style(saved_layer.name)
try:
if not style:
style = gs_catalog.create_style(
saved_layer.name, sld,
overwrite=True, raw=True,
workspace=saved_layer.workspace)
elif sld:
style.update_body(sld)
except Exception as e:
logger.exception(e)
if layer and style:
_old_styles = []
_old_styles.append(gs_catalog.get_style(
name=saved_layer.name))
_old_styles.append(gs_catalog.get_style(
name=f"{saved_layer.workspace}_{saved_layer.name}"))
_old_styles.append(gs_catalog.get_style(
name=layer.default_style.name))
_old_styles.append(gs_catalog.get_style(
name=layer.default_style.name,
workspace=layer.default_style.workspace))
layer.default_style = style
gs_catalog.save(layer)
for _s in _old_styles:
try:
gs_catalog.delete(_s)
except Exception as e:
logger.debug(e)
set_styles(saved_layer, gs_catalog)
def cascading_delete(layer_name=None, catalog=None):
if not layer_name:
return
cat = catalog or gs_catalog
resource = None
workspace = None
try:
if layer_name.find(':') != -1 and len(layer_name.split(':')) == 2:
workspace, name = layer_name.split(':')
ws = cat.get_workspace(workspace)
store = None
try:
store = get_store(cat, name, workspace=ws)
except FailedRequestError:
if ogc_server_settings.DATASTORE:
try:
layers = Layer.objects.filter(alternate=layer_name)
for layer in layers:
store = get_store(cat, layer.store, workspace=ws)
except FailedRequestError:
logger.debug(
'the store was not found in geoserver')
else:
logger.debug(
'the store was not found in geoserver')
if ws is None or store is None:
                logger.debug(
                    'cascading delete was called on a layer where the workspace or store was not found')
resource = cat.get_resource(name=name, store=store, workspace=workspace)
else:
resource = cat.get_resource(name=layer_name)
except EnvironmentError as e:
if e.errno == errno.ECONNREFUSED:
msg = (f'Could not connect to geoserver at "{ogc_server_settings.LOCATION}"'
f'to save information for layer "{layer_name}"')
logger.error(msg)
return None
else:
raise e
if resource is None:
# If there is no associated resource,
# this method can not delete anything.
# Let's return and make a note in the log.
        logger.debug(
            'cascading_delete was called with a non-existent resource')
return
resource_name = resource.name
lyr = None
try:
lyr = cat.get_layer(resource_name)
except Exception as e:
logger.debug(e)
    if lyr is not None:  # if it is None, it has already been deleted
store = resource.store
styles = lyr.styles
try:
styles = styles + [lyr.default_style]
except Exception:
pass
if workspace:
gs_styles = [x for x in cat.get_styles(names=[f"{workspace}_{resource_name}"])]
styles = styles + gs_styles
if settings.DEFAULT_WORKSPACE and settings.DEFAULT_WORKSPACE != workspace:
gs_styles = [x for x in cat.get_styles(names=[f"{settings.DEFAULT_WORKSPACE}_{resource_name}"])]
styles = styles + gs_styles
cat.delete(lyr)
for s in styles:
if s is not None and s.name not in _default_style_names:
try:
logger.debug(f"Trying to delete Style [{s.name}]")
cat.delete(s, purge='true')
except Exception as e:
# Trying to delete a shared style will fail
# We'll catch the exception and log it.
logger.debug(e)
    # Due to a possible bug in GeoServer, we need this trick for now
# TODO: inspect the issue reported by this hack. Should be solved
# with GS 2.7+
try:
cat.delete(resource, recurse=True) # This may fail
except Exception:
cat._cache.clear()
cat.reset()
if store.resource_type == 'dataStore' and 'dbtype' in store.connection_parameters and \
store.connection_parameters['dbtype'] == 'postgis':
delete_from_postgis(resource_name, store)
else:
if store.resource_type == 'coverageStore':
try:
logger.debug(f" - Going to purge the {store.resource_type} : {store.href}")
cat.reset() # this resets the coverage readers and unlocks the files
cat.delete(store, purge='all', recurse=True)
                # cat.reload() # this preserves the integrity of geoserver
except Exception as e:
# Trying to recursively purge a store may fail
# We'll catch the exception and log it.
logger.debug(e)
else:
try:
if not store.get_resources():
cat.delete(store, recurse=True)
except Exception as e:
# Catch the exception and log it.
logger.debug(e)
def delete_from_postgis(layer_name, store):
"""
Delete a table from PostGIS (because Geoserver won't do it yet);
to be used after deleting a layer from the system.
"""
import psycopg2
# we will assume that store/database may change (when using shard for example)
# but user and password are the ones from settings (DATASTORE_URL)
db = ogc_server_settings.datastore_db
db_name = store.connection_parameters['database']
user = db['USER']
password = db['PASSWORD']
host = store.connection_parameters['host']
port = store.connection_parameters['port']
conn = None
try:
conn = psycopg2.connect(dbname=db_name, user=user, host=host, port=port, password=password)
cur = conn.cursor()
cur.execute(f"SELECT DropGeometryTable ('{layer_name}')")
conn.commit()
except Exception as e:
logger.error(
"Error deleting PostGIS table %s:%s",
layer_name,
str(e))
finally:
try:
if conn:
conn.close()
except Exception as e:
logger.error("Error closing PostGIS conn %s:%s", layer_name, str(e))
def gs_slurp(
ignore_errors=True,
verbosity=1,
console=None,
owner=None,
workspace=None,
store=None,
filter=None,
skip_unadvertised=False,
skip_geonode_registered=False,
remove_deleted=False,
permissions=None,
execute_signals=False):
"""Configure the layers available in GeoServer in GeoNode.
It returns a list of dictionaries with the name of the layer,
the result of the operation and the errors and traceback if it failed.
"""
if console is None:
console = open(os.devnull, 'w')
if verbosity > 0:
print("Inspecting the available layers in GeoServer ...", file=console)
cat = gs_catalog
if workspace is not None and workspace:
workspace = cat.get_workspace(workspace)
if workspace is None:
resources = []
else:
# obtain the store from within the workspace. if it exists, obtain resources
# directly from store, otherwise return an empty list:
if store is not None:
store = get_store(cat, store, workspace=workspace)
if store is None:
resources = []
else:
resources = cat.get_resources(stores=[store])
else:
resources = cat.get_resources(workspaces=[workspace])
elif store is not None:
store = get_store(cat, store)
resources = cat.get_resources(stores=[store])
else:
resources = cat.get_resources()
if remove_deleted:
resources_for_delete_compare = resources[:]
workspace_for_delete_compare = workspace
# filter out layers for delete comparison with GeoNode layers by following criteria:
# enabled = true, if --skip-unadvertised: advertised = true, but
# disregard the filter parameter in the case of deleting layers
try:
resources_for_delete_compare = [
k for k in resources_for_delete_compare if k.enabled in {"true", True}]
if skip_unadvertised:
resources_for_delete_compare = [
k for k in resources_for_delete_compare if k.advertised in {"true", True}]
except Exception:
if ignore_errors:
pass
else:
raise
if filter:
resources = [k for k in resources if filter in k.name]
# filter out layers depending on enabled, advertised status:
_resources = []
for k in resources:
try:
if k.enabled in {"true", True}:
_resources.append(k)
except Exception:
if ignore_errors:
continue
else:
raise
# resources = [k for k in resources if k.enabled in {"true", True}]
resources = _resources
if skip_unadvertised:
try:
resources = [k for k in resources if k.advertised in {"true", True}]
except Exception:
if ignore_errors:
pass
else:
raise
# filter out layers already registered in geonode
layer_names = Layer.objects.all().values_list('alternate', flat=True)
if skip_geonode_registered:
try:
resources = [k for k in resources
if f'{k.workspace.name}:{k.name}' not in layer_names]
except Exception:
if ignore_errors:
pass
else:
raise
# TODO: Should we do something with these?
# i.e. look for matching layers in GeoNode and also disable?
# disabled_resources = [k for k in resources if k.enabled == "false"]
number = len(resources)
if verbosity > 0:
msg = "Found %d layers, starting processing" % number
print(msg, file=console)
output = {
'stats': {
'failed': 0,
'updated': 0,
'created': 0,
'deleted': 0,
},
'layers': [],
'deleted_layers': []
}
start = datetime.datetime.now(timezone.get_current_timezone())
for i, resource in enumerate(resources):
name = resource.name
the_store = resource.store
workspace = the_store.workspace
try:
created = False
layer = Layer.objects.filter(name=name, workspace=workspace.name).first()
if not layer:
layer = Layer.objects.create(
name=name,
workspace=workspace.name,
store=the_store.name,
storeType=the_store.resource_type,
alternate=f"{workspace.name}:{resource.name}",
title=resource.title or _('No title provided'),
abstract=resource.abstract or _('No abstract provided'),
owner=owner,
uuid=str(uuid.uuid4())
)
created = True
bbox = resource.native_bbox
layer.set_bbox_polygon([bbox[0], bbox[2], bbox[1], bbox[3]], resource.projection)
# sync permissions in GeoFence
perm_spec = json.loads(_perms_info_json(layer))
layer.set_permissions(perm_spec)
# recalculate the layer statistics
set_attributes_from_geoserver(layer, overwrite=True)
            # in some cases we need to explicitly save the resource to execute the signals
# (for sure when running updatelayers)
if execute_signals:
layer.save(notify=True)
# Fix metadata links if the ip has changed
if layer.link_set.metadata().count() > 0:
if not created and settings.SITEURL not in layer.link_set.metadata()[0].url:
layer.link_set.metadata().delete()
layer.save()
metadata_links = []
for link in layer.link_set.metadata():
metadata_links.append((link.mime, link.name, link.url))
resource.metadata_links = metadata_links
cat.save(resource)
except Exception as e:
if ignore_errors:
status = 'failed'
exception_type, error, traceback = sys.exc_info()
else:
if verbosity > 0:
msg = "Stopping process because --ignore-errors was not set and an error was found."
print(msg, file=sys.stderr)
raise Exception(f"Failed to process {resource.name}") from e
else:
if created:
if not permissions:
layer.set_default_permissions()
else:
layer.set_permissions(permissions)
status = 'created'
output['stats']['created'] += 1
else:
status = 'updated'
output['stats']['updated'] += 1
msg = f"[{status}] Layer {name} ({(i + 1)}/{number})"
info = {'name': name, 'status': status}
if status == 'failed':
output['stats']['failed'] += 1
info['traceback'] = traceback
info['exception_type'] = exception_type
info['error'] = error
output['layers'].append(info)
if verbosity > 0:
print(msg, file=console)
if remove_deleted:
q = Layer.objects.filter()
if workspace_for_delete_compare is not None:
if isinstance(workspace_for_delete_compare, Workspace):
q = q.filter(
workspace__exact=workspace_for_delete_compare.name)
else:
q = q.filter(workspace__exact=workspace_for_delete_compare)
if store is not None:
if isinstance(
store,
CoverageStore) or isinstance(
store,
DataStore):
q = q.filter(store__exact=store.name)
else:
q = q.filter(store__exact=store)
logger.debug("Executing 'remove_deleted' logic")
logger.debug("GeoNode Layers Found:")
# compare the list of GeoNode layers obtained via query/filter with valid resources found in GeoServer
# filtered per options passed to updatelayers: --workspace, --store, --skip-unadvertised
# add any layers not found in GeoServer to deleted_layers (must match
# workspace and store as well):
deleted_layers = []
for layer in q:
logger.debug(
"GeoNode Layer info: name: %s, workspace: %s, store: %s",
layer.name,
layer.workspace,
layer.store)
layer_found_in_geoserver = False
for resource in resources_for_delete_compare:
# if layer.name matches a GeoServer resource, check also that
# workspace and store match, mark valid:
if layer.name == resource.name:
if layer.workspace == resource.workspace.name and layer.store == resource.store.name:
logger.debug(
"Matches GeoServer layer: name: %s, workspace: %s, store: %s",
resource.name,
resource.workspace.name,
resource.store.name)
layer_found_in_geoserver = True
if not layer_found_in_geoserver:
logger.debug(
"----- Layer %s not matched, marked for deletion ---------------",
layer.name)
deleted_layers.append(layer)
number_deleted = len(deleted_layers)
if verbosity > 0:
msg = "\nFound %d layers to delete, starting processing" % number_deleted if number_deleted > 0 else \
"\nFound %d layers to delete" % number_deleted
print(msg, file=console)
for i, layer in enumerate(deleted_layers):
logger.debug(
"GeoNode Layer to delete: name: %s, workspace: %s, store: %s",
layer.name,
layer.workspace,
layer.store)
try:
# delete ratings, comments, and taggit tags:
ct = ContentType.objects.get_for_model(layer)
OverallRating.objects.filter(
content_type=ct,
object_id=layer.id).delete()
Comment.objects.filter(
content_type=ct,
object_id=layer.id).delete()
layer.keywords.clear()
layer.delete()
output['stats']['deleted'] += 1
status = "delete_succeeded"
except Exception:
status = "delete_failed"
finally:
from .signals import geoserver_pre_delete
pre_delete.connect(geoserver_pre_delete, sender=Layer)
msg = f"[{status}] Layer {layer.name} ({(i + 1)}/{number_deleted})"
info = {'name': layer.name, 'status': status}
if status == "delete_failed":
exception_type, error, traceback = sys.exc_info()
info['traceback'] = traceback
info['exception_type'] = exception_type
info['error'] = error
output['deleted_layers'].append(info)
if verbosity > 0:
print(msg, file=console)
finish = datetime.datetime.now(timezone.get_current_timezone())
td = finish - start
    output['stats']['duration_sec'] = td.total_seconds()
return output
def get_stores(store_type=None):
cat = gs_catalog
stores = cat.get_stores()
store_list = []
for store in stores:
store.fetch()
stype = store.dom.find('type').text.lower()
if store_type and store_type.lower() == stype:
store_list.append({'name': store.name, 'type': stype})
elif store_type is None:
store_list.append({'name': store.name, 'type': stype})
return store_list
def set_attributes(
layer,
attribute_map,
overwrite=False,
attribute_stats=None):
""" *layer*: a geonode.layers.models.Layer instance
*attribute_map*: a list of 2-lists specifying attribute names and types,
example: [ ['id', 'Integer'], ... ]
*overwrite*: replace existing attributes with new values if name/type matches.
*attribute_stats*: dictionary of return values from get_attribute_statistics(),
of the form to get values by referencing attribute_stats[<layer_name>][<field_name>].
"""
# we need 3 more items; description, attribute_label, and display_order
attribute_map_dict = {
'field': 0,
'ftype': 1,
'description': 2,
'label': 3,
'display_order': 4,
}
for attribute in attribute_map:
if len(attribute) == 2:
attribute.extend((None, None, 0))
attributes = layer.attribute_set.all()
# Delete existing attributes if they no longer exist in an updated layer
for la in attributes:
lafound = False
for attribute in attribute_map:
field, ftype, description, label, display_order = attribute
if field == la.attribute:
lafound = True
# store description and attribute_label in attribute_map
attribute[attribute_map_dict['description']] = la.description
attribute[attribute_map_dict['label']] = la.attribute_label
attribute[attribute_map_dict['display_order']] = la.display_order
if overwrite or not lafound:
logger.debug(
"Going to delete [%s] for [%s]",
la.attribute,
layer.name)
la.delete()
    # Add new layer attributes if they don't exist already
if attribute_map:
        next_display_order = len(Attribute.objects.filter(layer=layer)) + 1
for attribute in attribute_map:
field, ftype, description, label, display_order = attribute
if field:
_gs_attrs = Attribute.objects.filter(layer=layer, attribute=field)
if _gs_attrs.count() == 1:
la = _gs_attrs.get()
else:
if _gs_attrs.count() > 0:
_gs_attrs.delete()
la = Attribute.objects.create(layer=layer, attribute=field)
la.visible = ftype.find("gml:") != 0
la.attribute_type = ftype
la.description = description
la.attribute_label = label
                la.display_order = next_display_order
                next_display_order += 1
if (not attribute_stats or layer.name not in attribute_stats or
field not in attribute_stats[layer.name]):
result = None
else:
result = attribute_stats[layer.name][field]
if result:
logger.debug("Generating layer attribute statistics")
la.count = result['Count']
la.min = result['Min']
la.max = result['Max']
la.average = result['Average']
la.median = result['Median']
la.stddev = result['StandardDeviation']
la.sum = result['Sum']
la.unique_values = result['unique_values']
la.last_stats_updated = datetime.datetime.now(timezone.get_current_timezone())
try:
la.save()
except Exception as e:
logger.exception(e)
else:
logger.debug("No attributes found")
def set_attributes_from_geoserver(layer, overwrite=False):
"""
Retrieve layer attribute names & types from Geoserver,
then store in GeoNode database using Attribute model
"""
attribute_map = []
server_url = ogc_server_settings.LOCATION if layer.storeType != "remoteStore" else layer.remote_service.service_url
if layer.storeType == "remoteStore" and layer.remote_service.ptype == "gxp_arcrestsource":
dft_url = f"{server_url}{(layer.alternate or layer.typename)}?f=json"
try:
# The code below will fail if http_client cannot be imported
req, body = http_client.get(dft_url, user=_user)
body = json.loads(body)
attribute_map = [[n["name"], _esri_types[n["type"]]]
for n in body["fields"] if n.get("name") and n.get("type")]
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
attribute_map = []
elif layer.storeType in {"dataStore", "remoteStore", "wmsStore"}:
typename = layer.alternate if layer.alternate else layer.typename
dft_url = re.sub(r"\/wms\/?$",
"/",
server_url) + "ows?" + urlencode({"service": "wfs",
"version": "1.0.0",
"request": "DescribeFeatureType",
"typename": typename,
})
try:
# The code below will fail if http_client cannot be imported or WFS not supported
req, body = http_client.get(dft_url, user=_user)
doc = dlxml.fromstring(body.encode())
xsd = "{http://www.w3.org/2001/XMLSchema}"
path = f".//{xsd}extension/{xsd}sequence/{xsd}element"
attribute_map = [[n.attrib["name"], n.attrib["type"]] for n in doc.findall(
path) if n.attrib.get("name") and n.attrib.get("type")]
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
attribute_map = []
# Try WMS instead
dft_url = server_url + "?" + urlencode({
"service": "wms",
"version": "1.0.0",
"request": "GetFeatureInfo",
"bbox": ','.join([str(x) for x in layer.bbox]),
"LAYERS": layer.alternate,
"QUERY_LAYERS": typename,
"feature_count": 1,
"width": 1,
"height": 1,
"srs": "EPSG:4326",
"info_format": "text/html",
"x": 1,
"y": 1
})
try:
req, body = http_client.get(dft_url, user=_user)
soup = BeautifulSoup(body, features="lxml")
for field in soup.findAll('th'):
                    if field.string is None:
field_name = field.contents[0].string
else:
field_name = field.string
attribute_map.append([field_name, "xsd:string"])
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
attribute_map = []
elif layer.storeType in ["coverageStore"]:
typename = layer.alternate if layer.alternate else layer.typename
dc_url = server_url + "wcs?" + urlencode({
"service": "wcs",
"version": "1.1.0",
"request": "DescribeCoverage",
"identifiers": typename
})
try:
req, body = http_client.get(dc_url, user=_user)
doc = dlxml.fromstring(body.encode())
wcs = "{http://www.opengis.net/wcs/1.1.1}"
path = f".//{wcs}Axis/{wcs}AvailableKeys/{wcs}Key"
attribute_map = [[n.text, "raster"] for n in doc.findall(path)]
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
attribute_map = []
    # Get attribute statistics & package for call to set_attributes()
attribute_stats = defaultdict(dict)
# Add new layer attributes if they don't already exist
for attribute in attribute_map:
field, ftype = attribute
if field is not None:
if Attribute.objects.filter(layer=layer, attribute=field).exists():
continue
elif is_layer_attribute_aggregable(
layer.storeType,
field,
ftype):
logger.debug("Generating layer attribute statistics")
result = get_attribute_statistics(layer.alternate or layer.typename, field)
else:
result = None
attribute_stats[layer.name][field] = result
set_attributes(
layer, attribute_map, overwrite=overwrite, attribute_stats=attribute_stats
)
def set_styles(layer, gs_catalog):
style_set = []
gs_layer = None
try:
gs_layer = gs_catalog.get_layer(layer.name)
except Exception:
tb = traceback.format_exc()
logger.exception(tb)
if not gs_layer:
try:
gs_layer = gs_catalog.get_layer(layer.alternate or layer.typename)
except Exception:
tb = traceback.format_exc()
logger.error(tb)
logger.exception("No GeoServer Layer found!")
if gs_layer:
default_style = gs_catalog.get_style(
name=gs_layer.default_style.name,
workspace=gs_layer.default_style.workspace)
if default_style:
# make sure we are not using a default SLD (which won't be editable)
layer.default_style = save_style(default_style, layer)
style_set.append(layer.default_style)
try:
if gs_layer.styles:
alt_styles = gs_layer.styles
for alt_style in alt_styles:
                    if alt_style:
_s = save_style(alt_style, layer)
if _s != layer.default_style:
style_set.append(_s)
except Exception as e:
logger.exception(e)
if style_set:
# Remove duplicates
style_set = list(dict.fromkeys(style_set))
layer.styles.set(style_set)
# Update default style to database
to_update = {
'default_style': layer.default_style
}
Layer.objects.filter(id=layer.id).update(**to_update)
layer.refresh_from_db()
# Legend links
logger.debug(" -- Resource Links[Legend link]...")
try:
from geonode.base.models import Link
layer_legends = Link.objects.filter(resource=layer.resourcebase_ptr, name='Legend')
for style in set(list(layer.styles.all()) + [layer.default_style, ]):
if style:
style_name = os.path.basename(
urlparse(style.sld_url).path).split('.')[0]
legend_url = ogc_server_settings.PUBLIC_LOCATION + \
'ows?service=WMS&request=GetLegendGraphic&format=image/png&WIDTH=20&HEIGHT=20&LAYER=' + \
layer.alternate + '&STYLE=' + style_name + \
'&legend_options=fontAntiAliasing:true;fontSize:12;forceLabels:on'
if layer_legends.filter(resource=layer.resourcebase_ptr,
name='Legend',
url=legend_url).count() < 2:
Link.objects.update_or_create(
resource=layer.resourcebase_ptr,
name='Legend',
url=legend_url,
defaults=dict(
extension='png',
url=legend_url,
mime='image/png',
link_type='image',
)
)
logger.debug(" -- Resource Links[Legend link]...done!")
except Exception as e:
logger.debug(f" -- Resource Links[Legend link]...error: {e}")
try:
set_geowebcache_invalidate_cache(layer.alternate or layer.typename, cat=gs_catalog)
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
def save_style(gs_style, layer):
style_name = os.path.basename(
urlparse(gs_style.body_href).path).split('.')[0]
sld_name = gs_style.name
sld_body = gs_style.sld_body
if not gs_style.workspace:
gs_style = gs_catalog.create_style(
style_name, sld_body,
raw=True, overwrite=True,
workspace=layer.workspace)
style = None
try:
style, created = Style.objects.get_or_create(name=style_name)
style.workspace = gs_style.workspace
style.sld_title = gs_style.sld_title if gs_style.style_format != 'css' and gs_style.sld_title else sld_name
style.sld_body = gs_style.sld_body
style.sld_url = gs_style.body_href
style.save()
except Exception as e:
tb = traceback.format_exc()
logger.debug(tb)
raise e
return style
def is_layer_attribute_aggregable(store_type, field_name, field_type):
"""
Decipher whether layer attribute is suitable for statistical derivation
"""
# must be vector layer
if store_type != 'dataStore':
return False
# must be a numeric data type
if field_type not in LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES:
return False
# must not be an identifier type field
if field_name.lower() in {'id', 'identifier'}:
return False
return True
def get_attribute_statistics(layer_name, field):
"""
Generate statistics (range, mean, median, standard deviation, unique values)
for layer attribute
"""
logger.debug('Deriving aggregate statistics for attribute %s', field)
if not ogc_server_settings.WPS_ENABLED:
return None
try:
return wps_execute_layer_attribute_statistics(layer_name, field)
except Exception:
tb = traceback.format_exc()
logger.debug(tb)
logger.exception('Error generating layer aggregate statistics')
def get_wcs_record(instance, retry=True):
wcs = WebCoverageService(f"{ogc_server_settings.LOCATION}wcs", '1.0.0')
key = f"{instance.workspace}:{instance.name}"
logger.debug(wcs.contents)
if key in wcs.contents:
return wcs.contents[key]
else:
msg = (f"Layer '{key}' was not found in WCS service at {ogc_server_settings.public_url}."
)
if retry:
logger.debug(
f"{msg} Waiting a couple of seconds before trying again.")
time.sleep(2)
return get_wcs_record(instance, retry=False)
else:
raise GeoNodeException(msg)
def get_coverage_grid_extent(instance):
"""
Returns a list of integers with the size of the coverage
extent in pixels
"""
instance_wcs = get_wcs_record(instance)
grid = instance_wcs.grid
return [(int(h) - int(l) + 1) for
h, l in zip(grid.highlimits, grid.lowlimits)]
GEOSERVER_LAYER_TYPES = {
'vector': FeatureType.resource_type,
'raster': Coverage.resource_type,
}
def cleanup(name, uuid):
"""Deletes GeoServer and Catalogue records for a given name.
Useful to clean the mess when something goes terribly wrong.
It also verifies if the Django record existed, in which case
it performs no action.
"""
try:
Layer.objects.get(name=name)
except Layer.DoesNotExist:
pass
else:
msg = f'Not doing any cleanup because the layer {name} exists in the Django db.'
raise GeoNodeException(msg)
cat = gs_catalog
gs_store = None
gs_layer = None
gs_resource = None
# FIXME: Could this lead to someone deleting for example a postgis db
# with the same name of the uploaded file?.
try:
gs_store = cat.get_store(name)
if gs_store is not None:
gs_layer = cat.get_layer(name)
if gs_layer is not None:
gs_resource = gs_layer.resource
else:
gs_layer = None
gs_resource = None
except FailedRequestError as e:
        msg = f"Couldn't connect to GeoServer while cleaning up layer: {e}"
logger.warning(msg)
if gs_layer is not None:
try:
cat.delete(gs_layer)
except Exception:
logger.warning("Couldn't delete GeoServer layer during cleanup()")
if gs_resource is not None:
try:
cat.delete(gs_resource)
except Exception:
msg = 'Couldn\'t delete GeoServer resource during cleanup()'
logger.warning(msg)
if gs_store is not None:
try:
cat.delete(gs_store)
except Exception:
logger.warning("Couldn't delete GeoServer store during cleanup()")
logger.warning('Deleting dangling Catalogue record for [%s] '
'(no Django record to match)', name)
if 'geonode.catalogue' in settings.INSTALLED_APPS:
from geonode.catalogue import get_catalogue
catalogue = get_catalogue()
catalogue.remove_record(uuid)
logger.warning('Finished cleanup after failed Catalogue/Django '
'import for layer: %s', name)
def create_geoserver_db_featurestore(
store_type=None, store_name=None,
author_name='admin', author_email='[email protected]',
charset="UTF-8", workspace=None):
cat = gs_catalog
dsname = store_name or ogc_server_settings.DATASTORE
# get or create datastore
ds_exists = False
try:
if dsname:
ds = cat.get_store(dsname, workspace=workspace)
else:
return None
if ds is None:
raise FailedRequestError
ds_exists = True
except FailedRequestError:
logger.debug(
f'Creating target datastore {dsname}')
ds = cat.create_datastore(dsname, workspace=workspace)
db = ogc_server_settings.datastore_db
db_engine = 'postgis' if \
'postgis' in db['ENGINE'] else db['ENGINE']
ds.connection_parameters.update(
{'Evictor run periodicity': 300,
'Estimated extends': 'true',
'fetch size': 100000,
'encode functions': 'false',
'Expose primary keys': 'true',
'validate connections': 'true',
'Support on the fly geometry simplification': 'false',
'Connection timeout': 10,
'create database': 'false',
'Batch insert size': 30,
'preparedStatements': 'true',
'min connections': 10,
'max connections': 100,
'Evictor tests per run': 3,
'Max connection idle time': 300,
'Loose bbox': 'true',
'Test while idle': 'true',
'host': db['HOST'],
'port': db['PORT'] if isinstance(
db['PORT'], str) else str(db['PORT']) or '5432',
'database': db['NAME'],
'user': db['USER'],
'passwd': db['PASSWORD'],
'dbtype': db_engine}
)
if ds_exists:
ds.save_method = "PUT"
        logger.debug(f'Updating target datastore {dsname}')
cat.save(ds)
    logger.debug(f'Reloading target datastore {dsname}')
ds = get_store(cat, dsname, workspace=workspace)
assert ds.enabled
return ds
def _create_featurestore(name, data, overwrite=False, charset="UTF-8", workspace=None):
cat = gs_catalog
cat.create_featurestore(name, data, workspace=workspace, overwrite=overwrite, charset=charset)
store = get_store(cat, name, workspace=workspace)
return store, cat.get_resource(name=name, store=store, workspace=workspace)
def _create_coveragestore(name, data, overwrite=False, charset="UTF-8", workspace=None):
cat = gs_catalog
cat.create_coveragestore(name, path=data, workspace=workspace, overwrite=overwrite, upload_data=True)
store = get_store(cat, name, workspace=workspace)
return store, cat.get_resource(name=name, store=store, workspace=workspace)
def _create_db_featurestore(name, data, overwrite=False, charset="UTF-8", workspace=None):
"""Create a database store then use it to import a shapefile.
If the import into the database fails then delete the store
(and delete the PostGIS table for it).
"""
cat = gs_catalog
db = ogc_server_settings.datastore_db
# dsname = ogc_server_settings.DATASTORE
dsname = db['NAME']
ds = create_geoserver_db_featurestore(store_name=dsname, workspace=workspace)
try:
cat.add_data_to_store(ds,
name,
data,
overwrite=overwrite,
workspace=workspace,
charset=charset)
resource = cat.get_resource(name=name, store=ds, workspace=workspace)
assert resource is not None
return ds, resource
except Exception:
msg = _("An exception occurred loading data to PostGIS")
msg += f"- {sys.exc_info()[1]}"
try:
delete_from_postgis(name, ds)
except Exception:
msg += _(" Additionally an error occured during database cleanup")
msg += f"- {sys.exc_info()[1]}"
raise GeoNodeException(msg)
def get_store(cat, name, workspace=None):
# Make sure workspace is a workspace object and not a string.
# If the workspace does not exist, continue as if no workspace had been defined.
if isinstance(workspace, str):
workspace = cat.get_workspace(workspace)
if workspace is None:
workspace = cat.get_default_workspace()
if workspace:
try:
store = cat.get_xml(f'{workspace.datastore_url[:-4]}/{name}.xml')
except FailedRequestError:
try:
store = cat.get_xml(f'{workspace.coveragestore_url[:-4]}/{name}.xml')
except FailedRequestError:
try:
store = cat.get_xml(f'{workspace.wmsstore_url[:-4]}/{name}.xml')
except FailedRequestError:
raise FailedRequestError(f"No store found named: {name}")
if store:
if store.tag == 'dataStore':
store = datastore_from_index(cat, workspace, store)
elif store.tag == 'coverageStore':
store = coveragestore_from_index(cat, workspace, store)
elif store.tag == 'wmsStore':
store = wmsstore_from_index(cat, workspace, store)
return store
else:
raise FailedRequestError(f"No store found named: {name}")
else:
raise FailedRequestError(f"No store found named: {name}")
class ServerDoesNotExist(Exception):
pass
class OGC_Server(object):
"""
OGC Server object.
"""
def __init__(self, ogc_server, alias):
self.alias = alias
self.server = ogc_server
def __getattr__(self, item):
return self.server.get(item)
@property
def credentials(self):
"""
Returns a tuple of the server's credentials.
"""
creds = namedtuple('OGC_SERVER_CREDENTIALS', ['username', 'password'])
return creds(username=self.USER, password=self.PASSWORD)
@property
def datastore_db(self):
"""
Returns the server's datastore dict or None.
"""
if self.DATASTORE and settings.DATABASES.get(self.DATASTORE, None):
datastore_dict = settings.DATABASES.get(self.DATASTORE, dict())
return datastore_dict
else:
return dict()
@property
def ows(self):
"""
The Open Web Service url for the server.
"""
location = self.PUBLIC_LOCATION if self.PUBLIC_LOCATION else self.LOCATION
return self.OWS_LOCATION if self.OWS_LOCATION else urljoin(location, 'ows')
@property
def rest(self):
"""
The REST endpoint for the server.
"""
return urljoin(self.LOCATION, 'rest') if not self.REST_LOCATION else self.REST_LOCATION
@property
def public_url(self):
"""
The global public endpoint for the server.
"""
return self.LOCATION if not self.PUBLIC_LOCATION else self.PUBLIC_LOCATION
@property
def internal_ows(self):
"""
The Open Web Service url for the server used by GeoNode internally.
"""
location = self.LOCATION
return urljoin(location, 'ows')
@property
def hostname(self):
return urlsplit(self.LOCATION).hostname
@property
def netloc(self):
return urlsplit(self.LOCATION).netloc
def __str__(self):
return str(self.alias)
class OGC_Servers_Handler(object):
"""
OGC Server Settings Convenience dict.
"""
def __init__(self, ogc_server_dict):
self.servers = ogc_server_dict
# FIXME(Ariel): Are there better ways to do this without involving
# local?
self._servers = local()
def ensure_valid_configuration(self, alias):
"""
Ensures the settings are valid.
"""
try:
server = self.servers[alias]
except KeyError:
raise ServerDoesNotExist(f"The server {alias} doesn't exist")
if 'PRINTNG_ENABLED' in server:
raise ImproperlyConfigured("The PRINTNG_ENABLED setting has been removed, use 'PRINT_NG_ENABLED' instead.")
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection where no settings is provided.
"""
try:
server = self.servers[alias]
except KeyError:
raise ServerDoesNotExist(f"The server {alias} doesn't exist")
server.setdefault('BACKEND', 'geonode.geoserver')
server.setdefault('LOCATION', 'http://localhost:8080/geoserver/')
server.setdefault('USER', 'admin')
server.setdefault('PASSWORD', 'geoserver')
server.setdefault('DATASTORE', str())
for option in ['MAPFISH_PRINT_ENABLED', 'PRINT_NG_ENABLED', 'GEONODE_SECURITY_ENABLED',
'GEOFENCE_SECURITY_ENABLED', 'BACKEND_WRITE_ENABLED']:
server.setdefault(option, True)
for option in ['WMST_ENABLED', 'WPS_ENABLED']:
server.setdefault(option, False)
def __getitem__(self, alias):
if hasattr(self._servers, alias):
return getattr(self._servers, alias)
self.ensure_defaults(alias)
self.ensure_valid_configuration(alias)
server = self.servers[alias]
server = OGC_Server(alias=alias, ogc_server=server)
setattr(self._servers, alias, server)
return server
def __setitem__(self, key, value):
setattr(self._servers, key, value)
def __iter__(self):
return iter(self.servers)
def all(self):
return [self[alias] for alias in self]
def fetch_gs_resource(instance, values, tries):
_max_tries = getattr(ogc_server_settings, "MAX_RETRIES", 2)
try:
gs_resource = gs_catalog.get_resource(
name=instance.name,
store=instance.store,
workspace=instance.workspace)
except Exception:
try:
gs_resource = gs_catalog.get_resource(
name=instance.alternate,
store=instance.store,
workspace=instance.workspace)
except Exception:
try:
gs_resource = gs_catalog.get_resource(
name=instance.alternate or instance.typename)
except Exception:
gs_resource = None
if gs_resource:
if values:
gs_resource.title = values.get('title', '')
gs_resource.abstract = values.get('abstract', '')
else:
values = {}
values.update(dict(store=gs_resource.store.name,
storeType=gs_resource.store.resource_type,
alternate=f"{gs_resource.store.workspace.name}:{gs_resource.name}",
title=gs_resource.title or gs_resource.store.name,
abstract=gs_resource.abstract or '',
owner=instance.owner))
else:
msg = f"There isn't a geoserver resource for this layer: {instance.name}"
logger.exception(msg)
if tries >= _max_tries:
# raise GeoNodeException(msg)
return (values, None)
gs_resource = None
time.sleep(5)
return (values, gs_resource)
def get_wms():
wms_url = f"{ogc_server_settings.internal_ows}?service=WMS&request=GetCapabilities&version=1.1.0"
req, body = http_client.get(wms_url, user=_user)
_wms = WebMapService(wms_url, xml=body)
return _wms
def wps_execute_layer_attribute_statistics(layer_name, field):
"""Derive aggregate statistics from WPS endpoint"""
# generate statistics using WPS
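    # Illustrative shape of the parsed WPS response returned below:
    #   {'Min': '0', 'Max': '42', 'Average': '13.5', 'Median': 'NA',
    #    'StandardDeviation': 'NA', 'Sum': '5400', 'Count': 400, 'unique_values': 'NA'}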
url = urljoin(ogc_server_settings.LOCATION, 'ows')
request = render_to_string('layers/wps_execute_gs_aggregate.xml', {
'layer_name': layer_name,
'field': field
})
u = urlsplit(url)
headers = {
'User-Agent': 'OWSLib (https://geopython.github.io/OWSLib)',
'Content-type': 'text/xml',
'Accept': 'text/xml',
'Accept-Language': 'en-US',
'Accept-Encoding': 'gzip,deflate',
'Host': u.netloc,
}
response, content = http_client.request(
url,
method='POST',
data=request,
headers=headers,
user=_user,
timeout=5,
retries=1)
exml = dlxml.fromstring(content.encode())
result = {}
for f in ['Min', 'Max', 'Average', 'Median', 'StandardDeviation', 'Sum']:
fr = exml.find(f)
if fr is not None:
result[f] = fr.text
else:
result[f] = 'NA'
count = exml.find('Count')
if count is not None:
result['Count'] = int(count.text)
else:
result['Count'] = 0
result['unique_values'] = 'NA'
return result
def _stylefilterparams_geowebcache_layer(layer_name):
headers = {
"Content-Type": "text/xml"
}
url = f'{ogc_server_settings.LOCATION}gwc/rest/layers/{layer_name}.xml'
# read GWC configuration
req, content = http_client.get(
url,
headers=headers,
user=_user)
if req.status_code != 200:
logger.error(
f"Error {req.status_code} reading Style Filter Params GeoWebCache at {url}"
)
return
# check/write GWC filter parameters
body = None
    tree = dlxml.fromstring(content.encode())
param_filters = tree.findall('parameterFilters')
if param_filters and len(param_filters) > 0:
if not param_filters[0].findall('styleParameterFilter'):
style_filters_xml = "<styleParameterFilter><key>STYLES</key>\
<defaultValue></defaultValue></styleParameterFilter>"
style_filters_elem = dlxml.fromstring(style_filters_xml)
param_filters[0].append(style_filters_elem)
body = ET.tostring(tree)
if body:
req, content = http_client.post(
url,
data=body,
headers=headers,
user=_user)
if req.status_code != 200:
logger.error(
f"Error {req.status_code} writing Style Filter Params GeoWebCache at {url}"
)
def _invalidate_geowebcache_layer(layer_name, url=None):
# http.add_credentials(username, password)
headers = {
"Content-Type": "text/xml",
}
body = f"""
<truncateLayer><layerName>{layer_name}</layerName></truncateLayer>
""".strip()
if not url:
url = f'{ogc_server_settings.LOCATION}gwc/rest/masstruncate'
req, content = http_client.post(
url,
data=body,
headers=headers,
user=_user)
if req.status_code != 200:
logger.debug(
f"Error {req.status_code} invalidating GeoWebCache at {url}"
)
def style_update(request, url, workspace=None):
"""
Sync style stuff from GS to GN.
Ideally we should call this from a view straight from GXP, and we should use
gsConfig, that at this time does not support styles updates. Before gsConfig
is updated, for now we need to parse xml.
In case of a DELETE, we need to query request.path to get the style name,
and then remove it.
In case of a POST or PUT, we need to parse the xml from
request.body, which is in this format:
"""
affected_layers = []
if request.method in ('POST', 'PUT', 'DELETE'): # we need to parse xml
# Need to remove NSx from IE11
if "HTTP_USER_AGENT" in request.META:
if ('Trident/7.0' in request.META['HTTP_USER_AGENT'] and
'rv:11.0' in request.META['HTTP_USER_AGENT']):
txml = re.sub(r'xmlns:NS[0-9]=""', '', request.body)
txml = re.sub(r'NS[0-9]:', '', txml)
request._body = txml
style_name = os.path.basename(request.path)
sld_title = style_name
sld_body = None
sld_url = url
layer_name = None
if 'name' in request.GET:
style_name = request.GET['name']
sld_body = request.body
elif request.method == 'DELETE':
style_name = os.path.basename(request.path)
else:
sld_body = request.body
gs_style = gs_catalog.get_style(name=style_name) or gs_catalog.get_style(name=style_name, workspace=workspace)
if gs_style:
sld_title = gs_style.sld_title if gs_style.style_format != 'css' and gs_style.sld_title else style_name
sld_body = gs_style.sld_body
sld_url = gs_style.body_href
else:
try:
tree = ET.ElementTree(dlxml.fromstring(request.body))
elm_namedlayer_name = tree.findall(
'.//{http://www.opengis.net/sld}Name')[0]
elm_user_style_name = tree.findall(
'.//{http://www.opengis.net/sld}Name')[1]
elm_user_style_title = tree.find(
'.//{http://www.opengis.net/sld}Title')
layer_name = elm_namedlayer_name.text
if elm_user_style_title is None:
sld_title = elm_user_style_name.text
else:
sld_title = elm_user_style_title.text
sld_body = f'<?xml version="1.0" encoding="UTF-8"?>{request.body}'
except Exception:
logger.warn("Could not recognize Style and Layer name from Request!")
# add style in GN and associate it to layer
if request.method == 'DELETE':
if style_name:
Style.objects.filter(name=style_name).delete()
if request.method == 'POST':
style = None
if style_name and not re.match(temp_style_name_regex, style_name):
style, created = Style.objects.get_or_create(name=style_name)
style.workspace = workspace
style.sld_body = sld_body
style.sld_url = sld_url
style.sld_title = sld_title
style.save()
layer = None
if layer_name:
try:
layer = Layer.objects.get(name=layer_name)
except Exception:
try:
layer = Layer.objects.get(alternate=layer_name)
except Exception:
pass
if layer:
if style:
style.layer_styles.add(layer)
style.save()
affected_layers.append(layer)
elif request.method == 'PUT': # update style in GN
if style_name and not re.match(temp_style_name_regex, style_name):
style, created = Style.objects.get_or_create(name=style_name)
style.workspace = workspace
style.sld_body = sld_body
style.sld_url = sld_url
style.sld_title = sld_title
style.save()
for layer in style.layer_styles.all():
affected_layers.append(layer)
# Invalidate GeoWebCache so it doesn't retain old style in tiles
try:
if layer_name:
_stylefilterparams_geowebcache_layer(layer_name)
_invalidate_geowebcache_layer(layer_name)
except Exception:
pass
return affected_layers
def set_time_info(layer, attribute, end_attribute, presentation,
precision_value, precision_step, enabled=True):
'''Configure the time dimension for a layer.
:param layer: the layer to configure
:param attribute: the attribute used to represent the instant or period
start
:param end_attribute: the optional attribute used to represent the end
period
:param presentation: either 'LIST', 'DISCRETE_INTERVAL', or
'CONTINUOUS_INTERVAL'
:param precision_value: number representing number of steps
:param precision_step: one of 'seconds', 'minutes', 'hours', 'days',
'months', 'years'
:param enabled: defaults to True
'''
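    # e.g. (illustrative)
    #   set_time_info(layer, 'date_start', 'date_end', 'LIST', 1, 'days')
    # stores a DimensionInfo with resolution '1 days' in the resource metadata.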
    gs_layer = gs_catalog.get_layer(layer.name)
    if gs_layer is None:
        raise ValueError(f'no such layer: {layer.name}')
    resource = gs_layer.resource
if not resource:
resources = gs_catalog.get_resources(stores=[layer.name])
if resources:
resource = resources[0]
resolution = None
if precision_value and precision_step:
resolution = f'{precision_value} {precision_step}'
info = DimensionInfo("time", enabled, presentation, resolution, "ISO8601",
None, attribute=attribute, end_attribute=end_attribute)
if resource and resource.metadata:
metadata = dict(resource.metadata or {})
else:
metadata = dict({})
metadata['time'] = info
if resource and resource.metadata:
resource.metadata = metadata
if resource:
gs_catalog.save(resource)
def get_time_info(layer):
'''Get the configured time dimension metadata for the layer as a dict.
The keys of the dict will be those of the parameters of `set_time_info`.
:returns: dict of values or None if not configured
'''
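    # Illustrative return value:
    #   {'enabled': True, 'attribute': 'date_start', 'end_attribute': None,
    #    'presentation': 'LIST', 'precision_value': '1', 'precision_step': 'days'}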
    gs_layer = gs_catalog.get_layer(layer.name)
    if gs_layer is None:
        raise ValueError(f'no such layer: {layer.name}')
    resource = gs_layer.resource
if not resource:
resources = gs_catalog.get_resources(stores=[layer.name])
if resources:
resource = resources[0]
info = resource.metadata.get('time', None) if resource.metadata else None
vals = None
if info:
value = step = None
resolution = info.resolution_str()
if resolution:
value, step = resolution.split()
vals = dict(
enabled=info.enabled,
attribute=info.attribute,
end_attribute=info.end_attribute,
presentation=info.presentation,
precision_value=value,
precision_step=step,
)
return vals
ogc_server_settings = OGC_Servers_Handler(settings.OGC_SERVER)['default']
_wms = None
_csw = None
_user, _password = ogc_server_settings.credentials
url = ogc_server_settings.rest
gs_catalog = Catalog(url, _user, _password,
retries=ogc_server_settings.MAX_RETRIES,
backoff_factor=ogc_server_settings.BACKOFF_FACTOR)
gs_uploader = Client(url, _user, _password)
_punc = re.compile(r"[\.:]") # regex for punctuation that confuses restconfig
_foregrounds = [
"#ffbbbb",
"#bbffbb",
"#bbbbff",
"#ffffbb",
"#bbffff",
"#ffbbff"]
_backgrounds = [
"#880000",
"#008800",
"#000088",
"#888800",
"#008888",
"#880088"]
_marks = ["square", "circle", "cross", "x", "triangle"]
_style_contexts = zip(cycle(_foregrounds), cycle(_backgrounds), cycle(_marks))
_default_style_names = ["point", "line", "polygon", "raster"]
_esri_types = {
"esriFieldTypeDouble": "xsd:double",
"esriFieldTypeString": "xsd:string",
"esriFieldTypeSmallInteger": "xsd:int",
"esriFieldTypeInteger": "xsd:int",
"esriFieldTypeDate": "xsd:dateTime",
"esriFieldTypeOID": "xsd:long",
"esriFieldTypeGeometry": "xsd:geometry",
"esriFieldTypeBlob": "xsd:base64Binary",
"esriFieldTypeRaster": "raster",
"esriFieldTypeGUID": "xsd:string",
"esriFieldTypeGlobalID": "xsd:string",
"esriFieldTypeXML": "xsd:anyType"}
def _dump_image_spec(request_body, image_spec):
millis = int(round(time.time() * 1000))
try:
with tempfile.TemporaryDirectory() as tmp_dir:
_request_body_file_name = os.path.join(
tmp_dir,
f"request_body_{millis}.dump")
_image_spec_file_name = os.path.join(
tmp_dir,
f"image_spec_{millis}.dump")
with open(_request_body_file_name, "w") as _request_body_file:
_request_body_file.write(f"{request_body}")
copyfile(
_request_body_file_name,
os.path.join(tempfile.gettempdir(), f"request_body_{millis}.dump"))
with open(_image_spec_file_name, "w") as _image_spec_file:
_image_spec_file.write(f"{image_spec}")
copyfile(
_image_spec_file_name,
os.path.join(tempfile.gettempdir(), f"image_spec_{millis}.dump"))
return f"Dumping image_spec to: {os.path.join(tempfile.gettempdir(), f'image_spec_{millis}.dump')}"
except Exception as e:
logger.exception(e)
return f"Unable to dump image_spec for request: {request_body}"
def _fixup_ows_url(thumb_spec):
# @HACK - for whatever reason, a map's maplayers ows_url contains only /geoserver/wms
# so rendering of thumbnails fails - replace those uri's with full geoserver URL
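    # e.g. (illustrative URLs) '"https://public.example/geoserver/wms...' in the spec
    # becomes '"http://localhost:8080/geoserver/wms...' so rendering hits GeoServer directly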
gspath = f"\"{ogc_server_settings.public_url}" # this should be in img src attributes
repl = f"\"{ogc_server_settings.LOCATION}"
return re.sub(gspath, repl, thumb_spec)
def mosaic_delete_first_granule(cat, layer):
    # since GeoNode will upload the first granule again through the Importer,
    # we need to delete the one created by the gs_config
cat._cache.clear()
store = cat.get_store(layer)
coverages = cat.mosaic_coverages(store)
granule_id = f"{layer}.1"
cat.mosaic_delete_granule(coverages['coverages']['coverage'][0]['name'], store, granule_id)
def set_time_dimension(cat, name, workspace, time_presentation, time_presentation_res, time_presentation_default_value,
time_presentation_reference_value):
# configure the layer time dimension as LIST
presentation = time_presentation
if not presentation:
presentation = "LIST"
resolution = None
if time_presentation == 'DISCRETE_INTERVAL':
resolution = time_presentation_res
strategy = None
    if time_presentation_default_value:
strategy = time_presentation_default_value
timeInfo = DimensionInfo("time", "true", presentation, resolution, "ISO8601", None, attribute="time",
strategy=strategy, reference_value=time_presentation_reference_value)
layer = cat.get_layer(name)
resource = layer.resource if layer else None
if not resource:
resources = cat.get_resources(stores=[name]) or cat.get_resources(stores=[name], workspaces=[workspace])
if resources:
resource = resources[0]
if not resource:
logger.exception(f"No resource could be found on GeoServer with name {name}")
raise Exception(f"No resource could be found on GeoServer with name {name}")
resource.metadata = {'time': timeInfo}
cat.save(resource)
# main entry point to create a thumbnail - will use implementation
# defined in settings.THUMBNAIL_GENERATOR (see settings.py)
def create_gs_thumbnail(instance, overwrite=False, check_bbox=False):
implementation = import_string(settings.THUMBNAIL_GENERATOR)
    return implementation(instance, overwrite, check_bbox)
# /Gooey-1.2.0a0.tar.gz/Gooey-1.2.0a0/gooey/gui/application/application.py
import sys
from json import JSONDecodeError
import six
import wx # type: ignore
from gooey import Events
from gooey.gui import events
from gooey.gui import host
from gooey.gui import state as s
from gooey.gui.application.components import RHeader, ProgressSpinner, ErrorWarning, RTabbedLayout, \
RSidebar, RFooter
from gooey.gui.components import modals
from gooey.gui.components.config import ConfigPage
from gooey.gui.components.config import TabbedConfigPage
from gooey.gui.components.console import Console
from gooey.gui.components.menubar import MenuBar
from gooey.gui.lang.i18n import _
from gooey.gui.processor import ProcessController
from gooey.gui.pubsub import pub
from gooey.gui.state import FullGooeyState
from gooey.gui.state import initial_state, ProgressEvent, TimingEvent
from gooey.gui.util.wx_util import transactUI, callafter
from gooey.python_bindings import constants
from gooey.python_bindings.dynamics import unexpected_exit_explanations, \
deserialize_failure_explanations
from gooey.python_bindings.types import PublicGooeyState
from gooey.python_bindings.types import Try
from gooey.util.functional import assoc
from gooey.gui.util.time import Timing
from rewx import components as c # type: ignore
from rewx import wsx # type: ignore
from rewx.core import Component, Ref # type: ignore
class RGooey(Component):
"""
Main Application container for Gooey.
State Management
----------------
Pending further refactor, state is tracked in two places:
1. On this instance (React style)
2. In the WX Form Elements themselves[0]
As needed, these two states are merged to form the `FullGooeyState`, which
is the canonical state object against which all logic runs.
Dynamic Updates
---------------
[0] this is legacy and will (eventually) be refactored away
"""
def __init__(self, props):
super().__init__(props)
self.frameRef = Ref()
self.consoleRef = Ref()
self.configRef = Ref()
self.buildSpec = props
self.state = initial_state(props)
self.headerprops = lambda state: {
'background_color': self.buildSpec['header_bg_color'],
'title': state['title'],
'show_title': state['header_show_title'],
'subtitle': state['subtitle'],
'show_subtitle': state['header_show_subtitle'],
'flag': wx.EXPAND,
'height': self.buildSpec['header_height'],
'image_uri': state['image'],
'image_size': (six.MAXSIZE, self.buildSpec['header_height'] - 10)}
self.fprops = lambda state: {
'buttons': state['buttons'],
'progress': state['progress'],
'timing': state['timing'],
'bg_color': self.buildSpec['footer_bg_color'],
'flag': wx.EXPAND,
}
self.clientRunner = ProcessController.of(self.buildSpec)
self.timer = None
def component_did_mount(self):
pub.subscribe(events.WINDOW_START, self.onStart)
pub.subscribe(events.WINDOW_RESTART, self.onStart)
pub.subscribe(events.WINDOW_STOP, self.handleInterrupt)
pub.subscribe(events.WINDOW_CLOSE, self.handleClose)
pub.subscribe(events.WINDOW_CANCEL, self.handleCancel)
pub.subscribe(events.WINDOW_EDIT, self.handleEdit)
pub.subscribe(events.CONSOLE_UPDATE, self.consoleRef.instance.logOutput)
pub.subscribe(events.EXECUTION_COMPLETE, self.handleComplete)
pub.subscribe(events.PROGRESS_UPDATE, self.updateProgressBar)
pub.subscribe(events.TIME_UPDATE, self.updateTime)
        # Top level wx close event
frame: wx.Frame = self.frameRef.instance
frame.Bind(wx.EVT_CLOSE, self.handleClose)
frame.SetMenuBar(MenuBar(self.buildSpec))
self.timer = Timing(frame)
if self.state['fullscreen']:
frame.ShowFullScreen(True)
        if self.state['show_preview_warning'] and 'unittest' not in sys.modules:
wx.MessageDialog(None, caption='YOU CAN DISABLE THIS MESSAGE',
message="""
This is a preview build of 1.2.0! There may be instability or
broken functionality. If you encounter any issues, please open an issue
here: https://github.com/chriskiehl/Gooey/issues
The current stable version is 1.0.8.
NOTE! You can disable this message by setting `show_preview_warning` to False.
e.g.
`@Gooey(show_preview_warning=False)`
""").ShowModal()
def getActiveConfig(self):
return [item
for child in self.configRef.instance.Children
# we descend down another level of children to account
# for Notebook layouts (which have wrapper objects)
for item in [child] + list(child.Children)
if isinstance(item, ConfigPage)
or isinstance(item, TabbedConfigPage)][self.state['activeSelection']]
def getActiveFormState(self):
"""
        This boiler-plate and manual interrogation of the UI's
state is required until we finish porting the Config Form
over to rewx (which is a battle left for another day given
its complexity)
"""
return self.getActiveConfig().getFormState()
def fullState(self):
"""
        Re: final porting is a TODO. For now we merge the UI
state into the main tracked state.
"""
formState = self.getActiveFormState()
return s.combine(self.state, self.props, formState)
def onStart(self, *args, **kwargs):
"""
Dispatches the start behavior.
"""
if Events.VALIDATE_FORM in self.state['use_events']:
self.runAsyncValidation()
else:
self.startRun()
def startRun(self):
"""
Kicks off a run by invoking the host's code
and pumping its stdout to Gooey's Console window.
"""
state = self.fullState()
if state['clear_before_run']:
self.consoleRef.instance.Clear()
self.set_state(s.consoleScreen(_, state))
self.clientRunner.run(s.buildInvocationCmd(state))
self.timer.start()
self.frameRef.instance.Layout()
for child in self.frameRef.instance.Children:
child.Layout()
def syncExternalState(self, state: FullGooeyState):
"""
Sync the UI's state to what the host program has requested.
"""
self.getActiveConfig().syncFormState(s.activeFormState(state))
self.frameRef.instance.Layout()
for child in self.frameRef.instance.Children:
child.Layout()
def handleInterrupt(self, *args, **kwargs):
if self.shouldStopExecution():
self.clientRunner.stop()
def handleComplete(self, *args, **kwargs):
self.timer.stop()
if self.clientRunner.was_success():
self.handleSuccessfulRun()
if Events.ON_SUCCESS in self.state['use_events']:
self.runAsyncExternalOnCompleteHandler(was_success=True)
else:
self.handleErrantRun()
if Events.ON_ERROR in self.state['use_events']:
self.runAsyncExternalOnCompleteHandler(was_success=False)
def handleSuccessfulRun(self):
if self.state['return_to_config']:
self.set_state(s.editScreen(_, self.state))
else:
self.set_state(s.successScreen(_, self.state))
if self.state['show_success_modal']:
wx.CallAfter(modals.showSuccess)
def handleErrantRun(self):
if self.clientRunner.wasForcefullyStopped:
self.set_state(s.interruptedScreen(_, self.state))
else:
self.set_state(s.errorScreen(_, self.state))
if self.state['show_failure_modal']:
wx.CallAfter(modals.showFailure)
def successScreen(self):
strings = {'title': _('finished_title'), 'subtitle': _('finished_msg')}
self.set_state(s.success(self.state, strings, self.buildSpec))
def handleEdit(self, *args, **kwargs):
self.set_state(s.editScreen(_, self.state))
def handleCancel(self, *args, **kwargs):
if modals.confirmExit():
self.handleClose()
def handleClose(self, *args, **kwargs):
"""Stop any actively running client program, cleanup the top
level WxFrame and shutdown the current process"""
# issue #592 - we need to run the same onStopExecution machinery
# when the exit button is clicked to ensure everything is cleaned
# up correctly.
frame: wx.Frame = self.frameRef.instance
if self.clientRunner.running():
if self.shouldStopExecution():
self.clientRunner.stop()
frame.Destroy()
                # TODO: NOT exiting here would allow
                # spawning the Gooey UI to collect input params and then
                # returning control to the CLI
sys.exit()
else:
frame.Destroy()
sys.exit()
def shouldStopExecution(self):
return not self.state['show_stop_warning'] or modals.confirmForceStop()
def updateProgressBar(self, *args, progress=None):
self.set_state(s.updateProgress(self.state, ProgressEvent(progress=progress)))
def updateTime(self, *args, elapsed_time=None, estimatedRemaining=None, **kwargs):
event = TimingEvent(elapsed_time=elapsed_time, estimatedRemaining=estimatedRemaining)
self.set_state(s.updateTime(self.state, event))
def handleSelectAction(self, event):
self.set_state(assoc(self.state, 'activeSelection', event.Selection))
def runAsyncValidation(self):
def handleHostResponse(hostState: PublicGooeyState):
self.set_state(s.finishUpdate(self.state))
currentState = self.fullState()
self.syncExternalState(s.mergeExternalState(currentState, hostState))
if not s.has_errors(self.fullState()):
self.startRun()
else:
self.set_state(s.editScreen(_, s.show_alert(self.fullState())))
def onComplete(result: Try[PublicGooeyState]):
result.onSuccess(handleHostResponse)
result.onError(self.handleHostError)
self.set_state(s.beginUpdate(self.state))
fullState = self.fullState()
host.communicateFormValidation(fullState, callafter(onComplete))
def runAsyncExternalOnCompleteHandler(self, was_success):
def handleHostResponse(hostState):
if hostState:
self.syncExternalState(s.mergeExternalState(self.fullState(), hostState))
def onComplete(result: Try[PublicGooeyState]):
result.onError(self.handleHostError)
result.onSuccess(handleHostResponse)
if was_success:
host.communicateSuccessState(self.fullState(), callafter(onComplete))
else:
host.communicateErrorState(self.fullState(), callafter(onComplete))
def handleHostError(self, ex):
"""
        All async errors get pumped here, where we dump out the
        error along with (hopefully) a lot of helpful debugging
        info for the user.
"""
try:
self.set_state(s.errorScreen(_, self.state))
self.consoleRef.instance.appendText(str(ex))
self.consoleRef.instance.appendText(str(getattr(ex, 'output', '')))
self.consoleRef.instance.appendText(str(getattr(ex, 'stderr', '')))
raise ex
except JSONDecodeError as e:
self.consoleRef.instance.appendText(deserialize_failure_explanations)
except Exception as e:
self.consoleRef.instance.appendText(unexpected_exit_explanations)
finally:
self.set_state({**self.state, 'fetchingUpdate': False})
def render(self):
return wsx(
[c.Frame, {'title': self.buildSpec['program_name'],
'background_color': self.buildSpec['body_bg_color'],
'double_buffered': True,
'min_size': (400, 300),
'icon_uri': self.state['images']['programIcon'],
'size': self.buildSpec['default_size'],
'ref': self.frameRef},
[c.Block, {'orient': wx.VERTICAL},
[RHeader, self.headerprops(self.state)],
[c.StaticLine, {'style': wx.LI_HORIZONTAL, 'flag': wx.EXPAND}],
[ProgressSpinner, {'show': self.state['fetchingUpdate']}],
[ErrorWarning, {'show': self.state['show_error_alert'],
'uri': self.state['images']['errorIcon']}],
[Console, {**self.buildSpec,
'flag': wx.EXPAND,
'proportion': 1,
'show': self.state['screen'] == 'CONSOLE',
'ref': self.consoleRef}],
[RTabbedLayout if self.buildSpec['navigation'] == constants.TABBED else RSidebar,
{'bg_color': self.buildSpec['sidebar_bg_color'],
'label': 'Some Action!',
'tabbed_groups': self.buildSpec['tabbed_groups'],
'show_sidebar': self.state['show_sidebar'],
'ref': self.configRef,
'show': self.state['screen'] == 'FORM',
'activeSelection': self.state['activeSelection'],
'options': list(self.buildSpec['widgets'].keys()),
'on_change': self.handleSelectAction,
'config': self.buildSpec['widgets'],
'flag': wx.EXPAND,
'proportion': 1}],
[c.StaticLine, {'style': wx.LI_HORIZONTAL, 'flag': wx.EXPAND}],
[RFooter, self.fprops(self.state)]]]
) | PypiClean |
/GhTrack-1.1.1.tar.gz/GhTrack-1.1.1/docs/examples/GhTrack.rst | GhTrack Class
=============
This is the main class of the github-track module.
Configuration file
------------------
You can instantiate the class *GhTrack* by passing your own configuration file. Otherwise it will load the default
configuration file. That default file looks like the one below.
If you are providing a file, it has to be the absolute path of the file.
*auth.twilioapi* can be ignored.
*github.token* is your development token that you can create through the GitHub portal.
If you are planning to pull `pull requests` more than 60 times, it is recommended to set this value to a valid token.
An invalid token will not allow you to query `pull requests`.
*email.to* the email address to send the email to.
*email.from* a valid email address, verified by SendGrid, in order to be able to send emails.
*email.subject* the subject of your email.
*email.sendGridApi* your SendGrid API key, used to authenticate and send email.
If you provide an invalid `api key`, the application will not be able to send any email.
The *repo* section is the owner and the name of the repo you want to pull requests from. By default it is "kubernetes/kubernetes"
.. code-block:: yaml
settings:
auth:
twilioapi: ""
github:
token: ""
email:
to: [email protected]
from: [email protected]
subject: "Subject of the pull request"
sendGridApi: ""
repo:
user: kubernetes
name: kubernetes
Set up the class GhTrack
------------------------
To begin, you need to create an instance of the class GhTrack.
As stated above, you can provide a file with configuration values,
pass the necessary params during object creation, or
create the instance without any params and keep the default values the application provides.
.. code-block:: python
>>> g = GhTrack()
>>> g = GhTrack(file_name="/absolute/path/to/the/config/file")
>>> g = GhTrack(token="github token", email="email to receive emails", user="repo owner", repo="public repo")
Set the age of the pull requests
--------------------------------
By default, pull requests are not older than 7 days.
.. code-block:: python
>>> g.setAge(0)
# That means getPulls will return only pulls created today
Get repo full information
-------------------------
.. code-block:: python
>>> repo = g.getRepo()
>>> repo["name"]
'kubernetes'
>>> repo["full_name"]
'kubernetes/kubernetes'
Get the pull requests of the repo
---------------------------------
.. code-block:: python
>>> pulls = g.getPulls()
# if there are pulls, len() will return the number of pulls
>>> len(pulls)
30
Get pull requests by status open or closed
------------------------------------------
.. code-block:: python
>>> pulls = g.getPullsByStatus(status="open")
# if there are pulls, each returned pull has the requested status
>>> pulls[0]["state"]
'open'
Send Email Or Print on the Console
----------------------------------
Here you can decide to print the HTML summary of the pull requests to the console or to send it to your configured email.
If the SendGrid API key is not set, you will not be able to send emails.
If the SendGrid API key is invalid, you will see an exception on the screen.
.. code-block:: python
>>> summary = g.sendEmailOrPrintConsole(emailNotConsole=False)
>>> summary
# outputs the HTML format of the summary.
| PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/localization/da/TeX.js | MathJax.Localization.addTranslation("da","TeX",{version:"2.7.9",isLoaded:true,strings:{ExtraOpenMissingClose:"Ekstra venstreklammeparentes eller manglende h\u00F8jreklammeparentes",ExtraCloseMissingOpen:"Ekstra h\u00F8jreklammeparentes eller manglende venstreklammeparentes",MissingLeftExtraRight:"Manglende \\left eller ekstra \\right",MissingScript:"Manglende h\u00E6vet skrift eller s\u00E6nket skrift argument",ExtraLeftMissingRight:"Ekstra \\left eller manglende \\right",Misplaced:"Malplaceret %1",MissingOpenForSub:"Manglende venstreklammeparentes til s\u00E6nket skrift",MissingOpenForSup:"Manglende venstreklammeparentes til h\u00E6vet skrift",AmbiguousUseOf:"Flertydig brug af %1",EnvBadEnd:"\\begin{%1} sluttede med \\end{%2}",EnvMissingEnd:"Manglende \\end{%1}",MissingBoxFor:"Manglende boks for %1",MissingCloseBrace:"Manglende h\u00F8jreklammeparentes",UndefinedControlSequence:"Udefineret kontrolsekvens %1",DoubleExponent:"Dobbelt eksponent: brug klammeparenteser til at tydeligg\u00F8re",DoubleSubscripts:"Dobbelt s\u00E6nket skrift: brug klammeparenteser til at tydeligg\u00F8re",DoubleExponentPrime:"M\u00E6rke for\u00E5rsager dobbelt eksponent: bruge klammeparenteser til at tydeligg\u00F8re",CantUseHash1:"Du kan ikke bruge 'makro parameter tegnet #' i matematik tilstand",MisplacedMiddle:"%1 skal v\u00E6re inden for \\left og \\right",MisplacedLimits:"%1 er kun tilladt p\u00E5 operatorer",MisplacedMoveRoot:"%1 kan kun v\u00E6re indenfor en root",MultipleCommand:"For mange %1",IntegerArg:"Argumentet til %1 skal v\u00E6re et heltal",NotMathMLToken:"%1 er ikke et token element",InvalidMathMLAttr:"Ugyldig MathML attribut: %1",UnknownAttrForElement:"%1 er ikke en genkendt attribut for %2",MaxMacroSub1:"Det maksimale antal makro substitutioner i MathJax er overskredet; er der et rekursivt makrokald?",MaxMacroSub2:"Det maksimale antal substitutioner i MathJax er overskredet; er der et rekursivt LaTeX milj\u00F8?",MissingArgFor:"Manglende argument til %1",ExtraAlignTab:"For mange \u0026 i \\cases tekst",BracketMustBeDimension:"Klammeargument til %1 skal v\u00E6re en dimension",InvalidEnv:"Ugyldigt navn '%1'",UnknownEnv:"Ukendt navn '%1'",ExtraCloseLooking:"Ekstra h\u00F8jreklammeparentes under s\u00F8gning efter %1",MissingCloseBracket:"Kunne ikke finde det afsluttende ']' argument til %1",MissingOrUnrecognizedDelim:"Manglende eller ukendt skilletegn for %1",MissingDimOrUnits:"Manglende dimension eller enheder for %1",TokenNotFoundForCommand:"Kunne ikke finde %1 for %2",MathNotTerminated:"Matematik ikke afsluttet i tekstfeltet",IllegalMacroParam:"Ulovlig makro parameter reference",MaxBufferSize:"Intern bufferst\u00F8rrelse for MathJax er overskredet; er der et rekursivt makrokald?",CommandNotAllowedInEnv:"%1 er ikke tilladt i milj\u00F8et %2",MultipleLabel:"Etiketten '%1' er defineret flere gange",CommandAtTheBeginingOfLine:"%1 skal v\u00E6re i begyndelsen af linjen",IllegalAlign:"Ulovlig justering angivet i %1",BadMathStyleFor:"D\u00E5rlig matematik stil for %1",PositiveIntegerArg:"Argumentet til %1 skal v\u00E6re et positivt heltal",ErroneousNestingEq:"Fejlagtig indlejring af ligningsstrukturer",MultlineRowsOneCol:"R\u00E6kker indenfor milj\u00F8et %1 skal have pr\u00E6cis \u00E9n kolonne",MultipleBBoxProperty:"%1 angivet to gange i %2",InvalidBBoxProperty:"'%1' ligner ikke en farve, en padding dimension eller en stil",ExtraEndMissingBegin:"Ekstra %1 eller manglende 
\\begingroup",GlobalNotFollowedBy:"%1 ikke efterfulgt af \\let, \\def eller \\newcommand",UndefinedColorModel:"Farvemodel '%1' ikke defineret",ModelArg1:"Farvev\u00E6rdier for modellen %1 kr\u00E6ver 3 tal",InvalidDecimalNumber:"Ugyldigt decimaltal",ModelArg2:"Farvev\u00E6rdier for modellen %1 skal v\u00E6re mellem %2 og %3",InvalidNumber:"Ugyldigt tal",NewextarrowArg1:"F\u00F8rste argument til %1 skal v\u00E6re navnet p\u00E5 en kontrol sekvens",NewextarrowArg2:"Andet argument til %1 skal v\u00E6re to heltal adskilt af et komma",NewextarrowArg3:"Tredje argument til %1 skal v\u00E6re nummeret p\u00E5 et Unicode-tegn",NoClosingChar:"Kan ikke finde den afsluttende %1",IllegalControlSequenceName:"Ulovligt kontrol sekvens navn for %1",IllegalParamNumber:"Ulovligt antal parametre angivet i %1",MissingCS:"%1 skal efterf\u00F8lges af en kontrolsekvens",CantUseHash2:"Ulovlig brug af # i skabelon for %1",SequentialParam:"Parametre for %1 skal v\u00E6re nummereret fortl\u00F8bende",MissingReplacementString:"Manglende erstatningsstreng til definition af %1",MismatchUseDef:"Brug af %1 stemmer ikke overens med dens definition",RunawayArgument:"L\u00F8bsk argument for %1?",NoClosingDelim:"Kan ikke finde afsluttende skilletegn for %1"}});MathJax.Ajax.loadComplete("[MathJax]/localization/da/TeX.js"); | PypiClean |
/Assimulo-3.0.tar.gz/Assimulo-3.0/assimulo/examples/ida_with_initial_sensitivity.py |
# Copyright (C) 2010 Modelon AB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as N
import pylab as P
import nose
from assimulo.solvers import IDA
from assimulo.problem import Implicit_Problem
def run_example(with_plots=True):
"""
    This example shows how to use Assimulo and IDA for simulating sensitivities
    for initial conditions.::
    0 = dy1/dt + (k01+k21+k31)*y1 - k12*y2 - k13*y3 - b1
0 = dy2/dt - k21*y1 + (k02+k12)*y2
0 = dy3/dt - k31*y1 + k13*y3
y1(0) = p1, y2(0) = p2, y3(0) = p3
p1=p2=p3 = 0
See http://sundials.2283335.n4.nabble.com/Forward-sensitivities-for-initial-conditions-td3239724.html
on return:
- :dfn:`imp_mod` problem instance
- :dfn:`imp_sim` solver instance
"""
def f(t, y, yd,p):
y1,y2,y3 = y
yd1,yd2,yd3 = yd
k01 = 0.0211
k02 = 0.0162
k21 = 0.0111
k12 = 0.0124
k31 = 0.0039
k13 = 0.000035
b1 = 49.3
res_0 = -yd1 -(k01+k21+k31)*y1+k12*y2+k13*y3+b1
res_1 = -yd2 + k21*y1-(k02+k12)*y2
res_2 = -yd3 + k31*y1-k13*y3
return N.array([res_0,res_1,res_2])
#The initial conditions
y0 = [0.0,0.0,0.0] #Initial conditions for y
yd0 = [49.3,0.,0.]
p0 = [0.0, 0.0, 0.0] #Initial conditions for parameters
yS0 = N.array([[1,0,0],[0,1,0],[0,0,1.]])
#Create an Assimulo implicit problem
imp_mod = Implicit_Problem(f,y0,yd0,p0=p0,name='Example: Computing Sensitivities')
#Sets the options to the problem
imp_mod.yS0=yS0
#Create an Assimulo explicit solver (IDA)
imp_sim = IDA(imp_mod)
    #Sets the parameters
imp_sim.rtol = 1e-7
imp_sim.atol = 1e-6
imp_sim.pbar = [1,1,1] #pbar is used to estimate the tolerances for the parameters
imp_sim.report_continuously = True #Need to be able to store the result using the interpolate methods
    imp_sim.sensmethod = 'SIMULTANEOUS' #Defines the sensitivity method used
    imp_sim.suppress_sens = False #Don't suppress the sensitivity variables in the error test.
#Simulate
t, y, yd = imp_sim.simulate(400) #Simulate 400 seconds
#Basic test
nose.tools.assert_almost_equal(y[-1][0], 1577.6552477,3)
nose.tools.assert_almost_equal(y[-1][1], 611.9574565, 3)
nose.tools.assert_almost_equal(y[-1][2], 2215.88563217, 3)
nose.tools.assert_almost_equal(imp_sim.p_sol[0][1][0], 1.0)
#Plot
if with_plots:
P.figure(1)
P.subplot(221)
P.plot(t, N.array(imp_sim.p_sol[0])[:,0],
t, N.array(imp_sim.p_sol[0])[:,1],
t, N.array(imp_sim.p_sol[0])[:,2])
P.title("Parameter p1")
P.legend(("p1/dy1","p1/dy2","p1/dy3"))
P.subplot(222)
P.plot(t, N.array(imp_sim.p_sol[1])[:,0],
t, N.array(imp_sim.p_sol[1])[:,1],
t, N.array(imp_sim.p_sol[1])[:,2])
P.title("Parameter p2")
P.legend(("p2/dy1","p2/dy2","p2/dy3"))
P.subplot(223)
P.plot(t, N.array(imp_sim.p_sol[2])[:,0],
t, N.array(imp_sim.p_sol[2])[:,1],
t, N.array(imp_sim.p_sol[2])[:,2])
P.title("Parameter p3")
P.legend(("p3/dy1","p3/dy2","p3/dy3"))
P.subplot(224)
P.title('ODE Solution')
P.plot(t, y)
P.suptitle(imp_mod.name)
P.show()
return imp_mod, imp_sim
if __name__=='__main__':
mod,sim = run_example() | PypiClean |
/JPype1-1.4.1-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl/jpype/pickle.py | from __future__ import absolute_import
import _jpype
import pickle
from copyreg import dispatch_table
# TODO: Support use of a custom classloader with the unpickler.
# TODO: Use copyreg to pickle a JProxy
__ALL__ = ['JPickler', 'JUnpickler']
# This must exist as a global, the real unserializer is created by the JUnpickler
class JUnserializer(object):
def __call__(self, *args):
raise pickle.UnpicklingError("Unpickling Java requires JUnpickler")
class _JDispatch(object):
"""Dispatch for Java classes and objects.
Python does not have a good way to register a reducer that applies to
many classes, thus we will substitute the usual dictionary with a
class that can produce reducers as needed.
"""
def __init__(self, dispatch):
self._encoder = _jpype.JClass('org.jpype.pickle.Encoder')()
self._builder = JUnserializer()
self._dispatch = dispatch
# Extension dispatch table holds reduce method
self._call = self.reduce
# Pure Python _Pickler uses get()
def get(self, cls):
if not issubclass(cls, (_jpype.JClass, _jpype.JObject)):
return self._dispatch.get(cls)
return self._call
# Python3 cPickler uses __getitem__()
def __getitem__(self, cls):
if not issubclass(cls, (_jpype.JClass, _jpype.JObject)):
return self._dispatch[cls]
return self._call
def reduce(self, obj):
byte = bytes(self._encoder.pack(obj))
return (self._builder, (byte, ))
class JPickler(pickle.Pickler):
"""Pickler overloaded to support Java objects
Parameters:
file: a file or other writeable object.
*args: any arguments support by the native pickler.
Raises:
        java.io.NotSerializableException: if a class or one of its
            members is not serializable
        java.io.InvalidClassException: if an error occurs in constructing
            a serialization.
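    Example (a minimal sketch; assumes a running JVM started via
    ``jpype.startJVM()`` and a Java object ``obj`` to serialize)::

        from jpype.pickle import JPickler, JUnpickler
        with open("obj.pic", "wb") as fd:
            JPickler(fd).dump(obj)
        with open("obj.pic", "rb") as fd:
            obj2 = JUnpickler(fd).load()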
"""
def __init__(self, file, *args, **kwargs):
pickle.Pickler.__init__(self, file, *args, **kwargs)
# In Python3 we need to hook into the dispatch table for extensions
self.dispatch_table = _JDispatch(dispatch_table)
class JUnpickler(pickle.Unpickler):
"""Unpickler overloaded to support Java objects
Parameters:
file: a file or other readable object.
*args: any arguments support by the native unpickler.
Raises:
java.lang.ClassNotFoundException: if a serialized class is not
found by the current classloader.
java.io.InvalidClassException: if the serialVersionUID for the
class does not match, usually as a result of a new jar
version.
java.io.StreamCorruptedException: if the pickle file has been
altered or corrupted.
"""
def __init__(self, file, *args, **kwargs):
self._decoder = _jpype.JClass('org.jpype.pickle.Decoder')()
pickle.Unpickler.__init__(self, file, *args, **kwargs)
def find_class(self, module, cls):
"""Specialization for Java classes.
We just need to substitute the stub class for a real
one which points to our decoder instance.
"""
if cls == "JUnserializer":
decoder = self._decoder
class JUnserializer(object):
def __call__(self, *args):
return decoder.unpack(args[0])
return JUnserializer
return pickle.Unpickler.find_class(self, module, cls) | PypiClean |
/KivyAuth-2.3.3-py3-none-any.whl/kivyauth/desktop/facebook_auth.py | import requests
from oauthlib.oauth2 import WebApplicationClient
import json
import webbrowser
import random
from kivyauth.desktop.utils import (
request,
redirect,
is_connected,
start_server,
app,
_close_server_pls,
port,
stop_login,
)
from kivy.app import App
from kivy.clock import Clock
# facebook configuration
FACEBOOK_CLIENT_ID = ""
FACEBOOK_CLIENT_SECRET = ""
fb_authorization_endpoint = "https://www.facebook.com/v15.0/dialog/oauth?"
fb_token_endpoint = "https://graph.facebook.com/v15.0/oauth/access_token?"
fb_userinfo_endpoint = "https://graph.facebook.com/v15.0/me?"
client_facebook = None
event_success_listener = None
event_error_listener = None
__all__ = ("initialize_fb", "login_facebook", "logout_facebook")
def initialize_fb(success_listener, error_listener, client_id=None, client_secret=None):
a = App.get_running_app()
a.bind(on_stop=lambda *args: _close_server_pls(port))
global event_success_listener
event_success_listener = success_listener
global event_error_listener
event_error_listener = error_listener
global FACEBOOK_CLIENT_ID
FACEBOOK_CLIENT_ID = client_id
global FACEBOOK_CLIENT_SECRET
FACEBOOK_CLIENT_SECRET = client_secret
global client_facebook
client_facebook = WebApplicationClient(FACEBOOK_CLIENT_ID)
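# Example usage (a sketch; the credentials and listener callbacks below are
# hypothetical placeholders):
#   initialize_fb(on_success, on_error, client_id="APP_ID",
#                 client_secret="APP_SECRET")
#   login_facebook()  # opens the browser-based OAuth flow
#   logout_facebook(after_logout=lambda: print("logged out"))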
@app.route("/loginFacebook")
def loginFacebook():
st = "".join([random.choice("abcdefgh1234567") for _ in range(10)])
ds = "".join([random.choice("1234567890") for _ in range(10)])
request_uri = client_facebook.prepare_request_uri(
fb_authorization_endpoint,
redirect_uri=request.base_url + "/callbackFacebook",
scope=["email"],
state="{st=" + st + ",ds=" + ds + "}",
)
return redirect(request_uri)
@app.route("/loginFacebook/callbackFacebook")
def callbackFacebook():
code = request.args.get("code")
# prepare a request to get tokens
token_url, headers, body = client_facebook.prepare_token_request(
fb_token_endpoint,
client_id=FACEBOOK_CLIENT_ID,
client_secret=FACEBOOK_CLIENT_SECRET,
code=code,
redirect_url=request.base_url,
)
# send the request and get the response
token_response = requests.post(token_url, headers=headers, data=body)
# send the request and get the response
# app_token_response = requests.get(token_url, headers=headers, data=body)
headers = {
"Authorization": token_response.json()["access_token"]
+ " "
+ token_response.json()["token_type"]
}
request_uri = client_facebook.prepare_request_uri(
fb_userinfo_endpoint,
fields=["id", "name", "email", "picture"],
access_token=token_response.json()["access_token"],
)
# make the request and get the response
userinfo_response = requests.get(request_uri, headers=headers, data=None).json()
stop_login()
# parse the information
if userinfo_response.get("id"):
Clock.schedule_once(lambda *args: event_success_listener(
userinfo_response["name"],
userinfo_response["email"],
userinfo_response["picture"]["data"]["url"],
), 0)
return "<h2>Logged in using Facebook. Return back to the Kivy application</h2>"
event_error_listener()
return "User Email not available or not verified"
def login_facebook():
if is_connected():
start_server(port)
webbrowser.open("https://127.0.0.1:{}/loginFacebook".format(port), 1, False)
else:
event_error_listener()
def logout_facebook(after_logout):
"""
Logout from facebook login
:param: `after_logout` - Function to be called after logging out
"""
after_logout() | PypiClean |
/KratosMultilevelMonteCarloApplication-9.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/MultilevelMonteCarloApplication/XMC/xmc/multiCriterion.py | import numpy as np
# XMC imports
from xmc.tools import dynamicImport
from xmc.methodDefs_multiCriterion import flag, interpreter
class MultiCriterion:
"""
This class handles any number of criteria, the associated tolerances (with
possible splitting), and the combination of these criteria into an interpred output.
"""
def __init__(self, **keywordArgs):
# Attributes
self.criteria = keywordArgs.get("criteria")
self.inputsForCriterion = keywordArgs.get("inputsForCriterion")
self._interpreter = dynamicImport(keywordArgs.get("interpreter"))
self.splitCriteria = keywordArgs.get("splitCriteria", None)
self.toleranceToSplit = keywordArgs.get("toleranceToSplit", None)
self.flagStructure = interpreter.interpretationStructure
# Methods
self._flagFunction = dynamicImport(
keywordArgs.get("flag", "xmc.methodDefs_multiCriterion.flag.plainFlag")
)
def __getstate__(self):
# Captures what is normally pickled
state = self.__dict__.copy()
# Replace the PyCOMPSs-decorated entries with explicit label
for attribute, value in state.items():
if hasattr(value, "__module__") and "pycompss" in value.__module__:
state[attribute] = "unassigned_task_decorator"
# what we return here will be stored in the pickle
return state
# TODO Find a solution to re-assign original PyCOMPS-decorated attributes
# or at least an undecorated version.
# See reference below for a simple way to do that (but it is intrusive to PyCOMPSs)
# https://stackoverflow.com/a/33024739
# This is not currently necessary, just more robust.
# def __setstate__(self,newState):
# Re-create desired instance
# ...
# re-instate our __dict__ state from the pickled state
# self.__dict__.update(newState)
def tolerances(self, criteriaReferences=None):
"""
Returns the tolerances of the requested criteria.
"""
# TODO Is it possible to set default as criteria_references=range(0,len(self.criteria)-1) instead of using this conditional structure?
if criteriaReferences is None:
criteriaReferences = range(len(self.criteria))
tolerance_values = []
for i in criteriaReferences:
# TODO Do not add None values to tolerance_values
# It is perfectly normal that some MonoCriterion objects have tolerance None.
tolerance_values.append(self.criteria[i].tolerance)
return tolerance_values
def splittingParameter(self):
"""
Returns the splitting parameter currently applied
"""
return self.criteria[self.splitCriteria[0]].tolerance / self.toleranceToSplit
def setTolerance(self, criterionReferences, toleranceValues):
for i in range(len(criterionReferences)):
self.criteria[criterionReferences[i]].tolerance = toleranceValues[i]
def splitTolerance(self, splittingParameter):
if self.toleranceToSplit is not None:
split_tolerances = self.toleranceToSplit * np.array(
[splittingParameter, 1 - splittingParameter]
)
self.setTolerance(self.splitCriteria, split_tolerances)
else:
pass
def updateTolerance(self, criteriaToUpdate=None):
"""
Update the tolerances of self.criteria entries specified by criteriaToUpdate
"""
if criteriaToUpdate is None:
criteriaToUpdate = range(len(self.criteria))
for i in criteriaToUpdate:
if hasattr(self.criteria[i], "updateTolerance"):
self.criteria[i].updateTolerance()
def flag(self, values):
"""
Return the output of the criterion.
It takes the expected values as input, evaluate each elementary criterion on them and combine their boolean outputs into a dictionary flag.
This is currently a wrapper for the protected _flagFunction attribute.
"""
return self._flagFunction(
values, self.criteria, self.inputsForCriterion, self._interpreter
) | PypiClean |
/BobBuildTool-0.23.1.tar.gz/BobBuildTool-0.23.1/doc/manual/introduction.rst | Introduction
============
Bob is a build automation tool inspired by bitbake and portage. Its main
purpose is to build software packages, very much like packages in a Linux
distribution. It typically works on coarse entities i.e. not on individual
source files.
.. image:: /images/overview.png
In contrast to similar tools, Bob tries to focus on the following requirements
that are special when building complex embedded systems:
* Holistic approach: Bob can be used to describe and build the whole software
stack of a project. At the same time, Bob can be used to build, change and
test arbitrary parts of the project by involved developers.
* Cross compilation with multiple tool chains: Some of these tool chains may
have to be built during the build process. Bob can also be used to verify the
build environment, override specific host tools or abort the process if some
prerequisites are not met.
* Reproducible builds: Bob aims to provide a framework which enables
reproducible and even bit identical builds. To do so, each package declares
its required environment, tools and dependencies. With this information Bob
executes the build steps in a controlled environment.
* Continuous integration: building from live branches and not just fixed
tarballs is fully supported. All packages are described in a declarative way.
Using this information, the packages can be built locally but also as separate
jobs on a build server (e.g. Jenkins). Bob can track all dependencies between
the packages, and commits can trigger rebuilds of all affected packages.
* Variant management: because all packages declare their input environment
explicitly, Bob can compute if a package must be built differently or can be
reused from another build.
All in all Bob is just a framework for the controlled execution of shell
scripts. To maximize reproducibility, Bob tracks the environment and the input
of these scripts. If in doubt, Bob will rebuild the (supposedly) changed
package.
What sets Bob apart from other systems is the functional approach. Bob viewes
build recipes as (sometimes imperfect) functions where the source code and
dependencies are the input and the built package is the result. Every package
is kept separately and only declared dependencies are available to the package
build scripts.
In contrast to that, typical other package build systems describe dependencies
that must be satisfied in a shared root file system. This ensures that required
files are present at the known locations but it is perfectly ok that more is
there. Bob on the other hand has no concept of "installation". Packages are
computed with their scripts and from the declared input.
| PypiClean |
/MorletWaveModal-0.6.3-py3-none-any.whl/mwmodal/mw_modal.py | import numpy as np
import mwdi as mw
from scipy.optimize import least_squares
from warnings import warn
from .tools import *
class MorletWaveModal(object):
def __init__(self, free_response, fs, n_1=5, n_2=10, k_lo=10, num_k=10):
"""
Initiates the MorletWaveModal object
:param free_response: analyzed signal
:param fs: frequency of sampling
:param n_1: time spread parameter 1
:param n_2: time spread parameter 2
:param k_lo: starting `k` value
:param num_k: number of distributed `k` values in `[k_lo, k_hi]` range
:return:
"""
self.damp_lim = (0.0002, 0.02)
self.free_response = free_response
self.fs = fs
self.n_1 = n_1
self.n_2 = n_2
self.k_lo = k_lo
self.num_k = num_k
self.omega_id = None
self.identifier_morlet_wave = mw.MorletWave(free_response, fs)
return None
def identify_modal_parameters(self, omega_estimated, damping_estimated=0.0025):
"""
Wrapper method which performs identification of modal parameters for selected mode.
:param omega_estimated: initial natural circular frequency
:param damping_estimated: Damping ratio estimated for selected mode
        :return: omega_identified, damping_identified, amplitude_identified, phase_identified
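        Example (a minimal sketch; ``signal`` and the sampling frequency
        below are hypothetical measurement data):

            mwm = MorletWaveModal(free_response=signal, fs=10000)
            omega, damping, amp, phi = mwm.identify_modal_parameters(
                omega_estimated=2*np.pi*100, damping_estimated=0.005)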
"""
run_steps = True
while run_steps:
### Step #1 ###
self.initialization(omega_estimated, damping_estimated)
### Step #2 ###
omega_identified = self.identify_natural_frequency(omega_estimated)
### Step #3 ###
self.morlet_wave_integrate()
damping_identified = self.identify_damping()
similar = np.isclose(damping_estimated, damping_identified, \
rtol=0.1, atol=0)
if damping_estimated < damping_identified and not similar:
                warn(f'Estimated damping: {damping_estimated:.5} is smaller than ' \
f'identified: {damping_identified:.5}. Returning to step #1.')
damping_estimated = damping_identified
omega_estimated = omega_identified
else:
run_steps = False
if damping_estimated > damping_identified and not similar:
            warn(f'Estimated damping: {damping_estimated:.5} is higher than ' \
                 f'identified: {damping_identified:.5}; a higher k_lim may be available.')
### Step #4 ###
amplitude_identified, phase_identified = self.identify_amplitude_phase(damping_identified)
return omega_identified, damping_identified, amplitude_identified, phase_identified
def initialization(self, omega_estimated, damping_estimated):
"""
Step #1: Selection of the estimated damping ratio, determination of `k_lim` and
distribution of `k_j` values in range `[k_lo, k_hi=k_lim]`
:param omega_estimated: initial natural circular frequency
:param damping_estimated: Damping ratio estimated for selected mode
:return:
"""
if self.damp_lim[0] <= damping_estimated < 0.0025:
k_lim = 400
elif 0.0025 <= damping_estimated <= self.damp_lim[1]:
k_lim = k_limit_theoretical(self.n_1, damping_estimated) * 1
elif self.damp_lim[0] > damping_estimated:
            warn(f'Estimated damping {damping_estimated:.4f} is lower than limit {self.damp_lim[0]:.4f}, using limit.')
k_lim = 400
elif damping_estimated > self.damp_lim[1]:
            warn(f'Estimated damping {damping_estimated:.4f} is higher than limit {self.damp_lim[1]:.4f}, using limit.')
k_lim = k_limit_theoretical(self.n_1, self.damp_lim[1])
print('k_lim =', k_lim)
# test k_lim against signal length for the selected mode
n_signal = self.free_response.size
k_signal = get_k(omega_estimated, self.fs, n_signal, self.n_1)
print('k_signal =', k_signal)
if k_lim > k_signal:
warn(f'k_lim: {k_lim} exceeds signal length k_signal: {k_signal}. k_lim is adjusted to signal length.')
k_lim = k_signal
print('k_lim =', k_lim)
# check k_lo-k_hi range to avoid double k_j values.
num_k = k_lim - self.k_lo + 1
if num_k < self.num_k:
if num_k < 3:
raise Exception('Extend k_lo-k_hi range.')
else:
raise Exception(f'num_k is too large. Extend k_lo-k_hi range or set k_num to {num_k}.')
self.k = np.linspace(self.k_lo, k_lim, self.num_k, dtype=int)
return None
def identify_natural_frequency(self, omega_estimated):
"""
        Searches for the natural frequencies at the `k` values from the range.
        This method distributes `k` values in the selected range. The signal
        length is checked against the `k_hi` value. The Morlet wave integral is
        determined at all `k` values from the range, and the search for maximum
        values of the Morlet-wave integrals for `omega` in the `k` range is
        initiated.
Natural frequency is determined by averaging frequencies obtained in
`k` range.
:param omega_estimated: initial natural circular frequency
:return: identified natural frequency
"""
self.find_natural_frequencies(omega_estimated)
omega_identified = np.average(self.omega_id, weights=self.k)
return omega_identified
def identify_damping(self):
"""
Identifies damping using least square minimization of Eq.(15)
:return: identified damping
"""
if self.omega_id is None:
raise Exception(f'Natural frequencies not identified.')
damp = least_squares(self.identifier_morlet_wave.exact_mwdi_goal_function, \
x0=0.001, method='lm', \
args=(self.integral_ratio, self.n_1, self.n_2, self.k))
if not(damp.success):
raise Exception(f'Optimizer returned false:\n{damp.message}.')
return damp.x[0]
def identify_amplitude_phase(self, damping):
"""
Identifies amplitude and phase using LS minimization of Eqs.(16), (17)
:param damping: identified damping
:return: identified amplitude and phase
"""
if self.omega_id is None:
raise Exception(f'Natural frequencies not identified.')
######### Amplitude #########
amp_test = np.mean(get_amplitude(self.k, self.n_1, damping, \
self.omega_id, self.integral[0,]))
amplitude = least_squares(cost_fun_amplitude, x0=amp_test, method='lm', \
args=(self.omega_id, damping, self.k, \
self.n_1, np.abs(self.integral[0,])))
if not(amplitude.success):
raise Exception(f'Optimizer returned false for amplitude:\n{amplitude.message}.')
########## Phase ############
phi_tilde = -np.angle((-1)**(self.k) * self.integral[0,])
phi_test = np.mean(phi_tilde)
phase = least_squares(cost_fun_phase, x0=phi_test, method='trf', bounds=(-np.pi, np.pi), \
args=(self.k, np.abs(np.tan(phi_test)), phi_tilde))
if not(phase.success):
raise Exception(f'Optimizer returned false for phase:\nAmplitude: {amplitude.x[0]}\n{phase.message}.')
return amplitude.x[0], phase.x[0]
def find_natural_frequencies(self, omega):
"""
Searches for natural frequencies around initially defined `omega`.
:param omega: guessed circular natural frequency
:return:
"""
self.omega_id = np.zeros_like(self.k, dtype=float)
for i, k_ in enumerate(self.k):
self.omega_id[i] = self.identifier_morlet_wave.find_natural_frequency(omega, self.n_1, k_)
return None
def morlet_wave_integrate(self):
"""
        Calculates the signal-based Morlet integral ratio, Eq.(10).
:return:
"""
if self.omega_id is None:
raise Exception(f'Natural frequencies not identified.')
N_hi = get_number_of_samples(self.k[-1], np.min(self.omega_id), self.fs)
N_response = self.free_response.size
if N_hi > N_response:
            raise Exception(f'Wave function is larger ({N_hi}) than the signal ({N_response}).\n' \
                f'Omega: {np.around(self.omega_id, 1)}.\nPossibly k_lo={self.k_lo} is too low, try increasing it.')
N_k = self.k.size
psi = np.zeros((N_hi, N_k), dtype=np.complex128)
self.integral = np.zeros((2, N_k), dtype=np.complex128)
for j, n_ in enumerate((self.n_1, self.n_2)):
for i, k_ in enumerate(self.k):
psi_N = get_number_of_samples(k_, self.omega_id[i], self.fs)
psi[:psi_N, i] = self.identifier_morlet_wave.morlet_wave(self.omega_id[i], n_, k_)
temp = np.einsum('i,ij->ij', self.free_response[:N_hi], np.conj(psi))
self.integral[j,] = np.trapz(temp, dx=1/self.fs, axis=0)
self.integral_ratio = np.abs(self.integral[0]) / np.abs(self.integral[1])
return None | PypiClean |
/Haroun-0.1.5.tar.gz/Haroun-0.1.5/haroun/model.py | import torch
import numpy as np
import time
import copy
import matplotlib.pyplot as plt
class Model():
def __init__(self, network, optimizer, criterion, device):
super(Model, self).__init__()
self.net = network.to(device)
self.optim = optimizer
self.loss = criterion
self.device = device
print("Model initialized succssefully :)\n")
def train(self, train_data, val_data, epochs, patience, batch_size, learning_rate):
if self.optim == "adam":
self.optim = torch.optim.Adam(self.net.parameters(), lr=learning_rate)
best_loss = np.inf
self.patience = patience
self.train_losses = []
self.val_losses = []
self.achieved_epochs = []
train_inputs, train_outputs = train_data
val_inputs, val_outputs = val_data
total_train = train_inputs.size()[0]
total_val = val_inputs.size()[0]
print("Train loop:\n")
t0 = time.time()
for epoch in range(epochs):
self.net.train()
train_loss = 0
val_loss = 0
self.achieved_epochs.append(epoch)
train_permutation = torch.randperm(total_train)
val_permutation = torch.randperm(total_val)
for i in range(0, total_train, batch_size):
self.optim.zero_grad()
indices = train_permutation[i:i+batch_size]
batch_x, batch_y = train_inputs[indices], train_outputs[indices]
outputs = self.net(batch_x)
loss = self.loss(outputs, batch_y)
loss.backward()
self.optim.step()
train_loss += loss
train_loss = train_loss.cpu().detach() / total_train
self.train_losses.append(train_loss)
for j in range(0, total_val, batch_size):
self.net.eval()
indices = val_permutation[j:j+batch_size]
batch_x, batch_y = val_inputs[indices], val_outputs[indices]
outputs = self.net(batch_x)
loss = self.loss(outputs, batch_y)
val_loss += loss
val_loss = val_loss.cpu().detach() / total_val
self.val_losses.append(val_loss)
if val_loss < best_loss:
best_loss = val_loss
cost_patience = patience
self.state_dict = copy.deepcopy(self.net.state_dict())
print(f"\tEpoch: {epoch+1}/{epochs}, ",
f"Train Loss: {train_loss:.3g}, ",
f"Val Loss: {val_loss:.3g}")
else:
cost_patience -= 1
if cost_patience < 0:
print(f"\nEarly stopping after {patience} epochs of no improvements")
break
else:
print(f"\tEpoch: {epoch+1}/{epochs}, ",
f"Train Loss: {train_loss:.3g}, ",
f"Val Loss: {val_loss:.3g} - No improvement",
f"-> Remaining patience: {cost_patience}")
tf = time.time()
print(f"\nTrain finished successfully in {tf-t0:.3g}s")
def evaluate(self, test_data):
test_inputs, test_outputs = test_data
self.net.load_state_dict(self.state_dict)
predictions = self.net(test_inputs).cpu().detach().numpy()
correct = 0
wrong = 0
        for j, k in zip(predictions, test_outputs.cpu().detach()):
if np.argmax(j) == np.argmax(k):
correct +=1
else:
wrong += 1
score = 100 * correct / test_outputs.shape[0]
print(f'\nTest accuracy:{score:.3g}%')
print(f'Correct predictions: {correct}, Wrong predictions: {wrong}')
def save(self, path, checkpoint_name):
torch.save(self.state_dict, f"{path}/{checkpoint_name}.pth")
print("\nCheckpoint saved successfully :)")
def plot(self):
f, ax = plt.subplots()
ax.plot(self.achieved_epochs, self.train_losses, label='train')
ax.plot(self.achieved_epochs, self.val_losses, label='validation')
ax.set_title('model loss')
ax.set_ylabel('loss')
ax.set_xlabel('epoch')
no_improvement_line = self.achieved_epochs[-1] - self.patience
ax.axvline(x=no_improvement_line, color='r')
ax.legend(loc='upper center', frameon=False)
plt.show() | PypiClean |
/FAST-2.5.tar.gz/FAST-2.5/README.txt | FAST README
===========
The FAST Python package integrates a variety of Python packages that are
related to software testing.
exact - Definition and management of computational experiments
sqa - SQA scripts for managing software releases
swtest - Tools for software tests
testing - Tools for automatically finding and executing various types
of software tests
INSTALLATION
============
See the INSTALL.txt file.
LICENSE
=======
See the LICENSE.txt file.
GETTING STARTED
===============
Directories
fast - The root directory for FAST's Python source code
client - Scripts used to manage the client-server interactions
doc - Documentation about FAST capabilities
examples - Contains subdirectories for FAST packages that illustrate
the use of FAST tools
scripts - Contains command-line executables
server - Scripts used to manage the client-server interactions
test - Directories of test utilities
Documentation and Bug Tracking
The FAST Trac wiki supports documentation, issue tracking,
and browsing of the FAST subversion repository:
https://software.sandia.gov/trac/fast
The fast/doc directory contains documentation that is included
with the FAST software.
Authors
See the AUTHORS.txt file.
Project Managers
William E. Hart, [email protected]
Mailing List
FAST is managed with the Acro Project. A separate checkins mailing
list is managed for FAST, but otherwise the main Acro mailing lists
are used to manage the development of this software:
[email protected]
[email protected]
Web Page
https://software.sandia.gov/trac/fast
THIRD PARTY SOFTWARE
====================
FAST depends on the PyUtilib package, and several other standard Python packages. Testing FAST depends on the 'nose' and 'coverage' packages.
FAST does not rely on any third-party libraries.
| PypiClean |
/Netfoll_TL-2.0.1-py3-none-any.whl/netfoll_tl/events/__init__.py | from .raw import Raw
from .album import Album
from .chataction import ChatAction
from .messagedeleted import MessageDeleted
from .messageedited import MessageEdited
from .messageread import MessageRead
from .newmessage import NewMessage
from .userupdate import UserUpdate
from .callbackquery import CallbackQuery
from .inlinequery import InlineQuery
_HANDLERS_ATTRIBUTE = '__tl.handlers'
class StopPropagation(Exception):
"""
If this exception is raised in any of the handlers for a given event,
it will stop the execution of all other registered event handlers.
It can be seen as the ``StopIteration`` in a for loop but for events.
Example usage:
>>> from telethon import TelegramClient, events
>>> client = TelegramClient(...)
>>>
>>> @client.on(events.NewMessage)
... async def delete(event):
... await event.delete()
... # No other event handler will have a chance to handle this event
... raise StopPropagation
...
>>> @client.on(events.NewMessage)
... async def _(event):
... # Will never be reached, because it is the second handler
... pass
"""
# For some reason Sphinx wants the silly >>> or
# it will show warnings and look bad when generated.
pass
def register(event=None):
"""
Decorator method to *register* event handlers. This is the client-less
`add_event_handler()
<telethon.client.updates.UpdateMethods.add_event_handler>` variant.
Note that this method only registers callbacks as handlers,
and does not attach them to any client. This is useful for
external modules that don't have access to the client, but
still want to define themselves as a handler. Example:
>>> from telethon import events
>>> @events.register(events.NewMessage)
... async def handler(event):
... ...
...
>>> # (somewhere else)
...
>>> from telethon import TelegramClient
>>> client = TelegramClient(...)
>>> client.add_event_handler(handler)
Remember that you can use this as a non-decorator
through ``register(event)(callback)``.
Args:
event (`_EventBuilder` | `type`):
The event builder class or instance to be used,
for instance ``events.NewMessage``.
"""
if isinstance(event, type):
event = event()
elif not event:
event = Raw()
def decorator(callback):
handlers = getattr(callback, _HANDLERS_ATTRIBUTE, [])
handlers.append(event)
setattr(callback, _HANDLERS_ATTRIBUTE, handlers)
return callback
return decorator
def unregister(callback, event=None):
"""
Inverse operation of `register` (though not a decorator). Client-less
`remove_event_handler
<telethon.client.updates.UpdateMethods.remove_event_handler>`
variant. **Note that this won't remove handlers from the client**,
because it simply can't, so you would generally use this before
adding the handlers to the client.
This method is here for symmetry. You will rarely need to
unregister events, since you can simply just not add them
to any client.
If no event is given, all events for this callback are removed.
Returns how many callbacks were removed.
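    Example (a sketch; ``handler`` is a previously registered callback):

        >>> from telethon import events
        >>> events.unregister(handler)  # removes all events from the handler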
"""
found = 0
if event and not isinstance(event, type):
event = type(event)
handlers = getattr(callback, _HANDLERS_ATTRIBUTE, [])
handlers.append((event, callback))
i = len(handlers)
while i:
i -= 1
ev = handlers[i]
if not event or isinstance(ev, event):
del handlers[i]
found += 1
return found
def is_handler(callback):
"""
Returns `True` if the given callback is an
event handler (i.e. you used `register` on it).
"""
return hasattr(callback, _HANDLERS_ATTRIBUTE)
def list(callback):
"""
Returns a list containing the registered event
builders inside the specified callback handler.
"""
return getattr(callback, _HANDLERS_ATTRIBUTE, [])[:]
def _get_handlers(callback):
"""
Like ``list`` but returns `None` if the callback was never registered.
"""
return getattr(callback, _HANDLERS_ATTRIBUTE, None) | PypiClean |
/Findex_GUI-0.2.18-py3-none-any.whl/findex_gui/static/js/jquery.highlight.js | jQuery.extend({
highlight: function (node, re, nodeName, className) {
if (node.nodeType === 3) {
var match = node.data.match(re);
if (match) {
var highlight = document.createElement(nodeName || 'span');
highlight.className = className || 'highlight';
var wordNode = node.splitText(match.index);
wordNode.splitText(match[0].length);
var wordClone = wordNode.cloneNode(true);
highlight.appendChild(wordClone);
wordNode.parentNode.replaceChild(highlight, wordNode);
return 1; //skip added node in parent
}
} else if ((node.nodeType === 1 && node.childNodes) && // only element nodes that have children
!/(script|style)/i.test(node.tagName) && // ignore script and style nodes
!(node.tagName === nodeName.toUpperCase() && node.className === className)) { // skip if already highlighted
for (var i = 0; i < node.childNodes.length; i++) {
i += jQuery.highlight(node.childNodes[i], re, nodeName, className);
}
}
return 0;
}
});
jQuery.fn.unhighlight = function (options) {
var settings = { className: 'highlight', element: 'span' };
jQuery.extend(settings, options);
return this.find(settings.element + "." + settings.className).each(function () {
var parent = this.parentNode;
parent.replaceChild(this.firstChild, this);
parent.normalize();
}).end();
};
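// Example usage (a sketch; the element ID below is hypothetical):
//   $('#content').highlight(['foo', 'bar'], {className: 'match', wordsOnly: true});
//   $('#content').unhighlight({className: 'match'});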
jQuery.fn.highlight = function (words, options) {
var settings = { className: 'highlight', element: 'span', caseSensitive: false, wordsOnly: false };
jQuery.extend(settings, options);
if (words.constructor === String) {
words = [words];
}
words = jQuery.grep(words, function(word, i){
return word != '';
});
words = jQuery.map(words, function(word, i) {
return word.replace(/[-[\]{}()*+?.,\\^$|#\s]/g, "\\$&");
});
if (words.length == 0) { return this; };
var flag = settings.caseSensitive ? "" : "i";
var pattern = "(" + words.join("|") + ")";
if (settings.wordsOnly) {
pattern = "\\b" + pattern + "\\b";
}
var re = new RegExp(pattern, flag);
return this.each(function () {
jQuery.highlight(this, re, settings.element, settings.className);
});
}; | PypiClean |
/Flask-Swag-0.1.2.tar.gz/Flask-Swag-0.1.2/flask_swag/resources/swagger-ui/lang/en.js | 'use strict';
/* jshint quotmark: double */
window.SwaggerTranslator.learn({
"Warning: Deprecated":"Warning: Deprecated",
"Implementation Notes":"Implementation Notes",
"Response Class":"Response Class",
"Status":"Status",
"Parameters":"Parameters",
"Parameter":"Parameter",
"Value":"Value",
"Description":"Description",
"Parameter Type":"Parameter Type",
"Data Type":"Data Type",
"Response Messages":"Response Messages",
"HTTP Status Code":"HTTP Status Code",
"Reason":"Reason",
"Response Model":"Response Model",
"Request URL":"Request URL",
"Response Body":"Response Body",
"Response Code":"Response Code",
"Response Headers":"Response Headers",
"Hide Response":"Hide Response",
"Headers":"Headers",
"Try it out!":"Try it out!",
"Show/Hide":"Show/Hide",
"List Operations":"List Operations",
"Expand Operations":"Expand Operations",
"Raw":"Raw",
"can't parse JSON. Raw result":"can't parse JSON. Raw result",
"Model Schema":"Model Schema",
"Model":"Model",
"Click to set as parameter value":"Click to set as parameter value",
"apply":"apply",
"Username":"Username",
"Password":"Password",
"Terms of service":"Terms of service",
"Created by":"Created by",
"See more at":"See more at",
"Contact the developer":"Contact the developer",
"api version":"api version",
"Response Content Type":"Response Content Type",
"Parameter content type:":"Parameter content type:",
"fetching resource":"fetching resource",
"fetching resource list":"fetching resource list",
"Explore":"Explore",
"Show Swagger Petstore Example Apis":"Show Swagger Petstore Example Apis",
"Can't read from server. It may not have the appropriate access-control-origin settings.":"Can't read from server. It may not have the appropriate access-control-origin settings.",
"Please specify the protocol for":"Please specify the protocol for",
"Can't read swagger JSON from":"Can't read swagger JSON from",
"Finished Loading Resource Information. Rendering Swagger UI":"Finished Loading Resource Information. Rendering Swagger UI",
"Unable to read api":"Unable to read api",
"from path":"from path",
"server returned":"server returned"
}); | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/configs/lad/lad_r101_paa_r50_fpn_coco_1x.py | _base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
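# Example usage (a sketch; assumes the standard mmdetection training entry
# point, run from the repository root):
#   python tools/train.py configs/lad/lad_r101_paa_r50_fpn_coco_1x.py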
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64) | PypiClean |
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/utilities/__init__.py | from __future__ import absolute_import
from .azync import await_callback
from .coercing import (
coerce_sequence_of_list,
coerce_sequence_of_tuple,
is_item_iterable,
is_sequence_of_dict,
is_sequence_of_float,
is_sequence_of_int,
is_sequence_of_iterable,
is_sequence_of_list,
is_sequence_of_str,
is_sequence_of_tuple,
)
from .colors import (
Colormap,
black,
blue,
color_to_colordict,
color_to_rgb,
cyan,
green,
hex_to_rgb,
i_to_black,
i_to_blue,
i_to_green,
i_to_red,
i_to_rgb,
i_to_white,
is_color_hex,
is_color_light,
is_color_rgb,
red,
rgb_to_hex,
rgb_to_rgb,
white,
yellow,
)
from .datetime import now, timestamp
from .decorators import (
abstractclassmethod,
abstractstaticmethod,
memoize,
print_profile,
)
from .descriptors import Float, RGBColour
from .encoders import DataDecoder, DataEncoder
from .images import gif_from_images
from .itertools import (
flatten,
grouper,
iterable_like,
linspace,
meshgrid,
normalize_values,
pairwise,
remap_values,
window,
)
from .maps import geometric_key, geometric_key_xy, reverse_geometric_key
from .remote import download_file_from_remote
from .ssh import SSH
from .xfunc import XFunc
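# A quick illustrative sketch (not part of this module) of two of the helpers
# re-exported here; the geometric_key output assumes the default 3-decimal
# precision and may vary between versions:
#   >>> geometric_key([1.0001, 2.0, 3.0])
#   '1.000,2.000,3.000'
#   >>> list(pairwise([1, 2, 3]))
#   [(1, 2), (2, 3)]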
__all__ = [
"await_callback",
"is_sequence_of_str",
"is_sequence_of_int",
"is_sequence_of_float",
"is_sequence_of_tuple",
"is_sequence_of_list",
"is_sequence_of_dict",
"is_sequence_of_iterable",
"is_item_iterable",
"coerce_sequence_of_tuple",
"coerce_sequence_of_list",
"i_to_rgb",
"i_to_red",
"i_to_green",
"i_to_blue",
"i_to_white",
"i_to_black",
"is_color_rgb",
"is_color_hex",
"is_color_light",
"rgb_to_hex",
"rgb_to_rgb",
"hex_to_rgb",
"color_to_colordict",
"color_to_rgb",
"Colormap",
"red",
"green",
"blue",
"yellow",
"cyan",
"white",
"black",
"timestamp",
"now",
"abstractstaticmethod",
"abstractclassmethod",
"memoize",
"print_profile",
"Float",
"RGBColour",
"DataDecoder",
"DataEncoder",
"gif_from_images",
"normalize_values",
"remap_values",
"meshgrid",
"linspace",
"flatten",
"pairwise",
"window",
"iterable_like",
"grouper",
"geometric_key",
"reverse_geometric_key",
"geometric_key_xy",
"download_file_from_remote",
"SSH",
"XFunc",
] | PypiClean |
/AoikRegistryEditor-0.1.0-py3-none-any.whl/aoikregistryeditor/tkinterutil/listbox.py | from __future__ import absolute_import
from tkinter import Listbox
from tkinter.constants import DISABLED
from tkinter.constants import END
from tkinter.constants import HORIZONTAL
from tkinter.constants import VERTICAL
from tkinter.ttk import Scrollbar
from .eventor import Eventor
from .vidget import Vidget
#
class _HiddenScrollbar(Scrollbar):
"""
Scrollbar that hides if slider's both ends reached extreme position.
"""
def set(self, lo, hi):
"""
Set scrollbar slider's end positions.
@param lo: Low end position. A float value between 0.0 and 1.0.
@param hi: High end position. A float value between 0.0 and 1.0.
@return: None.
"""
# If scrollbar slider's both ends reached extreme position
if float(lo) <= 0.0 and float(hi) >= 1.0:
# Hide the scrollbar
self.grid_remove()
# If not scrollbar slider's both ends reached extreme position
else:
# Show the scrollbar
self.grid()
# Call super version
Scrollbar.set(self, lo, hi)
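# A minimal wiring sketch (illustrative, not used by this module): attach a
# _HiddenScrollbar to a plain Listbox so the bar disappears whenever all
# items fit into view.
def _demo_hidden_scrollbar():  # pragma: no cover - illustrative only
    import tkinter
    root = tkinter.Tk()
    listbox = Listbox(root)
    scrollbar = _HiddenScrollbar(root, orient=VERTICAL, command=listbox.yview)
    listbox.config(yscrollcommand=scrollbar.set)
    # Grid layout is required because _HiddenScrollbar hides itself with
    # grid_remove().
    listbox.grid(row=0, column=0, sticky='NSEW')
    scrollbar.grid(row=0, column=1, sticky='NS')
    for i in range(3):
        listbox.insert(END, 'item %d' % i)
    root.mainloop()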
#
class ListboxVidget(Vidget, Eventor):
"""
ListboxVidget contains a Listbox widget. It adds the following abilities:
- Store items of any type, unlike Listbox widget that only stores texts.
- Remember selected item even if the listbox widget lost focus.
- Notify pre-change and post-change events.
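    Example (illustrative; assumes a Tk root window named ``root`` exists):
        lv = ListboxVidget(items=['a', 'b'], master=root)
        lv.widget().grid()
        lv.indexcur_set(0)
        assert lv.itemcur() == 'a'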
"""
# Error raised when trying to change the listbox while a change is going on
class CircularCallError(ValueError):
pass
# Error raised when trying to change the listbox while it is disabled
class DisabledError(ValueError):
pass
# Event notified when the listbox's items are to be changed
ITEMS_CHANGE_SOON = 'ITEMS_CHANGE_SOON'
# Event notified when the listbox's items are changed
ITEMS_CHANGE_DONE = 'ITEMS_CHANGE_DONE'
# Event notified when the listbox's active item is to be changed
ITEMCUR_CHANGE_SOON = 'ITEMCUR_CHANGE_SOON'
# Event notified when the listbox's active item is changed
ITEMCUR_CHANGE_DONE = 'ITEMCUR_CHANGE_DONE'
# Events list
EVENTS = (
ITEMS_CHANGE_SOON,
ITEMS_CHANGE_DONE,
ITEMCUR_CHANGE_SOON,
ITEMCUR_CHANGE_DONE,
)
def __init__(
self,
items=None,
item_to_text=None,
normal_bg='',
normal_fg='',
active_bg='sky blue',
active_fg='white',
selected_bg='steel blue',
selected_fg='white',
master=None,
):
"""
Initialize object.
@param items: Items list.
@param item_to_text: Item-to-text function. Default is `str`.
@param normal_bg: Unselected item background color.
@param normal_fg: Unselected item foreground color.
@param active_bg: Active item background color. `Active` means the item
is selected (in general meaning) but the listbox has no focus.
@param active_fg: Active item foreground color. `Active` means the item
is selected (in general meaning) but the listbox has no focus.
@param selected_bg: Selected item background color. `Selected` means
the item is selected (in general meaning) and the listbox has focus.
@param selected_fg: Selected item foreground color. `Selected` means
the item is selected (in general meaning) and the listbox has focus.
@param master: Master widget.
@return: None.
"""
# Initialize Vidget.
# Create main frame widget.
Vidget.__init__(
self,
master=master,
)
# Initialize Eventor
Eventor.__init__(self)
# If items list is given
if items is not None:
# If items list is not list
if not isinstance(items, list):
# Raise error
raise TypeError(items)
# If items list is list.
# If items list is not given, or items list is given and is list
# Items list
self._items = items if items is not None else []
# Item-to-text function. Default is `str`.
self._item_to_text = item_to_text if item_to_text is not None else str
# Unselected item background color
self._normal_fg = normal_fg
# Unselected item foreground color
self._normal_bg = normal_bg
# Active item background color
self._active_fg = active_fg
# Active item foreground color
self._active_bg = active_bg
# Selected item background color
self._selected_fg = selected_fg
# Selected item foreground color
self._selected_bg = selected_bg
# Whether the listbox is changing
self._is_changing = False
# Active index. `-1` means void, i.e. no item is active.
self._indexcur = -1
# Whether active index is being reset to same value
self._is_resetting = False
# Create listbox widget
self._listbox = Listbox(
master=self.widget(),
relief='groove',
activestyle='none',
highlightthickness=0,
# Active index cache only supports single-selection mode for now.
# See 2N6OR.
selectmode='single',
)
# Set the listbox widget as config target
self.config_target_set(self._listbox)
# Create x-axis scrollbar
self._scrollbar_xview = _HiddenScrollbar(
self.widget(),
orient=HORIZONTAL,
)
# Create y-axis scrollbar
self._scrollbar_yview = _HiddenScrollbar(
self.widget(),
orient=VERTICAL,
)
# Mount scrollbars
self._listbox.config(xscrollcommand=self._scrollbar_xview.set)
self._listbox.config(yscrollcommand=self._scrollbar_yview.set)
self._scrollbar_xview.config(command=self._listbox.xview)
self._scrollbar_yview.config(command=self._listbox.yview)
# Bind single-click event handler
self._listbox.bind('<Button-1>', self._on_single_click)
# Bind double-click event handler
self._listbox.bind('<Double-Button-1>', self._on_double_click)
# Update listbox widget
self._listbox_widget_update(keep_active=False)
# Update widget
self._widget_update()
def _widget_update(self):
"""
Update widget.
@return: None.
"""
# Row 0 for listbox and y-axis scrollbar
self.widget().rowconfigure(0, weight=1)
# Row 1 for x-axis scrollbar
self.widget().rowconfigure(1, weight=0)
# Column 0 for listbox and x-axis scrollbar
self.widget().columnconfigure(0, weight=1)
# Column 1 for y-axis scrollbar
self.widget().columnconfigure(1, weight=0)
# Lay out listbox
self._listbox.grid(row=0, column=0, sticky='NSEW')
# Lay out x-axis scrollbar
self._scrollbar_xview.grid(row=1, column=0, sticky='EW')
# Lay out y-axis scrollbar
self._scrollbar_yview.grid(row=0, column=1, sticky='NS')
def is_enabled(self):
"""
Test whether the listbox is enabled.
@return: Boolean.
"""
# Return whether the listbox is enabled
return self._listbox.config('state')[4] != DISABLED
def is_changing(self):
"""
Test whether the listbox is changing.
@return: Boolean.
"""
# Return whether the listbox is changing
return self._is_changing
def is_resetting(self):
"""
Test whether the listbox is setting active index to the same value.
@return: Boolean.
"""
# Return whether the listbox is setting active index to the same value
return self._is_resetting
def size(self):
"""
Get number of items.
@return: Number of items.
"""
# Return number of items
return len(self._items)
def items(self):
"""
Get items list.
        Notice: do not change the returned list outside this class.
@return: Items list.
"""
# Return items list
return self._items
def items_set(
self,
items,
notify=True,
keep_active=False,
):
"""
Set items list.
        Notice: do not change the given list outside this class afterwards.
@param items: Items list.
@param notify: Whether notify pre-change and post-change events.
@param keep_active: Whether keep or clear active index.
@return: None.
"""
# If the items is not list
if not isinstance(items, list):
# Raise error
raise TypeError(items)
# If the items is list.
# If the listbox is disabled
if not self.is_enabled():
# Raise error
raise ListboxVidget.DisabledError()
# If the listbox is not disabled.
# If the listbox is changing
if self._is_changing:
# Raise error
raise ListboxVidget.CircularCallError()
# If the listbox is not changing.
# Set changing flag on
self._is_changing = True
# If notify events
if notify:
# Notify pre-change event
self.handler_notify(self.ITEMS_CHANGE_SOON)
# Store the new items
self._items = items
# Update listbox widget
self._listbox_widget_update(
keep_active=keep_active
)
# If notify events
if notify:
# Notify post-change event
self.handler_notify(self.ITEMS_CHANGE_DONE)
# Set changing flag off
self._is_changing = False
def index_is_valid(self, index):
"""
Test whether given index is valid. Notice -1 is not valid.
@param index: Index to test.
@return: Boolean.
"""
# Test whether given index is valid
        return 0 <= index < self.size()
def index_is_valid_or_void(self, index):
"""
Test whether given index is valid or is -1.
@param index: Index to test.
@return: Boolean.
"""
# Test whether given index is valid or is -1
return index == -1 or self.index_is_valid(index)
def index_first(self):
"""
Get the first item's index.
@return: First item's index, or -1 if the listbox is empty.
"""
# Return the first item's index
return 0 if self.size() > 0 else -1
def index_last(self):
"""
Get the last item's index.
@return: Last item's index, or -1 if the listbox is empty.
"""
# Return the last item's index
return self.size() - 1
def indexcur(self, internal=False, raise_error=False):
"""
Get the active index.
        @param internal: See 2N6OR.
        @param raise_error: Whether to raise IndexError when no item is active.
        @return: The active index. If no item is active, either return -1, or
        raise IndexError if `raise_error` is True.
"""
# Get active indexes
indexcurs = self._indexcurs(internal=internal)
# If have active indexes
if indexcurs:
# Return the first active index
return indexcurs[0]
# If no active indexes
else:
# If raise error
if raise_error:
# Raise error
raise IndexError(-1)
# If not raise error
else:
# Return -1
return -1
def _indexcurs(self, internal=False):
"""
Get active indexes list.
2N6OR
@param internal: Whether use listbox widget's selected indexes, instead
of cached active index.
Notice listbox widget has no selected indexes if it has no focus.
Notice using cached active index only supports single-selection mode,
which means the result list has at most one index.
@return: Active indexes list.
"""
# If use listbox widget's selected indexes
if internal:
# Return listbox widget's selected indexes list
return [int(x) for x in self._listbox.curselection()]
# If not use listbox widget's selected indexes
else:
# If cached active index is valid
if self.index_is_valid(self._indexcur):
# Return a list with the cached active index
return [self._indexcur]
# If cached active index is not valid
else:
# Return empty list
return []
def indexcur_set(
self,
index,
focus=False,
notify=True,
notify_arg=None,
):
"""
Set active index.
@param index: The index to set.
@param focus: Whether set focus on the listbox widget.
@param notify: Whether notify pre-change and post-change events.
@param notify_arg: Event argument.
@return: None.
"""
# If the index is not valid or -1
if not self.index_is_valid_or_void(index):
# Raise error
raise IndexError(index)
# If the index is valid or is -1.
# If the listbox is not enabled
if not self.is_enabled():
# Raise error
raise ListboxVidget.DisabledError()
# If the listbox is enabled.
# If the listbox is changing
if self._is_changing:
# Raise error
raise ListboxVidget.CircularCallError()
# If the listbox is not changing.
# Set changing flag on
self._is_changing = True
# Get old active index
old_indexcur = self._indexcur
# Set resetting flag on if new and old indexes are equal
self._is_resetting = (index == old_indexcur)
# If notify events
if notify:
# Notify pre-change event
self.handler_notify(self.ITEMCUR_CHANGE_SOON, notify_arg)
# If old active index is valid
if self.index_is_valid(old_indexcur):
# Set old active item's background color to normal color
self._listbox.itemconfig(old_indexcur, background=self._normal_bg)
# Set old active item's foreground color to normal color
self._listbox.itemconfig(old_indexcur, foreground=self._normal_fg)
# Cache new active index
self._indexcur = index
# Clear listbox widget's selection
self._listbox.selection_clear(0, END)
# Set listbox widget's selection
self._listbox.selection_set(index)
# Set listbox widget's activated index
self._listbox.activate(index)
# If new active index is valid
if index != -1:
# Set new active item's background color to active color
self._listbox.itemconfig(index, background=self._active_bg)
# Set new active item's foreground color to active color
self._listbox.itemconfig(index, foreground=self._active_fg)
# If set focus
if focus:
# Set focus on the listbox widget
self._listbox.focus_set()
# If new active index is valid
if index != -1:
# Make the active item visible
self._listbox.see(index)
# If notify events
if notify:
# Notify post-change event
self.handler_notify(self.ITEMCUR_CHANGE_DONE, notify_arg)
# Set resetting flag off
self._is_resetting = False
# Set changing flag off
self._is_changing = False
def indexcur_set_by_event(
self,
event,
focus=False,
notify=True,
notify_arg=None,
):
"""
Set active index using a Tkinter event object that contains coordinates
of the active item.
@param event: Tkinter event object.
@param focus: Whether set focus on the listbox widget.
@param notify: Whether notify pre-change and post-change events.
@param notify_arg: Event argument.
@return: None.
"""
# Get the event's y co-ordinate's nearest listbox item index
index = self._listbox.nearest(event.y)
# If the index is not valid
if not self.index_is_valid_or_void(index):
# Ignore the event
return
# If the index is valid
else:
# Set the index as active index
self.indexcur_set(
index=index,
focus=focus,
notify=notify,
notify_arg=notify_arg,
)
def item(self, index):
"""
        Get item at given index.
        @param index: Item index.
        @return: Item at given index. Raise IndexError if the index is not valid.
"""
return self.items()[index]
def itemcur(self, internal=False, raise_error=False):
"""
Get the active item.
@param internal: See 2N6OR.
        @param raise_error: Whether to raise an error if no item is active.
        @return: The active item. If no item is active, raise IndexError if
        `raise_error` is True, otherwise return None.
"""
# Get active index.
# May raise IndexError if `raise_error` is True.
indexcur = self.indexcur(
internal=internal,
raise_error=raise_error,
)
# If no active index
if indexcur == -1:
# Return None
return None
# If have active index
else:
# Return the active item
return self.items()[indexcur]
def item_insert(
self,
item,
index=None,
notify=True,
keep_active=True,
):
"""
Insert item at given index.
@param item: Item to insert.
@param index: Index to insert. `None` means active index, and if no
active index, insert at the end.
@param notify: Whether notify pre-change and post-change events.
@param keep_active: Whether keep or clear active index.
@return: None.
"""
# If notify events
if notify:
# Notify pre-change events
self.handler_notify(self.ITEMCUR_CHANGE_SOON)
self.handler_notify(self.ITEMS_CHANGE_SOON)
# Get old active index
active_index = self.indexcur()
# If the index is None,
# it means use active index.
if index is None:
# Use active index.
# `-1` works and means appending.
index = active_index
# Insert the item to the items list
self._items.insert(index, item)
# If old active index is valid
if active_index != -1:
# If old active index is GE the inserted index
if active_index >= index:
# Shift active index by one
active_index += 1
# If old active index is not GE the inserted index, use it as-is.
# Set new active index
self.indexcur_set(index=active_index, notify=False)
# Update listbox widget
self._listbox_widget_update(
keep_active=keep_active
)
# If notify events
if notify:
# Notify post-change events
self.handler_notify(self.ITEMS_CHANGE_DONE)
self.handler_notify(self.ITEMCUR_CHANGE_DONE)
def item_remove(
self,
index,
notify=True,
keep_active=True,
):
"""
Remove item at given index.
@param index: Index to remove.
@param notify: Whether notify pre-change and post-change events.
@param keep_active: Whether keep or clear active index.
@return: None.
"""
# If the index is not valid
if not self.index_is_valid(index):
# Raise error
raise ValueError(index)
# If the index is valid.
# If notify events
if notify:
# Notify pre-change events
self.handler_notify(self.ITEMCUR_CHANGE_SOON)
self.handler_notify(self.ITEMS_CHANGE_SOON)
# Get old active index
active_index = self.indexcur()
# Remove item at the index
del self._items[index]
# If old active index is valid
if active_index != -1:
# Get the last index
index_last = self.index_last()
# If old active index is GT the last index
if active_index > index_last:
# Use the last index as new active index
active_index = index_last
# If old active index is not GT the last index, use it as-is.
# Set new active index
self.indexcur_set(index=active_index, notify=False)
# Update listbox widget
self._listbox_widget_update(
keep_active=keep_active
)
# If notify events
if notify:
# Notify post-change events
self.handler_notify(self.ITEMS_CHANGE_DONE)
self.handler_notify(self.ITEMCUR_CHANGE_DONE)
def handler_add(
self,
event,
handler,
need_arg=False,
):
"""
Add event handler for an event.
If the event is ListboxVidget event, add the event handler to Eventor.
If the event is not ListboxVidget event, add the event handler to
listbox widget.
Notice this method overrides `Eventor.handler_add` in order to add
non-ListboxVidget event handler to listbox widget.
@param event: Event name.
@param handler: Event handler.
@param need_arg: Whether the event handler needs event argument.
@return: None.
"""
# If the event is ListboxVidget event
if event in self.EVENTS:
# Add the event handler to Eventor
return Eventor.handler_add(
self,
event=event,
handler=handler,
need_arg=need_arg,
)
# If the event is not ListboxVidget event,
# it is assumed to be Tkinter widget event.
else:
# Add the event handler to listbox widget
return self.bind(
event=event,
handler=handler,
)
def bind(
self,
event,
handler,
):
"""
Add event handler to listbox widget.
ListboxVidget internally uses `<Button-1>` and `<Double-Button-1>` to
capture active index changes. So if the given event is `<Button-1>` or
`<Double-Button-1>`, the given handler will be wrapped.
@param event: Event name.
@param handler: Event handler.
@return: None.
"""
# If the event is not `<Button-1>` or `<Double-Button-1>`
if event not in ['<Button-1>', '<Double-Button-1>']:
# Add the event handler to listbox widget
self._listbox.bind(event, handler)
# If the event is `<Button-1>` or `<Double-Button-1>`
else:
# Create event handler wrapper
def handler_wrapper(e):
"""
Event handler wrapper that sets new active index and then calls
the wrapped event handler.
Setting new active index is needed because when this handler is
called by Tkinter, the active index of the listbox is still
old.
@param e: Tkinter event object.
@return: None.
"""
# Set new active index
self.indexcur_set_by_event(e, notify=True)
# Call the wrapped event handler
handler(e)
# Add the event handler wrapper to the listbox widget
self._listbox.bind(event, handler_wrapper)
def _on_single_click(self, event):
"""
`<Button-1>` event handler that updates active index.
@param event: Tkinter event object.
@return: None.
"""
# Updates active index
self.indexcur_set_by_event(event, notify=True)
def _on_double_click(self, event):
"""
`<Double-Button-1>` event handler that updates active index.
@param event: Tkinter event object.
@return: None.
"""
# Updates active index
self.indexcur_set_by_event(event, notify=True)
def _listbox_widget_update(
self,
keep_active,
):
"""
Update listbox widget's items and selection.
@param keep_active: Whether keep or clear active index.
@return: None.
"""
# Remove old items from listbox widget
self._listbox.delete(0, END)
# Insert new items into listbox widget.
# For each ListboxVidget items.
for index, item in enumerate(self.items()):
# Get item text
item_text = self._item_to_text(item)
# Insert the item text into listbox widget
self._listbox.insert(index, item_text)
# Set the item's normal background color
self._listbox.itemconfig(index, background=self._normal_bg)
# Set the item's normal foreground color
self._listbox.itemconfig(index, foreground=self._normal_fg)
# Set the item's selected background color
self._listbox.itemconfig(index, selectbackground=self._selected_bg)
# Set the item's selected foreground color
self._listbox.itemconfig(index, selectforeground=self._selected_fg)
# If keep active index
if keep_active:
# Use old active index
indexcur = self._indexcur
# If not keep active index
else:
# Set active index to -1
indexcur = self._indexcur = -1
# Clear old selection
self._listbox.selection_clear(0, END)
# Set new selection.
# `-1` works.
self._listbox.selection_set(indexcur)
# Set new active index.
# `-1` works.
self._listbox.activate(indexcur)
# If new active index is valid
if indexcur != -1:
# Set active background color
self._listbox.itemconfig(indexcur, background=self._active_bg)
# Set active foreground color
self._listbox.itemconfig(indexcur, foreground=self._active_fg)
# Make the active item visible
self._listbox.see(indexcur) | PypiClean |
/Auptimizer-2.0.tar.gz/Auptimizer-2.0/src/aup/EE/Experiment.py | import json
import logging
import os
import signal
import sys
import _thread
import threading
import time
from ..Proposer import get_proposer, SPECIAL_EXIT_PROPOSERS
from .Job import Job
from .Resource import get_resource_manager
from ..aup import BasicConfig
from ..utils import set_default_keyvalue, check_missing_key, get_default_connector, get_default_username
from ..compression.utils import *
from ..Proposer import ProposerStatus
logger = logging.getLogger(__name__)
def _verify_config(config):
"""
verify the experiment configuration is fulfilled for experiment
:param config: experiment configuration
:return: config if verified
"""
check_missing_key(config, "script", "Missing required value for 'script'.", log=logger)
check_missing_key(config, "resource", "Missing required value for 'resource'", log=logger)
check_missing_key(config, "name", "Missing required value for 'name'", log=logger)
return config
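# For reference, a minimal configuration accepted by _verify_config might look
# like the sketch below; "target" and "proposer" are additionally required by
# Experiment.__init__, and all values shown are placeholders:
#   exp_config = BasicConfig(
#       name="demo",
#       script="train.py",
#       resource="cpu",
#       target="max",
#       proposer="random",
#       n_parallel=1,
#   )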
class Experiment:
"""
Experiment Class - create and run an experiment
:param exp_config: configuration of the experiment
:type exp_config: BasicConfig.BasicConfig
:param username: username, default: None - will use login username
:type username: str
:param connector: connector to database
:type connector: AbstractConnector
:param auppath: Auptimizer env.ini file folder, default is either ``./.aup`` or ``~/.aup``
:type auppath: str
:param sleep_time: time to pause between jobs
:type sleep_time: int
"""
def __init__(self,
exp_config,
username=None,
connector=None,
auppath=os.path.join(".aup"),
sleep_time=1,
eid=None,
start=True,
request_stop_time=5):
self.sleep_time = sleep_time
self.fail_safe = False
self.job_retries = 0
self.exp_config = _verify_config(exp_config)
self.resource_args = None
self.connector = connector if connector else get_default_connector(auppath=auppath, log=logger)
self.username = get_default_username(username)
self.is_compression_exp = False
self.compression_params = []
self.request_stop_thr = None
self.request_stop_time = request_stop_time
self.submitted = False
if "job_failure" in self.exp_config:
set_default_keyvalue("ignore_fail", False, self.exp_config["job_failure"], log=logger)
set_default_keyvalue("job_retries", 3, self.exp_config["job_failure"], log=logger)
self.fail_safe = self.exp_config["job_failure"]["ignore_fail"]
self.job_retries = self.exp_config["job_failure"]["job_retries"]
if "compression" in self.exp_config:
self.is_compression_exp = True
self.exp_config, self.compression_params = translate_compression_config(self.exp_config)
set_default_keyvalue("cwd", os.getcwd(), self.exp_config, log=logger)
set_default_keyvalue("workingdir", os.getcwd(), self.exp_config, log=logger)
set_default_keyvalue("n_parallel", 1, self.exp_config, log=logger)
check_missing_key(self.exp_config, "target", "Specify max/min for target", log=logger)
check_missing_key(self.exp_config, "proposer", "Specify the optimization `proposer`", log=logger)
self.proposer = get_proposer(self.exp_config['proposer'])(self.exp_config)
if "resource_args" in self.exp_config:
if "early_stop" in self.exp_config["resource_args"]:
self.exp_config["resource_args"]["track_intermediate_results"] = True
self.resource_manager = get_resource_manager(self.exp_config["resource"], self.connector,
self.exp_config["n_parallel"], auppath=auppath,
maximize=(self.exp_config["target"] == "max"),
**self.exp_config["resource_args"],
workingdir=self.exp_config['workingdir'],
script=self.exp_config['script'],
runtime_args = exp_config.get('runtime_args', {}))
self.resource_args = self.exp_config["resource_args"]
else:
self.resource_manager = get_resource_manager(self.exp_config["resource"], self.connector,
self.exp_config["n_parallel"], auppath=auppath,
maximize=(self.exp_config["target"] == "max"),
workingdir=self.exp_config['workingdir'],
script=self.exp_config['script'],
runtime_args = exp_config.get('runtime_args', {}))
if eid is None:
if start is True:
self.eid = self.resource_manager.connector.start_experiment(self.username, self.exp_config)
else:
self.eid = self.resource_manager.connector.create_experiment(self.username, self.exp_config)
else:
self.eid = eid
self.resource_manager.connector.start_experiment_by_eid(self.eid)
self.resource_manager.eid = self.eid
self.pending_jobs = {}
if 'runtime_args' in exp_config:
self.runtime_args = exp_config['runtime_args']
else:
self.runtime_args = {}
logger.info("Experiment %d is created" % self.eid)
logger.debug("Experiment config is %s" % json.dumps(self.exp_config))
def add_suspend_signal(self):
signal.signal(signal.SIGINT, lambda x, y: self._suspend(x, y))
def add_refresh_signal(self):
signal.signal(signal.SIGUSR1, lambda x, y: self._force_refresh(x, y))
def finish(self):
"""
Finish experiment if no job is running
:return: job id, best score
:rtype: (int, float)
"""
while self.proposer.get_status() == ProposerStatus.RUNNING:
logger.debug("Waiting for proposer")
time.sleep(self.sleep_time)
while len(self.pending_jobs) != 0:
# resource manager will prevent experiment shutdown with pending jobs.
# but just in case
logger.debug("Waiting for pending job")
time.sleep(self.sleep_time)
result = self.resource_manager.finish(status=self.proposer.get_status().name)
self.connector.close()
if self.request_stop_thr is not None:
self.request_stop_thr.join()
if result is None or len(result) == 0:
logger.warning("No result so far")
return None, -1
else:
logger.info("Finished")
logger.critical("Best job (%d) with score %f in experiment %d" % (result[0], result[1], self.eid))
try:
self.proposer.save(os.path.join(".", "exp%d.pkl" % self.eid))
except NotImplementedError:
pass
return result[:2]
def resume(self, filename):
"""
        Restore a previous experiment; jobs that were pending during suspension won't be run in this round.
:param filename: filename (saved by pickle as exp%d.pkl)
:type filename: str
"""
        self.proposer.reload(filename)  # Note: previously failed jobs won't be executed again.
self.start()
def start(self):
"""
Start experiment
"""
remaining_jobs = self.proposer.get_remaining_jobs()
parallel_jobs = min(remaining_jobs, self.exp_config.n_parallel)
self.request_stop_thr = threading.Thread(target=self._check_status)
self.request_stop_thr.start()
for i in range(parallel_jobs - len(self.pending_jobs)):
rc = self.submit_job()
self.submitted = self.submitted or rc
if not self.submitted:
logger.fatal("No job is running; quit")
self.proposer.set_status(ProposerStatus.FAILED)
raise Exception("Cannot run experiment!")
elif not rc:
logger.warning("Job submission failed, keep running")
def submit_job(self, job=None, rid_blacklist=None):
"""
Submit a new job to run if there is resource available
:param job: optional job parameter in case a job needs resubmitting
:type job: aup.EE.Job.Job object
:param rid_blacklist: resource ids to exclude when submitting job
:type rid_blacklist: [int]
:return: True if job submitted, else False
"""
rid = self.resource_manager.get_available(self.username, self.exp_config["resource"], rid_blacklist=rid_blacklist)
if rid is None:
self.resource_manager.log_error_message("Not enough resources!")
logger.warning("Increase resource or reduce n_parallel, no enough resources")
return False
if job is None:
proposal = self.proposer.get()
if proposal is not None and self.is_compression_exp:
proposal = deserialize_compression_proposal(self.exp_config, self.compression_params, proposal)
self.proposer.increment_job_counter()
if job is None and proposal is None:
if self.exp_config['proposer'] in SPECIAL_EXIT_PROPOSERS:
logger.info("%s is waiting to finish." % self.exp_config['proposer'])
return True
else:
logger.warning("Waiting other jobs finished\n"
"Think about rebalance your task loads, if you see this message shows up too many")
return False
else:
if job is None:
job_config = BasicConfig(**proposal)
job = Job(self.exp_config["script"], job_config, self.exp_config["workingdir"], retries=self.job_retries)
job.jid = self.resource_manager.connector.job_started(self.eid, rid, job_config)
else:
self.resource_manager.connector.job_retry(rid, job.jid)
logger.info("Submitting job %d with resource %d in experiment %d" % (job.jid, rid, self.eid))
job.was_executed = False
self.pending_jobs[job.jid] = job
# update the status, but after appending to pending_jobs
# to avoid premature termination
self.proposer.check_termination()
self.resource_manager.run_job(job, rid, self.exp_config, self.update, **self.runtime_args)
return True
def update(self, score, jid):
"""
Callback function passed to :mod:`aup.EE.Resource.AbstractResourceManager` to
update the job history (also proposer and connector)
:param score: score returned from job (using :func:`aup.utils.print_result`)
:type score: float
:param jid: job id
:type jid: int
"""
if score == "ERROR":
job = self.pending_jobs.pop(jid)
if job.jid in self.resource_manager.jobs and \
job.curr_retries < job.retries:
rid = self.resource_manager.jobs[jid]
job.rid_blacklist.add(rid)
self.resource_manager.connector.job_failed(rid, jid)
job.curr_retries += 1
logger.info("Retrying job %d (%d/%d)" % (jid, job.curr_retries, job.retries))
self.submit_job(job, rid_blacklist=job.rid_blacklist)
elif not self.fail_safe:
self.resource_manager.finish_job(jid, None, "FAILED")
self.proposer.set_status(ProposerStatus.FAILED)
logger.fatal("Stop Experiment due to job failure (ignore_fail flag set to false)")
else:
self.resource_manager.finish_job(jid, None, "FAILED")
try:
self.proposer.failed(job)
except Exception as ex:
self.proposer.set_status(ProposerStatus.FAILED)
logger.fatal("Stop Experiment due to job failure (failed jobs unsupported by proposer)")
logger.info("Job %d is finished (failed)" % (jid))
if self.proposer.get_status() == ProposerStatus.RUNNING:
self.start()
elif score == "EARLY STOPPED":
self.pending_jobs.pop(jid)
self.resource_manager.finish_job(jid, score, "EARLY_STOPPED")
logger.info("Job %d was early stopped" % (jid))
if self.proposer.get_status() == ProposerStatus.RUNNING:
self.start()
else:
self.proposer.update(score, self.pending_jobs[jid])
self.pending_jobs.pop(jid)
self.resource_manager.finish_job(jid, score, "FINISHED")
logger.info("Job %d is finished with result %s" % (jid, score))
if self.proposer.get_status() == ProposerStatus.RUNNING:
self.start()
def _suspend(self, sig, frame):
"""
Stop experiment by enter "Ctrl-C"
"""
logger.fatal("Experiment ended at user's request")
for i in self.pending_jobs:
logger.warning("Job with ID %d is cancelled" % i) # Note: cancelled job won't be run again.
try:
self.proposer.save(os.path.join(".", "exp%d.pkl" % self.eid))
except NotImplementedError:
pass
self.resource_manager.suspend()
result = self.resource_manager.finish(status="STOPPED")
self.connector.close()
if result is None:
logger.warning("No valid result so far")
else:
logger.critical("Best job (%d) with score %f in experiment %d" % (result[0], result[1], self.eid))
if self.request_stop_thr is not None:
self.request_stop_thr.join()
sys.exit(1)
def _check_status(self):
"""
Checks the database status of the experiment for external stopping requests
This method is run continuously in a separate "clean-up" thread in order to check for
external modifications to the experiment status in the database, in case a user
wants to stop an experiment remotely (e.g. from another process).
"""
if self.connector is None or self.eid is None:
logger.warning("Could not start thread for checking external experiment stopping requests.")
return
while True:
try:
if self.connector.is_closed():
logger.debug("Closing down clean-up thread.")
return
status = self.connector.maybe_get_experiment_status(self.eid)
if status == "REQUEST_STOP":
return _thread.interrupt_main()
time.sleep(self.request_stop_time)
except Exception as ex:
logger.debug("Error in clean-up thread: {}".format(ex))
def _force_refresh(self, sig, frame):
# currently useful for async resource manager timers
self.resource_manager.refresh() | PypiClean |
/BlueWhale3-Timeseries-0.3.13.tar.gz/BlueWhale3-Timeseries-0.3.13/doc/widgets/aggregate.md | Aggregate
=========
Aggregate data by second, minute, hour, day, week, month, or year.
**Inputs**
- Time series: Time series as output by **As Timeseries** widget.
**Outputs**
- Time series: Aggregated time series.
**Aggregate** joins together instances at the same level of granularity. In other words, if aggregating by day, all instances from the same day will be merged into one. The aggregation function can be defined separately for each attribute, based on its type.

1. Interval to aggregate the time series by. Options are: second, minute, hour, day, week, month, or year.
2. Aggregation function for each of the time series in the table. Discrete variables (sequences) can only be aggregated using mode (i.e. most frequent value), whereas string variables can only be aggregated using string concatenation.
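Conceptually, aggregating by day behaves like a pandas-style resample with a per-column function, as in the sketch below (an illustration, not the widget's actual implementation):

```python
import pandas as pd

ts = pd.DataFrame(
    {"temperature": [21.0, 22.5, 20.0], "state": ["on", "on", "off"]},
    index=pd.to_datetime(["2020-01-01 10:00", "2020-01-01 11:00", "2020-01-02 09:00"]),
)
# Mean for the continuous variable, mode (most frequent value) for the discrete one.
daily = ts.resample("D").agg({
    "temperature": "mean",
    "state": lambda s: s.mode().iloc[0],
})
```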
#### See also
[Moving Transform](moving_transform_w.md)
| PypiClean |
/CocoRPy27-1.4.1.zip/CocoRPy27-1.4.1/setupInfo.py | import time
MetaData = {
'name': 'CocoRPy27',
'long_name': 'Coco/R for Python 2.7',
'version': '1.4.1',
'build_date': time.strftime( '%Y-%m-%d %H:%M:%S GMT', time.gmtime() ),
'description': 'Python implementation of the famous CoCo/R LL(k) compiler generator ported to Python.',
'url': 'https://sourceforge.net/projects/cocorforpython/',
'download_url': 'https://sourceforge.net/projects/cocorforpython/',
'Coco/R URL': 'http://www.ssw.uni-linz.ac.at/coco',
'author': 'Ronald H Longo',
'author_email': '[email protected]',
'maintainer': 'Ronald H Longo',
'maintainer_email': '[email protected]',
'platforms': [ 'Python 2.7', 'Windows', 'Unix' ],
'license': 'GPL',
'packages': [ '' ],
'data_files': [
( 'documentation', [ 'documentation/*' ] ),
( 'examples', [ 'examples/*' ] ),
( 'frames', [ 'frames/*' ] ),
( 'pimaker', [ 'pimaker/*' ] ),
( 'sources', [ 'sources/*' ] ),
( 'testSuite', [ 'testSuite/*' ] )
],
'classifiers': [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Compilers',
'Topic :: Software Development :: Interpreters',
'Topic :: Software Development :: Pre-processors',
'Topic :: System :: Shells',
'Topic :: Text Processing :: General',
'Topic :: Text Processing :: Linguistic'
]
}
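# Presumably consumed by the project's setup script along these lines (a
# sketch, not the actual setup.py; note that keys such as 'long_name',
# 'build_date' and 'Coco/R URL' are not standard setup() arguments and would
# need filtering):
#   from distutils.core import setup
#   from setupInfo import MetaData
#   setup(**{k: v for k, v in MetaData.items()
#            if k in ('name', 'version', 'description', 'author', 'url')})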
VersionInfo = {
'1.4':
{
'changes': [ "Branch from git/aixp, merged into Ron Longo's original main line.",
"Pimaker version 1.1" ],
'bugfixes': [ "several file loading and path problems" ],
'contributions': [ 'Ron Longo performed the merge.' ]
},
'1.2aixp': {
'changes': [ "Branch from Ron Longo's original implementation to git/aixp." ],
'bugfixes': [ "few" ],
'contributions': [ "git/aixp, the latest updates from the java implementation." ]
},
'1.1.0rc': {
'changes': [ "Coco/R now passes all tests of the official Coco/R test suite" ],
'bugfixes': [ ],
      'contributions': [ ]
},
'1.0.10b2':{
'changes': [ "Updated builder and renamed it to pimaker" ],
'bugfixes': [ "Many code generator bug fixes" ],
'contributions': [ "Wayne Wiitanen has contributed a version of the EXPR example that works with CocoPy." ]
},
'1.0.9b2': {
'changes': [ "Simplified the Errors class and error handling.",
"Completed a first version of my builder application." ],
'bugfixes': [ "Repaired a bug in SemErr() didn't work properly." ]
},
'1.0.7b1': {
'changes': [ ],
'bugfixes': [ "Repaired LINUX bug found in v1.0.6b1" ]
},
'1.0.6b1': {
'changes': [ "Completed a beta version of builder.py",
"Updated README.txt to describe builder.py",
"Removed HowToBootstrap.txt from Documents" ],
'bugfixes': [ "Coco.atg does not bootstrap on LINUX." ]
}
} | PypiClean |
/MegEngine-1.13.1-cp37-cp37m-macosx_10_14_x86_64.whl/megengine/data/dataset/vision/objects365.py | import json
import os
from collections import defaultdict
import cv2
import numpy as np
from .meta_vision import VisionDataset
class Objects365(VisionDataset):
r"""`Objects365 <https://www.objects365.org/overview.html>`_ Dataset."""
supported_order = (
"image",
"boxes",
"boxes_category",
"info",
)
def __init__(
self, root, ann_file, remove_images_without_annotations=False, *, order=None
):
super().__init__(root, order=order, supported_order=self.supported_order)
with open(ann_file, "r") as f:
dataset = json.load(f)
self.imgs = dict()
for img in dataset["images"]:
self.imgs[img["id"]] = img
self.img_to_anns = defaultdict(list)
for ann in dataset["annotations"]:
# for saving memory
if (
"boxes" not in self.order
and "boxes_category" not in self.order
and "bbox" in ann
):
del ann["bbox"]
self.img_to_anns[ann["image_id"]].append(ann)
self.cats = dict()
for cat in dataset["categories"]:
self.cats[cat["id"]] = cat
self.ids = list(sorted(self.imgs.keys()))
# filter images without detection annotations
if remove_images_without_annotations:
ids = []
for img_id in self.ids:
anno = self.img_to_anns[img_id]
# filter crowd annotations
anno = [obj for obj in anno if obj["iscrowd"] == 0]
anno = [
obj for obj in anno if obj["bbox"][2] > 0 and obj["bbox"][3] > 0
]
if len(anno) > 0:
ids.append(img_id)
self.img_to_anns[img_id] = anno
else:
del self.imgs[img_id]
del self.img_to_anns[img_id]
self.ids = ids
self.json_category_id_to_contiguous_id = {
v: i + 1 for i, v in enumerate(sorted(self.cats.keys()))
}
self.contiguous_category_id_to_json_id = {
v: k for k, v in self.json_category_id_to_contiguous_id.items()
}
def __getitem__(self, index):
img_id = self.ids[index]
anno = self.img_to_anns[img_id]
target = []
for k in self.order:
if k == "image":
file_name = self.imgs[img_id]["file_name"]
path = os.path.join(self.root, file_name)
image = cv2.imread(path, cv2.IMREAD_COLOR)
target.append(image)
elif k == "boxes":
boxes = [obj["bbox"] for obj in anno]
boxes = np.array(boxes, dtype=np.float32).reshape(-1, 4)
# transfer boxes from xywh to xyxy
boxes[:, 2:] += boxes[:, :2]
target.append(boxes)
elif k == "boxes_category":
boxes_category = [obj["category_id"] for obj in anno]
boxes_category = [
self.json_category_id_to_contiguous_id[c] for c in boxes_category
]
boxes_category = np.array(boxes_category, dtype=np.int32)
target.append(boxes_category)
elif k == "info":
info = self.imgs[img_id]
info = [info["height"], info["width"], info["file_name"]]
target.append(info)
else:
raise NotImplementedError
return tuple(target)
def __len__(self):
return len(self.ids)
def get_img_info(self, index):
img_id = self.ids[index]
img_info = self.imgs[img_id]
return img_info
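    # A minimal usage sketch (the paths below are placeholders, not shipped
    # with the package):
    #   dataset = Objects365(
    #       "/data/objects365/train",
    #       "/data/objects365/annotations/train.json",
    #       remove_images_without_annotations=True,
    #       order=("image", "boxes", "boxes_category"),
    #   )
    #   image, boxes, boxes_category = dataset[0]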
class_names = (
"person",
"sneakers",
"chair",
"hat",
"lamp",
"bottle",
"cabinet/shelf",
"cup",
"car",
"glasses",
"picture/frame",
"desk",
"handbag",
"street lights",
"book",
"plate",
"helmet",
"leather shoes",
"pillow",
"glove",
"potted plant",
"bracelet",
"flower",
"tv",
"storage box",
"vase",
"bench",
"wine glass",
"boots",
"bowl",
"dining table",
"umbrella",
"boat",
"flag",
"speaker",
"trash bin/can",
"stool",
"backpack",
"couch",
"belt",
"carpet",
"basket",
"towel/napkin",
"slippers",
"barrel/bucket",
"coffee table",
"suv",
"toy",
"tie",
"bed",
"traffic light",
"pen/pencil",
"microphone",
"sandals",
"canned",
"necklace",
"mirror",
"faucet",
"bicycle",
"bread",
"high heels",
"ring",
"van",
"watch",
"sink",
"horse",
"fish",
"apple",
"camera",
"candle",
"teddy bear",
"cake",
"motorcycle",
"wild bird",
"laptop",
"knife",
"traffic sign",
"cell phone",
"paddle",
"truck",
"cow",
"power outlet",
"clock",
"drum",
"fork",
"bus",
"hanger",
"nightstand",
"pot/pan",
"sheep",
"guitar",
"traffic cone",
"tea pot",
"keyboard",
"tripod",
"hockey",
"fan",
"dog",
"spoon",
"blackboard/whiteboard",
"balloon",
"air conditioner",
"cymbal",
"mouse",
"telephone",
"pickup truck",
"orange",
"banana",
"airplane",
"luggage",
"skis",
"soccer",
"trolley",
"oven",
"remote",
"baseball glove",
"paper towel",
"refrigerator",
"train",
"tomato",
"machinery vehicle",
"tent",
"shampoo/shower gel",
"head phone",
"lantern",
"donut",
"cleaning products",
"sailboat",
"tangerine",
"pizza",
"kite",
"computer box",
"elephant",
"toiletries",
"gas stove",
"broccoli",
"toilet",
"stroller",
"shovel",
"baseball bat",
"microwave",
"skateboard",
"surfboard",
"surveillance camera",
"gun",
"life saver",
"cat",
"lemon",
"liquid soap",
"zebra",
"duck",
"sports car",
"giraffe",
"pumpkin",
"piano",
"stop sign",
"radiator",
"converter",
"tissue ",
"carrot",
"washing machine",
"vent",
"cookies",
"cutting/chopping board",
"tennis racket",
"candy",
"skating and skiing shoes",
"scissors",
"folder",
"baseball",
"strawberry",
"bow tie",
"pigeon",
"pepper",
"coffee machine",
"bathtub",
"snowboard",
"suitcase",
"grapes",
"ladder",
"pear",
"american football",
"basketball",
"potato",
"paint brush",
"printer",
"billiards",
"fire hydrant",
"goose",
"projector",
"sausage",
"fire extinguisher",
"extension cord",
"facial mask",
"tennis ball",
"chopsticks",
"electronic stove and gas stove",
"pie",
"frisbee",
"kettle",
"hamburger",
"golf club",
"cucumber",
"clutch",
"blender",
"tong",
"slide",
"hot dog",
"toothbrush",
"facial cleanser",
"mango",
"deer",
"egg",
"violin",
"marker",
"ship",
"chicken",
"onion",
"ice cream",
"tape",
"wheelchair",
"plum",
"bar soap",
"scale",
"watermelon",
"cabbage",
"router/modem",
"golf ball",
"pine apple",
"crane",
"fire truck",
"peach",
"cello",
"notepaper",
"tricycle",
"toaster",
"helicopter",
"green beans",
"brush",
"carriage",
"cigar",
"earphone",
"penguin",
"hurdle",
"swing",
"radio",
"CD",
"parking meter",
"swan",
"garlic",
"french fries",
"horn",
"avocado",
"saxophone",
"trumpet",
"sandwich",
"cue",
"kiwi fruit",
"bear",
"fishing rod",
"cherry",
"tablet",
"green vegetables",
"nuts",
"corn",
"key",
"screwdriver",
"globe",
"broom",
"pliers",
"volleyball",
"hammer",
"eggplant",
"trophy",
"dates",
"board eraser",
"rice",
"tape measure/ruler",
"dumbbell",
"hamimelon",
"stapler",
"camel",
"lettuce",
"goldfish",
"meat balls",
"medal",
"toothpaste",
"antelope",
"shrimp",
"rickshaw",
"trombone",
"pomegranate",
"coconut",
"jellyfish",
"mushroom",
"calculator",
"treadmill",
"butterfly",
"egg tart",
"cheese",
"pig",
"pomelo",
"race car",
"rice cooker",
"tuba",
"crosswalk sign",
"papaya",
"hair drier",
"green onion",
"chips",
"dolphin",
"sushi",
"urinal",
"donkey",
"electric drill",
"spring rolls",
"tortoise/turtle",
"parrot",
"flute",
"measuring cup",
"shark",
"steak",
"poker card",
"binoculars",
"llama",
"radish",
"noodles",
"yak",
"mop",
"crab",
"microscope",
"barbell",
"bread/bun",
"baozi",
"lion",
"red cabbage",
"polar bear",
"lighter",
"seal",
"mangosteen",
"comb",
"eraser",
"pitaya",
"scallop",
"pencil case",
"saw",
"table tennis paddle",
"okra",
"starfish",
"eagle",
"monkey",
"durian",
"game board",
"rabbit",
"french horn",
"ambulance",
"asparagus",
"hoverboard",
"pasta",
"target",
"hotair balloon",
"chainsaw",
"lobster",
"iron",
"flashlight",
) | PypiClean |
/Hikka_TL-1.24.14-py3-none-any.whl/telethon/client/downloads.py | import datetime
import io
import os
import pathlib
import typing
import inspect
import asyncio
from ..crypto import AES
from .. import utils, helpers, errors, hints
from ..requestiter import RequestIter
from ..tl import TLObject, types, functions
try:
import aiohttp
except ImportError:
aiohttp = None
if typing.TYPE_CHECKING:
from .telegramclient import TelegramClient
# Chunk sizes for upload.getFile must be multiples of the smallest size
MIN_CHUNK_SIZE = 4096
MAX_CHUNK_SIZE = 512 * 1024
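# e.g. a request size is valid when it is a multiple of MIN_CHUNK_SIZE and at
# most MAX_CHUNK_SIZE: 4096, 8192, ..., 524288 bytes.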
# 2021-01-15, users reported that `errors.TimeoutError` can occur while downloading files.
TIMED_OUT_SLEEP = 1
class _DirectDownloadIter(RequestIter):
async def _init(
self, file, dc_id, offset, stride, chunk_size, request_size, file_size, msg_data
):
self.request = functions.upload.GetFileRequest(
file, offset=offset, limit=request_size
)
self.total = file_size
self._stride = stride
self._chunk_size = chunk_size
self._last_part = None
self._msg_data = msg_data
self._timed_out = False
self._exported = dc_id and self.client.session.dc_id != dc_id
if not self._exported:
# The used sender will also change if ``FileMigrateError`` occurs
self._sender = self.client._sender
else:
try:
self._sender = await self.client._borrow_exported_sender(dc_id)
except errors.DcIdInvalidError:
# Can't export a sender for the ID we are currently in
config = await self.client(functions.help.GetConfigRequest())
for option in config.dc_options:
if option.ip_address == self.client.session.server_address:
self.client.session.set_dc(
option.id, option.ip_address, option.port
)
self.client.session.save()
break
# TODO Figure out why the session may have the wrong DC ID
self._sender = self.client._sender
self._exported = False
async def _load_next_chunk(self):
cur = await self._request()
self.buffer.append(cur)
if len(cur) < self.request.limit:
self.left = len(self.buffer)
await self.close()
else:
self.request.offset += self._stride
async def _request(self):
try:
result = await self.client._call(self._sender, self.request)
self._timed_out = False
if isinstance(result, types.upload.FileCdnRedirect):
raise NotImplementedError # TODO Implement
else:
return result.bytes
except errors.TimeoutError as e:
if self._timed_out:
self.client._log[__name__].warning(
"Got two timeouts in a row while downloading file"
)
raise
self._timed_out = True
self.client._log[__name__].info(
"Got timeout while downloading file, retrying once"
)
await asyncio.sleep(TIMED_OUT_SLEEP)
return await self._request()
except errors.FileMigrateError as e:
self.client._log[__name__].info("File lives in another DC")
self._sender = await self.client._borrow_exported_sender(e.new_dc)
self._exported = True
return await self._request()
except errors.FilerefUpgradeNeededError as e:
# Only implemented for documents which are the ones that may take that long to download
if (
not self._msg_data
or not isinstance(
self.request.location, types.InputDocumentFileLocation
)
or self.request.location.thumb_size != ""
):
raise
self.client._log[__name__].info(
"File ref expired during download; refetching message"
)
chat, msg_id = self._msg_data
msg = await self.client.get_messages(chat, ids=msg_id)
if not isinstance(msg.media, types.MessageMediaDocument):
raise
document = msg.media.document
# Message media may have been edited for something else
if document.id != self.request.location.id:
raise
self.request.location.file_reference = document.file_reference
return await self._request()
async def close(self):
if not self._sender:
return
try:
if self._exported:
await self.client._return_exported_sender(self._sender)
elif self._sender != self.client._sender:
await self._sender.disconnect()
finally:
self._sender = None
async def __aenter__(self):
return self
async def __aexit__(self, *args):
await self.close()
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
class _GenericDownloadIter(_DirectDownloadIter):
async def _load_next_chunk(self):
# 1. Fetch enough for one chunk
data = b""
        # 1.1. ``bad`` is how far into the already-fetched data the requested offset lies
bad = self.request.offset % self.request.limit
before = self.request.offset
# 1.2. We have to fetch from a valid offset, so remove that bad part
self.request.offset -= bad
done = False
while not done and len(data) - bad < self._chunk_size:
cur = await self._request()
self.request.offset += self.request.limit
data += cur
done = len(cur) < self.request.limit
# 1.3 Restore our last desired offset
self.request.offset = before
# 2. Fill the buffer with the data we have
# 2.1. Slicing `bytes` is expensive, yield `memoryview` instead
mem = memoryview(data)
# 2.2. The current chunk starts at ``bad`` offset into the data,
# and each new chunk is ``stride`` bytes apart of the other
for i in range(bad, len(data), self._stride):
self.buffer.append(mem[i : i + self._chunk_size])
# 2.3. We will yield this offset, so move to the next one
self.request.offset += self._stride
# 2.4. If we are in the last chunk, we will return the last partial data
if done:
self.left = len(self.buffer)
await self.close()
return
# 2.5. If we are not done, we can't return incomplete chunks.
if len(self.buffer[-1]) != self._chunk_size:
self._last_part = self.buffer.pop().tobytes()
# 3. Be careful with the offsets. Re-fetching a bit of data
# is fine, since it greatly simplifies things.
# TODO Try to not re-fetch data
self.request.offset -= self._stride
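# Usage from the high-level API (illustrative; assumes an already-connected
# TelegramClient instance named `client`):
#   path = await client.download_media(message, 'photo.jpg')
#   data = await client.download_file(input_location, bytes, part_size_kb=256)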
class DownloadMethods:
# region Public methods
async def download_profile_photo(
self: "TelegramClient",
entity: "hints.EntityLike",
file: "hints.FileLike" = None,
*,
download_big: bool = True
) -> typing.Optional[str]:
"""
Downloads the profile photo from the given user, chat or channel.
Arguments
entity (`entity`):
From who the photo will be downloaded.
.. note::
This method expects the full entity (which has the data
to download the photo), not an input variant.
It's possible that sometimes you can't fetch the entity
from its input (since you can get errors like
``ChannelPrivateError``) but you already have it through
another call, like getting a forwarded message from it.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If file is the type `bytes`, it will be downloaded in-memory
as a bytestring (e.g. ``file=bytes``).
download_big (`bool`, optional):
Whether to use the big version of the available photos.
Returns
`None` if no photo was provided, or if it was Empty. On success
the file path is returned since it may differ from the one given.
Example
.. code-block:: python
# Download your own profile photo
path = await client.download_profile_photo('me')
print(path)
"""
# hex(crc32(x.encode('ascii'))) for x in
# ('User', 'Chat', 'UserFull', 'ChatFull')
ENTITIES = (0x2DA17977, 0xC5AF5D94, 0x1F4661B9, 0xD49A2697)
# ('InputPeer', 'InputUser', 'InputChannel')
INPUTS = (0xC91C90B6, 0xE669BF46, 0x40F202FD)
if not isinstance(entity, TLObject) or entity.SUBCLASS_OF_ID in INPUTS:
entity = await self.get_entity(entity)
thumb = -1 if download_big else 0
possible_names = []
if entity.SUBCLASS_OF_ID not in ENTITIES:
photo = entity
else:
if not hasattr(entity, "photo"):
# Special case: may be a ChatFull with photo:Photo
# This is different from a normal UserProfilePhoto and Chat
if not hasattr(entity, "chat_photo"):
return None
return await self._download_photo(
entity.chat_photo,
file,
date=None,
thumb=thumb,
progress_callback=None,
)
possible_names.extend(
getattr(entity, attr, None)
for attr in ("username", "first_name", "title")
)
photo = entity.photo
if not isinstance(photo, (types.UserProfilePhoto, types.ChatPhoto)):
# It doesn't make any sense to check if `photo` can be used
# as input location, because then this method would be able
# to "download the profile photo of a message", i.e. its
# media which should be done with `download_media` instead.
return None
dc_id = photo.dc_id
loc = types.InputPeerPhotoFileLocation(
peer=await self.get_input_entity(entity),
photo_id=photo.photo_id,
big=download_big,
)
file = self._get_proper_filename(
file, "profile_photo", ".jpg", possible_names=possible_names
)
try:
result = await self.download_file(loc, file, dc_id=dc_id)
return result if file is bytes else file
except errors.LocationInvalidError:
# See issue #500, Android app fails as of v4.6.0 (1155).
# The fix seems to be using the full channel chat photo.
ie = await self.get_input_entity(entity)
ty = helpers._entity_type(ie)
if ty != helpers._EntityType.CHANNEL:
# Until there's a report for chats, no need to.
return None
full = await self(functions.channels.GetFullChannelRequest(ie))
return await self._download_photo(
full.full_chat.chat_photo,
file,
date=None,
progress_callback=None,
thumb=thumb,
)
async def download_media(
self: "TelegramClient",
message: "hints.MessageLike",
file: "hints.FileLike" = None,
*,
thumb: "typing.Union[int, types.TypePhotoSize]" = None,
progress_callback: "hints.ProgressCallback" = None
) -> typing.Optional[typing.Union[str, bytes]]:
"""
Downloads the given media from a message object.
Note that if the download is too slow, you should consider installing
``cryptg`` (through ``pip install cryptg``) so that decrypting the
received data is done in C instead of Python (much faster).
See also `Message.download_media() <telethon.tl.custom.message.Message.download_media>`.
Arguments
message (`Message <telethon.tl.custom.message.Message>` | :tl:`Media`):
The media or message containing the media that will be downloaded.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If file is the type `bytes`, it will be downloaded in-memory
as a bytestring (e.g. ``file=bytes``).
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(received bytes, total)``.
thumb (`int` | :tl:`PhotoSize`, optional):
Which thumbnail size from the document or photo to download,
instead of downloading the document or photo itself.
If it's specified but the file does not have a thumbnail,
this method will return `None`.
The parameter should be an integer index between ``0`` and
``len(sizes)``. ``0`` will download the smallest thumbnail,
and ``len(sizes) - 1`` will download the largest thumbnail.
You can also use negative indices, which work the same as
they do in Python's `list`.
You can also pass the :tl:`PhotoSize` instance to use.
Alternatively, the thumb size type `str` may be used.
In short, use ``thumb=0`` if you want the smallest thumbnail
and ``thumb=-1`` if you want the largest thumbnail.
.. note::
The largest thumbnail may be a video instead of a photo,
as they are available since layer 116 and are bigger than
any of the photos.
Returns
`None` if no media was provided, or if it was Empty. On success
the file path is returned since it may differ from the one given.
Example
.. code-block:: python
path = await client.download_media(message)
await client.download_media(message, filename)
# or
path = await message.download_media()
await message.download_media(filename)
# Printing download progress
def callback(current, total):
print('Downloaded', current, 'out of', total,
'bytes: {:.2%}'.format(current / total))
await client.download_media(message, progress_callback=callback)
"""
# Downloading large documents may be slow enough to require a new file reference
# to be obtained mid-download. Store (input chat, message id) so that the message
# can be re-fetched.
msg_data = None
# TODO This won't work for messageService
if isinstance(message, types.Message):
date = message.date
media = message.media
msg_data = (message.input_chat, message.id) if message.input_chat else None
else:
date = datetime.datetime.now()
media = message
if isinstance(media, str):
media = utils.resolve_bot_file_id(media)
        if isinstance(media, types.MessageService) and isinstance(
            media.action, types.MessageActionChatEditPhoto
        ):
            # A chat-edit-photo service message carries its photo in its action.
            media = media.action.photo
if isinstance(media, types.MessageMediaWebPage) and isinstance(
media.webpage, types.WebPage
):
media = media.webpage.document or media.webpage.photo
if isinstance(media, (types.MessageMediaPhoto, types.Photo)):
return await self._download_photo(
media, file, date, thumb, progress_callback
)
elif isinstance(media, (types.MessageMediaDocument, types.Document)):
return await self._download_document(
media, file, date, thumb, progress_callback, msg_data
)
elif isinstance(media, types.MessageMediaContact) and thumb is None:
return self._download_contact(media, file)
elif (
isinstance(media, (types.WebDocument, types.WebDocumentNoProxy))
and thumb is None
):
return await self._download_web_document(media, file, progress_callback)
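        # Any other media type, or a thumb requested for media without
        # thumbnails, falls through and implicitly returns None.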
async def download_file(
self: "TelegramClient",
input_location: "hints.FileLike",
file: "hints.OutFileLike" = None,
*,
part_size_kb: float = None,
file_size: int = None,
progress_callback: "hints.ProgressCallback" = None,
dc_id: int = None,
key: bytes = None,
iv: bytes = None
) -> typing.Optional[bytes]:
"""
Low-level method to download files from their input location.
.. note::
Generally, you should instead use `download_media`.
This method is intended to be a bit more low-level.
Arguments
input_location (:tl:`InputFileLocation`):
The file location from which the file will be downloaded.
See `telethon.utils.get_input_location` source for a complete
list of supported types.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If the file path is `None` or `bytes`, then the result
will be saved in memory and returned as `bytes`.
part_size_kb (`int`, optional):
Chunk size when downloading files. The larger, the less
requests will be made (up to 512KB maximum).
file_size (`int`, optional):
The file size that is about to be downloaded, if known.
Only used if ``progress_callback`` is specified.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(downloaded bytes, total)``. Note that the
``total`` is the provided ``file_size``.
dc_id (`int`, optional):
The data center the library should connect to in order
to download the file. You shouldn't worry about this.
            key (`bytes`, optional):
                The key used to decrypt the file, for encrypted files (secret chats).
            iv (`bytes`, optional):
                The IV used to decrypt the file, for encrypted files (secret chats).
Example
.. code-block:: python
# Download a file and print its header
data = await client.download_file(input_file, bytes)
print(data[:16])
"""
return await self._download_file(
input_location,
file,
part_size_kb=part_size_kb,
file_size=file_size,
progress_callback=progress_callback,
dc_id=dc_id,
key=key,
iv=iv,
)
async def _download_file(
self: "TelegramClient",
input_location: "hints.FileLike",
file: "hints.OutFileLike" = None,
*,
part_size_kb: float = None,
file_size: int = None,
progress_callback: "hints.ProgressCallback" = None,
dc_id: int = None,
key: bytes = None,
iv: bytes = None,
msg_data: tuple = None
) -> typing.Optional[bytes]:
if not part_size_kb:
part_size_kb = (
utils.get_appropriated_part_size(file_size) if file_size else 64
)
part_size = int(part_size_kb * 1024)
if part_size % MIN_CHUNK_SIZE != 0:
raise ValueError("The part size must be evenly divisible by 4096.")
if isinstance(file, pathlib.Path):
file = str(file.absolute())
in_memory = file is None or file is bytes
if in_memory:
f = io.BytesIO()
elif isinstance(file, str):
# Ensure that we'll be able to download the media
helpers.ensure_parent_dir_exists(file)
f = open(file, "wb")
else:
f = file
try:
async for chunk in self._iter_download(
input_location, request_size=part_size, dc_id=dc_id, msg_data=msg_data
):
if iv and key:
chunk = AES.decrypt_ige(chunk, key, iv)
r = f.write(chunk)
if inspect.isawaitable(r):
await r
if progress_callback:
r = progress_callback(f.tell(), file_size)
if inspect.isawaitable(r):
await r
# Not all IO objects have flush (see #1227)
if callable(getattr(f, "flush", None)):
f.flush()
if in_memory:
return f.getvalue()
finally:
if isinstance(file, str) or in_memory:
f.close()
def iter_download(
self: "TelegramClient",
file: "hints.FileLike",
*,
offset: int = 0,
stride: int = None,
limit: int = None,
chunk_size: int = None,
request_size: int = MAX_CHUNK_SIZE,
file_size: int = None,
dc_id: int = None
):
"""
Iterates over a file download, yielding chunks of the file.
This method can be used to stream files in a more convenient
way, since it offers more control (pausing, resuming, etc.)
.. note::
            If you use a value for `offset` or `stride` which is not a
            multiple of the minimum allowed `request_size`, or if
            `chunk_size` is different from `request_size`, the library
            will need to do a bit more work to fetch the data in the way
            you intend it to. You normally shouldn't worry about this.
Arguments
file (`hints.FileLike`):
The file of which contents you want to iterate over.
offset (`int`, optional):
The offset in bytes into the file from where the
download should start. For example, if a file is
1024KB long and you just want the last 512KB, you
would use ``offset=512 * 1024``.
stride (`int`, optional):
The stride of each chunk (how much the offset should
advance between reading each chunk). This parameter
should only be used for more advanced use cases.
It must be bigger than or equal to the `chunk_size`.
limit (`int`, optional):
The limit for how many *chunks* will be yielded at most.
chunk_size (`int`, optional):
The maximum size of the chunks that will be yielded.
                Note that the last chunk may be smaller than this value.
                By default, it equals `request_size`.
request_size (`int`, optional):
How many bytes will be requested to Telegram when more
data is required. By default, as many bytes as possible
are requested. If you would like to request data in
smaller sizes, adjust this parameter.
Note that values outside the valid range will be clamped,
and the final value will also be a multiple of the minimum
allowed size.
file_size (`int`, optional):
If the file size is known beforehand, you should set
this parameter to said value. Depending on the type of
the input file passed, this may be set automatically.
dc_id (`int`, optional):
The data center the library should connect to in order
to download the file. You shouldn't worry about this.
Yields
`bytes` objects representing the chunks of the file if the
right conditions are met, or `memoryview` objects instead.
Example
.. code-block:: python
# Streaming `media` to an output file
# After the iteration ends, the sender is cleaned up
with open('photo.jpg', 'wb') as fd:
async for chunk in client.iter_download(media):
fd.write(chunk)
# Fetching only the header of a file (32 bytes)
# You should manually close the iterator in this case.
#
# "stream" is a common name for asynchronous generators,
# and iter_download will yield `bytes` (chunks of the file).
stream = client.iter_download(media, request_size=32)
header = await stream.__anext__() # "manual" version of `async for`
await stream.close()
assert len(header) == 32
"""
return self._iter_download(
file,
offset=offset,
stride=stride,
limit=limit,
chunk_size=chunk_size,
request_size=request_size,
file_size=file_size,
dc_id=dc_id,
)
def _iter_download(
self: "TelegramClient",
file: "hints.FileLike",
*,
offset: int = 0,
stride: int = None,
limit: int = None,
chunk_size: int = None,
request_size: int = MAX_CHUNK_SIZE,
file_size: int = None,
dc_id: int = None,
msg_data: tuple = None
):
info = utils._get_file_info(file)
if info.dc_id is not None:
dc_id = info.dc_id
if file_size is None:
file_size = info.size
file = info.location
if chunk_size is None:
chunk_size = request_size
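        # Without an explicit limit, derive the number of chunks needed to
        # cover the known file size (ceiling division).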
if limit is None and file_size is not None:
limit = (file_size + chunk_size - 1) // chunk_size
if stride is None:
stride = chunk_size
elif stride < chunk_size:
raise ValueError("stride must be >= chunk_size")
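        # Align the request size to MIN_CHUNK_SIZE and clamp it into
        # [MIN_CHUNK_SIZE, MAX_CHUNK_SIZE], the range the API accepts.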
request_size -= request_size % MIN_CHUNK_SIZE
if request_size < MIN_CHUNK_SIZE:
request_size = MIN_CHUNK_SIZE
elif request_size > MAX_CHUNK_SIZE:
request_size = MAX_CHUNK_SIZE
if (
chunk_size == request_size
and offset % MIN_CHUNK_SIZE == 0
and stride % MIN_CHUNK_SIZE == 0
and (limit is None or offset % limit == 0)
):
cls = _DirectDownloadIter
self._log[__name__].info(
"Starting direct file download in chunks of %d at %d, stride %d",
request_size,
offset,
stride,
)
else:
cls = _GenericDownloadIter
self._log[__name__].info(
"Starting indirect file download in chunks of %d at %d, stride %d",
request_size,
offset,
stride,
)
return cls(
self,
limit,
file=file,
dc_id=dc_id,
offset=offset,
stride=stride,
chunk_size=chunk_size,
request_size=request_size,
file_size=file_size,
msg_data=msg_data,
)
# endregion
# region Private methods
@staticmethod
def _get_thumb(thumbs, thumb):
        # Telegram seems to have changed the order and put `PhotoStrippedSize`
        # last even though it is the smallest (layer 116). Ensure the sizes
        # are sorted correctly with a custom key function.
def sort_thumbs(thumb):
if isinstance(thumb, types.PhotoStrippedSize):
return 1, len(thumb.bytes)
if isinstance(thumb, types.PhotoCachedSize):
return 1, len(thumb.bytes)
if isinstance(thumb, types.PhotoSize):
return 1, thumb.size
if isinstance(thumb, types.PhotoSizeProgressive):
return 1, max(thumb.sizes)
if isinstance(thumb, types.VideoSize):
return 2, thumb.size
            # Empty or invalid sizes sort first, i.e. with the lowest priority
return 0, 0
thumbs = list(sorted(thumbs, key=sort_thumbs))
for i in reversed(range(len(thumbs))):
            # :tl:`PhotoPathSize` is used for animated sticker previews, and the thumb is actually
            # an SVG path of the outline. Users expect thumbnails to be JPEG files, so pretend this
            # thumb size doesn't actually exist (#1655).
if isinstance(thumbs[i], types.PhotoPathSize):
thumbs.pop(i)
if thumb is None:
return thumbs[-1]
elif isinstance(thumb, int):
return thumbs[thumb]
elif isinstance(thumb, str):
return next((t for t in thumbs if t.type == thumb), None)
elif isinstance(
thumb,
(
types.PhotoSize,
types.PhotoCachedSize,
types.PhotoStrippedSize,
types.VideoSize,
),
):
return thumb
else:
return None
def _download_cached_photo_size(self: "TelegramClient", size, file):
# No need to download anything, simply write the bytes
if isinstance(size, types.PhotoStrippedSize):
data = utils.stripped_photo_to_jpg(size.bytes)
else:
data = size.bytes
if file is bytes:
return data
elif isinstance(file, str):
helpers.ensure_parent_dir_exists(file)
f = open(file, "wb")
else:
f = file
try:
f.write(data)
finally:
if isinstance(file, str):
f.close()
return file
async def _download_photo(
self: "TelegramClient", photo, file, date, thumb, progress_callback
):
"""Specialized version of .download_media() for photos"""
# Determine the photo and its largest size
if isinstance(photo, types.MessageMediaPhoto):
photo = photo.photo
if not isinstance(photo, types.Photo):
return
# Include video sizes here (but they may be None so provide an empty list)
size = self._get_thumb(photo.sizes + (photo.video_sizes or []), thumb)
if not size or isinstance(size, types.PhotoSizeEmpty):
return
if isinstance(size, types.VideoSize):
file = self._get_proper_filename(file, "video", ".mp4", date=date)
else:
file = self._get_proper_filename(file, "photo", ".jpg", date=date)
if isinstance(size, (types.PhotoCachedSize, types.PhotoStrippedSize)):
return self._download_cached_photo_size(size, file)
if isinstance(size, types.PhotoSizeProgressive):
file_size = max(size.sizes)
else:
file_size = size.size
result = await self.download_file(
types.InputPhotoFileLocation(
id=photo.id,
access_hash=photo.access_hash,
file_reference=photo.file_reference,
thumb_size=size.type,
),
file,
file_size=file_size,
progress_callback=progress_callback,
)
return result if file is bytes else file
@staticmethod
def _get_kind_and_names(attributes):
"""Gets kind and possible names for :tl:`DocumentAttribute`."""
kind = "document"
possible_names = []
for attr in attributes:
if isinstance(attr, types.DocumentAttributeFilename):
possible_names.insert(0, attr.file_name)
elif isinstance(attr, types.DocumentAttributeAudio):
kind = "audio"
if attr.performer and attr.title:
possible_names.append("{} - {}".format(attr.performer, attr.title))
elif attr.performer:
possible_names.append(attr.performer)
elif attr.title:
possible_names.append(attr.title)
elif attr.voice:
kind = "voice"
return kind, possible_names
async def _download_document(
self, document, file, date, thumb, progress_callback, msg_data
):
"""Specialized version of .download_media() for documents."""
if isinstance(document, types.MessageMediaDocument):
document = document.document
if not isinstance(document, types.Document):
return
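        # Without a thumb, download the document itself under a filename
        # derived from its attributes; with one, download the selected
        # thumbnail as a .jpg instead.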
if thumb is None:
kind, possible_names = self._get_kind_and_names(document.attributes)
file = self._get_proper_filename(
file,
kind,
utils.get_extension(document),
date=date,
possible_names=possible_names,
)
size = None
else:
file = self._get_proper_filename(file, "photo", ".jpg", date=date)
size = self._get_thumb(document.thumbs, thumb)
if isinstance(size, (types.PhotoCachedSize, types.PhotoStrippedSize)):
return self._download_cached_photo_size(size, file)
result = await self._download_file(
types.InputDocumentFileLocation(
id=document.id,
access_hash=document.access_hash,
file_reference=document.file_reference,
thumb_size=size.type if size else "",
),
file,
file_size=size.size if size else document.size,
progress_callback=progress_callback,
msg_data=msg_data,
)
return result if file is bytes else file
@classmethod
def _download_contact(cls, mm_contact, file):
"""
Specialized version of .download_media() for contacts.
Will make use of the vCard 4.0 format.
"""
first_name = mm_contact.first_name
last_name = mm_contact.last_name
phone_number = mm_contact.phone_number
# Remove these pesky characters
first_name = first_name.replace(";", "")
last_name = (last_name or "").replace(";", "")
result = (
"BEGIN:VCARD\n"
"VERSION:4.0\n"
"N:{f};{l};;;\n"
"FN:{f} {l}\n"
"TEL;TYPE=cell;VALUE=uri:tel:+{p}\n"
"END:VCARD\n".format(
f=first_name, l=last_name, p=phone_number
).encode(
"utf-8"
)
)
if file is bytes:
return result
elif isinstance(file, str):
file = cls._get_proper_filename(
file,
"contact",
".vcard",
possible_names=[first_name, phone_number, last_name],
)
f = open(file, "wb")
else:
f = file
try:
f.write(result)
finally:
# Only close the stream if we opened it
if isinstance(file, str):
f.close()
return file
@classmethod
async def _download_web_document(cls, web, file, progress_callback):
"""
Specialized version of .download_media() for web documents.
"""
if not aiohttp:
            raise ValueError(
                "Cannot download web documents without the aiohttp "
                "dependency; install it with: pip install aiohttp"
            )
# TODO Better way to get opened handles of files and auto-close
in_memory = file is bytes
if in_memory:
f = io.BytesIO()
elif isinstance(file, str):
kind, possible_names = cls._get_kind_and_names(web.attributes)
file = cls._get_proper_filename(
file, kind, utils.get_extension(web), possible_names=possible_names
)
f = open(file, "wb")
else:
f = file
try:
async with aiohttp.ClientSession() as session:
# TODO Use progress_callback; get content length from response
# https://github.com/telegramdesktop/tdesktop/blob/c7e773dd9aeba94e2be48c032edc9a78bb50234e/Telegram/SourceFiles/ui/images.cpp#L1318-L1319
async with session.get(web.url) as response:
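                    # Stream the body in 128 KB chunks so large documents
                    # never have to fit in memory at once.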
while True:
chunk = await response.content.read(128 * 1024)
if not chunk:
break
f.write(chunk)
finally:
if isinstance(file, str) or file is bytes:
f.close()
return f.getvalue() if in_memory else file
@staticmethod
def _get_proper_filename(file, kind, extension, date=None, possible_names=None):
"""Gets a proper filename for 'file', if this is a path.
'kind' should be the kind of the output file (photo, document...)
'extension' should be the extension to be added to the file if
the filename doesn't have any yet
'date' should be when this file was originally sent, if known
'possible_names' should be an ordered list of possible names
If no modification is made to the path, any existing file
will be overwritten.
If any modification is made to the path, this method will
ensure that no existing file will be overwritten.
"""
if isinstance(file, pathlib.Path):
file = str(file.absolute())
if file is not None and not isinstance(file, str):
# Probably a stream-like object, we cannot set a filename here
return file
if file is None:
file = ""
elif os.path.isfile(file):
# Make no modifications to valid existing paths
return file
if os.path.isdir(file) or not file:
try:
name = (
None
if possible_names is None
else next(x for x in possible_names if x)
)
except StopIteration:
name = None
if not name:
if not date:
date = datetime.datetime.now()
name = "{}_{}-{:02}-{:02}_{:02}-{:02}-{:02}".format(
kind,
date.year,
date.month,
date.day,
date.hour,
date.minute,
date.second,
)
file = os.path.join(file, name)
directory, name = os.path.split(file)
name, ext = os.path.splitext(name)
if not ext:
ext = extension
result = os.path.join(directory, name + ext)
if not os.path.isfile(result):
return result
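        # The derived name already exists; append " (1)", " (2)", ... until
        # a free filename is found, so nothing gets overwritten.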
i = 1
while True:
result = os.path.join(directory, "{} ({}){}".format(name, i, ext))
if not os.path.isfile(result):
return result
i += 1
# endregion
/FuncsForSPO-0.0.3.10.tar.gz/FuncsForSPO-0.0.3.10/README.md
# FuncsForSPO - Functions for Selenium; Python; Openpyxl; SQLite3
## pip install FuncsForSPO
### Before anything else, install Psutil to avoid errors (pip install psutil).
Here you will find functions written to speed up development with the technologies below:
* Selenium
    * Several functions (with pt-br names) that will help you build your Selenium projects faster
* Openpyxl (more functions still under development)
    * A few functions that take the heavy lifting out of working with openpyxl
* Python
    * Functions for Python itself, such as deleting several files at once, creating them, and checking whether a program/executable is open, among other features
## Installation
**To avoid errors, please install psutil first (pip install psutil).**
Run pip install FuncsForSPO in your virtual environment and you are done!
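A quick example (illustrative only: the import path and call signatures below are assumptions based on the changelog at the end of this README, so check the package source for the real API):

```python
# Hypothetical usage; module and function names are taken from the changelog
# below (functions_for_py, pega_caminho_atual, pega_caminho_atual_e_concatena_novo_dir).
from FuncsForSPO.functions_for_py import (
    pega_caminho_atual,                       # returns the current working path
    pega_caminho_atual_e_concatena_novo_dir,  # joins a new directory onto it
)

print(pega_caminho_atual())
print(pega_caminho_atual_e_concatena_novo_dir('output'))
```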
Powered By https://github.com/gabriellopesdesouza2002
# Current Version -> 0.0.3.10
version==0.0.3.10 -> removed the function that returned a tuple in reverse and added reverse_iter, which returns any iterable in reverse; added a function that returns the absolute paths of all files and/or directories under a relative directory path (arquivos_com_caminho_absoluto_do_arquivo); also added a function that downloads files from the internet (download_file); improved docstrings
version==0.0.3.9 -> added a function that returns only the digits of a string, using re
version==0.0.3.8 -> added 2 functions: 1 -> returns the absolute path of any file; 2 -> removes any file whose name contains (1), (2), (3), etc. from any folder
version==0.0.3.7 -> added a function that returns a random user-agent
version==0.0.3.6 -> improved the pega_id function
version==0.0.3.5 -> added a function that returns a tuple in reverse, (1,2,3,4,5,6,7,8,9,0) -> (0,9,8,7,6,5,4,3,2,1); added a wget exception; fixes: 1 - when logging, any object sent is now automatically converted to a string; removed the prints from the function that fetches columns in openpyxl
version==0.0.3.4 -> webdriver-manager is now installed automatically as a dependency
version==0.0.3.3 -> added several openpyxl functions *TDDs still needed
version==0.0.3.2 -> added 2 functions to functions_for_py (pega_caminho_atual; pega_caminho_atual_e_concatena_novo_dir)
version==0.0.3.1 -> added an exception for Gmail login errors; added a function to get the source code of a Selenium WebElement
version==0.0.3.0 -> added functions for SQLite
version==0.0.2.8 -> improved the imports of the Selenium exceptions
version==0.0.2.7 -> fixed an error when sending the openpyxl functions
version==0.0.2.6 -> fixed an error when using the "faz_log()" function
/NREL_farms-1.0.5-py3-none-any.whl/farms/disc.py
import numpy as np
from farms import SOLAR_CONSTANT
def disc(ghi, sza, doy, pressure=1013.25, sza_lim=87):
"""Estimate DNI from GHI using the DISC model.
    *Warning: should only be used for cloudy FARMS data.*
The DISC algorithm converts global horizontal irradiance to direct
normal irradiance through empirical relationships between the global
and direct clearness indices.
Parameters
----------
ghi : np.ndarray
Global horizontal irradiance in W/m2.
sza : np.ndarray
Solar zenith angle in degrees.
doy : np.ndarray
Day of year (array of integers).
pressure : np.ndarray
Pressure in mbar (same as hPa).
sza_lim : float | int
Upper limit for solar zenith angle in degrees. SZA values greater than
this will be truncated at this value. 87 deg chosen to simulate the
FORTRAN code in use by SRRL (from Perez).
Returns
-------
DNI : np.ndarray
Estimated direct normal irradiance in W/m2.
"""
A = np.zeros_like(ghi)
B = np.zeros_like(ghi)
C = np.zeros_like(ghi)
day_angle = 2. * np.pi * (doy - 1) / 365
re_var = (1.00011 + 0.034221 * np.cos(day_angle)
+ 0.00128 * np.sin(day_angle)
+ 0.000719 * np.cos(2. * day_angle)
+ 7.7E-5 * np.sin(2. * day_angle))
if len(re_var.shape) < len(sza.shape):
re_var = np.tile(re_var.reshape((len(re_var), 1)), sza.shape[1])
I0 = re_var * SOLAR_CONSTANT
I0h = I0 * np.cos(np.radians(sza))
Ztemp = np.copy(sza)
Ztemp[Ztemp > sza_lim] = sza_lim
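    # Relative air mass from the truncated zenith angle (a Kasten-style fit),
    # corrected for station pressure (mbar * 100 -> Pa, normalized by 101325).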
AM = (1. / (np.cos(np.radians(Ztemp))
+ 0.15 * (np.power((93.885 - Ztemp), -1.253)))
* 100 * pressure / 101325)
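    # Global clearness index: measured GHI over the extraterrestrial
    # horizontal irradiance, clamped at zero for non-physical values.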
Kt = ghi / I0h
Kt[Kt < 0] = 0
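    # Empirical DISC polynomial coefficients, fit separately for clearer
    # (Kt > 0.6) and cloudier (Kt <= 0.6) skies.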
A[Kt > 0.6] = (-5.743 + 21.77 * Kt[Kt > 0.6]
- 27.49 * np.power(Kt[Kt > 0.6], 2)
+ 11.56 * np.power(Kt[Kt > 0.6], 3))
B[Kt > 0.6] = (41.4 - 118.5 * Kt[Kt > 0.6]
+ 66.05 * np.power(Kt[Kt > 0.6], 2)
+ 31.9 * np.power(Kt[Kt > 0.6], 3))
C[Kt > 0.6] = (-47.01 + 184.2 * Kt[Kt > 0.6]
- 222. * np.power(Kt[Kt > 0.6], 2)
+ 73.81 * np.power(Kt[Kt > 0.6], 3))
A[Kt <= 0.6] = (0.512 - 1.56 * Kt[Kt <= 0.6]
+ 2.286 * np.power(Kt[Kt <= 0.6], 2)
- 2.222 * np.power(Kt[Kt <= 0.6], 3))
B[Kt <= 0.6] = 0.37 + 0.962 * Kt[Kt <= 0.6]
C[Kt <= 0.6] = (-0.28 + 0.932 * Kt[Kt <= 0.6]
- 2.048 * np.power(Kt[Kt <= 0.6], 2))
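    # delKn is the empirically modeled departure of the direct-beam clearness
    # index from its clear-sky value Knc; Kn = Knc - delKn scales I0 into DNI.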
delKn = A + B * np.exp(C * AM)
Knc = (0.866 - 0.122 * AM + 0.0121 * np.power(AM, 2)
- 0.000653 * np.power(AM, 3) + 0.000014 * np.power(AM, 4))
Kn = Knc - delKn
DNI = (Kn) * I0
DNI[np.logical_or.reduce((sza >= sza_lim, ghi < 1, DNI < 0))] = 0
    return DNI
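# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the upstream module): the values below are
# made up purely to show the call signature under cloudy-sky conditions.
if __name__ == '__main__':
    ghi = np.array([250.0, 480.0])   # global horizontal irradiance, W/m2
    sza = np.array([60.0, 35.0])     # solar zenith angle, degrees
    doy = np.array([172, 172])       # day of year
    print(disc(ghi, sza, doy))       # estimated DNI, W/m2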
/FiPy-3.4.4.tar.gz/FiPy-3.4.4/fipy/viewers/vtkViewer/vtkViewer.py
from __future__ import unicode_literals
__docformat__ = 'restructuredtext'
__all__ = ["VTKViewer"]
from future.utils import text_to_native_str
__all__ = [text_to_native_str(n) for n in __all__]
from fipy.viewers.viewer import AbstractViewer
from fipy.tests.doctestPlus import register_skipper
def _checkForTVTK():
hasTVTK = True
try:
try:
from tvtk.api import tvtk
except ImportError as e:
from enthought.tvtk.api import tvtk
# With Python 2.7 end-of-life, bitrot has set in. While tvtk is
# available from conda-forge, it can raise an exception when it
# internally does this import.
from collections.abc import Sequence
except Exception:
hasTVTK = False
return hasTVTK
register_skipper(flag="TVTK",
test=_checkForTVTK,
why="the `tvtk` package cannot be imported")
class VTKViewer(AbstractViewer):
"""Renders `_MeshVariable` data in VTK format
"""
def __init__(self, vars, title=None, limits={}, **kwlimits):
"""Creates a `VTKViewer`
Parameters
----------
vars : ~fipy.variables.cellVariable.CellVariable or ~fipy.variables.faceVariable.FaceVariable or list
the `MeshVariable` objects to display.
title : str, optional
displayed at the top of the `Viewer` window
limits : dict, optional
a (deprecated) alternative to limit keyword arguments
xmin, xmax, ymin, ymax, zmin, zmax, datamin, datamax : float, optional
displayed range of data. Any limit set to
a (default) value of `None` will autoscale.
"""
kwlimits.update(limits)
AbstractViewer.__init__(self, vars=vars, title=title, **kwlimits)
mesh = self.vars[0].mesh
self.dataset = self._makeDataSet(mesh)
data = self._data
for var in self.vars:
name, rank, value = self._nameRankValue(var)
i = data.add_array(value)
data.get_array(i).name = name
if rank == 0:
data.set_active_scalars(name)
elif rank == 1:
data.set_active_vectors(name)
else:
data.set_active_tensors(name)
def _makeDataSet(self, mesh):
pass
@staticmethod
def _nameRankValue(var):
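        # Build a display name (falling back to "<class> #<id>" for unnamed
        # variables), the variable's rank, and its values converted to VTK's
        # 3D layout.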
name = var.name or "%s #%d" % (var.__class__.__name__, id(var))
rank = var.rank
value = var.mesh._toVTK3D(var.value, rank=rank)
return (name, rank, value)
def plot(self, filename=None):
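        # Refresh the stored VTK arrays from the variables' current values,
        # then serialize the whole dataset to the given filename.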
data = self._data
from fipy.tools import numerix
for var in self.vars:
name, rank, value = self._nameRankValue(var)
if not (numerix.array(value.shape) == 0).any():
data.get_array(name).to_array()[:] = value
try:
from tvtk.misc import write_data
except ImportError as e:
from enthought.tvtk.misc import write_data
write_data(self.dataset, filename)
def _getSuitableVars(self, vars):
if type(vars) not in [type([]), type(())]:
vars = [vars]
cls = self._variableClass
vars = [var for var in vars if isinstance(var, cls)]
if len(vars) == 0:
raise TypeError("%s can only display %s" % (self.__class__.__name__, cls.__name__))
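        # A single VTK dataset describes one mesh, so silently drop any
        # variable defined on a different mesh than the first one.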
        vars = [var for var in vars if var.mesh == vars[0].mesh]
return vars
if __name__ == "__main__":
# import fipy.tests.doctestPlus
# fipy.tests.doctestPlus.execButNoTest()
from fipy import *
m = Grid3D(nx=2, ny=1, nz=1)
# m = Grid3D(nx=3, ny=4, nz=5)
x, y, z = m.cellCenters
v1 = CellVariable(mesh=m, value=x*y*z, name="x*y*z")
v2 = CellVariable(mesh=m, value=x*y*y, name="x*y*y")
v3 = v1.grad
v3.name = "v1.grad"
v4 = v1.faceGrad
v4.name = "v1.faceGrad"
v5 = v1.harmonicFaceValue
v5.name = "v1.harmonicFaceValue"
v6 = v1.arithmeticFaceValue
v6.name = "v1.arithmeticFaceValue"
# vw = VTKViewer(vars=(v1, v2))
# vw = VTKViewer(vars=(v1, v2, v3)) #, v4, v5, v6))
vw = VTKViewer(vars=(v4, v5, v6))
vw.plot(filename="face.vtk")
# m = Grid2D(nx=1, ny=2)
# x, y = m.cellCenters
# v1 = CellVariable(mesh=m, value=x*y, name="v1")
# v2 = CellVariable(mesh=m, value=x*x) #, name="v2")
# vw = VTKViewer(vars=(v1, v2))
# m = Grid1D(nx=10)
# x, = m.cellCenters
# v1 = CellVariable(mesh=m, value=x*x, name="v1")
# v2 = CellVariable(mesh=m, value=x) #, name="v2")
    # vw = VTKViewer(vars=(v1, v2))
/NSPython-0.3.zip/NSPython-0.3/nspython/foundation.py
from nspython import *
from nspython import _ffi
_ = load('Foundation')
class NSString(NSObject):
def __str__(self):
return _ffi.string(self.UTF8String())
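# Shorthand mirroring Objective-C's @"..." string literal: wraps a Python
# unicode string as an NSString via its UTF-8 bytes.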
at = lambda s: NSString.stringWithUTF8String_(_ffi.new('char[]', s.encode('utf8')))
# classes
class NSAffineTransform(NSObject): pass
class NSArray(NSObject): pass
class NSAssertionHandler(NSObject): pass
class NSAttributedString(NSObject): pass
class NSAutoreleasePool(NSObject): pass
class NSBundle(NSObject): pass
class NSCache(NSObject): pass
class NSCachedURLResponse(NSObject): pass
class NSCalendar(NSObject): pass
class NSCharacterSet(NSObject): pass
class NSClassDescription(NSObject): pass
class NSCoder(NSObject): pass
class NSConditionLock(NSObject): pass
class NSConnection(NSObject): pass
class NSData(NSObject): pass
class NSDate(NSObject): pass
class NSDateComponents(NSObject): pass
class NSDecimalNumberHandler(NSObject): pass
class NSDictionary(NSObject): pass
class NSDistantObjectRequest(NSObject): pass
class NSDistributedLock(NSObject): pass
class NSEnumerator(NSObject): pass
class NSError(NSObject): pass
class NSException(NSObject): pass
class NSExpression(NSObject): pass
class NSFileHandle(NSObject): pass
class NSFileManager(NSObject): pass
class NSFormatter(NSObject): pass
class NSGarbageCollector(NSObject): pass
class NSHTTPCookie(NSObject): pass
class NSHTTPCookieStorage(NSObject): pass
class NSHashTable(NSObject): pass
class NSHost(NSObject): pass
class NSIndexPath(NSObject): pass
class NSIndexSet(NSObject): pass
class NSInvocation(NSObject): pass
class NSLocale(NSObject): pass
class NSLock(NSObject): pass
class NSMapTable(NSObject): pass
class NSMetadataItem(NSObject): pass
class NSMetadataQuery(NSObject): pass
class NSMetadataQueryAttributeValueTuple(NSObject): pass
class NSMetadataQueryResultGroup(NSObject): pass
class NSMethodSignature(NSObject): pass
class NSNetService(NSObject): pass
class NSNetServiceBrowser(NSObject): pass
class NSNotification(NSObject): pass
class NSNotificationCenter(NSObject): pass
class NSNotificationQueue(NSObject): pass
class NSNull(NSObject): pass
class NSOperation(NSObject): pass
class NSOperationQueue(NSObject): pass
class NSOrthography(NSObject): pass
class NSPipe(NSObject): pass
class NSPointerArray(NSObject): pass
class NSPointerFunctions(NSObject): pass
class NSPort(NSObject): pass
class NSPortMessage(NSObject): pass
class NSPortNameServer(NSObject): pass
class NSPredicate(NSObject): pass
class NSProcessInfo(NSObject): pass
class NSRecursiveLock(NSObject): pass
class NSRunLoop(NSObject): pass
class NSScanner(NSObject): pass
class NSSet(NSObject): pass
class NSSortDescriptor(NSObject): pass
class NSSpellServer(NSObject): pass
class NSStream(NSObject): pass
# class NSString(NSObject): pass
class NSTask(NSObject): pass
class NSTextCheckingResult(NSObject): pass
class NSThread(NSObject): pass
class NSTimeZone(NSObject): pass
class NSTimer(NSObject): pass
class NSURL(NSObject): pass
class NSURLAuthenticationChallenge(NSObject): pass
class NSURLCache(NSObject): pass
class NSURLConnection(NSObject): pass
class NSURLCredential(NSObject): pass
class NSURLCredentialStorage(NSObject): pass
class NSURLDownload(NSObject): pass
class NSURLProtectionSpace(NSObject): pass
class NSURLProtocol(NSObject): pass
class NSURLRequest(NSObject): pass
class NSURLResponse(NSObject): pass
class NSUndoManager(NSObject): pass
class NSUserDefaults(NSObject): pass
class NSValue(NSObject): pass
class NSValueTransformer(NSObject): pass
class NSXMLNode(NSObject): pass
class NSXMLParser(NSObject): pass
# class NSDistantObject(NSProxy): pass
# class NSProtocolChecker(NSProxy): pass
class NSMutableData(NSData): pass
class NSCalendarDate(NSDate): pass
class NSNumber(NSValue): pass
class NSXMLDTD(NSXMLNode): pass
class NSXMLDTDNode(NSXMLNode): pass
class NSXMLDocument(NSXMLNode): pass
class NSXMLElement(NSXMLNode): pass
class NSMutableAttributedString(NSAttributedString): pass
class NSMutableCharacterSet(NSCharacterSet): pass
class NSMutableString(NSString): pass
class NSDateFormatter(NSFormatter): pass
class NSNumberFormatter(NSFormatter): pass
class NSMutableArray(NSArray): pass
class NSMutableDictionary(NSDictionary): pass
class NSDirectoryEnumerator(NSEnumerator): pass
class NSMutableIndexSet(NSIndexSet): pass
class NSMutableSet(NSSet): pass
class NSComparisonPredicate(NSPredicate): pass
class NSCompoundPredicate(NSPredicate): pass
class NSInputStream(NSStream): pass
class NSOutputStream(NSStream): pass
class NSMutableURLRequest(NSURLRequest): pass
class NSHTTPURLResponse(NSURLResponse): pass
class NSMachPort(NSPort): pass
class NSMessagePort(NSPort): pass
class NSSocketPort(NSPort): pass
class NSMachBootstrapServer(NSPortNameServer): pass
class NSMessagePortNameServer(NSPortNameServer): pass
class NSSocketPortNameServer(NSPortNameServer): pass
class NSBlockOperation(NSOperation): pass
class NSInvocationOperation(NSOperation): pass
class NSDistributedNotificationCenter(NSNotificationCenter): pass
class NSArchiver(NSCoder): pass
class NSKeyedArchiver(NSCoder): pass
class NSKeyedUnarchiver(NSCoder): pass
class NSPortCoder(NSCoder): pass
class NSUnarchiver(NSCoder): pass
class NSPurgeableData(NSMutableData): pass
class NSDecimalNumber(NSNumber): pass
class NSCountedSet(NSMutableSet): pass
# constants
_ffi.cdef('''
id NSAMPMDesignation;
id NSAppleEventManagerWillProcessFirstEventNotification;
id NSAppleScriptErrorAppName;
id NSAppleScriptErrorBriefMessage;
id NSAppleScriptErrorMessage;
id NSAppleScriptErrorNumber;
id NSAppleScriptErrorRange;
id NSArgumentDomain;
id NSAssertionHandlerKey;
id NSAverageKeyValueOperator;
id NSBuddhistCalendar;
id NSBundleDidLoadNotification;
id NSCharacterConversionException;
id NSChineseCalendar;
id NSClassDescriptionNeededForClassNotification;
id NSCocoaErrorDomain;
id NSConnectionDidDieNotification;
id NSConnectionDidInitializeNotification;
id NSConnectionReplyMode;
id NSCountKeyValueOperator;
id NSCurrencySymbol;
id NSCurrentLocaleDidChangeNotification;
id NSDateFormatString;
id NSDateTimeOrdering;
id NSDecimalDigits;
id NSDecimalNumberDivideByZeroException;
id NSDecimalNumberExactnessException;
id NSDecimalNumberOverflowException;
id NSDecimalNumberUnderflowException;
id NSDecimalSeparator;
id NSDefaultRunLoopMode;
id NSDestinationInvalidException;
id NSDidBecomeSingleThreadedNotification;
id NSDistinctUnionOfArraysKeyValueOperator;
id NSDistinctUnionOfObjectsKeyValueOperator;
id NSDistinctUnionOfSetsKeyValueOperator;
id NSEarlierTimeDesignations;
id NSErrorFailingURLStringKey;
id NSFTPPropertyActiveTransferModeKey;
id NSFTPPropertyFTPProxy;
id NSFTPPropertyFileOffsetKey;
id NSFTPPropertyUserLoginKey;
id NSFTPPropertyUserPasswordKey;
id NSFailedAuthenticationException;
id NSFileAppendOnly;
id NSFileBusy;
id NSFileCreationDate;
id NSFileDeviceIdentifier;
id NSFileExtensionHidden;
id NSFileGroupOwnerAccountID;
id NSFileGroupOwnerAccountName;
id NSFileHFSCreatorCode;
id NSFileHFSTypeCode;
id NSFileHandleConnectionAcceptedNotification;
id NSFileHandleDataAvailableNotification;
id NSFileHandleNotificationDataItem;
id NSFileHandleNotificationFileHandleItem;
id NSFileHandleNotificationMonitorModes;
id NSFileHandleOperationException;
id NSFileHandleReadCompletionNotification;
id NSFileHandleReadToEndOfFileCompletionNotification;
id NSFileImmutable;
id NSFileModificationDate;
id NSFileOwnerAccountID;
id NSFileOwnerAccountName;
id NSFilePathErrorKey;
id NSFilePosixPermissions;
id NSFileReferenceCount;
id NSFileSize;
id NSFileSystemFileNumber;
id NSFileSystemFreeNodes;
id NSFileSystemFreeSize;
id NSFileSystemNodes;
id NSFileSystemNumber;
id NSFileSystemSize;
id NSFileType;
id NSFileTypeBlockSpecial;
id NSFileTypeCharacterSpecial;
id NSFileTypeDirectory;
id NSFileTypeRegular;
id NSFileTypeSocket;
id NSFileTypeSymbolicLink;
id NSFileTypeUnknown;
id NSGenericException;
id NSGlobalDomain;
id NSGrammarCorrections;
id NSGrammarRange;
id NSGrammarUserDescription;
id NSGregorianCalendar;
id NSHTTPCookieComment;
id NSHTTPCookieCommentURL;
id NSHTTPCookieDiscard;
id NSHTTPCookieDomain;
id NSHTTPCookieExpires;
id NSHTTPCookieManagerAcceptPolicyChangedNotification;
id NSHTTPCookieManagerCookiesChangedNotification;
id NSHTTPCookieMaximumAge;
id NSHTTPCookieName;
id NSHTTPCookieOriginURL;
id NSHTTPCookiePath;
id NSHTTPCookiePort;
id NSHTTPCookieSecure;
id NSHTTPCookieValue;
id NSHTTPCookieVersion;
id NSHTTPPropertyErrorPageDataKey;
id NSHTTPPropertyHTTPProxy;
id NSHTTPPropertyRedirectionHeadersKey;
id NSHTTPPropertyServerHTTPVersionKey;
id NSHTTPPropertyStatusCodeKey;
id NSHTTPPropertyStatusReasonKey;
id NSHebrewCalendar;
id NSHelpAnchorErrorKey;
id NSHourNameDesignations;
id NSISO8601Calendar;
id NSInconsistentArchiveException;
id NSIndianCalendar;
id NSInternalInconsistencyException;
id NSInternationalCurrencyString;
id NSInvalidArchiveOperationException;
id NSInvalidArgumentException;
id NSInvalidReceivePortException;
id NSInvalidSendPortException;
id NSInvalidUnarchiveOperationException;
id NSInvocationOperationCancelledException;
id NSInvocationOperationVoidResultException;
id NSIsNilTransformerName;
id NSIsNotNilTransformerName;
id NSIslamicCalendar;
id NSIslamicCivilCalendar;
id NSJapaneseCalendar;
id NSKeyValueChangeIndexesKey;
id NSKeyValueChangeKindKey;
id NSKeyValueChangeNewKey;
id NSKeyValueChangeNotificationIsPriorKey;
id NSKeyValueChangeOldKey;
id NSKeyedUnarchiveFromDataTransformerName;
id NSLaterTimeDesignations;
id NSLoadedClasses;
id NSLocalNotificationCenterType;
id NSLocaleAlternateQuotationBeginDelimiterKey;
id NSLocaleAlternateQuotationEndDelimiterKey;
id NSLocaleCalendar;
id NSLocaleCollationIdentifier;
id NSLocaleCollatorIdentifier;
id NSLocaleCountryCode;
id NSLocaleCurrencyCode;
id NSLocaleCurrencySymbol;
id NSLocaleDecimalSeparator;
id NSLocaleExemplarCharacterSet;
id NSLocaleGroupingSeparator;
id NSLocaleIdentifier;
id NSLocaleLanguageCode;
id NSLocaleMeasurementSystem;
id NSLocaleQuotationBeginDelimiterKey;
id NSLocaleQuotationEndDelimiterKey;
id NSLocaleScriptCode;
id NSLocaleUsesMetricSystem;
id NSLocaleVariantCode;
id NSLocalizedDescriptionKey;
id NSLocalizedFailureReasonErrorKey;
id NSLocalizedRecoveryOptionsErrorKey;
id NSLocalizedRecoverySuggestionErrorKey;
id NSMachErrorDomain;
id NSMallocException;
id NSMaximumKeyValueOperator;
id NSMetadataQueryDidFinishGatheringNotification;
id NSMetadataQueryDidStartGatheringNotification;
id NSMetadataQueryDidUpdateNotification;
id NSMetadataQueryGatheringProgressNotification;
id NSMetadataQueryLocalComputerScope;
id NSMetadataQueryNetworkScope;
id NSMetadataQueryResultContentRelevanceAttribute;
id NSMetadataQueryUserHomeScope;
id NSMinimumKeyValueOperator;
id NSMonthNameArray;
id NSNegateBooleanTransformerName;
id NSNegativeCurrencyFormatString;
id NSNetServicesErrorCode;
id NSNetServicesErrorDomain;
id NSNextDayDesignations;
id NSNextNextDayDesignations;
id NSOSStatusErrorDomain;
id NSObjectInaccessibleException;
id NSObjectNotAvailableException;
id NSOldStyleException;
id NSOperationNotSupportedForKeyException;
id NSPOSIXErrorDomain;
id NSParseErrorException;
id NSPersianCalendar;
id NSPortDidBecomeInvalidNotification;
id NSPortReceiveException;
id NSPortSendException;
id NSPortTimeoutException;
id NSPositiveCurrencyFormatString;
id NSPriorDayDesignations;
id NSRangeException;
id NSRecoveryAttempterErrorKey;
id NSRegistrationDomain;
id NSRepublicOfChinaCalendar;
id NSRunLoopCommonModes;
id NSShortDateFormatString;
id NSShortMonthNameArray;
id NSShortTimeDateFormatString;
id NSShortWeekDayNameArray;
id NSStreamDataWrittenToMemoryStreamKey;
id NSStreamFileCurrentOffsetKey;
id NSStreamSOCKSErrorDomain;
id NSStreamSOCKSProxyConfigurationKey;
id NSStreamSOCKSProxyHostKey;
id NSStreamSOCKSProxyPasswordKey;
id NSStreamSOCKSProxyPortKey;
id NSStreamSOCKSProxyUserKey;
id NSStreamSOCKSProxyVersion4;
id NSStreamSOCKSProxyVersion5;
id NSStreamSOCKSProxyVersionKey;
id NSStreamSocketSSLErrorDomain;
id NSStreamSocketSecurityLevelKey;
id NSStreamSocketSecurityLevelNegotiatedSSL;
id NSStreamSocketSecurityLevelNone;
id NSStreamSocketSecurityLevelSSLv2;
id NSStreamSocketSecurityLevelSSLv3;
id NSStreamSocketSecurityLevelTLSv1;
id NSStringEncodingErrorKey;
id NSSumKeyValueOperator;
id NSSystemClockDidChangeNotification;
id NSSystemTimeZoneDidChangeNotification;
id NSTaskDidTerminateNotification;
id NSTextCheckingCityKey;
id NSTextCheckingCountryKey;
id NSTextCheckingJobTitleKey;
id NSTextCheckingNameKey;
id NSTextCheckingOrganizationKey;
id NSTextCheckingPhoneKey;
id NSTextCheckingStateKey;
id NSTextCheckingStreetKey;
id NSTextCheckingZIPKey;
id NSThisDayDesignations;
id NSThousandsSeparator;
id NSThreadWillExitNotification;
id NSTimeDateFormatString;
id NSTimeFormatString;
id NSURLAttributeModificationDateKey;
id NSURLAuthenticationMethodClientCertificate;
id NSURLAuthenticationMethodDefault;
id NSURLAuthenticationMethodHTMLForm;
id NSURLAuthenticationMethodHTTPBasic;
id NSURLAuthenticationMethodHTTPDigest;
id NSURLAuthenticationMethodNTLM;
id NSURLAuthenticationMethodNegotiate;
id NSURLAuthenticationMethodServerTrust;
id NSURLContentAccessDateKey;
id NSURLContentModificationDateKey;
id NSURLCreationDateKey;
id NSURLCredentialStorageChangedNotification;
id NSURLCustomIconKey;
id NSURLEffectiveIconKey;
id NSURLErrorDomain;
id NSURLErrorFailingURLErrorKey;
id NSURLErrorFailingURLPeerTrustErrorKey;
id NSURLErrorFailingURLStringErrorKey;
id NSURLErrorKey;
id NSURLFileAllocatedSizeKey;
id NSURLFileScheme;
id NSURLFileSizeKey;
id NSURLHasHiddenExtensionKey;
id NSURLIsAliasFileKey;
id NSURLIsDirectoryKey;
id NSURLIsHiddenKey;
id NSURLIsPackageKey;
id NSURLIsRegularFileKey;
id NSURLIsSymbolicLinkKey;
id NSURLIsSystemImmutableKey;
id NSURLIsUserImmutableKey;
id NSURLIsVolumeKey;
id NSURLLabelColorKey;
id NSURLLabelNumberKey;
id NSURLLinkCountKey;
id NSURLLocalizedLabelKey;
id NSURLLocalizedNameKey;
id NSURLLocalizedTypeDescriptionKey;
id NSURLNameKey;
id NSURLParentDirectoryURLKey;
id NSURLProtectionSpaceFTP;
id NSURLProtectionSpaceFTPProxy;
id NSURLProtectionSpaceHTTP;
id NSURLProtectionSpaceHTTPProxy;
id NSURLProtectionSpaceHTTPS;
id NSURLProtectionSpaceHTTPSProxy;
id NSURLProtectionSpaceSOCKSProxy;
id NSURLTypeIdentifierKey;
id NSURLVolumeAvailableCapacityKey;
id NSURLVolumeIsJournalingKey;
id NSURLVolumeLocalizedFormatDescriptionKey;
id NSURLVolumeResourceCountKey;
id NSURLVolumeSupportsCasePreservedNamesKey;
id NSURLVolumeSupportsCaseSensitiveNamesKey;
id NSURLVolumeSupportsHardLinksKey;
id NSURLVolumeSupportsJournalingKey;
id NSURLVolumeSupportsPersistentIDsKey;
id NSURLVolumeSupportsSparseFilesKey;
id NSURLVolumeSupportsSymbolicLinksKey;
id NSURLVolumeSupportsZeroRunsKey;
id NSURLVolumeTotalCapacityKey;
id NSURLVolumeURLKey;
id NSUnarchiveFromDataTransformerName;
id NSUndefinedKeyException;
id NSUnderlyingErrorKey;
id NSUndoManagerCheckpointNotification;
id NSUndoManagerDidOpenUndoGroupNotification;
id NSUndoManagerDidRedoChangeNotification;
id NSUndoManagerDidUndoChangeNotification;
id NSUndoManagerWillCloseUndoGroupNotification;
id NSUndoManagerWillRedoChangeNotification;
id NSUndoManagerWillUndoChangeNotification;
id NSUnionOfArraysKeyValueOperator;
id NSUnionOfObjectsKeyValueOperator;
id NSUnionOfSetsKeyValueOperator;
id NSUserDefaultsDidChangeNotification;
id NSWeekDayNameArray;
id NSWillBecomeMultiThreadedNotification;
id NSXMLParserErrorDomain;
id NSYearMonthWeekDesignations;
''')
NSAMPMDesignation = _.NSAMPMDesignation
NSAppleEventManagerWillProcessFirstEventNotification = _.NSAppleEventManagerWillProcessFirstEventNotification
NSAppleEventTimeOutDefault = -1.0
NSAppleEventTimeOutNone = -2.0
NSAppleScriptErrorAppName = _.NSAppleScriptErrorAppName
NSAppleScriptErrorBriefMessage = _.NSAppleScriptErrorBriefMessage
NSAppleScriptErrorMessage = _.NSAppleScriptErrorMessage
NSAppleScriptErrorNumber = _.NSAppleScriptErrorNumber
NSAppleScriptErrorRange = _.NSAppleScriptErrorRange
NSArgumentDomain = _.NSArgumentDomain
NSAssertionHandlerKey = _.NSAssertionHandlerKey
NSAverageKeyValueOperator = _.NSAverageKeyValueOperator
NSBuddhistCalendar = _.NSBuddhistCalendar
NSBundleDidLoadNotification = _.NSBundleDidLoadNotification
NSCharacterConversionException = _.NSCharacterConversionException
NSChineseCalendar = _.NSChineseCalendar
NSClassDescriptionNeededForClassNotification = _.NSClassDescriptionNeededForClassNotification
NSCocoaErrorDomain = _.NSCocoaErrorDomain
NSConnectionDidDieNotification = _.NSConnectionDidDieNotification
NSConnectionDidInitializeNotification = _.NSConnectionDidInitializeNotification
NSConnectionReplyMode = _.NSConnectionReplyMode
NSCountKeyValueOperator = _.NSCountKeyValueOperator
NSCurrencySymbol = _.NSCurrencySymbol
NSCurrentLocaleDidChangeNotification = _.NSCurrentLocaleDidChangeNotification
NSDateFormatString = _.NSDateFormatString
NSDateTimeOrdering = _.NSDateTimeOrdering
NSDeallocateZombies = False
NSDebugEnabled = False
NSDecimalDigits = _.NSDecimalDigits
NSDecimalNumberDivideByZeroException = _.NSDecimalNumberDivideByZeroException
NSDecimalNumberExactnessException = _.NSDecimalNumberExactnessException
NSDecimalNumberOverflowException = _.NSDecimalNumberOverflowException
NSDecimalNumberUnderflowException = _.NSDecimalNumberUnderflowException
NSDecimalSeparator = _.NSDecimalSeparator
NSDefaultRunLoopMode = _.NSDefaultRunLoopMode
NSDestinationInvalidException = _.NSDestinationInvalidException
NSDidBecomeSingleThreadedNotification = _.NSDidBecomeSingleThreadedNotification
NSDistinctUnionOfArraysKeyValueOperator = _.NSDistinctUnionOfArraysKeyValueOperator
NSDistinctUnionOfObjectsKeyValueOperator = _.NSDistinctUnionOfObjectsKeyValueOperator
NSDistinctUnionOfSetsKeyValueOperator = _.NSDistinctUnionOfSetsKeyValueOperator
NSEarlierTimeDesignations = _.NSEarlierTimeDesignations
NSErrorFailingURLStringKey = _.NSErrorFailingURLStringKey
NSFTPPropertyActiveTransferModeKey = _.NSFTPPropertyActiveTransferModeKey
NSFTPPropertyFTPProxy = _.NSFTPPropertyFTPProxy
NSFTPPropertyFileOffsetKey = _.NSFTPPropertyFileOffsetKey
NSFTPPropertyUserLoginKey = _.NSFTPPropertyUserLoginKey
NSFTPPropertyUserPasswordKey = _.NSFTPPropertyUserPasswordKey
NSFailedAuthenticationException = _.NSFailedAuthenticationException
NSFileAppendOnly = _.NSFileAppendOnly
NSFileBusy = _.NSFileBusy
NSFileCreationDate = _.NSFileCreationDate
NSFileDeviceIdentifier = _.NSFileDeviceIdentifier
NSFileExtensionHidden = _.NSFileExtensionHidden
NSFileGroupOwnerAccountID = _.NSFileGroupOwnerAccountID
NSFileGroupOwnerAccountName = _.NSFileGroupOwnerAccountName
NSFileHFSCreatorCode = _.NSFileHFSCreatorCode
NSFileHFSTypeCode = _.NSFileHFSTypeCode
NSFileHandleConnectionAcceptedNotification = _.NSFileHandleConnectionAcceptedNotification
NSFileHandleDataAvailableNotification = _.NSFileHandleDataAvailableNotification
NSFileHandleNotificationDataItem = _.NSFileHandleNotificationDataItem
NSFileHandleNotificationFileHandleItem = _.NSFileHandleNotificationFileHandleItem
NSFileHandleNotificationMonitorModes = _.NSFileHandleNotificationMonitorModes
NSFileHandleOperationException = _.NSFileHandleOperationException
NSFileHandleReadCompletionNotification = _.NSFileHandleReadCompletionNotification
NSFileHandleReadToEndOfFileCompletionNotification = _.NSFileHandleReadToEndOfFileCompletionNotification
NSFileImmutable = _.NSFileImmutable
NSFileModificationDate = _.NSFileModificationDate
NSFileOwnerAccountID = _.NSFileOwnerAccountID
NSFileOwnerAccountName = _.NSFileOwnerAccountName
NSFilePathErrorKey = _.NSFilePathErrorKey
NSFilePosixPermissions = _.NSFilePosixPermissions
NSFileReferenceCount = _.NSFileReferenceCount
NSFileSize = _.NSFileSize
NSFileSystemFileNumber = _.NSFileSystemFileNumber
NSFileSystemFreeNodes = _.NSFileSystemFreeNodes
NSFileSystemFreeSize = _.NSFileSystemFreeSize
NSFileSystemNodes = _.NSFileSystemNodes
NSFileSystemNumber = _.NSFileSystemNumber
NSFileSystemSize = _.NSFileSystemSize
NSFileType = _.NSFileType
NSFileTypeBlockSpecial = _.NSFileTypeBlockSpecial
NSFileTypeCharacterSpecial = _.NSFileTypeCharacterSpecial
NSFileTypeDirectory = _.NSFileTypeDirectory
NSFileTypeRegular = _.NSFileTypeRegular
NSFileTypeSocket = _.NSFileTypeSocket
NSFileTypeSymbolicLink = _.NSFileTypeSymbolicLink
NSFileTypeUnknown = _.NSFileTypeUnknown
NSFoundationVersionNumber = 751.63
NSGenericException = _.NSGenericException
NSGlobalDomain = _.NSGlobalDomain
NSGrammarCorrections = _.NSGrammarCorrections
NSGrammarRange = _.NSGrammarRange
NSGrammarUserDescription = _.NSGrammarUserDescription
NSGregorianCalendar = _.NSGregorianCalendar
NSHTTPCookieComment = _.NSHTTPCookieComment
NSHTTPCookieCommentURL = _.NSHTTPCookieCommentURL
NSHTTPCookieDiscard = _.NSHTTPCookieDiscard
NSHTTPCookieDomain = _.NSHTTPCookieDomain
NSHTTPCookieExpires = _.NSHTTPCookieExpires
NSHTTPCookieManagerAcceptPolicyChangedNotification = _.NSHTTPCookieManagerAcceptPolicyChangedNotification
NSHTTPCookieManagerCookiesChangedNotification = _.NSHTTPCookieManagerCookiesChangedNotification
NSHTTPCookieMaximumAge = _.NSHTTPCookieMaximumAge
NSHTTPCookieName = _.NSHTTPCookieName
NSHTTPCookieOriginURL = _.NSHTTPCookieOriginURL
NSHTTPCookiePath = _.NSHTTPCookiePath
NSHTTPCookiePort = _.NSHTTPCookiePort
NSHTTPCookieSecure = _.NSHTTPCookieSecure
NSHTTPCookieValue = _.NSHTTPCookieValue
NSHTTPCookieVersion = _.NSHTTPCookieVersion
NSHTTPPropertyErrorPageDataKey = _.NSHTTPPropertyErrorPageDataKey
NSHTTPPropertyHTTPProxy = _.NSHTTPPropertyHTTPProxy
NSHTTPPropertyRedirectionHeadersKey = _.NSHTTPPropertyRedirectionHeadersKey
NSHTTPPropertyServerHTTPVersionKey = _.NSHTTPPropertyServerHTTPVersionKey
NSHTTPPropertyStatusCodeKey = _.NSHTTPPropertyStatusCodeKey
NSHTTPPropertyStatusReasonKey = _.NSHTTPPropertyStatusReasonKey
NSHangOnUncaughtException = False
NSHebrewCalendar = _.NSHebrewCalendar
NSHelpAnchorErrorKey = _.NSHelpAnchorErrorKey
NSHourNameDesignations = _.NSHourNameDesignations
NSISO8601Calendar = _.NSISO8601Calendar
NSInconsistentArchiveException = _.NSInconsistentArchiveException
NSIndianCalendar = _.NSIndianCalendar
# (NSHashTableCallBacks)NSIntHashCallBacks
# (NSMapTableKeyCallBacks)NSIntMapKeyCallBacks
# (NSMapTableValueCallBacks)NSIntMapValueCallBacks
# (NSHashTableCallBacks)NSIntegerHashCallBacks
# (NSMapTableKeyCallBacks)NSIntegerMapKeyCallBacks
# (NSMapTableValueCallBacks)NSIntegerMapValueCallBacks
NSInternalInconsistencyException = _.NSInternalInconsistencyException
NSInternationalCurrencyString = _.NSInternationalCurrencyString
NSInvalidArchiveOperationException = _.NSInvalidArchiveOperationException
NSInvalidArgumentException = _.NSInvalidArgumentException
NSInvalidReceivePortException = _.NSInvalidReceivePortException
NSInvalidSendPortException = _.NSInvalidSendPortException
NSInvalidUnarchiveOperationException = _.NSInvalidUnarchiveOperationException
NSInvocationOperationCancelledException = _.NSInvocationOperationCancelledException
NSInvocationOperationVoidResultException = _.NSInvocationOperationVoidResultException
NSIsNilTransformerName = _.NSIsNilTransformerName
NSIsNotNilTransformerName = _.NSIsNotNilTransformerName
NSIslamicCalendar = _.NSIslamicCalendar
NSIslamicCivilCalendar = _.NSIslamicCivilCalendar
NSJapaneseCalendar = _.NSJapaneseCalendar
NSKeepAllocationStatistics = True
NSKeyValueChangeIndexesKey = _.NSKeyValueChangeIndexesKey
NSKeyValueChangeKindKey = _.NSKeyValueChangeKindKey
NSKeyValueChangeNewKey = _.NSKeyValueChangeNewKey
NSKeyValueChangeNotificationIsPriorKey = _.NSKeyValueChangeNotificationIsPriorKey
NSKeyValueChangeOldKey = _.NSKeyValueChangeOldKey
NSKeyedUnarchiveFromDataTransformerName = _.NSKeyedUnarchiveFromDataTransformerName
NSLaterTimeDesignations = _.NSLaterTimeDesignations
NSLoadedClasses = _.NSLoadedClasses
NSLocalNotificationCenterType = _.NSLocalNotificationCenterType
NSLocaleAlternateQuotationBeginDelimiterKey = _.NSLocaleAlternateQuotationBeginDelimiterKey
NSLocaleAlternateQuotationEndDelimiterKey = _.NSLocaleAlternateQuotationEndDelimiterKey
NSLocaleCalendar = _.NSLocaleCalendar
NSLocaleCollationIdentifier = _.NSLocaleCollationIdentifier
NSLocaleCollatorIdentifier = _.NSLocaleCollatorIdentifier
NSLocaleCountryCode = _.NSLocaleCountryCode
NSLocaleCurrencyCode = _.NSLocaleCurrencyCode
NSLocaleCurrencySymbol = _.NSLocaleCurrencySymbol
NSLocaleDecimalSeparator = _.NSLocaleDecimalSeparator
NSLocaleExemplarCharacterSet = _.NSLocaleExemplarCharacterSet
NSLocaleGroupingSeparator = _.NSLocaleGroupingSeparator
NSLocaleIdentifier = _.NSLocaleIdentifier
NSLocaleLanguageCode = _.NSLocaleLanguageCode
NSLocaleMeasurementSystem = _.NSLocaleMeasurementSystem
NSLocaleQuotationBeginDelimiterKey = _.NSLocaleQuotationBeginDelimiterKey
NSLocaleQuotationEndDelimiterKey = _.NSLocaleQuotationEndDelimiterKey
NSLocaleScriptCode = _.NSLocaleScriptCode
NSLocaleUsesMetricSystem = _.NSLocaleUsesMetricSystem
NSLocaleVariantCode = _.NSLocaleVariantCode
NSLocalizedDescriptionKey = _.NSLocalizedDescriptionKey
NSLocalizedFailureReasonErrorKey = _.NSLocalizedFailureReasonErrorKey
NSLocalizedRecoveryOptionsErrorKey = _.NSLocalizedRecoveryOptionsErrorKey
NSLocalizedRecoverySuggestionErrorKey = _.NSLocalizedRecoverySuggestionErrorKey
NSMachErrorDomain = _.NSMachErrorDomain
NSMallocException = _.NSMallocException
NSMaximumKeyValueOperator = _.NSMaximumKeyValueOperator
NSMetadataQueryDidFinishGatheringNotification = _.NSMetadataQueryDidFinishGatheringNotification
NSMetadataQueryDidStartGatheringNotification = _.NSMetadataQueryDidStartGatheringNotification
NSMetadataQueryDidUpdateNotification = _.NSMetadataQueryDidUpdateNotification
NSMetadataQueryGatheringProgressNotification = _.NSMetadataQueryGatheringProgressNotification
NSMetadataQueryLocalComputerScope = _.NSMetadataQueryLocalComputerScope
NSMetadataQueryNetworkScope = _.NSMetadataQueryNetworkScope
NSMetadataQueryResultContentRelevanceAttribute = _.NSMetadataQueryResultContentRelevanceAttribute
NSMetadataQueryUserHomeScope = _.NSMetadataQueryUserHomeScope
NSMinimumKeyValueOperator = _.NSMinimumKeyValueOperator
NSMonthNameArray = _.NSMonthNameArray
NSNegateBooleanTransformerName = _.NSNegateBooleanTransformerName
NSNegativeCurrencyFormatString = _.NSNegativeCurrencyFormatString
NSNetServicesErrorCode = _.NSNetServicesErrorCode
NSNetServicesErrorDomain = _.NSNetServicesErrorDomain
NSNextDayDesignations = _.NSNextDayDesignations
NSNextNextDayDesignations = _.NSNextNextDayDesignations
# (NSHashTableCallBacks)NSNonOwnedPointerHashCallBacks
# (NSMapTableKeyCallBacks)NSNonOwnedPointerMapKeyCallBacks
# (NSMapTableValueCallBacks)NSNonOwnedPointerMapValueCallBacks
# (NSMapTableKeyCallBacks)NSNonOwnedPointerOrNullMapKeyCallBacks
# (NSHashTableCallBacks)NSNonRetainedObjectHashCallBacks
# (NSMapTableKeyCallBacks)NSNonRetainedObjectMapKeyCallBacks
# (NSMapTableValueCallBacks)NSNonRetainedObjectMapValueCallBacks
NSOSStatusErrorDomain = _.NSOSStatusErrorDomain
# (NSHashTableCallBacks)NSObjectHashCallBacks
NSObjectInaccessibleException = _.NSObjectInaccessibleException
# (NSMapTableKeyCallBacks)NSObjectMapKeyCallBacks
# (NSMapTableValueCallBacks)NSObjectMapValueCallBacks
NSObjectNotAvailableException = _.NSObjectNotAvailableException
NSOldStyleException = _.NSOldStyleException
NSOperationNotSupportedForKeyException = _.NSOperationNotSupportedForKeyException
# (NSHashTableCallBacks)NSOwnedObjectIdentityHashCallBacks
# (NSHashTableCallBacks)NSOwnedPointerHashCallBacks
# (NSMapTableKeyCallBacks)NSOwnedPointerMapKeyCallBacks
# (NSMapTableValueCallBacks)NSOwnedPointerMapValueCallBacks
NSPOSIXErrorDomain = _.NSPOSIXErrorDomain
NSParseErrorException = _.NSParseErrorException
NSPersianCalendar = _.NSPersianCalendar
# (NSHashTableCallBacks)NSPointerToStructHashCallBacks
NSPortDidBecomeInvalidNotification = _.NSPortDidBecomeInvalidNotification
NSPortReceiveException = _.NSPortReceiveException
NSPortSendException = _.NSPortSendException
NSPortTimeoutException = _.NSPortTimeoutException
NSPositiveCurrencyFormatString = _.NSPositiveCurrencyFormatString
NSPriorDayDesignations = _.NSPriorDayDesignations
NSRangeException = _.NSRangeException
NSRecoveryAttempterErrorKey = _.NSRecoveryAttempterErrorKey
NSRegistrationDomain = _.NSRegistrationDomain
NSRepublicOfChinaCalendar = _.NSRepublicOfChinaCalendar
NSRunLoopCommonModes = _.NSRunLoopCommonModes
NSShortDateFormatString = _.NSShortDateFormatString
NSShortMonthNameArray = _.NSShortMonthNameArray
NSShortTimeDateFormatString = _.NSShortTimeDateFormatString
NSShortWeekDayNameArray = _.NSShortWeekDayNameArray
NSStreamDataWrittenToMemoryStreamKey = _.NSStreamDataWrittenToMemoryStreamKey
NSStreamFileCurrentOffsetKey = _.NSStreamFileCurrentOffsetKey
NSStreamSOCKSErrorDomain = _.NSStreamSOCKSErrorDomain
NSStreamSOCKSProxyConfigurationKey = _.NSStreamSOCKSProxyConfigurationKey
NSStreamSOCKSProxyHostKey = _.NSStreamSOCKSProxyHostKey
NSStreamSOCKSProxyPasswordKey = _.NSStreamSOCKSProxyPasswordKey
NSStreamSOCKSProxyPortKey = _.NSStreamSOCKSProxyPortKey
NSStreamSOCKSProxyUserKey = _.NSStreamSOCKSProxyUserKey
NSStreamSOCKSProxyVersion4 = _.NSStreamSOCKSProxyVersion4
NSStreamSOCKSProxyVersion5 = _.NSStreamSOCKSProxyVersion5
NSStreamSOCKSProxyVersionKey = _.NSStreamSOCKSProxyVersionKey
NSStreamSocketSSLErrorDomain = _.NSStreamSocketSSLErrorDomain
NSStreamSocketSecurityLevelKey = _.NSStreamSocketSecurityLevelKey
NSStreamSocketSecurityLevelNegotiatedSSL = _.NSStreamSocketSecurityLevelNegotiatedSSL
NSStreamSocketSecurityLevelNone = _.NSStreamSocketSecurityLevelNone
NSStreamSocketSecurityLevelSSLv2 = _.NSStreamSocketSecurityLevelSSLv2
NSStreamSocketSecurityLevelSSLv3 = _.NSStreamSocketSecurityLevelSSLv3
NSStreamSocketSecurityLevelTLSv1 = _.NSStreamSocketSecurityLevelTLSv1
NSStringEncodingErrorKey = _.NSStringEncodingErrorKey
NSSumKeyValueOperator = _.NSSumKeyValueOperator
NSSystemClockDidChangeNotification = _.NSSystemClockDidChangeNotification
NSSystemTimeZoneDidChangeNotification = _.NSSystemTimeZoneDidChangeNotification
NSTaskDidTerminateNotification = _.NSTaskDidTerminateNotification
NSTextCheckingCityKey = _.NSTextCheckingCityKey
NSTextCheckingCountryKey = _.NSTextCheckingCountryKey
NSTextCheckingJobTitleKey = _.NSTextCheckingJobTitleKey
NSTextCheckingNameKey = _.NSTextCheckingNameKey
NSTextCheckingOrganizationKey = _.NSTextCheckingOrganizationKey
NSTextCheckingPhoneKey = _.NSTextCheckingPhoneKey
NSTextCheckingStateKey = _.NSTextCheckingStateKey
NSTextCheckingStreetKey = _.NSTextCheckingStreetKey
NSTextCheckingZIPKey = _.NSTextCheckingZIPKey
NSThisDayDesignations = _.NSThisDayDesignations
NSThousandsSeparator = _.NSThousandsSeparator
NSThreadWillExitNotification = _.NSThreadWillExitNotification
NSTimeDateFormatString = _.NSTimeDateFormatString
NSTimeFormatString = _.NSTimeFormatString
NSURLAttributeModificationDateKey = _.NSURLAttributeModificationDateKey
NSURLAuthenticationMethodClientCertificate = _.NSURLAuthenticationMethodClientCertificate
NSURLAuthenticationMethodDefault = _.NSURLAuthenticationMethodDefault
NSURLAuthenticationMethodHTMLForm = _.NSURLAuthenticationMethodHTMLForm
NSURLAuthenticationMethodHTTPBasic = _.NSURLAuthenticationMethodHTTPBasic
NSURLAuthenticationMethodHTTPDigest = _.NSURLAuthenticationMethodHTTPDigest
NSURLAuthenticationMethodNTLM = _.NSURLAuthenticationMethodNTLM
NSURLAuthenticationMethodNegotiate = _.NSURLAuthenticationMethodNegotiate
NSURLAuthenticationMethodServerTrust = _.NSURLAuthenticationMethodServerTrust
NSURLContentAccessDateKey = _.NSURLContentAccessDateKey
NSURLContentModificationDateKey = _.NSURLContentModificationDateKey
NSURLCreationDateKey = _.NSURLCreationDateKey
NSURLCredentialStorageChangedNotification = _.NSURLCredentialStorageChangedNotification
NSURLCustomIconKey = _.NSURLCustomIconKey
NSURLEffectiveIconKey = _.NSURLEffectiveIconKey
NSURLErrorDomain = _.NSURLErrorDomain
NSURLErrorFailingURLErrorKey = _.NSURLErrorFailingURLErrorKey
NSURLErrorFailingURLPeerTrustErrorKey = _.NSURLErrorFailingURLPeerTrustErrorKey
NSURLErrorFailingURLStringErrorKey = _.NSURLErrorFailingURLStringErrorKey
NSURLErrorKey = _.NSURLErrorKey
NSURLFileAllocatedSizeKey = _.NSURLFileAllocatedSizeKey
NSURLFileScheme = _.NSURLFileScheme
NSURLFileSizeKey = _.NSURLFileSizeKey
NSURLHasHiddenExtensionKey = _.NSURLHasHiddenExtensionKey
NSURLIsAliasFileKey = _.NSURLIsAliasFileKey
NSURLIsDirectoryKey = _.NSURLIsDirectoryKey
NSURLIsHiddenKey = _.NSURLIsHiddenKey
NSURLIsPackageKey = _.NSURLIsPackageKey
NSURLIsRegularFileKey = _.NSURLIsRegularFileKey
NSURLIsSymbolicLinkKey = _.NSURLIsSymbolicLinkKey
NSURLIsSystemImmutableKey = _.NSURLIsSystemImmutableKey
NSURLIsUserImmutableKey = _.NSURLIsUserImmutableKey
NSURLIsVolumeKey = _.NSURLIsVolumeKey
NSURLLabelColorKey = _.NSURLLabelColorKey
NSURLLabelNumberKey = _.NSURLLabelNumberKey
NSURLLinkCountKey = _.NSURLLinkCountKey
NSURLLocalizedLabelKey = _.NSURLLocalizedLabelKey
NSURLLocalizedNameKey = _.NSURLLocalizedNameKey
NSURLLocalizedTypeDescriptionKey = _.NSURLLocalizedTypeDescriptionKey
NSURLNameKey = _.NSURLNameKey
NSURLParentDirectoryURLKey = _.NSURLParentDirectoryURLKey
NSURLProtectionSpaceFTP = _.NSURLProtectionSpaceFTP
NSURLProtectionSpaceFTPProxy = _.NSURLProtectionSpaceFTPProxy
NSURLProtectionSpaceHTTP = _.NSURLProtectionSpaceHTTP
NSURLProtectionSpaceHTTPProxy = _.NSURLProtectionSpaceHTTPProxy
NSURLProtectionSpaceHTTPS = _.NSURLProtectionSpaceHTTPS
NSURLProtectionSpaceHTTPSProxy = _.NSURLProtectionSpaceHTTPSProxy
NSURLProtectionSpaceSOCKSProxy = _.NSURLProtectionSpaceSOCKSProxy
NSURLTypeIdentifierKey = _.NSURLTypeIdentifierKey
NSURLVolumeAvailableCapacityKey = _.NSURLVolumeAvailableCapacityKey
NSURLVolumeIsJournalingKey = _.NSURLVolumeIsJournalingKey
NSURLVolumeLocalizedFormatDescriptionKey = _.NSURLVolumeLocalizedFormatDescriptionKey
NSURLVolumeResourceCountKey = _.NSURLVolumeResourceCountKey
NSURLVolumeSupportsCasePreservedNamesKey = _.NSURLVolumeSupportsCasePreservedNamesKey
NSURLVolumeSupportsCaseSensitiveNamesKey = _.NSURLVolumeSupportsCaseSensitiveNamesKey
NSURLVolumeSupportsHardLinksKey = _.NSURLVolumeSupportsHardLinksKey
NSURLVolumeSupportsJournalingKey = _.NSURLVolumeSupportsJournalingKey
NSURLVolumeSupportsPersistentIDsKey = _.NSURLVolumeSupportsPersistentIDsKey
NSURLVolumeSupportsSparseFilesKey = _.NSURLVolumeSupportsSparseFilesKey
NSURLVolumeSupportsSymbolicLinksKey = _.NSURLVolumeSupportsSymbolicLinksKey
NSURLVolumeSupportsZeroRunsKey = _.NSURLVolumeSupportsZeroRunsKey
NSURLVolumeTotalCapacityKey = _.NSURLVolumeTotalCapacityKey
NSURLVolumeURLKey = _.NSURLVolumeURLKey
NSUnarchiveFromDataTransformerName = _.NSUnarchiveFromDataTransformerName
NSUndefinedKeyException = _.NSUndefinedKeyException
NSUnderlyingErrorKey = _.NSUnderlyingErrorKey
NSUndoManagerCheckpointNotification = _.NSUndoManagerCheckpointNotification
NSUndoManagerDidOpenUndoGroupNotification = _.NSUndoManagerDidOpenUndoGroupNotification
NSUndoManagerDidRedoChangeNotification = _.NSUndoManagerDidRedoChangeNotification
NSUndoManagerDidUndoChangeNotification = _.NSUndoManagerDidUndoChangeNotification
NSUndoManagerWillCloseUndoGroupNotification = _.NSUndoManagerWillCloseUndoGroupNotification
NSUndoManagerWillRedoChangeNotification = _.NSUndoManagerWillRedoChangeNotification
NSUndoManagerWillUndoChangeNotification = _.NSUndoManagerWillUndoChangeNotification
NSUnionOfArraysKeyValueOperator = _.NSUnionOfArraysKeyValueOperator
NSUnionOfObjectsKeyValueOperator = _.NSUnionOfObjectsKeyValueOperator
NSUnionOfSetsKeyValueOperator = _.NSUnionOfSetsKeyValueOperator
NSUserDefaultsDidChangeNotification = _.NSUserDefaultsDidChangeNotification
NSWeekDayNameArray = _.NSWeekDayNameArray
NSWillBecomeMultiThreadedNotification = _.NSWillBecomeMultiThreadedNotification
NSXMLParserErrorDomain = _.NSXMLParserErrorDomain
NSYearMonthWeekDesignations = _.NSYearMonthWeekDesignations
# (NSPoint)NSZeroPoint
# (NSRect)NSZeroRect
# (NSSize)NSZeroSize
NSZombieEnabled = False
# enums
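# NOTE: the enum values below are plain Python ints mirroring the C-level
# Foundation enums. Option-style constants are bit flags and can be OR-ed
# together; an illustrative combination using flags defined below:
#   NSCaseInsensitiveSearch | NSDiacriticInsensitiveSearch  # == 1 | 128 == 129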
NSASCIIStringEncoding = 1
NSAdminApplicationDirectory = 4
NSAggregateExpressionType = 14
NSAllApplicationsDirectory = 100
NSAllDomainsMask = 65535
NSAllLibrariesDirectory = 101
NSAllPredicateModifier = 1
NSAnchoredSearch = 8
NSAndPredicateType = 1
NSAnyPredicateModifier = 2
NSApplicationDirectory = 1
NSApplicationSupportDirectory = 14
NSArgumentEvaluationScriptError = 3
NSArgumentsWrongScriptError = 6
NSAtomicWrite = 1
NSAttributedStringEnumerationLongestEffectiveRangeNotRequired = 1048576
NSAttributedStringEnumerationReverse = 2
NSAutosavedInformationDirectory = 11
NSBackwardsSearch = 4
NSBeginsWithComparison = 5
NSBeginsWithPredicateOperatorType = 8
NSBetweenPredicateOperatorType = 100
NSBinarySearchingFirstEqual = 256
NSBinarySearchingInsertionIndex = 1024
NSBinarySearchingLastEqual = 512
NSBlockExpressionType = 19
NSBundleExecutableArchitectureI386 = 7
NSBundleExecutableArchitecturePPC = 18
NSBundleExecutableArchitecturePPC64 = 16777234
NSBundleExecutableArchitectureX86_64 = 16777223
NSCachesDirectory = 13
NSCalculationDivideByZero = 4
NSCalculationLossOfPrecision = 1
NSCalculationNoError = 0
NSCalculationOverflow = 3
NSCalculationUnderflow = 2
NSCannotCreateScriptCommandError = 10
NSCaseInsensitivePredicateOption = 1
NSCaseInsensitiveSearch = 1
NSCollectorDisabledOption = 2
NSConstantValueExpressionType = 0
NSContainerSpecifierError = 2
NSContainsComparison = 7
NSContainsPredicateOperatorType = 99
NSCoreServiceDirectory = 10
NSCustomSelectorPredicateOperatorType = 11
NSDataReadingMapped = 1
NSDataReadingUncached = 2
NSDataSearchAnchored = 2
NSDataSearchBackwards = 1
NSDataWritingAtomic = 1
NSDateFormatterBehavior10_0 = 1000
NSDateFormatterBehavior10_4 = 1040
NSDateFormatterBehaviorDefault = 0
NSDateFormatterFullStyle = 4
NSDateFormatterLongStyle = 3
NSDateFormatterMediumStyle = 2
NSDateFormatterNoStyle = 0
NSDateFormatterShortStyle = 1
NSDayCalendarUnit = 16
NSDecimalMaxSize = 8
NSDecimalNoScale = 32767
NSDemoApplicationDirectory = 2
NSDesktopDirectory = 12
NSDeveloperApplicationDirectory = 3
NSDeveloperDirectory = 6
NSDiacriticInsensitivePredicateOption = 2
NSDiacriticInsensitiveSearch = 128
NSDirectPredicateModifier = 0
NSDirectoryEnumerationSkipsHiddenFiles = 4
NSDirectoryEnumerationSkipsPackageDescendants = 2
NSDirectoryEnumerationSkipsSubdirectoryDescendants = 1
NSDocumentDirectory = 9
NSDocumentationDirectory = 8
NSDownloadsDirectory = 15
NSEndsWithComparison = 6
NSEndsWithPredicateOperatorType = 9
NSEnumerationConcurrent = 1
NSEnumerationReverse = 2
NSEqualToComparison = 0
NSEqualToPredicateOperatorType = 4
NSEraCalendarUnit = 2
NSEvaluatedObjectExpressionType = 1
NSEverySubelement = 1
NSExecutableArchitectureMismatchError = 3585
NSExecutableErrorMaximum = 3839
NSExecutableErrorMinimum = 3584
NSExecutableLinkError = 3588
NSExecutableLoadError = 3587
NSExecutableNotLoadableError = 3584
NSExecutableRuntimeMismatchError = 3586
NSFileErrorMaximum = 1023
NSFileErrorMinimum = 0
NSFileLockingError = 255
NSFileManagerItemReplacementUsingNewMetadataOnly = 1
NSFileManagerItemReplacementWithoutDeletingBackupItem = 2
NSFileNoSuchFileError = 4
NSFileReadCorruptFileError = 259
NSFileReadInapplicableStringEncodingError = 261
NSFileReadInvalidFileNameError = 258
NSFileReadNoPermissionError = 257
NSFileReadNoSuchFileError = 260
NSFileReadTooLargeError = 263
NSFileReadUnknownError = 256
NSFileReadUnknownStringEncodingError = 264
NSFileReadUnsupportedSchemeError = 262
NSFileWriteInapplicableStringEncodingError = 517
NSFileWriteInvalidFileNameError = 514
NSFileWriteNoPermissionError = 513
NSFileWriteOutOfSpaceError = 640
NSFileWriteUnknownError = 512
NSFileWriteUnsupportedSchemeError = 518
NSFileWriteVolumeReadOnlyError = 642
NSForcedOrderingSearch = 512
NSFormattingError = 2048
NSFormattingErrorMaximum = 2559
NSFormattingErrorMinimum = 2048
NSFoundationVersionNumber10_0 = 397.39999999999998
NSFoundationVersionNumber10_1 = 425.00000000000000
NSFoundationVersionNumber10_1_1 = 425.00000000000000
NSFoundationVersionNumber10_1_2 = 425.00000000000000
NSFoundationVersionNumber10_1_3 = 425.00000000000000
NSFoundationVersionNumber10_1_4 = 425.00000000000000
NSFoundationVersionNumber10_2 = 462.00000000000000
NSFoundationVersionNumber10_2_1 = 462.00000000000000
NSFoundationVersionNumber10_2_2 = 462.00000000000000
NSFoundationVersionNumber10_2_3 = 462.00000000000000
NSFoundationVersionNumber10_2_4 = 462.00000000000000
NSFoundationVersionNumber10_2_5 = 462.00000000000000
NSFoundationVersionNumber10_2_6 = 462.00000000000000
NSFoundationVersionNumber10_2_7 = 462.69999999999999
NSFoundationVersionNumber10_2_8 = 462.69999999999999
NSFoundationVersionNumber10_3 = 500.00000000000000
NSFoundationVersionNumber10_3_1 = 500.00000000000000
NSFoundationVersionNumber10_3_2 = 500.30000000000001
NSFoundationVersionNumber10_3_3 = 500.54000000000002
NSFoundationVersionNumber10_3_4 = 500.56000000000000
NSFoundationVersionNumber10_3_5 = 500.56000000000000
NSFoundationVersionNumber10_3_6 = 500.56000000000000
NSFoundationVersionNumber10_3_7 = 500.56000000000000
NSFoundationVersionNumber10_3_8 = 500.56000000000000
NSFoundationVersionNumber10_3_9 = 500.57999999999998
NSFoundationVersionNumber10_4 = 567.00000000000000
NSFoundationVersionNumber10_4_1 = 567.00000000000000
NSFoundationVersionNumber10_4_10 = 567.28999999999996
NSFoundationVersionNumber10_4_11 = 567.36000000000001
NSFoundationVersionNumber10_4_2 = 567.12000000000000
NSFoundationVersionNumber10_4_3 = 567.21000000000004
NSFoundationVersionNumber10_4_4_Intel = 567.23000000000002
NSFoundationVersionNumber10_4_4_PowerPC = 567.21000000000004
NSFoundationVersionNumber10_4_5 = 567.25000000000000
NSFoundationVersionNumber10_4_6 = 567.25999999999999
NSFoundationVersionNumber10_4_7 = 567.26999999999998
NSFoundationVersionNumber10_4_8 = 567.27999999999997
NSFoundationVersionNumber10_4_9 = 567.28999999999996
NSFoundationVersionNumber10_5 = 677.00000000000000
NSFoundationVersionNumber10_5_1 = 677.10000000000002
NSFoundationVersionNumber10_5_2 = 677.14999999999998
NSFoundationVersionNumber10_5_3 = 677.19000000000005
NSFoundationVersionNumber10_5_4 = 677.19000000000005
NSFoundationVersionNumber10_5_5 = 677.21000000000004
NSFoundationVersionNumber10_5_6 = 677.22000000000003
NSFoundationVersionNumber10_5_7 = 677.24000000000001
NSFoundationVersionNumber10_5_8 = 677.25999999999999
NSFoundationVersionNumber10_6 = 751.00000000000000
NSFoundationVersionNumber10_6_1 = 751.00000000000000
NSFoundationVersionNumber10_6_2 = 751.13999999999999
NSFoundationVersionNumber10_6_3 = 751.21000000000004
NSFoundationVersionWithFileManagerResourceForkSupport = 412
NSFunctionExpressionType = 4
NSGEOMETRY_TYPES_SAME_AS_CGGEOMETRY_TYPES = None
NSGreaterThanComparison = 4
NSGreaterThanOrEqualToComparison = 3
NSGreaterThanOrEqualToPredicateOperatorType = 3
NSGreaterThanPredicateOperatorType = 2
NSHPUXOperatingSystem = 4
NSHTTPCookieAcceptPolicyAlways = 0
NSHTTPCookieAcceptPolicyNever = 1
NSHTTPCookieAcceptPolicyOnlyFromMainDocumentDomain = 2
NSHashTableCopyIn = 65536
NSHashTableObjectPointerPersonality = 512
NSHashTableStrongMemory = 0
NSHashTableZeroingWeakMemory = 1
NSHourCalendarUnit = 32
NSINTEGER_DEFINED = 1
NSISO2022JPStringEncoding = 21
NSISOLatin1StringEncoding = 5
NSISOLatin2StringEncoding = 9
NSInPredicateOperatorType = 10
NSIndexSubelement = 0
NSInputMethodsDirectory = 16
NSIntegerMax = 2147483647
NSIntegerMin = -2147483648
NSInternalScriptError = 8
NSInternalSpecifierError = 5
NSIntersectSetExpressionType = 6
NSInvalidIndexSpecifierError = 4
NSItemReplacementDirectory = 99
NSJapaneseEUCStringEncoding = 3
NSKeyPathExpressionType = 3
NSKeySpecifierEvaluationScriptError = 2
NSKeyValueChangeInsertion = 2
NSKeyValueChangeRemoval = 3
NSKeyValueChangeReplacement = 4
NSKeyValueChangeSetting = 1
NSKeyValueIntersectSetMutation = 3
NSKeyValueMinusSetMutation = 2
NSKeyValueObservingOptionInitial = 4
NSKeyValueObservingOptionNew = 1
NSKeyValueObservingOptionOld = 2
NSKeyValueObservingOptionPrior = 8
NSKeyValueSetSetMutation = 4
NSKeyValueUnionSetMutation = 1
NSKeyValueValidationError = 1024
NSLessThanComparison = 2
NSLessThanOrEqualToComparison = 1
NSLessThanOrEqualToPredicateOperatorType = 1
NSLessThanPredicateOperatorType = 0
NSLibraryDirectory = 5
NSLikePredicateOperatorType = 7
NSLiteralSearch = 2
NSLocalDomainMask = 2
NSLocaleLanguageDirectionBottomToTop = 4
NSLocaleLanguageDirectionLeftToRight = 1
NSLocaleLanguageDirectionRightToLeft = 2
NSLocaleLanguageDirectionTopToBottom = 3
NSLocaleLanguageDirectionUnknown = 0
NSMACHOperatingSystem = 5
NSMacOSRomanStringEncoding = 30
NSMachPortDeallocateNone = 0
NSMachPortDeallocateReceiveRight = 2
NSMachPortDeallocateSendRight = 1
NSMapTableCopyIn = 65536
NSMapTableObjectPointerPersonality = 512
NSMapTableStrongMemory = 0
NSMapTableZeroingWeakMemory = 1
NSMappedRead = 1
NSMatchesPredicateOperatorType = 6
NSMaxXEdge = None
NSMaxYEdge = None
NSMiddleSubelement = 2
NSMinXEdge = None
NSMinYEdge = None
NSMinusSetExpressionType = 7
NSMinuteCalendarUnit = 64
NSMonthCalendarUnit = 8
NSMoviesDirectory = 17
NSMusicDirectory = 18
NSNEXTSTEPStringEncoding = 2
NSNetServiceNoAutoRename = 1
NSNetServicesActivityInProgress = -72003
NSNetServicesBadArgumentError = -72004
NSNetServicesCancelledError = -72005
NSNetServicesCollisionError = -72001
NSNetServicesInvalidError = -72006
NSNetServicesNotFoundError = -72002
NSNetServicesTimeoutError = -72007
NSNetServicesUnknownError = -72000
NSNetworkDomainMask = 4
NSNoScriptError = 0
NSNoSpecifierError = 0
NSNoSubelement = 4
NSNoTopLevelContainersSpecifierError = 1
NSNonLossyASCIIStringEncoding = 7
NSNotEqualToPredicateOperatorType = 5
NSNotFound = 2147483647
NSNotPredicateType = 0
NSNotificationCoalescingOnName = 1
NSNotificationCoalescingOnSender = 2
NSNotificationDeliverImmediately = 1
NSNotificationNoCoalescing = 0
NSNotificationPostToAllSessions = 2
NSNotificationSuspensionBehaviorCoalesce = 2
NSNotificationSuspensionBehaviorDeliverImmediately = 4
NSNotificationSuspensionBehaviorDrop = 1
NSNotificationSuspensionBehaviorHold = 3
NSNumberFormatterBehavior10_0 = 1000
NSNumberFormatterBehavior10_4 = 1040
NSNumberFormatterBehaviorDefault = 0
NSNumberFormatterCurrencyStyle = 2
NSNumberFormatterDecimalStyle = 1
NSNumberFormatterNoStyle = 0
NSNumberFormatterPadAfterPrefix = 1
NSNumberFormatterPadAfterSuffix = 3
NSNumberFormatterPadBeforePrefix = 0
NSNumberFormatterPadBeforeSuffix = 2
NSNumberFormatterPercentStyle = 3
NSNumberFormatterRoundCeiling = 0
NSNumberFormatterRoundDown = 2
NSNumberFormatterRoundFloor = 1
NSNumberFormatterRoundHalfDown = 5
NSNumberFormatterRoundHalfEven = 4
NSNumberFormatterRoundHalfUp = 6
NSNumberFormatterRoundUp = 3
NSNumberFormatterScientificStyle = 4
NSNumberFormatterSpellOutStyle = 5
NSNumericSearch = 64
NSOSF1OperatingSystem = 7
NSObjectAutoreleasedEvent = 3
NSObjectExtraRefDecrementedEvent = 5
NSObjectExtraRefIncrementedEvent = 4
NSObjectInternalRefDecrementedEvent = 7
NSObjectInternalRefIncrementedEvent = 6
NSOpenStepUnicodeReservedBase = 62464
NSOperationNotSupportedForKeyScriptError = 9
NSOperationNotSupportedForKeySpecifierError = 6
NSOperationQueueDefaultMaxConcurrentOperationCount = -1
NSOperationQueuePriorityHigh = 4
NSOperationQueuePriorityLow = -4
NSOperationQueuePriorityNormal = 0
NSOperationQueuePriorityVeryHigh = 8
NSOperationQueuePriorityVeryLow = -8
NSOrPredicateType = 2
NSOrderedAscending = -1
NSOrderedDescending = 1
NSOrderedSame = 0
NSPicturesDirectory = 19
NSPointerFunctionsCStringPersonality = 768
NSPointerFunctionsCopyIn = 65536
NSPointerFunctionsIntegerPersonality = 1280
NSPointerFunctionsMachVirtualMemory = 4
NSPointerFunctionsMallocMemory = 3
NSPointerFunctionsObjectPersonality = 0
NSPointerFunctionsObjectPointerPersonality = 512
NSPointerFunctionsOpaqueMemory = 2
NSPointerFunctionsOpaquePersonality = 256
NSPointerFunctionsStrongMemory = 0
NSPointerFunctionsStructPersonality = 1024
NSPointerFunctionsZeroingWeakMemory = 1
NSPositionAfter = 0
NSPositionBefore = 1
NSPositionBeginning = 2
NSPositionEnd = 3
NSPositionReplace = 4
NSPostASAP = 2
NSPostNow = 3
NSPostWhenIdle = 1
NSPreferencePanesDirectory = 22
NSPrinterDescriptionDirectory = 20
NSPropertyListBinaryFormat_v1_0 = 200
NSPropertyListErrorMaximum = 4095
NSPropertyListErrorMinimum = 3840
NSPropertyListImmutable = 0
NSPropertyListMutableContainers = 1
NSPropertyListMutableContainersAndLeaves = 2
NSPropertyListOpenStepFormat = 1
NSPropertyListReadCorruptError = 3840
NSPropertyListReadStreamError = 3842
NSPropertyListReadUnknownVersionError = 3841
NSPropertyListWriteStreamError = 3851
NSPropertyListXMLFormat_v1_0 = 100
NSQuarterCalendarUnit = 2048
NSRandomSubelement = 3
NSReceiverEvaluationScriptError = 1
NSReceiversCantHandleCommandScriptError = 4
NSRelativeAfter = 0
NSRelativeBefore = 1
NSRequiredArgumentsMissingScriptError = 5
NSRoundBankers = 3
NSRoundDown = 1
NSRoundPlain = 0
NSRoundUp = 2
NSSaveOptionsAsk = 2
NSSaveOptionsNo = 1
NSSaveOptionsYes = 0
NSScannedOption = 1
NSSecondCalendarUnit = 128
NSSharedPublicDirectory = 21
NSShiftJISStringEncoding = 8
NSSolarisOperatingSystem = 3
NSSortConcurrent = 1
NSSortStable = 16
NSStreamEventEndEncountered = 16
NSStreamEventErrorOccurred = 8
NSStreamEventHasBytesAvailable = 2
NSStreamEventHasSpaceAvailable = 4
NSStreamEventNone = 0
NSStreamEventOpenCompleted = 1
NSStreamStatusAtEnd = 5
NSStreamStatusClosed = 6
NSStreamStatusError = 7
NSStreamStatusNotOpen = 0
NSStreamStatusOpen = 2
NSStreamStatusOpening = 1
NSStreamStatusReading = 3
NSStreamStatusWriting = 4
NSStringEncodingConversionAllowLossy = 1
NSStringEncodingConversionExternalRepresentation = 2
NSStringEnumerationByComposedCharacterSequences = 2
NSStringEnumerationByLines = 0
NSStringEnumerationByParagraphs = 1
NSStringEnumerationBySentences = 4
NSStringEnumerationByWords = 3
NSStringEnumerationLocalized = 1024
NSStringEnumerationReverse = 256
NSStringEnumerationSubstringNotRequired = 512
NSSubqueryExpressionType = 13
NSSunOSOperatingSystem = 6
NSSymbolStringEncoding = 6
NSSystemDomainMask = 8
NSTaskTerminationReasonExit = 1
NSTaskTerminationReasonUncaughtSignal = 2
NSTextCheckingAllCustomTypes = 0
NSTextCheckingAllSystemTypes = -1
NSTextCheckingAllTypes = -1
NSTextCheckingTypeAddress = 16
NSTextCheckingTypeCorrection = 512
NSTextCheckingTypeDash = 128
NSTextCheckingTypeDate = 8
NSTextCheckingTypeGrammar = 4
NSTextCheckingTypeLink = 32
NSTextCheckingTypeOrthography = 1
NSTextCheckingTypeQuote = 64
NSTextCheckingTypeReplacement = 256
NSTextCheckingTypeSpelling = 2
NSTimeIntervalSince1970 = 978307200.00000000
NSTimeZoneNameStyleDaylightSaving = 2
NSTimeZoneNameStyleGeneric = 4
NSTimeZoneNameStyleShortDaylightSaving = 3
NSTimeZoneNameStyleShortGeneric = 5
NSTimeZoneNameStyleShortStandard = 1
NSTimeZoneNameStyleStandard = 0
NSUIntegerMax = 4294967295
NSURLBookmarkCreationMinimalBookmark = 512
NSURLBookmarkCreationPreferFileIDResolution = 256
NSURLBookmarkCreationSuitableForBookmarkFile = 1024
NSURLBookmarkResolutionWithoutMounting = 512
NSURLBookmarkResolutionWithoutUI = 256
NSURLCacheStorageAllowed = 0
NSURLCacheStorageAllowedInMemoryOnly = 1
NSURLCacheStorageNotAllowed = 2
NSURLCredentialPersistenceForSession = 1
NSURLCredentialPersistenceNone = 0
NSURLCredentialPersistencePermanent = 2
NSURLErrorBadServerResponse = -1011
NSURLErrorBadURL = -1000
NSURLErrorCancelled = -999
NSURLErrorCannotCloseFile = -3002
NSURLErrorCannotConnectToHost = -1004
NSURLErrorCannotCreateFile = -3000
NSURLErrorCannotDecodeContentData = -1016
NSURLErrorCannotDecodeRawData = -1015
NSURLErrorCannotFindHost = -1003
NSURLErrorCannotLoadFromNetwork = -2000
NSURLErrorCannotMoveFile = -3005
NSURLErrorCannotOpenFile = -3001
NSURLErrorCannotParseResponse = -1017
NSURLErrorCannotRemoveFile = -3004
NSURLErrorCannotWriteToFile = -3003
NSURLErrorClientCertificateRejected = -1205
NSURLErrorClientCertificateRequired = -1206
NSURLErrorDNSLookupFailed = -1006
NSURLErrorDataLengthExceedsMaximum = -1103
NSURLErrorDownloadDecodingFailedMidStream = -3006
NSURLErrorDownloadDecodingFailedToComplete = -3007
NSURLErrorFileDoesNotExist = -1100
NSURLErrorFileIsDirectory = -1101
NSURLErrorHTTPTooManyRedirects = -1007
NSURLErrorNetworkConnectionLost = -1005
NSURLErrorNoPermissionsToReadFile = -1102
NSURLErrorNotConnectedToInternet = -1009
NSURLErrorRedirectToNonExistentLocation = -1010
NSURLErrorResourceUnavailable = -1008
NSURLErrorSecureConnectionFailed = -1200
NSURLErrorServerCertificateHasBadDate = -1201
NSURLErrorServerCertificateHasUnknownRoot = -1203
NSURLErrorServerCertificateNotYetValid = -1204
NSURLErrorServerCertificateUntrusted = -1202
NSURLErrorTimedOut = -1001
NSURLErrorUnknown = -1
NSURLErrorUnsupportedURL = -1002
NSURLErrorUserAuthenticationRequired = -1013
NSURLErrorUserCancelledAuthentication = -1012
NSURLErrorZeroByteResource = -1014
NSURLHandleLoadFailed = 3
NSURLHandleLoadInProgress = 2
NSURLHandleLoadSucceeded = 1
NSURLHandleNotLoaded = 0
NSURLRequestReloadIgnoringCacheData = 1
NSURLRequestReloadIgnoringLocalAndRemoteCacheData = 4
NSURLRequestReloadIgnoringLocalCacheData = 1
NSURLRequestReloadRevalidatingCacheData = 5
NSURLRequestReturnCacheDataDontLoad = 3
NSURLRequestReturnCacheDataElseLoad = 2
NSURLRequestUseProtocolCachePolicy = 0
NSUTF16BigEndianStringEncoding = -1879047936
NSUTF16LittleEndianStringEncoding = -1811939072
NSUTF16StringEncoding = 10
NSUTF32BigEndianStringEncoding = -1744830208
NSUTF32LittleEndianStringEncoding = -1677721344
NSUTF32StringEncoding = -1946156800
NSUTF8StringEncoding = 4
NSUncachedRead = 2
NSUndefinedDateComponent = 2147483647
NSUndoCloseGroupingRunLoopOrdering = 350000
NSUnicodeStringEncoding = 10
NSUnionSetExpressionType = 5
NSUnknownKeyScriptError = 7
NSUnknownKeySpecifierError = 3
NSUserCancelledError = 3072
NSUserDirectory = 7
NSUserDomainMask = 1
NSValidationErrorMaximum = 2047
NSValidationErrorMinimum = 1024
NSVariableExpressionType = 2
NSVolumeEnumerationProduceFileReferenceURLs = 4
NSVolumeEnumerationSkipHiddenVolumes = 2
NSWeekCalendarUnit = 256
NSWeekdayCalendarUnit = 512
NSWeekdayOrdinalCalendarUnit = 1024
NSWidthInsensitiveSearch = 256
NSWindows95OperatingSystem = 2
NSWindowsCP1250StringEncoding = 15
NSWindowsCP1251StringEncoding = 11
NSWindowsCP1252StringEncoding = 12
NSWindowsCP1253StringEncoding = 13
NSWindowsCP1254StringEncoding = 14
NSWindowsNTOperatingSystem = 1
NSWrapCalendarComponents = 1
NSXMLAttributeCDATAKind = 6
NSXMLAttributeDeclarationKind = 10
NSXMLAttributeEntitiesKind = 11
NSXMLAttributeEntityKind = 10
NSXMLAttributeEnumerationKind = 14
NSXMLAttributeIDKind = 7
NSXMLAttributeIDRefKind = 8
NSXMLAttributeIDRefsKind = 9
NSXMLAttributeKind = 3
NSXMLAttributeNMTokenKind = 12
NSXMLAttributeNMTokensKind = 13
NSXMLAttributeNotationKind = 15
NSXMLCommentKind = 6
NSXMLDTDKind = 8
NSXMLDocumentHTMLKind = 2
NSXMLDocumentIncludeContentTypeDeclaration = 262144
NSXMLDocumentKind = 1
NSXMLDocumentTextKind = 3
NSXMLDocumentTidyHTML = 512
NSXMLDocumentTidyXML = 1024
NSXMLDocumentValidate = 8192
NSXMLDocumentXHTMLKind = 1
NSXMLDocumentXInclude = 65536
NSXMLDocumentXMLKind = 0
NSXMLElementDeclarationAnyKind = 18
NSXMLElementDeclarationElementKind = 20
NSXMLElementDeclarationEmptyKind = 17
NSXMLElementDeclarationKind = 11
NSXMLElementDeclarationMixedKind = 19
NSXMLElementDeclarationUndefinedKind = 16
NSXMLElementKind = 2
NSXMLEntityDeclarationKind = 9
NSXMLEntityGeneralKind = 1
NSXMLEntityParameterKind = 4
NSXMLEntityParsedKind = 2
NSXMLEntityPredefined = 5
NSXMLEntityUnparsedKind = 3
NSXMLInvalidKind = 0
NSXMLNamespaceKind = 4
NSXMLNodeCompactEmptyElement = 4
NSXMLNodeExpandEmptyElement = 2
NSXMLNodeIsCDATA = 1
NSXMLNodeOptionsNone = 0
NSXMLNodePreserveAll = -1048546
NSXMLNodePreserveAttributeOrder = 2097152
NSXMLNodePreserveCDATA = 16777216
NSXMLNodePreserveCharacterReferences = 134217728
NSXMLNodePreserveDTD = 67108864
NSXMLNodePreserveEmptyElements = 6
NSXMLNodePreserveEntities = 4194304
NSXMLNodePreserveNamespaceOrder = 1048576
NSXMLNodePreservePrefixes = 8388608
NSXMLNodePreserveQuotes = 24
NSXMLNodePreserveWhitespace = 33554432
NSXMLNodePrettyPrint = 131072
NSXMLNodeUseDoubleQuotes = 16
NSXMLNodeUseSingleQuotes = 8
NSXMLNotationDeclarationKind = 12
NSXMLParserAttributeHasNoValueError = 41
NSXMLParserAttributeListNotFinishedError = 51
NSXMLParserAttributeListNotStartedError = 50
NSXMLParserAttributeNotFinishedError = 40
NSXMLParserAttributeNotStartedError = 39
NSXMLParserAttributeRedefinedError = 42
NSXMLParserCDATANotFinishedError = 63
NSXMLParserCharacterRefAtEOFError = 10
NSXMLParserCharacterRefInDTDError = 13
NSXMLParserCharacterRefInEpilogError = 12
NSXMLParserCharacterRefInPrologError = 11
NSXMLParserCommentContainsDoubleHyphenError = 80
NSXMLParserCommentNotFinishedError = 45
NSXMLParserConditionalSectionNotFinishedError = 59
NSXMLParserConditionalSectionNotStartedError = 58
NSXMLParserDOCTYPEDeclNotFinishedError = 61
NSXMLParserDelegateAbortedParseError = 512
NSXMLParserDocumentStartError = 3
NSXMLParserElementContentDeclNotFinishedError = 55
NSXMLParserElementContentDeclNotStartedError = 54
NSXMLParserEmptyDocumentError = 4
NSXMLParserEncodingNotSupportedError = 32
NSXMLParserEntityBoundaryError = 90
NSXMLParserEntityIsExternalError = 29
NSXMLParserEntityIsParameterError = 30
NSXMLParserEntityNotFinishedError = 37
NSXMLParserEntityNotStartedError = 36
NSXMLParserEntityRefAtEOFError = 14
NSXMLParserEntityRefInDTDError = 17
NSXMLParserEntityRefInEpilogError = 16
NSXMLParserEntityRefInPrologError = 15
NSXMLParserEntityRefLoopError = 89
NSXMLParserEntityReferenceMissingSemiError = 23
NSXMLParserEntityReferenceWithoutNameError = 22
NSXMLParserEntityValueRequiredError = 84
NSXMLParserEqualExpectedError = 75
NSXMLParserExternalStandaloneEntityError = 82
NSXMLParserExternalSubsetNotFinishedError = 60
NSXMLParserExtraContentError = 86
NSXMLParserGTRequiredError = 73
NSXMLParserInternalError = 1
NSXMLParserInvalidCharacterError = 9
NSXMLParserInvalidCharacterInEntityError = 87
NSXMLParserInvalidCharacterRefError = 8
NSXMLParserInvalidConditionalSectionError = 83
NSXMLParserInvalidDecimalCharacterRefError = 7
NSXMLParserInvalidEncodingError = 81
NSXMLParserInvalidEncodingNameError = 79
NSXMLParserInvalidHexCharacterRefError = 6
NSXMLParserInvalidURIError = 91
NSXMLParserLTRequiredError = 72
NSXMLParserLTSlashRequiredError = 74
NSXMLParserLessThanSymbolInAttributeError = 38
NSXMLParserLiteralNotFinishedError = 44
NSXMLParserLiteralNotStartedError = 43
NSXMLParserMisplacedCDATAEndStringError = 62
NSXMLParserMisplacedXMLDeclarationError = 64
NSXMLParserMixedContentDeclNotFinishedError = 53
NSXMLParserMixedContentDeclNotStartedError = 52
NSXMLParserNAMERequiredError = 68
NSXMLParserNMTOKENRequiredError = 67
NSXMLParserNamespaceDeclarationError = 35
NSXMLParserNoDTDError = 94
NSXMLParserNotWellBalancedError = 85
NSXMLParserNotationNotFinishedError = 49
NSXMLParserNotationNotStartedError = 48
NSXMLParserOutOfMemoryError = 2
NSXMLParserPCDATARequiredError = 69
NSXMLParserParsedEntityRefAtEOFError = 18
NSXMLParserParsedEntityRefInEpilogError = 20
NSXMLParserParsedEntityRefInInternalError = 88
NSXMLParserParsedEntityRefInInternalSubsetError = 21
NSXMLParserParsedEntityRefInPrologError = 19
NSXMLParserParsedEntityRefMissingSemiError = 25
NSXMLParserParsedEntityRefNoNameError = 24
NSXMLParserPrematureDocumentEndError = 5
NSXMLParserProcessingInstructionNotFinishedError = 47
NSXMLParserProcessingInstructionNotStartedError = 46
NSXMLParserPublicIdentifierRequiredError = 71
NSXMLParserSeparatorRequiredError = 66
NSXMLParserSpaceRequiredError = 65
NSXMLParserStandaloneValueError = 78
NSXMLParserStringNotClosedError = 34
NSXMLParserStringNotStartedError = 33
NSXMLParserTagNameMismatchError = 76
NSXMLParserURIFragmentError = 92
NSXMLParserURIRequiredError = 70
NSXMLParserUndeclaredEntityError = 26
NSXMLParserUnfinishedTagError = 77
NSXMLParserUnknownEncodingError = 31
NSXMLParserUnparsedEntityError = 28
NSXMLParserXMLDeclNotFinishedError = 57
NSXMLParserXMLDeclNotStartedError = 56
NSXMLProcessingInstructionKind = 5
NSXMLTextKind = 7
NSYearCalendarUnit = 4
NS_BLOCKS_AVAILABLE = 1
NS_BigEndian = 2
NS_LittleEndian = 1
NS_UNICHAR_IS_EIGHT_BIT = 0
NS_UnknownByteOrder = 0 | PypiClean |
/Ngoto-0.0.39-py3-none-any.whl/ngoto/core/util/rich/_inspect.py | from __future__ import absolute_import
import inspect
from inspect import cleandoc, getdoc, getfile, isclass, ismodule, signature
from typing import Any, Collection, Iterable, Optional, Tuple, Type, Union
from .console import Group, RenderableType
from .control import escape_control_codes
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin
from .panel import Panel
from .pretty import Pretty
from .table import Table
from .text import Text, TextType
def _first_paragraph(doc: str) -> str:
"""Get the first paragraph from a docstring."""
paragraph, _, _ = doc.partition("\n\n")
return paragraph
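# Illustrative: _first_paragraph("Summary line.\n\nFurther details.") returns "Summary line."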
class Inspect(JupyterMixin):
"""A renderable to inspect any Python Object.
Args:
obj (Any): An object to inspect.
title (str, optional): Title to display over inspect result, or None to use the object's type. Defaults to None.
help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
methods (bool, optional): Enable inspection of callables. Defaults to False.
docs (bool, optional): Also render doc strings. Defaults to True.
private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
sort (bool, optional): Sort attributes alphabetically. Defaults to True.
all (bool, optional): Show all attributes. Defaults to False.
value (bool, optional): Pretty print value of object. Defaults to True.
"""
def __init__(
self,
obj: Any,
*,
title: Optional[TextType] = None,
help: bool = False,
methods: bool = False,
docs: bool = True,
private: bool = False,
dunder: bool = False,
sort: bool = True,
all: bool = True,
value: bool = True,
) -> None:
self.highlighter = ReprHighlighter()
self.obj = obj
self.title = title or self._make_title(obj)
if all:
methods = private = dunder = True
self.help = help
self.methods = methods
self.docs = docs or help
self.private = private or dunder
self.dunder = dunder
self.sort = sort
self.value = value
def _make_title(self, obj: Any) -> Text:
"""Make a default title."""
title_str = (
str(obj)
if (isclass(obj) or callable(obj) or ismodule(obj))
else str(type(obj))
)
title_text = self.highlighter(title_str)
return title_text
def __rich__(self) -> Panel:
return Panel.fit(
Group(*self._render()),
title=self.title,
border_style="scope.border",
padding=(0, 1),
)
def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
"""Get a signature for a callable."""
try:
_signature = str(signature(obj)) + ":"
except ValueError:
_signature = "(...)"
except TypeError:
return None
source_filename: Optional[str] = None
try:
source_filename = getfile(obj)
except (OSError, TypeError):
# OSError is raised if obj has no source file, e.g. when defined in REPL.
pass
callable_name = Text(name, style="inspect.callable")
if source_filename:
callable_name.stylize(f"link file://{source_filename}")
signature_text = self.highlighter(_signature)
qualname = name or getattr(obj, "__qualname__", name)
# If obj is a module, there may be classes (which are callable) to display
if inspect.isclass(obj):
prefix = "class"
elif inspect.iscoroutinefunction(obj):
prefix = "async def"
else:
prefix = "def"
qual_signature = Text.assemble(
(f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"),
(qualname, "inspect.callable"),
signature_text,
)
return qual_signature
def _render(self) -> Iterable[RenderableType]:
"""Render object."""
def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
key, (_error, value) = item
return (callable(value), key.strip("_").lower())
def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
"""Get attribute or any exception."""
try:
return (None, getattr(obj, attr_name))
except Exception as error:
return (error, None)
obj = self.obj
keys = dir(obj)
total_items = len(keys)
if not self.dunder:
keys = [key for key in keys if not key.startswith("__")]
if not self.private:
keys = [key for key in keys if not key.startswith("_")]
not_shown_count = total_items - len(keys)
items = [(key, safe_getattr(key)) for key in keys]
if self.sort:
items.sort(key=sort_items)
items_table = Table.grid(padding=(0, 1), expand=False)
items_table.add_column(justify="right")
add_row = items_table.add_row
highlighter = self.highlighter
if callable(obj):
signature = self._get_signature("", obj)
if signature is not None:
yield signature
yield ""
if self.docs:
_doc = self._get_formatted_doc(obj)
if _doc is not None:
doc_text = Text(_doc, style="inspect.help")
doc_text = highlighter(doc_text)
yield doc_text
yield ""
if self.value and not (isclass(obj) or callable(obj) or ismodule(obj)):
yield Panel(
Pretty(obj, indent_guides=True, max_length=10, max_string=60),
border_style="inspect.value.border",
)
yield ""
for key, (error, value) in items:
key_text = Text.assemble(
(
key,
"inspect.attr.dunder" if key.startswith("__") else "inspect.attr",
),
(" =", "inspect.equals"),
)
if error is not None:
warning = key_text.copy()
warning.stylize("inspect.error")
add_row(warning, highlighter(repr(error)))
continue
if callable(value):
if not self.methods:
continue
_signature_text = self._get_signature(key, value)
if _signature_text is None:
add_row(key_text, Pretty(value, highlighter=highlighter))
else:
if self.docs:
docs = self._get_formatted_doc(value)
if docs is not None:
_signature_text.append("\n" if "\n" in docs else " ")
doc = highlighter(docs)
doc.stylize("inspect.doc")
_signature_text.append(doc)
add_row(key_text, _signature_text)
else:
add_row(key_text, Pretty(value, highlighter=highlighter))
if items_table.row_count:
yield items_table
elif not_shown_count:
yield Text.from_markup(
f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
)
def _get_formatted_doc(self, object_: Any) -> Optional[str]:
"""
Extract the docstring of an object, process it, and return it.
The processing consists of cleaning up the docstring's indentation,
taking only its first paragraph if `self.help` is not True,
and escaping its control codes.
Args:
object_ (Any): the object to get the docstring from.
Returns:
Optional[str]: the processed docstring, or None if no docstring was found.
"""
docs = getdoc(object_)
if docs is None:
return None
docs = cleandoc(docs).strip()
if not self.help:
docs = _first_paragraph(docs)
return escape_control_codes(docs)
def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]:
"""Returns the MRO of an object's class, or of the object itself if it's a class."""
if not hasattr(obj, "__mro__"):
# N.B. we cannot use `if type(obj) is type` here because it doesn't work with
# some types of classes, such as the ones that use abc.ABCMeta.
obj = type(obj)
return getattr(obj, "__mro__", ())
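# Illustrative: get_object_types_mro(3) and get_object_types_mro(int) both
# return (<class 'int'>, <class 'object'>).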
def get_object_types_mro_as_strings(obj: object) -> Collection[str]:
"""
Returns the MRO of an object's class as fully qualified names, or of the object itself if it's a class.
Examples:
`get_object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']`
"""
return [
f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}'
for type_ in get_object_types_mro(obj)
]
def is_object_one_of_types(
obj: object, fully_qualified_types_names: Collection[str]
) -> bool:
"""
Returns `True` if the given object's class (or the object itself, if it's a class) has one of the
fully qualified names in its MRO.
"""
for type_name in get_object_types_mro_as_strings(obj):
if type_name in fully_qualified_types_names:
return True
return False | PypiClean |
/NeodroidVision-0.3.0-py36-none-any.whl/NeodroidVision-0.3.0.dist-info/LICENSE.md | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1
through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or
are under common control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii)
beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source
code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including
but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as
indicated by a copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work
and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or
additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the
Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright
owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including
but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems
that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "
Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been
received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to
You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce,
prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a
perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise
transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are
necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (
including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within
the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this
License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with
or without modifications, and in Source or Object form, provided that You meet the following conditions:
(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent,
trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to
any part of the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You
distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those
notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a
NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided
along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such
third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not
modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be
construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms
and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a
whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in
this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for
inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any
additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any
separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product
names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and
each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT,
MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness
of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this
License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or
otherwise, unless required by applicable law (such as deliberate and grossly negligent acts)
or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a result of this License or out of the use or
inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure
or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the
possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose
to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or
rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and
on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and
hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Christian Heider Nielsen
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "
AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
| PypiClean |
/Flask-MDBootstrap-3.0.5.tar.gz/Flask-MDBootstrap-3.0.5/flask_mdbootstrap/static/MDB-Pro/js/modules/material-select/material-select-view.min.js | !function(e){var t={};function i(n){if(t[n])return t[n].exports;var r=t[n]={i:n,l:!1,exports:{}};return e[n].call(r.exports,r,r.exports,i),r.l=!0,r.exports}i.m=e,i.c=t,i.d=function(e,t,n){i.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,t){if(1&t&&(e=i(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(i.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var r in e)i.d(n,r,function(t){return e[t]}.bind(null,r));return n},i.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(t,"a",t),t},i.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},i.p="",i(i.s=123)}([function(e,t,i){(function(t){var i=function(e){return e&&e.Math==Math&&e};e.exports=i("object"==typeof globalThis&&globalThis)||i("object"==typeof window&&window)||i("object"==typeof self&&self)||i("object"==typeof t&&t)||Function("return this")()}).call(this,i(54))},function(e,t){e.exports=function(e){try{return!!e()}catch(e){return!0}}},function(e,t,i){var n=i(0),r=i(12),a=i(26),o=i(46),s=n.Symbol,l=r("wks");e.exports=function(e){return l[e]||(l[e]=o&&s[e]||(o?s:a)("Symbol."+e))}},function(e,t){var i={}.hasOwnProperty;e.exports=function(e,t){return i.call(e,t)}},function(e,t,i){var n=i(0),r=i(22).f,a=i(6),o=i(14),s=i(21),l=i(47),c=i(48);e.exports=function(e,t){var i,u,p,d,h,f=e.target,v=e.global,g=e.stat;if(i=v?n:g?n[f]||s(f,{}):(n[f]||{}).prototype)for(u in t){if(d=t[u],p=e.noTargetGet?(h=r(i,u))&&h.value:i[u],!c(v?u:f+(g?".":"#")+u,e.forced)&&void 0!==p){if(typeof d==typeof p)continue;l(d,p)}(e.sham||p&&p.sham)&&a(d,"sham",!0),o(i,u,d,e)}}},function(e,t){e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},function(e,t,i){var n=i(7),r=i(9),a=i(18);e.exports=n?function(e,t,i){return r.f(e,t,a(1,i))}:function(e,t,i){return e[t]=i,e}},function(e,t,i){var n=i(1);e.exports=!n((function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a}))},function(e,t,i){var n=i(5);e.exports=function(e){if(!n(e))throw TypeError(String(e)+" is not an object");return e}},function(e,t,i){var n=i(7),r=i(33),a=i(8),o=i(20),s=Object.defineProperty;t.f=n?s:function(e,t,i){if(a(e),t=o(t,!0),a(i),r)try{return s(e,t,i)}catch(e){}if("get"in i||"set"in i)throw TypeError("Accessors not supported");return"value"in i&&(e[t]=i.value),e}},function(e,t,i){var n=i(27),r=i(13);e.exports=function(e){return n(r(e))}},function(e,t,i){var n=i(15),r=Math.min;e.exports=function(e){return e>0?r(n(e),9007199254740991):0}},function(e,t,i){var n=i(30),r=i(55);(e.exports=function(e,t){return r[e]||(r[e]=void 0!==t?t:{})})("versions",[]).push({version:"3.3.2",mode:n?"pure":"global",copyright:"© 2019 Denis Pushkarev (zloirock.ru)"})},function(e,t){e.exports=function(e){if(null==e)throw TypeError("Can't call method on "+e);return e}},function(e,t,i){var n=i(0),r=i(12),a=i(6),o=i(3),s=i(21),l=i(34),c=i(28),u=c.get,p=c.enforce,d=String(l).split("toString");r("inspectSource",(function(e){return l.call(e)})),(e.exports=function(e,t,i,r){var l=!!r&&!!r.unsafe,c=!!r&&!!r.enumerable,u=!!r&&!!r.noTargetGet;"function"==typeof i&&("string"!=typeof 
t||o(i,"name")||a(i,"name",t),p(i).source=d.join("string"==typeof t?t:"")),e!==n?(l?!u&&e[t]&&(c=!0):delete e[t],c?e[t]=i:a(e,t,i)):c?e[t]=i:s(t,i)})(Function.prototype,"toString",(function(){return"function"==typeof this&&u(this).source||l.call(this)}))},function(e,t){var i=Math.ceil,n=Math.floor;e.exports=function(e){return isNaN(e=+e)?0:(e>0?n:i)(e)}},function(e,t,i){var n=i(13);e.exports=function(e){return Object(n(e))}},function(e,t){var i={}.toString;e.exports=function(e){return i.call(e).slice(8,-1)}},function(e,t){e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},function(e,t){e.exports={}},function(e,t,i){var n=i(5);e.exports=function(e,t){if(!n(e))return e;var i,r;if(t&&"function"==typeof(i=e.toString)&&!n(r=i.call(e)))return r;if("function"==typeof(i=e.valueOf)&&!n(r=i.call(e)))return r;if(!t&&"function"==typeof(i=e.toString)&&!n(r=i.call(e)))return r;throw TypeError("Can't convert object to primitive value")}},function(e,t,i){var n=i(0),r=i(6);e.exports=function(e,t){try{r(n,e,t)}catch(i){n[e]=t}return t}},function(e,t,i){var n=i(7),r=i(40),a=i(18),o=i(10),s=i(20),l=i(3),c=i(33),u=Object.getOwnPropertyDescriptor;t.f=n?u:function(e,t){if(e=o(e),t=s(t,!0),c)try{return u(e,t)}catch(e){}if(l(e,t))return a(!r.f.call(e,t),e[t])}},function(e,t){e.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},function(e,t,i){var n=i(57),r=i(27),a=i(16),o=i(11),s=i(42),l=[].push,c=function(e){var t=1==e,i=2==e,c=3==e,u=4==e,p=6==e,d=5==e||p;return function(h,f,v,g){for(var b,y,m=a(h),$=r(m),w=n(f,v,3),S=o($.length),k=0,x=g||s,O=t?x(h,S):i?x(h,0):void 0;S>k;k++)if((d||k in $)&&(y=w(b=$[k],k,m),e))if(t)O[k]=y;else if(y)switch(e){case 3:return!0;case 5:return b;case 6:return k;case 2:l.call(O,b)}else if(u)return!1;return p?-1:c||u?u:O}};e.exports={forEach:c(0),map:c(1),filter:c(2),some:c(3),every:c(4),find:c(5),findIndex:c(6)}},function(e,t,i){var n=i(12),r=i(26),a=n("keys");e.exports=function(e){return a[e]||(a[e]=r(e))}},function(e,t){var i=0,n=Math.random();e.exports=function(e){return"Symbol("+String(void 0===e?"":e)+")_"+(++i+n).toString(36)}},function(e,t,i){var n=i(1),r=i(17),a="".split;e.exports=n((function(){return!Object("z").propertyIsEnumerable(0)}))?function(e){return"String"==r(e)?a.call(e,""):Object(e)}:Object},function(e,t,i){var n,r,a,o=i(56),s=i(0),l=i(5),c=i(6),u=i(3),p=i(25),d=i(19),h=s.WeakMap;if(o){var f=new h,v=f.get,g=f.has,b=f.set;n=function(e,t){return b.call(f,e,t),t},r=function(e){return v.call(f,e)||{}},a=function(e){return g.call(f,e)}}else{var y=p("state");d[y]=!0,n=function(e,t){return c(e,y,t),t},r=function(e){return u(e,y)?e[y]:{}},a=function(e){return u(e,y)}}e.exports={set:n,get:r,has:a,enforce:function(e){return a(e)?r(e):n(e,{})},getterFor:function(e){return function(t){var i;if(!l(t)||(i=r(t)).type!==e)throw TypeError("Incompatible receiver, "+e+" required");return i}}}},function(e,t,i){var n=i(37),r=i(23).concat("length","prototype");t.f=Object.getOwnPropertyNames||function(e){return n(e,r)}},function(e,t){e.exports=!1},function(e,t,i){var n=i(17);e.exports=Array.isArray||function(e){return"Array"==n(e)}},function(e,t,i){var n=i(45),r=i(0),a=function(e){return"function"==typeof e?e:void 0};e.exports=function(e,t){return arguments.length<2?a(n[e])||a(r[e]):n[e]&&n[e][t]||r[e]&&r[e][t]}},function(e,t,i){var n=i(7),r=i(1),a=i(36);e.exports=!n&&!r((function(){return 7!=Object.defineProperty(a("div"),"a",{get:function(){return 
7}}).a}))},function(e,t,i){var n=i(12);e.exports=n("native-function-to-string",Function.toString)},function(e,t,i){var n=i(8),r=i(63),a=i(23),o=i(19),s=i(64),l=i(36),c=i(25)("IE_PROTO"),u=function(){},p=function(){var e,t=l("iframe"),i=a.length;for(t.style.display="none",s.appendChild(t),t.src=String("javascript:"),(e=t.contentWindow.document).open(),e.write("<script>document.F=Object<\/script>"),e.close(),p=e.F;i--;)delete p.prototype[a[i]];return p()};e.exports=Object.create||function(e,t){var i;return null!==e?(u.prototype=n(e),i=new u,u.prototype=null,i[c]=e):i=p(),void 0===t?i:r(i,t)},o[c]=!0},function(e,t,i){var n=i(0),r=i(5),a=n.document,o=r(a)&&r(a.createElement);e.exports=function(e){return o?a.createElement(e):{}}},function(e,t,i){var n=i(3),r=i(10),a=i(39).indexOf,o=i(19);e.exports=function(e,t){var i,s=r(e),l=0,c=[];for(i in s)!n(o,i)&&n(s,i)&&c.push(i);for(;t.length>l;)n(s,i=t[l++])&&(~a(c,i)||c.push(i));return c}},function(e,t,i){var n=i(15),r=Math.max,a=Math.min;e.exports=function(e,t){var i=n(e);return i<0?r(i+t,0):a(i,t)}},function(e,t,i){var n=i(10),r=i(11),a=i(38),o=function(e){return function(t,i,o){var s,l=n(t),c=r(l.length),u=a(o,c);if(e&&i!=i){for(;c>u;)if((s=l[u++])!=s)return!0}else for(;c>u;u++)if((e||u in l)&&l[u]===i)return e||u||0;return!e&&-1}};e.exports={includes:o(!0),indexOf:o(!1)}},function(e,t,i){"use strict";var n={}.propertyIsEnumerable,r=Object.getOwnPropertyDescriptor,a=r&&!n.call({1:2},1);t.f=a?function(e){var t=r(this,e);return!!t&&t.enumerable}:n},function(e,t,i){var n=i(37),r=i(23);e.exports=Object.keys||function(e){return n(e,r)}},function(e,t,i){var n=i(5),r=i(31),a=i(2)("species");e.exports=function(e,t){var i;return r(e)&&("function"!=typeof(i=e.constructor)||i!==Array&&!r(i.prototype)?n(i)&&null===(i=i[a])&&(i=void 0):i=void 0),new(void 0===i?Array:i)(0===t?0:t)}},function(e,t){t.f=Object.getOwnPropertySymbols},function(e,t,i){"use strict";var n=i(4),r=i(24).find,a=i(51),o=!0;"find"in[]&&Array(1).find((function(){o=!1})),n({target:"Array",proto:!0,forced:o},{find:function(e){return r(this,e,arguments.length>1?arguments[1]:void 0)}}),a("find")},function(e,t,i){e.exports=i(0)},function(e,t,i){var n=i(1);e.exports=!!Object.getOwnPropertySymbols&&!n((function(){return!String(Symbol())}))},function(e,t,i){var n=i(3),r=i(53),a=i(22),o=i(9);e.exports=function(e,t){for(var i=r(t),s=o.f,l=a.f,c=0;c<i.length;c++){var u=i[c];n(e,u)||s(e,u,l(t,u))}}},function(e,t,i){var n=i(1),r=/#|\.prototype\./,a=function(e,t){var i=s[o(e)];return i==c||i!=l&&("function"==typeof t?n(t):!!t)},o=a.normalize=function(e){return String(e).replace(r,".").toLowerCase()},s=a.data={},l=a.NATIVE="N",c=a.POLYFILL="P";e.exports=a},function(e,t){e.exports=function(e){if("function"!=typeof e)throw TypeError(String(e)+" is not a function");return e}},function(e,t,i){var n=i(1),r=i(2)("species");e.exports=function(e){return!n((function(){var t=[];return(t.constructor={})[r]=function(){return{foo:1}},1!==t[e](Boolean).foo}))}},function(e,t,i){var n=i(2),r=i(35),a=i(6),o=n("unscopables"),s=Array.prototype;null==s[o]&&a(s,o,r(null)),e.exports=function(e){s[o][e]=!0}},function(e,t,i){"use strict";var n=i(1);e.exports=function(e,t){var i=[][e];return!i||!n((function(){i.call(null,t||function(){throw 1},1)}))}},function(e,t,i){var n=i(32),r=i(29),a=i(43),o=i(8);e.exports=n("Reflect","ownKeys")||function(e){var t=r.f(o(e)),i=a.f;return i?t.concat(i(e)):t}},function(e,t){var i;i=function(){return this}();try{i=i||new Function("return this")()}catch(e){"object"==typeof 
window&&(i=window)}e.exports=i},function(e,t,i){var n=i(0),r=i(21),a=n["__core-js_shared__"]||r("__core-js_shared__",{});e.exports=a},function(e,t,i){var n=i(0),r=i(34),a=n.WeakMap;e.exports="function"==typeof a&&/native code/.test(r.call(a))},function(e,t,i){var n=i(49);e.exports=function(e,t,i){if(n(e),void 0===t)return e;switch(i){case 0:return function(){return e.call(t)};case 1:return function(i){return e.call(t,i)};case 2:return function(i,n){return e.call(t,i,n)};case 3:return function(i,n,r){return e.call(t,i,n,r)}}return function(){return e.apply(t,arguments)}}},,,,,function(e,t,i){"use strict";var n,r,a=i(82),o=RegExp.prototype.exec,s=String.prototype.replace,l=o,c=(n=/a/,r=/b*/g,o.call(n,"a"),o.call(r,"a"),0!==n.lastIndex||0!==r.lastIndex),u=void 0!==/()??/.exec("")[1];(c||u)&&(l=function(e){var t,i,n,r,l=this;return u&&(i=new RegExp("^"+l.source+"$(?!\\s)",a.call(l))),c&&(t=l.lastIndex),n=o.call(l,e),c&&n&&(l.lastIndex=l.global?n.index+n[0].length:t),u&&n&&n.length>1&&s.call(n[0],i,(function(){for(r=1;r<arguments.length-2;r++)void 0===arguments[r]&&(n[r]=void 0)})),n}),e.exports=l},function(e,t,i){var n=i(7),r=i(9),a=i(8),o=i(41);e.exports=n?Object.defineProperties:function(e,t){a(e);for(var i,n=o(t),s=n.length,l=0;s>l;)r.f(e,i=n[l++],t[i]);return e}},function(e,t,i){var n=i(32);e.exports=n("document","documentElement")},,function(e,t,i){"use strict";var n=i(4),r=i(39).indexOf,a=i(52),o=[].indexOf,s=!!o&&1/[1].indexOf(1,-0)<0,l=a("indexOf");n({target:"Array",proto:!0,forced:s||l},{indexOf:function(e){return s?o.apply(this,arguments)||0:r(this,e,arguments.length>1?arguments[1]:void 0)}})},function(e,t,i){"use strict";var n=i(20),r=i(9),a=i(18);e.exports=function(e,t,i){var o=n(t);o in e?r.f(e,o,a(0,i)):e[o]=i}},function(e,t,i){"use strict";var n=i(4),r=i(62);n({target:"RegExp",proto:!0,forced:/./.exec!==r},{exec:r})},,function(e,t){e.exports="\t\n\v\f\r \u2028\u2029\ufeff"},,,,,function(e,t,i){"use strict";var n=i(4),r=i(1),a=i(31),o=i(5),s=i(16),l=i(11),c=i(67),u=i(42),p=i(50),d=i(2)("isConcatSpreadable"),h=!r((function(){var e=[];return e[d]=!1,e.concat()[0]!==e})),f=p("concat"),v=function(e){if(!o(e))return!1;var t=e[d];return void 0!==t?!!t:a(e)};n({target:"Array",proto:!0,forced:!h||!f},{concat:function(e){var t,i,n,r,a,o=s(this),p=u(o,0),d=0;for(t=-1,n=arguments.length;t<n;t++)if(a=-1===t?o:arguments[t],v(a)){if(d+(r=l(a.length))>9007199254740991)throw TypeError("Maximum allowed index exceeded");for(i=0;i<r;i++,d++)i in a&&c(p,d,a[i])}else{if(d>=9007199254740991)throw TypeError("Maximum allowed index exceeded");c(p,d++,a)}return p.length=d,p}})},,,function(e,t,i){"use strict";var n=i(4),r=i(27),a=i(10),o=i(52),s=[].join,l=r!=Object,c=o("join",",");n({target:"Array",proto:!0,forced:l||c},{join:function(e){return s.call(a(this),void 0===e?",":e)}})},function(e,t,i){var n=i(15),r=i(13),a=function(e){return function(t,i){var a,o,s=String(r(t)),l=n(i),c=s.length;return l<0||l>=c?e?"":void 0:(a=s.charCodeAt(l))<55296||a>56319||l+1===c||(o=s.charCodeAt(l+1))<56320||o>57343?e?s.charAt(l):a:e?s.slice(l,l+2):o-56320+(a-55296<<10)+65536}};e.exports={codeAt:a(!1),charAt:a(!0)}},,,function(e,t,i){"use strict";var n=i(8);e.exports=function(){var e=n(this),t="";return e.global&&(t+="g"),e.ignoreCase&&(t+="i"),e.multiline&&(t+="m"),e.dotAll&&(t+="s"),e.unicode&&(t+="u"),e.sticky&&(t+="y"),t}},,function(e,t,i){var n=i(13),r="["+i(70)+"]",a=RegExp("^"+r+r+"*"),o=RegExp(r+r+"*$"),s=function(e){return function(t){var i=String(n(t));return 
1&e&&(i=i.replace(a,"")),2&e&&(i=i.replace(o,"")),i}};e.exports={start:s(1),end:s(2),trim:s(3)}},,,function(e,t,i){"use strict";var n=i(6),r=i(14),a=i(1),o=i(2),s=i(62),l=o("species"),c=!a((function(){var e=/./;return e.exec=function(){var e=[];return e.groups={a:"7"},e},"7"!=="".replace(e,"$<a>")})),u=!a((function(){var e=/(?:)/,t=e.exec;e.exec=function(){return t.apply(this,arguments)};var i="ab".split(e);return 2!==i.length||"a"!==i[0]||"b"!==i[1]}));e.exports=function(e,t,i,p){var d=o(e),h=!a((function(){var t={};return t[d]=function(){return 7},7!=""[e](t)})),f=h&&!a((function(){var t=!1,i=/a/;return i.exec=function(){return t=!0,null},"split"===e&&(i.constructor={},i.constructor[l]=function(){return i}),i[d](""),!t}));if(!h||!f||"replace"===e&&!c||"split"===e&&!u){var v=/./[d],g=i(d,""[e],(function(e,t,i,n,r){return t.exec===s?h&&!r?{done:!0,value:v.call(t,i,n)}:{done:!0,value:e.call(i,t,n)}:{done:!1}})),b=g[0],y=g[1];r(String.prototype,e,b),r(RegExp.prototype,d,2==t?function(e,t){return y.call(e,this,t)}:function(e){return y.call(e,this)}),p&&n(RegExp.prototype[d],"sham",!0)}}},function(e,t,i){var n=i(17),r=i(62);e.exports=function(e,t){var i=e.exec;if("function"==typeof i){var a=i.call(e,t);if("object"!=typeof a)throw TypeError("RegExp exec method returned something other than an Object or null");return a}if("RegExp"!==n(e))throw TypeError("RegExp#exec called on incompatible receiver");return r.call(e,t)}},,,function(e,t,i){"use strict";var n=i(87),r=i(8),a=i(16),o=i(11),s=i(15),l=i(13),c=i(94),u=i(88),p=Math.max,d=Math.min,h=Math.floor,f=/\$([$&'`]|\d\d?|<[^>]*>)/g,v=/\$([$&'`]|\d\d?)/g;n("replace",2,(function(e,t,i){return[function(i,n){var r=l(this),a=null==i?void 0:i[e];return void 0!==a?a.call(i,r,n):t.call(String(r),i,n)},function(e,a){var l=i(t,e,this,a);if(l.done)return l.value;var h=r(e),f=String(this),v="function"==typeof a;v||(a=String(a));var g=h.global;if(g){var b=h.unicode;h.lastIndex=0}for(var y=[];;){var m=u(h,f);if(null===m)break;if(y.push(m),!g)break;""===String(m[0])&&(h.lastIndex=c(f,o(h.lastIndex),b))}for(var $,w="",S=0,k=0;k<y.length;k++){m=y[k];for(var x=String(m[0]),O=p(d(s(m.index),f.length),0),C=[],L=1;L<m.length;L++)C.push(void 0===($=m[L])?$:String($));var A=m.groups;if(v){var M=[x].concat(C,O,f);void 0!==A&&M.push(A);var _=String(a.apply(void 0,M))}else _=n(x,f,O,C,A,a);O>=S&&(w+=f.slice(S,O)+_,S=O+x.length)}return w+f.slice(S)}];function n(e,i,n,r,o,s){var l=n+e.length,c=r.length,u=v;return void 0!==o&&(o=a(o),u=f),t.call(s,u,(function(t,a){var s;switch(a.charAt(0)){case"$":return"$";case"&":return e;case"`":return i.slice(0,n);case"'":return i.slice(l);case"<":s=o[a.slice(1,-1)];break;default:var u=+a;if(0===u)return t;if(u>c){var p=h(u/10);return 0===p?t:p<=c?void 0===r[p-1]?a.charAt(1):r[p-1]+a.charAt(1):t}s=r[u-1]}return void 0===s?"":s}))}}))},function(e,t,i){"use strict";var n=i(4),r=i(24).filter;n({target:"Array",proto:!0,forced:!i(50)("filter")},{filter:function(e){return r(this,e,arguments.length>1?arguments[1]:void 0)}})},,function(e,t,i){"use strict";var n=i(79).charAt;e.exports=function(e,t,i){return t+(i?n(e,t).length:1)}},,,,function(e,t,i){"use strict";var n=i(4),r=i(24).map;n({target:"Array",proto:!0,forced:!i(50)("map")},{map:function(e){return r(this,e,arguments.length>1?arguments[1]:void 0)}})},,function(e,t,i){"use strict";var n=i(87),r=i(101),a=i(8),o=i(13),s=i(106),l=i(94),c=i(11),u=i(88),p=i(62),d=i(1),h=[].push,f=Math.min,v=!d((function(){return!RegExp(4294967295,"y")}));n("split",2,(function(e,t,i){var n;return 
n="c"=="abbc".split(/(b)*/)[1]||4!="test".split(/(?:)/,-1).length||2!="ab".split(/(?:ab)*/).length||4!=".".split(/(.?)(.?)/).length||".".split(/()()/).length>1||"".split(/.?/).length?function(e,i){var n=String(o(this)),a=void 0===i?4294967295:i>>>0;if(0===a)return[];if(void 0===e)return[n];if(!r(e))return t.call(n,e,a);for(var s,l,c,u=[],d=(e.ignoreCase?"i":"")+(e.multiline?"m":"")+(e.unicode?"u":"")+(e.sticky?"y":""),f=0,v=new RegExp(e.source,d+"g");(s=p.call(v,n))&&!((l=v.lastIndex)>f&&(u.push(n.slice(f,s.index)),s.length>1&&s.index<n.length&&h.apply(u,s.slice(1)),c=s[0].length,f=l,u.length>=a));)v.lastIndex===s.index&&v.lastIndex++;return f===n.length?!c&&v.test("")||u.push(""):u.push(n.slice(f)),u.length>a?u.slice(0,a):u}:"0".split(void 0,0).length?function(e,i){return void 0===e&&0===i?[]:t.call(this,e,i)}:t,[function(t,i){var r=o(this),a=null==t?void 0:t[e];return void 0!==a?a.call(t,r,i):n.call(String(r),t,i)},function(e,r){var o=i(n,e,this,r,n!==t);if(o.done)return o.value;var p=a(e),d=String(this),h=s(p,RegExp),g=p.unicode,b=(p.ignoreCase?"i":"")+(p.multiline?"m":"")+(p.unicode?"u":"")+(v?"y":"g"),y=new h(v?p:"^(?:"+p.source+")",b),m=void 0===r?4294967295:r>>>0;if(0===m)return[];if(0===d.length)return null===u(y,d)?[d]:[];for(var $=0,w=0,S=[];w<d.length;){y.lastIndex=v?w:0;var k,x=u(y,v?d:d.slice(w));if(null===x||(k=f(c(y.lastIndex+(v?0:w)),d.length))===$)w=l(d,w,g);else{if(S.push(d.slice($,w)),S.length===m)return S;for(var O=1;O<=x.length-1;O++)if(S.push(x[O]),S.length===m)return S;w=$=k}}return S.push(d.slice($)),S}]}),!v)},function(e,t,i){var n=i(5),r=i(17),a=i(2)("match");e.exports=function(e){var t;return n(e)&&(void 0!==(t=e[a])?!!t:"RegExp"==r(e))}},,,,,function(e,t,i){var n=i(8),r=i(49),a=i(2)("species");e.exports=function(e,t){var i,o=n(e).constructor;return void 0===o||null==(i=n(o)[a])?t:r(i)}},,,,,,function(e,t,i){var n=i(4),r=i(16),a=i(41);n({target:"Object",stat:!0,forced:i(1)((function(){a(1)}))},{keys:function(e){return a(r(e))}})},function(e,t,i){"use strict";var n=i(4),r=i(84).trim;n({target:"String",proto:!0,forced:i(121)("trim")},{trim:function(){return r(this)}})},,,,function(e,t,i){"use strict";i.r(t),i.d(t,"default",(function(){return r}));i(75),i(92),i(44),i(66),i(78),i(68),i(91),i(100),i(113);function n(e,t){for(var i=0;i<t.length;i++){var n=t[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(e,n.key,n)}}var r=function(){function e(t){!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.view=t}var t,i,r;return t=e,(i=[{key:"destroy",value:function(){var 
e=this.view.$nativeSelect.data("select-id");this.view.$nativeSelect.data("select-id",null).removeClass("initialized"),this.view.$nativeSelect.parent().find("span.caret").remove(),this.view.$nativeSelect.parent().find("input").remove(),this.view.$nativeSelect.unwrap(),$("ul#select-options-".concat(e)).remove()}},{key:"render",value:function(){this.setWrapperClasses(),this.setMaterialSelectInitialValue(),this.view.$nativeSelect.data("select-id",this.view.properties.id),this.view.$nativeSelect.before(this.view.$selectWrapper),this.view.options.showResetButton&&this.appendResetButton(),this.appendDropdownIcon(),this.appendMaterialSelect(),this.appendMaterialOptionsList(),this.appendNativeSelect(),this.appendSelectLabel(),this.appendCustomTemplateParts(),this.shouldValidate&&this.appendValidationFeedbackElements(),this.isRequired&&this.enableValidation(),this.isDisabled||(this.setMaterialOptionsListMaxHeight(),this.view.dropdown=this.view.$materialSelect.dropdown({hover:!1,closeOnClick:!1,resetScroll:!1})),this.shouldInheritTabindex&&this.view.$materialSelect.attr("tabindex",this.view.$nativeSelect.attr("tabindex")),this.isDefaultMaterialInput&&this.view.$mainLabel.css("top","-7px"),this.isCustomSelect&&this.view.$materialSelect.css({display:"inline-block",width:"100%",height:"calc(1.5em + .75rem + 2px)",padding:".375rem 1.75rem .375rem .75rem",fontSize:"1rem",lineHeight:"1.5",backgroundColor:"#fff",border:"1px solid #ced4da"}),this.addAccessibilityAttributes(),this.markInitialized()}},{key:"setWrapperClasses",value:function(){this.isDefaultMaterialInput?this.view.$selectWrapper.addClass(this.view.$nativeSelect.attr("class").split(" ").filter((function(e){return"md-form"!==e})).join(" ")).css({marginTop:"1.5rem",marginBottom:"1.5rem"}):this.view.$selectWrapper.addClass(this.view.$nativeSelect.attr("class"))}},{key:"setMaterialSelectInitialValue",value:function(){if(this.view.options.placeholder)this.view.$materialSelect.attr("placeholder",this.view.options.placeholder),this.view.$nativeSelect.find('option[value=""][selected][disabled][data-mdb-placeholder]').length||this.view.$nativeSelect.prepend('<option value="" selected disabled data-mdb-placeholder></option>');else{var e=this.view.$materialSelectInitialOption.replace(/"/g,""").replace(/ +/g," 
").trim();this.view.$materialSelect.val(e)}}},{key:"appendDropdownIcon",value:function(){this.isDisabled&&this.view.$dropdownIcon.addClass("disabled"),this.view.$selectWrapper.append(this.view.$dropdownIcon)}},{key:"appendResetButton",value:function(){this.isDisabled&&this.view.$btnReset.addClass("disabled"),-1===this.view.$nativeSelect.get(0).selectedIndex&&this.view.$btnReset.hide(),this.view.$selectWrapper.append(this.view.$btnReset)}},{key:"appendMaterialSelect",value:function(){this.view.$selectWrapper.append(this.view.$materialSelect)}},{key:"appendMaterialOptionsList",value:function(){this.isSearchable&&this.appendSearchInputOption(),this.isEditable&&this.isSearchable&&this.appendAddOptionBtn(),this.buildMaterialOptions(),this.isMultiple&&this.appendToggleAllCheckbox(),this.view.$selectWrapper.append(this.view.$materialOptionsList)}},{key:"appendNativeSelect",value:function(){this.view.$nativeSelect.appendTo(this.view.$selectWrapper)}},{key:"appendSelectLabel",value:function(){(this.view.$materialSelect.val()||this.view.options.placeholder)&&this.view.$mainLabel.addClass("active"),this.view.$mainLabel[this.isDisabled?"addClass":"removeClass"]("disabled"),this.view.$mainLabel.appendTo(this.view.$selectWrapper)}},{key:"appendCustomTemplateParts",value:function(){var e=this;this.view.$customTemplateParts.each((function(t,i){$(i).appendTo(e.view.$materialOptionsList).wrap("<li></li>")})),this.view.$btnSave.appendTo(this.view.$selectWrapper).clone().appendTo(this.view.$materialOptionsList)}},{key:"appendValidationFeedbackElements",value:function(){this.view.$validFeedback.insertAfter(this.view.$selectWrapper),this.view.$invalidFeedback.insertAfter(this.view.$selectWrapper)}},{key:"enableValidation",value:function(){this.view.$nativeSelect.css({position:"absolute",top:"1rem",left:"0",height:"0",width:"0",opacity:"0",padding:"0","pointer-events":"none"}),-1===this.view.$nativeSelect.attr("style").indexOf("inline!important")&&this.view.$nativeSelect.attr("style","".concat(this.view.$nativeSelect.attr("style")," display: inline!important;")),this.view.$nativeSelect.attr("tabindex",-1),this.view.$nativeSelect.data("inherit-tabindex",!1)}},{key:"setMaterialOptionsListMaxHeight",value:function(){var e=$("<div />").appendTo($("body"));e.css({position:"absolute !important",visibility:"hidden !important",display:"block !important"}),this.view.$materialOptionsList.show();var t=this.view.$materialOptionsList.clone().appendTo(e),i=this.view.options.visibleOptions,n=0,r=t.find("li").not(".disabled"),a=r.first().height(),o=r.length;if(this.isSearchable&&(n+=this.view.$searchInput.height()),this.isMultiple&&(n+=this.view.$toggleAll.height()),this.view.$materialOptionsList.hide(),e.remove(),i>=0&&i<o){var s=a*i+n;this.view.$materialOptionsList.css("max-height",s),this.view.$materialSelect.data("maxheight",s)}}},{key:"addAccessibilityAttributes",value:function(){this.view.$materialSelect.attr({role:this.isSearchable?"combobox":"listbox","aria-multiselectable":this.isMultiple,"aria-disabled":this.isDisabled,"aria-required":this.isRequired,"aria-labelledby":this.view.$mainLabel.attr("id"),"aria-haspopup":!0,"aria-expanded":!1}),this.view.$searchInput&&this.view.$searchInput.attr("role","searchbox"),this.view.$materialOptionsList.find("li").each((function(){var 
e=$(this);e.attr({role:"option","aria-selected":e.hasClass("active"),"aria-disabled":e.hasClass("disabled")})}))}},{key:"markInitialized",value:function(){this.view.$nativeSelect.addClass("initialized")}},{key:"appendSearchInputOption",value:function(){var e=this.view.$nativeSelect.attr("searchable"),t=this.isDefaultMaterialInput?"":"md-form",i=this.isDefaultMaterialInput?"select-default mb-2":"";this.view.$searchInput=$('<span class="search-wrap ml-2"><div class="'.concat(t,' mt-0"><input type="text" class="search w-100 d-block ').concat(i,'" tabindex="-1" placeholder="').concat(e,'"></div></span>')),this.view.$materialOptionsList.append(this.view.$searchInput),this.view.$searchInput.on("click",(function(e){return e.stopPropagation()}))}},{key:"appendAddOptionBtn",value:function(){this.view.$searchInput.append(this.view.$addOptionBtn)}},{key:"buildMaterialOptions",value:function(){var e=this;this.view.$nativeSelectChildren.each((function(t,i){var n=$(i);if(n.is("option"))e.buildSingleOption(n,e.isMultiple?"multiple":"");else if(n.is("optgroup")){var r=$('<li class="optgroup"><span>'.concat(n.attr("label"),"</span></li>"));e.view.$materialOptionsList.append(r),n.children("option").each((function(t,i){e.buildSingleOption($(i),"optgroup-option")}))}}))}},{key:"appendToggleAllCheckbox",value:function(){var e=this.view.$materialOptionsList.find("li").first();e.hasClass("disabled")&&e.find("input").prop("disabled")?e.after(this.view.$toggleAll):this.view.$materialOptionsList.find("li").first().before(this.view.$toggleAll)}},{key:"addNewOption",value:function(){var e=this.view.$searchInput.find("input").val(),t=$('<option value="'.concat(e.toLowerCase(),'" selected>').concat(e,"</option>")).prop("selected",!0);this.isMultiple||this.view.$nativeSelectChildren.each((function(e,t){$(t).attr("selected",!1)})),this.view.$nativeSelect.append(t)}},{key:"buildSingleOption",value:function(e,t){var i=e.is(":disabled")?"disabled":"",n=e.is(":selected")?"active":"",r="optgroup-option"===t?"optgroup-option":"",a=e.data("icon"),o=e.data("fas")?'<i class="fa-pull-right m-2 fas fa-'.concat(e.data("fas")," ").concat(this.view.options.fasClasses,'"></i> '):"",s=e.data("far")?'<i class="fa-pull-right m-2 far fa-'.concat(e.data("far")," ").concat(this.view.options.farClasses,'"></i> '):"",l=e.data("fab")?'<i class="fa-pull-right m-2 fab fa-'.concat(e.data("fab")," ").concat(this.view.options.fabClasses,'"></i> '):"",c=e.attr("class"),u=a?'<img alt="" src="'.concat(a,'" class="').concat(c,'">'):"",p=this.isMultiple?'<input type="checkbox" class="form-check-input" '.concat(i,"/><label></label>"):"",d=e.data("secondary-text")?'<p class="text-muted pt-0 mb-0" disabled>'.concat(e.data("secondary-text"),"</p>"):"";this.view.$materialOptionsList.append($('<li class="'.concat(i," ").concat(n," ").concat(r,'">').concat(u,'<span class="filtrable ').concat(this.view.options.copyClassesOption?c:"",'">').concat(p," ").concat(e.html()," ").concat(o," ").concat(s," ").concat(l," ").concat(d,"</span></li>")))}},{key:"shouldValidate",get:function(){return this.view.options.validate}},{key:"shouldInheritTabindex",get:function(){return!1!==this.view.$nativeSelect.data("inherit-tabindex")}},{key:"isMultiple",get:function(){return this.view.isMultiple}},{key:"isSearchable",get:function(){return this.view.isSearchable}},{key:"isRequired",get:function(){return this.view.isRequired}},{key:"isEditable",get:function(){return this.view.isEditable}},{key:"isDisabled",get:function(){return 
this.view.isDisabled}},{key:"isDefaultMaterialInput",get:function(){return this.view.options.defaultMaterialInput}},{key:"isCustomSelect",get:function(){return this.view.$materialSelect.hasClass("custom-select")&&this.view.$materialSelect.hasClass("select-dropdown")}}])&&n(t.prototype,i),r&&n(t,r),e}()},,function(e,t,i){"use strict";var n=i(4),r=i(39).includes,a=i(51);n({target:"Array",proto:!0},{includes:function(e){return r(this,e,arguments.length>1?arguments[1]:void 0)}}),a("includes")},function(e,t,i){"use strict";var n=i(4),r=i(126),a=i(13);n({target:"String",proto:!0,forced:!i(127)("includes")},{includes:function(e){return!!~String(a(this)).indexOf(r(e),arguments.length>1?arguments[1]:void 0)}})},function(e,t,i){var n=i(1),r=i(70);e.exports=function(e){return n((function(){return!!r[e]()||"
"!="
"[e]()||r[e].name!==e}))}},,function(e,t,i){"use strict";i.r(t),i.d(t,"default",(function(){return a}));i(75),i(92),i(44),i(119),i(66),i(98),i(112),i(68),i(120),i(91),i(113);var n=i(117);function r(e,t){for(var i=0;i<t.length;i++){var n=t[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(e,n.key,n)}}var a=function(){function e(t,i){var r=i.options,a=i.properties.id;!function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}(this,e),this.properties={id:a,isMultiple:Boolean(t.attr("multiple")),isSearchable:Boolean(t.attr("searchable")),isRequired:Boolean(t.attr("required")),isEditable:Boolean(t.attr("editable"))},this.options=this._copyOptions(r),this.$nativeSelect=t,this.$selectWrapper=$('<div class="select-wrapper"></div>'),this.$materialOptionsList=$('<ul id="select-options-'.concat(this.properties.id,'" class="dropdown-content select-dropdown w-100 ').concat(this.properties.isMultiple?"multiple-select-dropdown":"",'"></ul>')),this.$materialSelectInitialOption=t.find("option:selected").text()||t.find("option:first").text()||"",this.$nativeSelectChildren=this.$nativeSelect.children("option, optgroup"),this.$materialSelect=$('<input type="text" class="'.concat(this.options.defaultMaterialInput?"browser-default custom-select multi-bs-select select-dropdown form-control":"select-dropdown form-control",'" ').concat(!this.options.validate&&'readonly="true"',' required="').concat(this.options.validate?"true":"false",'" ').concat(this.$nativeSelect.is(" :disabled")?"disabled":"",' data-activates="select-options-').concat(this.properties.id,'" value=""/>')),this.$dropdownIcon=this.options.defaultMaterialInput?"":$('<span class="caret">▼</span>'),this.$searchInput=null,this.$noSearchResultsInfo=$("<li><span><i>".concat(this.options.labels.noSearchResults,"</i></span></li>")),this.$toggleAll=$('<li class="select-toggle-all"><span><input type="checkbox" class="form-check-input"><label>'.concat(this.options.labels.selectAll,"</label></span></li>")),this.$addOptionBtn=$('<i class="select-add-option fas fa-plus"></i>'),this.$mainLabel=this._jQueryFallback(this.$nativeSelect.next("label.mdb-main-label"),$("label[for='".concat(this.properties.id,"']"))),this.$customTemplateParts=this._jQueryFallback(this.$nativeSelect.nextUntil("select",".mdb-select-template-part"),$("[data-mdb-select-template-part-for='".concat(this.properties.id,"']"))),this.$btnSave=this.$nativeSelect.nextUntil("select",".btn-save"),this.$btnReset=$('<span class="reset-select-btn">×</span>'),this.$validFeedback=$('<div class="valid-feedback">'.concat(this.options.labels.validFeedback,"</div>")),this.$invalidFeedback=$('<div class="invalid-feedback">'.concat(this.options.labels.invalidFeedback,"</div>")),this.keyCodes={tab:9,enter:13,shift:16,alt:18,esc:27,space:32,end:35,home:36,arrowUp:38,arrowDown:40},this.renderer=new n.default(this),this.dropdown=null}var t,i,a;return t=e,a=[{key:"isMobileDevice",get:function(){return/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent)}}],(i=[{key:"destroy",value:function(){this.renderer.destroy()}},{key:"render",value:function(){this.renderer.render()}},{key:"selectPreselectedOptions",value:function(e){var t=this;if(this.isMultiple)this.$nativeSelect.find("option:selected:not(:disabled)").each((function(i,n){var r=n.index;t.$materialOptionsList.find("li:not(.optgroup):not(.select-toggle-all)").eq(r).addClass("selected 
active").find(":checkbox").prop("checked",!0),e(r)}));else{var i=this.$nativeSelect.find("option:selected").first(),n=this.$nativeSelect.find("option").index(i.get(0));i.get(0)&&"disabled"!==i.attr("disabled")&&e(n)}}},{key:"bindResetButtonClick",value:function(e){var t=this;this.$btnReset.on("click",(function(i){i.preventDefault(),t.$nativeSelect.find('option[value=""][selected][disabled][data-mdb-novalue]').length||(t._toggleResetButton(!0),t.$materialSelect.val(t.isMultiple?[]:""),t.$materialSelect.trigger("close"),t.$mainLabel.removeClass("active"),t.$materialOptionsList.find("li.active, li.selected").removeClass("active").removeClass("selected"),t.$materialOptionsList.find('li[aria-selected="true"]').attr("aria-selected","false"),t.$materialOptionsList.find('input[type="checkbox"]').prop("checked",!1),e())}))}},{key:"bindAddNewOptionClick",value:function(){this.$addOptionBtn.on("click",this.renderer.addNewOption.bind(this.renderer))}},{key:"bindMaterialSelectFocus",value:function(){var e=this;this.$materialSelect.on("focus",(function(t){var i=$(t.target);if(i.parent().addClass("active"),$("ul.select-dropdown").not(e.$materialOptionsList.get(0)).is(":visible")&&$("input.select-dropdown").trigger("close"),e.$mainLabel.addClass("active"),!e.$materialOptionsList.is(":visible")){var n=i.val(),r=e.$materialOptionsList.find("li").filter((function(){return $(this).text().toLowerCase()===n.toLowerCase()})).get(0);e._selectSingleOption(r)}e.isMultiple||e.$mainLabel.addClass("active")}))}},{key:"bindMaterialSelectClick",value:function(){var e=this;this.$materialSelect.on("mousedown",(function(e){3===e.which&&e.preventDefault()})),this.$materialSelect.on("click",(function(t){t.stopPropagation(),e.$mainLabel.addClass("active"),e._updateDropdownScrollTop()}))}},{key:"bindMaterialSelectBlur",value:function(){var e=this;this.$materialSelect.on("blur",(function(t){var i=$(t.target);i.parent().removeClass("active"),e.isMultiple||e.isSearchable||i.trigger("close"),e.$materialOptionsList.find("li.selected").removeClass("selected")}))}},{key:"bindMaterialOptionsListTouchstart",value:function(){this.$materialOptionsList.on("touchstart",(function(e){return e.stopPropagation()}))}},{key:"bindMaterialSelectKeydown",value:function(){var e=this;this.$materialSelect.on("keydown",(function(t){var i=$(t.target),n=t.which===e.keyCodes.tab,r=t.which===e.keyCodes.arrowUp,a=t.which===e.keyCodes.arrowDown,o=t.which===e.keyCodes.enter,s=t.which===e.keyCodes.esc,l=a&&t.altKey,c=r&&t.altKey,u=t.which===e.keyCodes.home,p=t.which===e.keyCodes.end,d=t.which===e.keyCodes.space,h=e.$materialOptionsList.is(":visible");switch(!0){case n:return e._handleTabKey(i);case!h&&(o||l):case e.isMultiple&&!h&&(a||r):return i.trigger("open"),e._updateDropdownScrollTop();case h&&(s||c):return i.trigger("close");case!h&&(a||r):return e._handleClosedArrowUpDownKey(t.which);case h&&(a||r):return e._handleArrowUpDownKey(t.which);case h&&u:return e._handleHomeKey();case h&&p:return e._handleEndKey();case h&&(o||d):return e._handleEnterKey(i);default:return e._handleLetterKey(t)}}))}},{key:"bindMaterialSelectDropdownToggle",value:function(){var e=this;this.$materialSelect.on("open",(function(){return e.$materialSelect.attr("aria-expanded","true")})),this.$materialSelect.on("close",(function(){return e.$materialSelect.attr("aria-expanded","false")}))}},{key:"bindToggleAllClick",value:function(e){var t=this;this.$toggleAll.on("click",(function(i){var 
n=$(t.$toggleAll).find('input[type="checkbox"]').first(),r=Boolean($(n).prop("checked")),a=!r;$(n).prop("checked",!r),t.$materialOptionsList.find("li:not(.optgroup):not(.select-toggle-all)").each((function(i,n){var r=$(n),o=r.find('input[type="checkbox"]');r.attr("aria-selected",a),a&&o.is(":checked")||!a&&!o.is(":checked")||$(n).is(":hidden")||$(n).is(".disabled")||(o.prop("checked",a),t.$nativeSelect.find("option").eq(i).prop("selected",a),r.toggleClass("active"),t._selectOption(n),e(i))})),t.$nativeSelect.data("stop-refresh",!0),t._triggerChangeOnNativeSelect(),t.$nativeSelect.removeData("stop-refresh"),i.stopPropagation()}))}},{key:"bindMaterialOptionMousedown",value:function(){var e=this;this.$materialOptionsList.on("mousedown",(function(t){var i=t.target;$(".modal-content").find(e.$materialOptionsList).length&&i.scrollHeight>i.offsetHeight&&t.preventDefault()}))}},{key:"bindMaterialOptionClick",value:function(e){var t=this;this.$materialOptionsList.find("li:not(.optgroup)").not(this.$toggleAll).each((function(i,n){$(n).on("click",(function(r){r.stopPropagation(),t._toggleResetButton(!1);var a=$(n);if(!a.hasClass("disabled")&&!a.hasClass("optgroup")){var o=!0;if(t.isMultiple){a.find('input[type="checkbox"]').prop("checked",(function(e,t){return!t}));var s=Boolean(t.$nativeSelect.find("optgroup").length),l=t._isToggleAllPresent()?a.index()-1:a.index();switch(!0){case t.isSearchable&&s:o=e(l-a.prevAll(".optgroup").length-1);break;case t.isSearchable:o=e(l-1);break;case s:o=e(l-a.prevAll(".optgroup").length);break;default:o=e(l)}t._isToggleAllPresent()&&t._updateToggleAllOption(),t.$materialSelect.trigger("focus")}else{t.$materialOptionsList.find("li").removeClass("active").attr("aria-selected","false");var c=a.children().last()[0].childNodes[0];t.$materialSelect.val($(c).text().replace(/ +/g," ").trim()),t.$materialSelect.trigger("close")}a.toggleClass("active");var u=a.attr("aria-selected");a.attr("aria-selected","true"===u?"false":"true"),t._selectSingleOption(a),t.$nativeSelect.data("stop-refresh",!0);var p=t.$nativeSelect.attr("data-placeholder")?i+1:i;t.$nativeSelect.find("option").eq(p).prop("selected",o),t.$nativeSelect.removeData("stop-refresh"),t._triggerChangeOnNativeSelect(),t.$materialSelect.val()&&t.$mainLabel.addClass("active"),a.hasClass("li-added")&&t.renderer.buildSingleOption(a,"")}}))}))}},{key:"bindSingleMaterialOptionClick",value:function(){var e=this;this.$materialOptionsList.find("li").on("click",(function(){e.$materialSelect.trigger("close")}))}},{key:"bindSearchInputKeyup",value:function(){var e=this;this.$searchInput.find(".search").on("keyup",(function(t){var i=$(t.target),n=t.which===e.keyCodes.tab,r=t.which===e.keyCodes.esc,a=t.which===e.keyCodes.enter,o=a&&t.shiftKey,s=t.which===e.keyCodes.arrowUp;if(t.which===e.keyCodes.arrowDown||n||r||s)return e.$materialSelect.focus(),void e._handleArrowUpDownKey(t.which);var l=i.closest("ul"),c=i.val(),u=l.find("li span.filtrable"),p=!1;if(u.each((function(){var e=$(this);if("string"==typeof this.outerHTML){var t=this.textContent.toLowerCase();t.includes(c.toLowerCase())?e.show().parent().show():e.hide().parent().hide(),t.trim()===c.toLowerCase()&&(p=!0)}})),a)return e.isEditable&&!p?void e.renderer.addNewOption():(o&&e._handleEnterWithShiftKey(i),void e.$materialSelect.trigger("open"));e.$addOptionBtn[c&&e.isEditable&&!p?"show":"hide"](),0!==u.filter((function(e,t){return 
$(t).is(":visible")&&!$(t).parent().hasClass("disabled")})).length?(e.$toggleAll.show(),e.$materialOptionsList.find(e.$noSearchResultsInfo).remove(),e._updateToggleAllOption()):(e.$toggleAll.hide(),e.$materialOptionsList.append(e.$noSearchResultsInfo)),e.dropdown.updatePosition(e.$materialSelect,e.$materialOptionsList)}))}},{key:"bindHtmlClick",value:function(){var e=this;$("html").on("click",(function(t){$(t.target).closest("#select-options-".concat(e.properties.id)).length||$(t.target).hasClass("mdb-select")||!$("#select-options-".concat(e.properties.id)).hasClass("active")||(e.$materialSelect.trigger("close"),e.$materialSelect.val()||e.options.placeholder||e.$mainLabel.removeClass("active")),e.isSearchable&&null!==e.$searchInput&&e.$materialOptionsList.hasClass("active")&&e.$materialOptionsList.find(".search-wrap input.search").focus()}))}},{key:"bindMobileDevicesMousedown",value:function(){$("select").siblings("input.select-dropdown","input.multi-bs-select").on("mousedown",(function(t){e.isMobileDevice&&(t.clientX>=t.target.clientWidth||t.clientY>=t.target.clientHeight)&&t.preventDefault()}))}},{key:"bindSaveBtnClick",value:function(){var e=this;this.$btnSave.on("click",(function(){e.$materialSelect.trigger("close")}))}},{key:"_toggleResetButton",value:function(e){var t=this.$nativeSelect.data("stop-refresh");this.$nativeSelect.attr("data-stop-refresh","true"),e?this.$nativeSelect.prepend('<option value="" selected disabled data-mdb-novalue></option>'):this.$nativeSelect.find("option[data-mdb-novalue]").remove(),this.$nativeSelect.attr("data-stop-refresh",t),this.$btnReset[e?"hide":"show"]()}},{key:"_isToggleAllPresent",value:function(){return this.$materialOptionsList.find(this.$toggleAll).length}},{key:"_updateToggleAllOption",value:function(){var e=this.$materialOptionsList.find("li").not(".select-toggle-all, .disabled, :hidden").find("[type=checkbox]"),t=e.filter(":checked"),i=this.$toggleAll.find("[type=checkbox]").is(":checked");t.length!==e.length||i?t.length<e.length&&i&&this.$toggleAll.find("[type=checkbox]").prop("checked",!1):this.$toggleAll.find("[type=checkbox]").prop("checked",!0)}},{key:"_handleTabKey",value:function(e){this._handleEscKey(e)}},{key:"_handleEnterWithShiftKey",value:function(e){this.isMultiple?this.$toggleAll.trigger("click"):this._handleEnterKey(e)}},{key:"_handleEnterKey",value:function(e){this.$materialOptionsList.find("li.selected:not(.disabled)").trigger("click").addClass("active"),this._removeKeyboardActiveClass(),this.isMultiple||e.trigger("close")}},{key:"_handleArrowUpDownKey",value:function(e){var t=this._getArrowMatchedActiveOptions(e,!1),i=t.$matchedMaterialOption,n=t.$activeOption;this._selectSingleOption(i),this._removeKeyboardActiveClass(),i.find("input").is(":checked")||i.removeClass(this.options.keyboardActiveClass),n.hasClass("selected")||n.find("input").is(":checked")||!this.isMultiple||n.removeClass("active",this.options.keyboardActiveClass),i.addClass(this.options.keyboardActiveClass),i.position()&&this.$materialOptionsList.scrollTop(this.$materialOptionsList.scrollTop()+i.position().top)}},{key:"_handleClosedArrowUpDownKey",value:function(e){var t=this._getArrowMatchedActiveOptions(e,!0).$matchedMaterialOption;t.trigger("click").addClass("active"),this._updateDropdownScrollTop(),this._selectSingleOption(t)}},{key:"_getArrowMatchedActiveOptions",value:function(e,t){var i=this,n=t?"":":visible",r=this.$materialOptionsList.find("li".concat(n)).not(".disabled, 
.select-toggle-all"),a=r.first(),o=r.last(),s=this.$materialOptionsList.find("li.selected").length>0,l=null,c=null;if(e===this.keyCodes.arrowUp){var u=s?this.$materialOptionsList.find("li.selected").first():o,p=u.prev("li".concat(n,":not(.disabled, .select-toggle-all)"));c=p,r.each((function(e,t){$(t).hasClass(i.options.keyboardActiveClass)&&(p=r.eq(e-1),c=r.eq(e))})),l=u.is(a)||!s?u:p}else{var d=s?this.$materialOptionsList.find("li.selected").first():a,h=d.next("li".concat(n,":not(.disabled, .select-toggle-all)"));c=h,r.each((function(e,t){$(t).hasClass(i.options.keyboardActiveClass)&&(h=r.eq(e+1),c=r.eq(e))})),l=d.is(o)||!s?d:h}return{$matchedMaterialOption:l,$activeOption:c}}},{key:"_handleHomeKey",value:function(){this._selectBoundaryOption("first")}},{key:"_handleEndKey",value:function(){this._selectBoundaryOption("last")}},{key:"_selectBoundaryOption",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",t=this.$materialOptionsList.find("li:visible").not(".disabled, .select-toggle-all")[e]();this._selectSingleOption(t),this._removeKeyboardActiveClass(),t.find("input").is(":checked")||t.removeClass(this.options.keyboardActiveClass),t.addClass(this.options.keyboardActiveClass),t.position()&&this.$materialOptionsList.scrollTop(this.$materialOptionsList.scrollTop()+t.position().top)}},{key:"_handleEscKey",value:function(e){this._removeKeyboardActiveClass(),e.trigger("close")}},{key:"_handleLetterKey",value:function(e){var t=this;if(this._removeKeyboardActiveClass(),this.isSearchable){var i=e.which>46&&e.which<91,n=e.which>93&&e.which<106,r=8===e.which;(i||n)&&this.$searchInput.find("input").focus(),r&&this.$searchInput.find("input").val("").focus()}else{var a="",o=String.fromCharCode(e.which).toLowerCase(),s=Object.keys(this.keyCodes).map((function(e){return t.keyCodes[e]}));if(o&&-1===s.indexOf(e.which)){a+=o;var l=this.$materialOptionsList.find("li").filter((function(e,t){return $(t).text().toLowerCase().includes(a)})).first();this.isMultiple||this.$materialOptionsList.find("li").removeClass("active"),l.addClass("active"),this._selectSingleOption(l),this._updateDropdownScrollTop()}}}},{key:"_removeKeyboardActiveClass",value:function(){this.$materialOptionsList.find("li").removeClass(this.options.keyboardActiveClass)}},{key:"_triggerChangeOnNativeSelect",value:function(){var e=new KeyboardEvent("change",{bubbles:!0,cancelable:!0});this.$nativeSelect.get(0).dispatchEvent(e)}},{key:"_selectSingleOption",value:function(e){this.$materialOptionsList.find("li.selected").removeClass("selected"),this._selectOption(e)}},{key:"_updateDropdownScrollTop",value:function(){var e=this.$materialOptionsList.find("li.active").not(".disabled").first();e.length?this.$materialOptionsList.scrollTo(e):this.$materialOptionsList.scrollTop(0)}},{key:"_selectOption",value:function(e){$(e).addClass("selected")}},{key:"_copyOptions",value:function(e){return $.extend({},e)}},{key:"_jQueryFallback",value:function(){for(var e=null,t=0;t<arguments.length;t++)if((e=t<0||arguments.length<=t?void 0:arguments[t]).length)return e;return e}},{key:"isMultiple",get:function(){return this.properties.isMultiple}},{key:"isSearchable",get:function(){return this.properties.isSearchable}},{key:"isRequired",get:function(){return this.properties.isRequired}},{key:"isEditable",get:function(){return this.properties.isEditable}},{key:"isDisabled",get:function(){return this.$nativeSelect.is(":disabled")}}])&&r(t.prototype,i),a&&r(t,a),e}()},,,function(e,t,i){var n=i(101);e.exports=function(e){if(n(e))throw 
TypeError("The method doesn't accept regular expressions");return e}},function(e,t,i){var n=i(2)("match");e.exports=function(e){var t=/./;try{"/./"[e](t)}catch(i){try{return t[n]=!1,"/./"[e](t)}catch(e){}}return!1}}]); | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/charting/plot2d/common.js.uncompressed.js | define("dojox/charting/plot2d/common", ["dojo/_base/lang", "dojo/_base/array", "dojo/_base/Color",
"dojox/gfx", "dojox/lang/functional", "../scaler/common"],
function(lang, arr, Color, g, df, sc){
var common = lang.getObject("dojox.charting.plot2d.common", true);
return lang.mixin(common, {
doIfLoaded: sc.doIfLoaded,
makeStroke: function(stroke){
if(!stroke){ return stroke; }
if(typeof stroke == "string" || stroke instanceof Color){
stroke = {color: stroke};
}
return g.makeParameters(g.defaultStroke, stroke);
},
augmentColor: function(target, color){
var t = new Color(target),
c = new Color(color);
c.a = t.a;
return c;
},
augmentStroke: function(stroke, color){
var s = common.makeStroke(stroke);
if(s){
s.color = common.augmentColor(s.color, color);
}
return s;
},
augmentFill: function(fill, color){
var fc, c = new Color(color);
if(typeof fill == "string" || fill instanceof Color){
return common.augmentColor(fill, color);
}
return fill;
},
defaultStats: {
vmin: Number.POSITIVE_INFINITY, vmax: Number.NEGATIVE_INFINITY,
hmin: Number.POSITIVE_INFINITY, hmax: Number.NEGATIVE_INFINITY
},
collectSimpleStats: function(series){
var stats = lang.delegate(common.defaultStats);
for(var i = 0; i < series.length; ++i){
var run = series[i];
for(var j = 0; j < run.data.length; j++){
if(run.data[j] !== null){
if(typeof run.data[j] == "number"){
// 1D case
var old_vmin = stats.vmin, old_vmax = stats.vmax;
if(!("ymin" in run) || !("ymax" in run)){
arr.forEach(run.data, function(val, i){
if(val !== null){
var x = i + 1, y = val;
if(isNaN(y)){ y = 0; }
stats.hmin = Math.min(stats.hmin, x);
stats.hmax = Math.max(stats.hmax, x);
stats.vmin = Math.min(stats.vmin, y);
stats.vmax = Math.max(stats.vmax, y);
}
});
}
if("ymin" in run){ stats.vmin = Math.min(old_vmin, run.ymin); }
if("ymax" in run){ stats.vmax = Math.max(old_vmax, run.ymax); }
}else{
// 2D case
var old_hmin = stats.hmin, old_hmax = stats.hmax,
old_vmin = stats.vmin, old_vmax = stats.vmax;
if(!("xmin" in run) || !("xmax" in run) || !("ymin" in run) || !("ymax" in run)){
arr.forEach(run.data, function(val, i){
if(val !== null){
var x = "x" in val ? val.x : i + 1, y = val.y;
if(isNaN(x)){ x = 0; }
if(isNaN(y)){ y = 0; }
stats.hmin = Math.min(stats.hmin, x);
stats.hmax = Math.max(stats.hmax, x);
stats.vmin = Math.min(stats.vmin, y);
stats.vmax = Math.max(stats.vmax, y);
}
});
}
if("xmin" in run){ stats.hmin = Math.min(old_hmin, run.xmin); }
if("xmax" in run){ stats.hmax = Math.max(old_hmax, run.xmax); }
if("ymin" in run){ stats.vmin = Math.min(old_vmin, run.ymin); }
if("ymax" in run){ stats.vmax = Math.max(old_vmax, run.ymax); }
}
break;
}
}
}
return stats;
},
calculateBarSize: function(/* Number */ availableSize, /* Object */ opt, /* Number? */ clusterSize){
if(!clusterSize){
clusterSize = 1;
}
var gap = opt.gap, size = (availableSize - 2 * gap) / clusterSize;
if("minBarSize" in opt){
size = Math.max(size, opt.minBarSize);
}
if("maxBarSize" in opt){
size = Math.min(size, opt.maxBarSize);
}
size = Math.max(size, 1);
gap = (availableSize - size * clusterSize) / 2;
return {size: size, gap: gap}; // Object
},
collectStackedStats: function(series){
// collect statistics
var stats = lang.clone(common.defaultStats);
if(series.length){
// 1st pass: find the maximal length of runs
stats.hmin = Math.min(stats.hmin, 1);
stats.hmax = df.foldl(series, "seed, run -> Math.max(seed, run.data.length)", stats.hmax);
// 2nd pass: stack values
for(var i = 0; i < stats.hmax; ++i){
var v = series[0].data[i];
v = v && (typeof v == "number" ? v : v.y);
if(isNaN(v)){ v = 0; }
stats.vmin = Math.min(stats.vmin, v);
for(var j = 1; j < series.length; ++j){
var t = series[j].data[i];
t = t && (typeof t == "number" ? t : t.y);
if(isNaN(t)){ t = 0; }
v += t;
}
stats.vmax = Math.max(stats.vmax, v);
}
}
return stats;
},
curve: function(/* Number[] */a, /* Number|String */tension){
// FIX for #7235, submitted by Enzo Michelangeli.
// Emulates the smoothing algorithms used in a famous, unnamed spreadsheet
// program ;)
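			// Illustrative usage (points and tension value are placeholders):
			//   curve([{x:0,y:0},{x:1,y:2},{x:2,y:1}], "S")
			// returns an SVG-style path string like "M0,0 C...", one curve segment per point.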
var array = a.slice(0);
if(tension == "x") {
			array[array.length] = array[0]; // add a last element equal to the first, closing the loop
}
var p=arr.map(array, function(item, i){
if(i==0){ return "M" + item.x + "," + item.y; }
			if(!isNaN(tension)) { // use standard Dojo smoothing if tension is numeric
var dx=item.x-array[i-1].x, dy=array[i-1].y;
return "C"+(item.x-(tension-1)*(dx/tension))+","+dy+" "+(item.x-(dx/tension))+","+item.y+" "+item.x+","+item.y;
} else if(tension == "X" || tension == "x" || tension == "S") {
// use Excel "line smoothing" algorithm (http://xlrotor.com/resources/files.shtml)
var p0, p1 = array[i-1], p2 = array[i], p3;
var bz1x, bz1y, bz2x, bz2y;
var f = 1/6;
if(i==1) {
if(tension == "x") {
p0 = array[array.length-2];
} else { // "tension == X || tension == "S"
p0 = p1;
}
f = 1/3;
} else {
p0 = array[i-2];
}
if(i==(array.length-1)) {
if(tension == "x") {
p3 = array[1];
} else { // "tension == X || tension == "S"
p3 = p2;
}
f = 1/3;
} else {
p3 = array[i+1];
}
var p1p2 = Math.sqrt((p2.x-p1.x)*(p2.x-p1.x)+(p2.y-p1.y)*(p2.y-p1.y));
var p0p2 = Math.sqrt((p2.x-p0.x)*(p2.x-p0.x)+(p2.y-p0.y)*(p2.y-p0.y));
var p1p3 = Math.sqrt((p3.x-p1.x)*(p3.x-p1.x)+(p3.y-p1.y)*(p3.y-p1.y));
var p0p2f = p0p2 * f;
var p1p3f = p1p3 * f;
if(p0p2f > p1p2/2 && p1p3f > p1p2/2) {
p0p2f = p1p2/2;
p1p3f = p1p2/2;
} else if(p0p2f > p1p2/2) {
p0p2f = p1p2/2;
p1p3f = p1p2/2 * p1p3/p0p2;
} else if(p1p3f > p1p2/2) {
p1p3f = p1p2/2;
p0p2f = p1p2/2 * p0p2/p1p3;
}
if(tension == "S") {
if(p0 == p1) { p0p2f = 0; }
if(p2 == p3) { p1p3f = 0; }
}
bz1x = p1.x + p0p2f*(p2.x - p0.x)/p0p2;
bz1y = p1.y + p0p2f*(p2.y - p0.y)/p0p2;
bz2x = p2.x - p1p3f*(p3.x - p1.x)/p1p3;
bz2y = p2.y - p1p3f*(p3.y - p1.y)/p1p3;
}
return "C"+(bz1x+","+bz1y+" "+bz2x+","+bz2y+" "+p2.x+","+p2.y);
});
return p.join(" ");
},
getLabel: function(/*Number*/number, /*Boolean*/fixed, /*Number*/precision){
return sc.doIfLoaded("dojo/number", function(numberLib){
return (fixed ? numberLib.format(number, {places : precision}) :
numberLib.format(number)) || "";
}, function(){
return fixed ? number.toFixed(precision) : number.toString();
});
}
});
}); | PypiClean |
/FlexGet-3.9.6-py3-none-any.whl/flexget/plugins/filter/exists_series.py | from pathlib import Path
from loguru import logger
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.utils.log import log_once
from flexget.utils.template import RenderError
try:
# NOTE: Importing other plugins is discouraged!
from flexget.components.parsing import parsers as plugin_parsers
except ImportError:
raise plugin.DependencyError(issued_by=__name__, missing='parsers')
logger = logger.bind(name='exists_series')
class FilterExistsSeries:
"""
Intelligent series aware exists rejecting.
Example::
exists_series: /storage/series/
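
    Advanced example (dict form; keys as defined in the schema below)::

        exists_series:
          path: /storage/series/
          allow_different_qualities: better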
"""
schema = {
'anyOf': [
one_or_more({'type': 'string', 'format': 'path'}),
{
'type': 'object',
'properties': {
'path': one_or_more({'type': 'string', 'format': 'path'}),
'allow_different_qualities': {
'enum': ['better', True, False],
'default': False,
},
},
'required': ['path'],
'additionalProperties': False,
},
]
}
def prepare_config(self, config):
# if config is not a dict, assign value to 'path' key
if not isinstance(config, dict):
config = {'path': config}
# if only a single path is passed turn it into a 1 element list
if isinstance(config['path'], str):
config['path'] = [config['path']]
return config
@plugin.priority(-1)
def on_task_filter(self, task, config):
if not task.accepted:
logger.debug('Scanning not needed')
return
config = self.prepare_config(config)
accepted_series = {}
paths = set()
for entry in task.accepted:
if 'series_parser' in entry:
if entry['series_parser'].valid:
accepted_series.setdefault(entry['series_parser'].name, []).append(entry)
for folder in config['path']:
try:
paths.add(entry.render(folder))
except RenderError as e:
logger.error('Error rendering path `{}`: {}', folder, e)
else:
logger.debug('entry {} series_parser invalid', entry['title'])
if not accepted_series:
logger.warning(
'No accepted entries have series information. exists_series cannot filter them'
)
return
# scan through
# For speed, only test accepted entries since our priority should be after everything is accepted.
for series in accepted_series:
# make new parser from parser in entry
series_parser = accepted_series[series][0]['series_parser']
for folder in paths:
folder = Path(folder).expanduser()
if not folder.is_dir():
logger.warning('Directory {} does not exist', folder)
continue
for filename in folder.iterdir():
# run parser on filename data
try:
disk_parser = plugin.get('parsing', self).parse_series(
data=filename.name, name=series_parser.name
)
except plugin_parsers.ParseWarning as pw:
disk_parser = pw.parsed
log_once(pw.value, logger=logger)
if disk_parser.valid:
logger.debug('name {} is same series as {}', filename.name, series)
logger.debug('disk_parser.identifier = {}', disk_parser.identifier)
logger.debug('disk_parser.quality = {}', disk_parser.quality)
logger.debug('disk_parser.proper_count = {}', disk_parser.proper_count)
for entry in accepted_series[series]:
logger.debug(
'series_parser.identifier = {}', entry['series_parser'].identifier
)
if disk_parser.identifier != entry['series_parser'].identifier:
logger.trace('wrong identifier')
continue
logger.debug(
'series_parser.quality = {}', entry['series_parser'].quality
)
if config.get('allow_different_qualities') == 'better':
if entry['series_parser'].quality > disk_parser.quality:
logger.trace('better quality')
continue
elif config.get('allow_different_qualities'):
if disk_parser.quality != entry['series_parser'].quality:
logger.trace('wrong quality')
continue
logger.debug(
'entry parser.proper_count = {}',
entry['series_parser'].proper_count,
)
if disk_parser.proper_count >= entry['series_parser'].proper_count:
entry.reject('episode already exists')
continue
else:
logger.trace('new one is better proper, allowing')
continue
@event('plugin.register')
def register_plugin():
plugin.register(FilterExistsSeries, 'exists_series', interfaces=['task'], api_ver=2) | PypiClean |
/Container-WhooshAlchemyPlus-0.7.5.post3.tar.gz/Container-WhooshAlchemyPlus-0.7.5.post3/README.rst | Welcome to Flask-WhooshAlchemyPlus!
===================================
Forked from `gyllstromk/Flask-WhooshAlchemy <https://github.com/gyllstromk/Flask-WhooshAlchemy>`_
Flask-WhooshAlchemyPlus is a Flask extension that integrates the text-search functionality of `Whoosh <https://bitbucket.org/mchaput/whoosh/wiki/Home>`_ with the ORM of `SQLAlchemy <http://www.sqlalchemy.org/>`_ for use in `Flask <http://flask.pocoo.org/>`_ applications.
Source code and issue tracking at `GitHub <https://github.com/Revolution1/Flask-WhooshAlchemyPlus>`_.
Install
-------
::
$ pip install flask_whooshalchemyplus
Or:
::
$ git clone https://github.com/Revolution1/Flask-WhooshAlchemyPlus.git
$ cd Flask-WhooshAlchemyPlus && python setup.py install
Quickstart
----------
Let's set up the environment and create our model:
::
import flask_whooshalchemyplus
# set the location for the whoosh index
app.config['WHOOSH_BASE'] = 'path/to/whoosh/base'
class BlogPost(db.Model):
__tablename__ = 'blogpost'
__searchable__ = ['title', 'content'] # these fields will be indexed by whoosh
__analyzer__ = SimpleAnalyzer() # configure analyzer; defaults to
# StemmingAnalyzer if not specified
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.Unicode)  # Indexed fields are either String,
    content = db.Column(db.Text)   # Unicode, or Text
    created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
flask_whooshalchemyplus.init_app(app) # initialize
Only three steps to get started:

1) Set ``WHOOSH_BASE`` to the path for the whoosh index. If not set, it will default to a directory called 'whoosh_index' in the directory from which the application is run.

2) Add a ``__searchable__`` field to the model which specifies the fields (as ``str`` values) to be indexed.

3) Set ``WHOOSH_DISABLED`` to ``True`` to disable whoosh indexing.
Let's create a post:
::
db.session.add(
BlogPost(title='My cool title', content='This is the first post.')
); db.session.commit()
After the session is committed, our new ``BlogPost`` is indexed. Similarly, if the post is deleted, it will be removed from the Whoosh index.
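
For instance, deleting a previously committed post (assuming it is bound to ``post``) drops it from the index too::

    db.session.delete(post); db.session.commit()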
Manually Indexing
-----------------
By default, records are indexed only while the server is running.
So if you want to index them manually:
::
from flask_whooshalchemyplus import index_all
index_all(app)
Text Searching
--------------
To execute a simple search:
::
results = BlogPost.query.whoosh_search('cool')
This will return all ``BlogPost`` instances in which at least one indexed field (i.e., 'title' or 'content') is a text match to the query. Results are ranked according to their relevance score, with the best match appearing first when iterating. The result of this call is a (subclass of) ``sqlalchemy.orm.query.Query`` object, so you can chain other SQL operations. For example::
two_days_ago = datetime.date.today() - datetime.timedelta(2)
recent_matches = BlogPost.query.whoosh_search('first').filter(
BlogPost.created >= two_days_ago)
Or, in the alternative (likely slower) order::
recent_matches = BlogPost.query.filter(
BlogPost.created >= two_days_ago).whoosh_search('first')
We can limit results::
# get 2 best results:
results = BlogPost.query.whoosh_search('cool', limit=2)
By default, the search is executed on all of the indexed fields as an OR conjunction. For example, if a model has 'title' and 'content' indicated as ``__searchable__``, a query will be checked against both fields, returning any instance whose title or content are a content match for the query. To specify particular fields to be checked, populate the ``fields`` parameter with the desired fields::
results = BlogPost.query.whoosh_search('cool', fields=('title',))
By default, results will only be returned if they contain all of the query terms (AND). To switch to an OR grouping, set the ``or_`` parameter to ``True``::
results = BlogPost.query.whoosh_search('cool', or_=True)
If you want ordinary text matching result too::
results = BlogPost.query.whoosh_search('cool', like=True)
This acts like ``whoosh_search('cool') + SQL LIKE '%cool%'``
pure_whoosh
------------------
If you want the raw ``whoosh.index.searcher().search()`` result::

    results = BlogPost.pure_whoosh(query, limit=None, fields=None, or_=False)
WhooshDisabled context manager
------------------------------
To disable whoosh indexing temporarily:
::
    with WhooshDisabled():
        # indexing is suspended inside this block
        db.session.add(BlogPost(title='Imported post', content='...'))
        db.session.commit()
CHANGELOG
---------
- v0.7.5 :
- feature: add WhooshDisabled context manager
- feature: add whoosh_index_all and init_app method
    - refactor: indexing methods
- fix: index error: model has no attribute '__searchable__'
- v0.7.4 :
- Feature: add fuzzy-searching using SQL LIKE
- v0.7.3 :
- Fix: Chinese analyzer does not take affect
- v0.7.2 :
- Fix: index_all cannot detect indexable models by itself
- v0.7.1 :
- Feature: Indexing child module class `github issue #43 <https://github.com/gyllstromk/Flask-WhooshAlchemy/pull/43>`_
    - Feature: Add python3 support
- Fix: Obey result sorting if caller explicitly uses order_by() on query `github pull request #32 <https://github.com/gyllstromk/Flask-WhooshAlchemy/pull/32>`_
- Fix: custom query_class usage `github pull request #35 <https://github.com/gyllstromk/Flask-WhooshAlchemy/pull/35>`_
- Feature: add ``WHOOSH_DISABLED`` option to disable whooshalchemyplus at runtime
| PypiClean |
/Cartopy-0.22.0-cp310-cp310-macosx_11_0_arm64.whl/cartopy/mpl/style.py | import warnings
# Define the matplotlib style aliases that cartopy can expand.
# Note: This should not contain the plural aliases
# (e.g. linewidths -> linewidth).
# This is an intended duplication of
# https://github.com/matplotlib/matplotlib/blob/\
# 2d2dab511d22b6cc9c812cfbcca6df3f9bf3094a/lib/matplotlib/patches.py#L20-L26
# Duplication intended to simplify readability, given the small number of
# aliases.
_ALIASES = {
'lw': 'linewidth',
'ls': 'linestyle',
'fc': 'facecolor',
'ec': 'edgecolor',
}
def merge(*style_dicts):
"""
Merge together multiple matplotlib style dictionaries in a predictable way
The approach taken is:
For each style:
* Expand aliases, such as "lw" -> "linewidth", but always prefer
the full form if over-specified (i.e. lw AND linewidth
are both set)
* "color" overwrites "facecolor" and "edgecolor" (as per
matplotlib), UNLESS facecolor == "never", which will be expanded
at finalization to 'none'
>>> style = merge({"lw": 1, "edgecolor": "black", "facecolor": "never"},
... {"linewidth": 2, "color": "gray"})
>>> sorted(style.items())
[('edgecolor', 'gray'), ('facecolor', 'never'), ('linewidth', 2)]
"""
style = {}
facecolor = None
for orig_style in style_dicts:
this_style = orig_style.copy()
for alias_from, alias_to in _ALIASES.items():
alias = this_style.pop(alias_from, None)
if alias_from in orig_style:
# n.b. alias_from doesn't trump alias_to
# (e.g. 'lw' doesn't trump 'linewidth').
this_style.setdefault(alias_to, alias)
color = this_style.pop('color', None)
if 'color' in orig_style:
this_style['edgecolor'] = color
this_style['facecolor'] = color
if isinstance(facecolor, str) and facecolor == 'never':
requested_color = this_style.pop('facecolor', None)
setting_color = not (
isinstance(requested_color, str) and
requested_color.lower() == 'none')
if (('fc' in orig_style or 'facecolor' in orig_style) and
setting_color):
warnings.warn('facecolor will have no effect as it has been '
'defined as "never".')
else:
facecolor = this_style.get('facecolor', facecolor)
# Push the remainder of the style into the merged style.
style.update(this_style)
return style
def finalize(style):
"""
Update the given matplotlib style according to cartopy's style rules.
Rules:
1. A facecolor of 'never' is replaced with 'none'.
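
    For example:

    >>> sorted(finalize({'facecolor': 'never'}).items())
    [('facecolor', 'none')]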
"""
# Expand 'never' to 'none' if we have it.
facecolor = style.get('facecolor', None)
if facecolor == 'never':
style['facecolor'] = 'none'
return style | PypiClean |
/Cubane-1.0.11.tar.gz/Cubane-1.0.11/cubane/media/scripting.py | from __future__ import unicode_literals
from django.db.models import Max
from django.contrib.contenttypes.models import ContentType
from cubane.lib.file import get_caption_from_filename
from cubane.media.models import Media, MediaFolder
from cubane.cms.models import MediaGallery
from cubane.tasks import TaskRunner
import os.path
import requests
import urlparse
class MediaScriptingMixin(object):
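    """
    Mixin with helpers for scripting media library content. Illustrative
    usage (URL and titles are placeholders)::

        folder = self.create_media_folder('Products')
        media = self.create_media_from_url(
            'http://www.example.com/image.jpg', folder=folder)
    """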
def delete_content(self):
"""
Override: Delete all media content as well.
"""
super(MediaScriptingMixin, self).delete_content()
self.delete_media_content()
def delete_media_content(self):
"""
Delete media content.
"""
for m in Media.objects.all():
m.delete()
def create_media_from_file(self, local_filepath, caption, filename=None, folder=None, generate_images=True):
"""
Create a new media item within the media library by downloading content
from the given url.
"""
# read media from local disk
        with open(local_filepath, 'rb') as f:
            content = f.read()
# if no filename is given, determine filename from input path
if not filename:
filename = os.path.basename(local_filepath)
# create media with given content
return self.create_media(content, caption, filename, folder, generate_images=generate_images)
def create_media_from_url(self, url, caption=None, filename=None, folder=None, generate_images=True):
"""
Create a new media item within the media library by downloading content
from the given url.
"""
# download content from url
content = requests.get(url, timeout=1000)
        if content is None:
            return None
        if content.status_code != 200:
            return None
# generate filename based on given url
if filename == None:
url_parts = urlparse.urlparse(url)
path = url_parts.path
filename = os.path.basename(path)
# create media with given content
return self.create_media(content.content, caption, filename, folder, generate_images=generate_images)
def create_media_folder(self, title, parent=None, if_not_exists=True):
"""
Create a new media folder with given name.
"""
folder = None
if if_not_exists:
try:
folder = MediaFolder.objects.get(title=title, parent=parent)
except MediaFolder.DoesNotExist:
pass
if not folder:
folder = MediaFolder()
folder.title = title
folder.parent = parent
folder.save()
return folder
def create_media(self, content, caption, filename, folder=None, generate_images=True):
"""
Create a new media item within the media library based on the given
content data.
"""
media = self.create_media_object(caption, filename, folder)
if media:
media.upload_from_content(content, filename, generate_images=generate_images)
return media
def create_media_object(self, caption, filename, folder=None):
"""
Create a new media item object within the media library based on the
given meta data. This will simply create the meta data but will not
upload or store any actual image/document data and it is assumed that
this happens outside of the image media object creation.
"""
# generate caption based on filename if provided
if not caption and filename:
caption = get_caption_from_filename(filename)
media = Media()
media.caption = caption
media.filename = filename
if folder:
media.parent = folder
media.save()
return media
def create_blank_external_media(self, url, filename=None, caption=None, folder=None):
"""
Create a new (blank) media item with the given external url and
optionally the given parent folder.
"""
media = Media()
media.is_blank = True
media.external_url = url
# generate filename based on given url
url_parts = urlparse.urlparse(url)
path = url_parts.path
# filename
if filename:
media.filename = filename
else:
media.filename = os.path.basename(path)
# generate caption from filename
if caption:
media.caption = caption
else:
media.caption = get_caption_from_filename(media.filename)
# folder
if folder:
media.folder = folder
media.save()
# notify task runner that there is something to do
TaskRunner.notify()
return media
def add_media_to_gallery(self, page, images):
"""
Add list of images to the gallery of the given cms page.
"""
if not isinstance(images, list):
images = [images]
# get content type of page object
content_type = ContentType.objects.get_for_model(page.__class__)
# get last seq.
r = page.gallery_images.aggregate(Max('seq'))
seq = r.get('seq__max')
        if seq is None: seq = 0
for i, image in enumerate(images, start=seq + 1):
mg = MediaGallery()
mg.media = image
mg.content_type = content_type
mg.target_id = page.pk
mg.seq = i
mg.save()
def clear_media_gallery(self, page):
"""
Remove all media from the gallery for the given page.
"""
        for m in page.gallery_images.all():
            m.delete() | PypiClean |
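# --- Illustrative use of MediaScriptingMixin above: a minimal sketch that
# assumes a configured Django project with cubane installed. The class name
# and file path are made up for illustration.
class ProductImporter(MediaScriptingMixin, object):
    pass
importer = ProductImporter()
folder = importer.create_media_folder('Products')  # re-used if it already exists
media = importer.create_media_from_file('/tmp/example.jpg', caption=None, folder=folder)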
/Nose-PyVersion-0.1b1.tar.gz/Nose-PyVersion-0.1b1/nose_pyversion.py |
from nose.plugins import Plugin
import os
class PyVersion(Plugin):
"""Nose plugin that excludes files based on python version and file name
If a filename has the format [NAME][SEPARATOR]py[VERSION].py and
VERSION doesn't match [major][minor][micro], [major][minor] or [major]
the file will be excluded from tests.
Options for pyversion::
pyversion-separator
The separator between [name] and 'py' in the filename
Example::
file_py3.py
file_py33.py
file_py330.py
file_py2.py
file_py27.py
file_py273.py
"""
name = 'pyversion'
"""Name of this nose plugin"""
score = 1
enabled = False
"""By default this plugin is not enabled"""
default_separator = '_'
"""The separator between filename and python version"""
separator = None
"""Separator between filename and python version
Will be set to :attr:`PyVersion.default_separator` or by the
pyversion-separator option.
"""
def options(self, parser, env=os.environ):
"""Define the command line options for the plugin."""
parser.add_option(
"--pyversion-separator",
action="store",
default=self.default_separator,
dest="pyversion_separator",
help="Separator for version-specific files")
super(PyVersion, self).options(parser, env=env)
def configure(self, options, conf):
super(PyVersion, self).configure(options, conf)
if not self.enabled:
return
self.separator = options.pyversion_separator or self.default_separator
        if self.separator == '.':
raise Exception('Not a valid pyversion-separator: ' +
str(self.separator))
def wantFile(self, file):
import sys
import re
separator = re.escape(self.separator)
# Use '%' instead of str.format because of older python versions
is_versioned = r'^.+%spy\d+\.py$' % (separator,)
        # matches filenames tagged for the *current* interpreter version
        current_version = (
            r'^.+%(sep)spy%(maj)d((%(min)s)|(%(min)s%(mic)s))?\.py$' % {
                'sep': separator,
                'maj': sys.version_info.major,
                'min': sys.version_info.minor,
                'mic': sys.version_info.micro})
        if re.match(is_versioned, file) and not re.match(current_version,
                                                         file):
return False
else:
return None | PypiClean |
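# --- Quick self-contained check of the version-matching logic above (a
# sketch that duplicates the regexes so it runs without nose installed).
import re
import sys
sep = re.escape('_')
is_versioned = r'^.+%spy\d+\.py$' % (sep,)
current = (r'^.+%(sep)spy%(maj)d((%(min)s)|(%(min)s%(mic)s))?\.py$' %
           {'sep': sep, 'maj': sys.version_info.major,
            'min': sys.version_info.minor, 'mic': sys.version_info.micro})
for name in ('file_py2.py', 'file_py27.py', 'file_py3.py', 'file.py'):
    excluded = bool(re.match(is_versioned, name) and not re.match(current, name))
    print(name, 'excluded' if excluded else 'kept')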
/META_TOOLBOX-2023.3.tar.gz/META_TOOLBOX-2023.3/META_TOOLBOX/META_FA_LIBRARY.py | import numpy as np
import META_TOOLBOX.META_CO_LIBRARY as META_CL
# CHAOTIC SEARCH
def CHAOTIC_SEARCH(OF_FUNCTION, ITER, X_BEST, OF_BEST, FIT_BEST, N_CHAOTICSEARCHS, ALPHA_CHAOTIC, D, N_ITER, X_L, X_U, NULL_DIC):
# INITIALIZATION VARIABLES
K = N_CHAOTICSEARCHS
CH = []
X_BESTNEW = X_BEST
OF_BESTNEW = OF_BEST
FIT_BESTNEW = FIT_BEST
# CSI UPDATE
CSI = (N_ITER - ITER + 1) / N_ITER
# CHAOTIC SEARCHS
for I_COUNT in range(0, K):
CH_XBEST = np.zeros((1, D))
X_BESTTEMPORARY = np.zeros((1, D))
if I_COUNT == 0:
# CHAOTIC UPDATE
CH.append(np.random.random())
else:
# CHAOTIC UPDATE POSITION, OF AND FIT
CH.append(ALPHA_CHAOTIC * CH[I_COUNT - 1] * (1 - CH[I_COUNT - 1]))
# CREATING THE CHAOTIC POSITION
for J_COUNT in range(D):
CH_XBEST[0, J_COUNT] = X_L[J_COUNT] + (X_U[J_COUNT] - X_L[J_COUNT]) * CH[I_COUNT]
X_BESTTEMPORARY[0, J_COUNT] = (1 - CSI) * X_BESTNEW[J_COUNT] + CSI * CH_XBEST[0, J_COUNT]
X_BESTTEMPORARY[0, :] = META_CL.CHECK_INTERVAL_01(X_BESTTEMPORARY[0, :], X_L, X_U)
OF_BESTTEMPORARY = OF_FUNCTION(X_BESTTEMPORARY[0, :], NULL_DIC)
FIT_BESTTEMPORARY = META_CL.FIT_VALUE(OF_BESTTEMPORARY)
        # STORING BEST VALUE (compare against the best found so far in this chaotic search)
        if FIT_BESTTEMPORARY > FIT_BESTNEW:
X_BESTNEW = X_BESTTEMPORARY[0, :]
OF_BESTNEW = OF_BESTTEMPORARY
FIT_BESTNEW = FIT_BESTTEMPORARY
return X_BESTNEW, OF_BESTNEW, FIT_BESTNEW
# CALCULATION OF THE DISCRIMINATING FACTOR OF THE MALE AND FEMALE FIREFLIES POPULATION
def DISCRIMINANT_FACTOR_MALE_MOVIMENT(FIT_XI, FIT_YK):
    """
    This function determines the discriminant factor D for the male firefly
    movement: D = +1 if firefly I is brighter (fitter) than firefly K,
    otherwise D = -1.
    """
# COMPARISON OF FIREFLY BRIGHTNESS
if FIT_XI > FIT_YK:
D_1 = 1
else:
D_1 = -1
return D_1
# DETERMINATION OF THE BETA ATTRACTIVENESS FACTOR
def ATTRACTIVENESS_FIREFLY_PARAMETER(BETA_0, GAMMA, X_I, X_J, D):
"""
This function calculates distance between X_I and X_J fireflies.
Input:
BETA_0 | Attractiveness at r = 0 | Float
GAMMA | Light absorption coefficient 1 / (X_U - X_L) ** M | Py list[D]
X_I | I Firefly | Py list[D]
X_J | J Firefly | Py list[D]
D | Problem dimension | Integer
Output:
BETA | Attractiveness | Py list[D]
"""
AUX = 0
# Firefly distance
for I_COUNT in range(D):
AUX += (X_I[I_COUNT] - X_J[I_COUNT]) ** 2
R_IJ = np.sqrt(AUX)
# Beta attractiveness
BETA = []
for J_COUNT in range(D):
BETA.append(BETA_0 * np.exp(- GAMMA[J_COUNT] * R_IJ))
return BETA
# TRADITIONAL FIREFLY MOVEMENT
def FIREFLY_MOVEMENT(OF_FUNCTION, X_T0I, X_J, BETA, ALPHA, SCALING, D, X_L, X_U, NULL_DIC):
"""
This function creates a new solution using FA movement algorithm.
Input:
OF_FUNCTION | External def user input this function in arguments | Py function
X_T0I | Design variable I particle before movement | Py list[D]
X_J | J Firefly | Py list[D]
BETA | Attractiveness | Py list[D]
    ALPHA           | Random factor                                         | Float
D | Problem dimension | Integer
X_L | Lower limit design variables | Py list[D]
X_U | Upper limit design variables | Py list[D]
NULL_DIC | Empty variable for the user to use in the obj. function | ?
Output:
X_T1I | Design variable I particle after movement | Py list[D]
OF_T1I | Objective function X_T1I (new particle) | Float
FIT_T1I | Fitness X_T1I (new particle) | Float
NEOF | Number of objective function evaluations | Integer
"""
# Start internal variables
X_T1I = []
OF_T1I = 0
FIT_T1I = 0
for I_COUNT in range(D):
EPSILON_I = np.random.random() - 0.50
if SCALING:
S_D = X_U[I_COUNT] - X_L[I_COUNT]
else:
S_D = 1
NEW_VALUE = X_T0I[I_COUNT] + BETA[I_COUNT] * (X_J[I_COUNT] - X_T0I[I_COUNT]) + ALPHA * S_D * EPSILON_I
X_T1I.append(NEW_VALUE)
    # Check bounds
X_T1I = META_CL.CHECK_INTERVAL_01(X_T1I, X_L, X_U)
# Evaluation of the objective function and fitness
OF_T1I = OF_FUNCTION(X_T1I, NULL_DIC)
FIT_T1I = META_CL.FIT_VALUE(OF_T1I)
NEOF = 1
return X_T1I, OF_T1I, FIT_T1I, NEOF
# MALE FIREFLY MOVEMENT
def MALE_FIREFLY_MOVEMENT(OF_FUNCTION, X_MALECURRENTI, FIT_MALECURRENTI, Y_FEMALECURRENTK, FIT_FEMALECURRENTK, Y_FEMALECURRENTJ, FIT_FEMALECURRENTJ, BETA_0, GAMMA, D, X_L, X_U, NULL_DIC):
    """
    This function moves male firefly I towards two female fireflies (K and J),
    weighted by the discriminant factors and the attractiveness between pairs.
    """
# INITIALIZATION VARIABLES
SECOND_TERM = []
THIRD_TERM = []
X_MALENEWI = []
# DISCRIMINANT D FACTOR
D_1 = DISCRIMINANT_FACTOR_MALE_MOVIMENT(FIT_MALECURRENTI, FIT_FEMALECURRENTK)
D_2 = DISCRIMINANT_FACTOR_MALE_MOVIMENT(FIT_MALECURRENTI, FIT_FEMALECURRENTJ)
# ATTRACTIVENESS AMONG FIREFLIES
BETA_1 = ATTRACTIVENESS_FIREFLY_PARAMETER(BETA_0, GAMMA, X_MALECURRENTI, Y_FEMALECURRENTK, D)
BETA_2 = ATTRACTIVENESS_FIREFLY_PARAMETER(BETA_0, GAMMA, X_MALECURRENTI, Y_FEMALECURRENTJ, D)
# LAMBDA AND MU RANDOM PARAMETERS
LAMBDA = np.random.random()
MU = np.random.random()
for I_COUNT in range(D):
SECOND_TERM.append(D_1 * BETA_1[I_COUNT] * LAMBDA * (Y_FEMALECURRENTK[I_COUNT] - X_MALECURRENTI[I_COUNT]))
THIRD_TERM.append(D_2 * BETA_2[I_COUNT] * MU * (Y_FEMALECURRENTJ[I_COUNT] - X_MALECURRENTI[I_COUNT]))
# UPDATE FEMALE POSITION, OF AND FIT
for J_COUNT in range(D):
X_MALENEWI.append(X_MALECURRENTI[J_COUNT] + SECOND_TERM[J_COUNT] + THIRD_TERM[J_COUNT])
X_MALENEWI = META_CL.CHECK_INTERVAL_01(X_MALENEWI, X_L, X_U)
OF_MALENEWI = OF_FUNCTION(X_MALENEWI, NULL_DIC)
FIT_MALENEWI = META_CL.FIT_VALUE(OF_MALENEWI)
return X_MALENEWI, OF_MALENEWI, FIT_MALENEWI
# FEMALE FIREFLY MOVEMENT
def FEMALE_FIREFLY_MOVEMENT(OF_FUNCTION, Y_FEMALECURRENTI, X_MALEBEST, FIT_MALEBEST, BETA_0, GAMMA, D, X_L, X_U, NULL_DIC):
    """
    This function moves female firefly I towards the best male firefly found
    so far.
    """
# INITIALIZATION VARIABLES
Y_FEMALENEWI = []
# ATTRACTIVENESS AMONG FIREFLIES (Y_FEMALE AND X_BEST)
BETA = ATTRACTIVENESS_FIREFLY_PARAMETER(BETA_0, GAMMA, Y_FEMALECURRENTI, X_MALEBEST, D)
# PHI RANDOM PARAMETER
PHI = np.random.random()
# UPDATE FEMALE POSITION, OF AND FIT
for I_COUNT in range(D):
Y_FEMALENEWI.append(Y_FEMALECURRENTI[I_COUNT] + BETA[I_COUNT] * PHI * (X_MALEBEST[I_COUNT] - Y_FEMALECURRENTI[I_COUNT]))
    Y_FEMALENEWI = META_CL.CHECK_INTERVAL_01(Y_FEMALENEWI, X_L, X_U)  # consistent with the other movement functions
OF_FEMALENEWI = OF_FUNCTION(Y_FEMALENEWI, NULL_DIC)
FIT_FEMALENEWI = META_CL.FIT_VALUE(OF_FEMALENEWI)
return Y_FEMALENEWI, OF_FEMALENEWI, FIT_FEMALENEWI
# GAMMA LIGHT ABSORPTION FACTOR
def GAMMA_ASSEMBLY(X_L, X_U, D, M):
"""
This function calculates the light absorption coefficient.
Input:
X_L | Lower limit design variables | Py list[D]
X_U | Upper limit design variables | Py list[D]
D | Problem dimension | Integer
M | Exponent value in distance | Float
Output:
GAMMA | Light absorption coefficient 1 / (X_U - X_L) ** M | Py list[D]
"""
GAMMA = []
for I_COUNT in range(D):
        DISTANCE = X_U[I_COUNT] - X_L[I_COUNT]
GAMMA.append(1 / DISTANCE ** M)
return GAMMA
def GDFA_MALE_MOVEMENT(OF_FUNCTION, X_T0I, Y_T0J, Y_T0K, GAMMA, BETA_0, D, X_L, X_U, NULL_DIC):
"""
    This function creates a new male solution using the gender-driven FA
    (GDFA) movement algorithm.
    Input:
    OF_FUNCTION     | External def user input this function in arguments    | Py function
    X_T0I           | Design variable I male firefly before movement        | Py list[D]
    Y_T0J           | J female firefly                                      | Py list[D]
    Y_T0K           | K female firefly                                      | Py list[D]
    GAMMA           | Light absorption coefficient 1 / (X_U - X_L) ** M     | Py list[D]
    BETA_0          | Attractiveness at r = 0                               | Float
    D               | Problem dimension                                     | Integer
    X_L             | Lower limit design variables                          | Py list[D]
    X_U             | Upper limit design variables                          | Py list[D]
    NULL_DIC        | Empty variable for the user to use in the obj. function | ?
Output:
X_T1I | Design variable I particle after movement | Py list[D]
OF_T1I | Objective function X_T1I (new particle) | Float
FIT_T1I | Fitness X_T1I (new particle) | Float
NEOF | Number of objective function evaluations | Integer
"""
# Start internal variables
X_T1I = []
OF_T1I = 0
FIT_T1I = 0
BETA_J = ATTRACTIVENESS_FIREFLY_PARAMETER(BETA_0, GAMMA, X_T0I, Y_T0J, D)
BETA_K = ATTRACTIVENESS_FIREFLY_PARAMETER(BETA_0, GAMMA, X_T0I, Y_T0K, D)
#D_1
if Y_T0J < X_T0I:
D_1 = 1
else:
D_1 = -1
#D_2
if Y_T0K < X_T0I:
D_2 = 1
else:
D_2 = -1
for I_COUNT in range(D):
LAMBDA = np.random.random()
MICRO = np.random.random()
NEW_VALUE = X_T0I[I_COUNT] + D_1 * BETA_J[I_COUNT] * LAMBDA * (Y_T0J[I_COUNT] - X_T0I[I_COUNT])
AUX_VALUE = D_2 * BETA_K[I_COUNT] * MICRO * (Y_T0K[I_COUNT] - X_T0I[I_COUNT])
NEW_VALUE += AUX_VALUE
X_T1I.append(NEW_VALUE)
    # Check bounds
X_T1I = META_CL.CHECK_INTERVAL_01(X_T1I, X_L, X_U)
# Evaluation of the objective function and fitness
OF_T1I = OF_FUNCTION(X_T1I, NULL_DIC)
FIT_T1I = META_CL.FIT_VALUE(OF_T1I)
NEOF = 1
return X_T1I, OF_T1I, FIT_T1I, NEOF
def GDFA_FEMALE_MOVEMENT(OF_FUNCTION, X_T0I, Y_T0J, Y_T0K, GAMMA, BETA_0, D, X_L, X_U, NULL_DIC):
"""
    This function creates a new female solution using the gender-driven FA
    (GDFA) movement algorithm.
    Input:
    OF_FUNCTION     | External def user input this function in arguments    | Py function
    X_T0I           | Design variable I female firefly before movement      | Py list[D]
    Y_T0J           | J firefly                                             | Py list[D]
    Y_T0K           | K firefly                                             | Py list[D]
    GAMMA           | Light absorption coefficient 1 / (X_U - X_L) ** M     | Py list[D]
    BETA_0          | Attractiveness at r = 0                               | Float
    D               | Problem dimension                                     | Integer
    X_L             | Lower limit design variables                          | Py list[D]
    X_U             | Upper limit design variables                          | Py list[D]
    NULL_DIC        | Empty variable for the user to use in the obj. function | ?
Output:
X_T1I | Design variable I particle after movement | Py list[D]
OF_T1I | Objective function X_T1I (new particle) | Float
FIT_T1I | Fitness X_T1I (new particle) | Float
NEOF | Number of objective function evaluations | Integer
"""
# Start internal variables
X_T1I = []
OF_T1I = 0
FIT_T1I = 0
BETA_J = ATTRACTIVENESS_FIREFLY_PARAMETER(BETA_0, GAMMA, X_T0I, Y_T0J, D)
BETA_K = ATTRACTIVENESS_FIREFLY_PARAMETER(BETA_0, GAMMA, X_T0I, Y_T0K, D)
    # NOTE: original author marked this branch as still needing rework
#D_1
if Y_T0J < X_T0I:
D_1 = 1
else:
D_1 = -1
#D_2
if Y_T0K < X_T0I:
D_2 = 1
else:
D_2 = -1
for I_COUNT in range(D):
LAMBDA = np.random.random()
MICRO = np.random.random()
NEW_VALUE = X_T0I[I_COUNT] + D_1 * BETA_J[I_COUNT] * LAMBDA * (Y_T0J[I_COUNT] - X_T0I[I_COUNT])
AUX_VALUE = D_2 * BETA_K[I_COUNT] * MICRO * (Y_T0K[I_COUNT] - X_T0I[I_COUNT])
NEW_VALUE += AUX_VALUE
X_T1I.append(NEW_VALUE)
    # Check bounds
X_T1I = META_CL.CHECK_INTERVAL_01(X_T1I, X_L, X_U)
# Evaluation of the objective function and fitness
OF_T1I = OF_FUNCTION(X_T1I, NULL_DIC)
FIT_T1I = META_CL.FIT_VALUE(OF_T1I)
NEOF = 1
return X_T1I, OF_T1I, FIT_T1I, NEOF
# /$$$$$$ /$$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$$$$$$$ /$$$$$$ /$$ /$$ /$$ /$$ /$$$$$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$ /$$$$$$$$ /$$$$$$
# /$$__ $$| $$__ $$| $$_____/| $$_____/ |__ $$__/| $$_____/ /$$__ $$| $$ | $$| $$$ | $$ /$$__ $$| $$ /$$__ $$ /$$__ $$|_ $$_/| $$_____/ /$$__ $$
# | $$ \__/| $$ \ $$| $$ | $$ | $$ | $$ | $$ \__/| $$ | $$| $$$$| $$| $$ \ $$| $$ | $$ \ $$| $$ \__/ | $$ | $$ | $$ \__/
# | $$ /$$$$| $$$$$$$/| $$$$$ | $$$$$ | $$ | $$$$$ | $$ | $$$$$$$$| $$ $$ $$| $$ | $$| $$ | $$ | $$| $$ /$$$$ | $$ | $$$$$ | $$$$$$
# | $$|_ $$| $$____/ | $$__/ | $$__/ | $$ | $$__/ | $$ | $$__ $$| $$ $$$$| $$ | $$| $$ | $$ | $$| $$|_ $$ | $$ | $$__/ \____ $$
# | $$ \ $$| $$ | $$ | $$ | $$ | $$ | $$ $$| $$ | $$| $$\ $$$| $$ | $$| $$ | $$ | $$| $$ \ $$ | $$ | $$ /$$ \ $$
# | $$$$$$/| $$ | $$$$$$$$| $$$$$$$$ | $$ | $$$$$$$$| $$$$$$/| $$ | $$| $$ \ $$| $$$$$$/| $$$$$$$$| $$$$$$/| $$$$$$/ /$$$$$$| $$$$$$$$| $$$$$$/
# \______/ |__/ |________/|________/ |__/ |________/ \______/ |__/ |__/|__/ \__/ \______/ |________/ \______/ \______/ |______/|________/ \______/ | PypiClean |
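# --- Illustrative call of the self-contained helpers above (a minimal sketch
# with made-up bounds; the movement functions additionally require the
# META_CO_LIBRARY module and a user-supplied objective function).
X_L = [0.0, 0.0]
X_U = [10.0, 10.0]
GAMMA = GAMMA_ASSEMBLY(X_L, X_U, 2, 2)  # one coefficient per dimension
BETA = ATTRACTIVENESS_FIREFLY_PARAMETER(1.0, GAMMA, [1.0, 1.0], [4.0, 5.0], 2)
print(GAMMA, BETA)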
/ISPManCCP-0.0.1alpha3.1.tar.bz2/ISPManCCP-0.0.1alpha3.1/extra-packages/pyperl-1.0.1d/perlmod.py | class PerlClass:
def __init__(self, name = None, module=None, ctor="new"):
self.name = name
self.module = module or name
self.ctor = ctor
def __getattr__(self, name):
if name[:2] == '__':
if name[-2:] != '__' and name != '__':
return PerlClass(self.name, ctor=name[2:])
raise AttributeError, name
if self.name:
name = self.name + "::" + name
return PerlClass(name)
def __call__(self, *args):
import perl
name = self.name
perl_require(self.module)
return apply(perl.callm, (self.ctor, name) + args)
class PerlModule:
def __init__(self, name, __wantarray__ = 0):
self.name = name
self.wantarray = __wantarray__
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError, name
perl_require(self.name)
wantarray = self.wantarray
if len(name) > 6 and name[-6:] == '_tuple':
name = name[:-6]
wantarray = 1
full_name = self.name + "::" + name
import perl
func = perl.get_ref(full_name)
func.__wantarray__ = wantarray
return func
def __import__(self, funcs, namespace):
perl_require(self.name)
import perl
if funcs == '*':
funcs = tuple(perl.get_ref("@" + self.name + "::EXPORT"))
elif type(funcs) == type(""):
funcs = (funcs,)
for f in funcs:
namespace[f] = perl.get_ref(self.name + "::" + f)
Perl = PerlClass()
# Loading of perl modules
INC = {}
try:
from thread import get_ident
except ImportError:
def get_ident():
return 1
def perl_require(mod):
# Some caching since the real 'perl.require' is a bit
# heavy.
id = get_ident()
global INC
try:
return INC[id][mod]
except KeyError:
pass
import perl
if not INC.has_key(id):
INC[id] = {}
INC[id][mod] = perl.require(mod)
return INC[id][mod] | PypiClean |
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/readable-stream/lib/_stream_duplex.js |
// a duplex stream is just a stream that is both readable and writable.
// Since JS doesn't have multiple prototypal inheritance, this class
// prototypally inherits from Readable, and then parasitically from
// Writable.
'use strict';
/*<replacement>*/
var objectKeys = Object.keys || function (obj) {
var keys = [];
for (var key in obj) keys.push(key);
return keys;
};
/*</replacement>*/
module.exports = Duplex;
var Readable = require('./_stream_readable');
var Writable = require('./_stream_writable');
require('inherits')(Duplex, Readable);
{
// Allow the keys array to be GC'ed.
var keys = objectKeys(Writable.prototype);
for (var v = 0; v < keys.length; v++) {
var method = keys[v];
if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method];
}
}
function Duplex(options) {
if (!(this instanceof Duplex)) return new Duplex(options);
Readable.call(this, options);
Writable.call(this, options);
this.allowHalfOpen = true;
if (options) {
if (options.readable === false) this.readable = false;
if (options.writable === false) this.writable = false;
if (options.allowHalfOpen === false) {
this.allowHalfOpen = false;
this.once('end', onend);
}
}
}
Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.highWaterMark;
}
});
Object.defineProperty(Duplex.prototype, 'writableBuffer', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState && this._writableState.getBuffer();
}
});
Object.defineProperty(Duplex.prototype, 'writableLength', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
return this._writableState.length;
}
});
// the no-half-open enforcer
function onend() {
// If the writable side ended, then we're ok.
if (this._writableState.ended) return;
// no more data can be written.
// But allow more writes to happen in this tick.
process.nextTick(onEndNT, this);
}
function onEndNT(self) {
self.end();
}
Object.defineProperty(Duplex.prototype, 'destroyed', {
// making it explicit this property is not enumerable
// because otherwise some prototype manipulation in
// userland will fail
enumerable: false,
get: function get() {
if (this._readableState === undefined || this._writableState === undefined) {
return false;
}
return this._readableState.destroyed && this._writableState.destroyed;
},
set: function set(value) {
// we ignore the value if the stream
// has not been initialized yet
if (this._readableState === undefined || this._writableState === undefined) {
return;
}
// backward compatibility, the user is explicitly
// managing destroyed
this._readableState.destroyed = value;
this._writableState.destroyed = value;
}
}); | PypiClean |
/Gbtestapi0.2-0.1a10.tar.gz/Gbtestapi0.2-0.1a10/src/gailbot/core/engines/watson/recognize_callback.py | from typing import Callable, Any, List, Dict
# Third party imports
from copy import deepcopy
from ibm_watson.websocket import RecognizeCallback
# Local imports
from gailbot.core.utils.logger import makelogger
logger = makelogger("callback")
class WatsonException(Exception):
def __init__(self, msg: str) -> None:
super().__init__(msg)
self.msg = msg
class CustomWatsonCallbacks(RecognizeCallback):
"""
Extends the watson callback class to allow custom callbacks to be executed
when an event occurs through the lifecycle of the websocket connection.
Inherits:
(RecognizeCallback)
"""
def __init__(self) -> None:
"""
Args:
closure (List):
User object that is passed as the first parameter of every
callback during the lifecycle of the websocket connection.
"""
self.closure = self._init_closure()
def reset(self) -> None:
logger.info("reset recognize callback")
self.closure = self._init_closure()
def get_results(self) -> Dict:
logger.info("on get result")
return deepcopy(self.closure)
def on_transcription(self, transcript: List) -> None:
"""
Called after the service returns the final result for the transcription.
"""
logger.info("on transcription")
try:
closure = self.closure
closure["callback_status"]["on_transcription"] = True
closure["results"]["transcript"].append(transcript)
except Exception as e:
logger.error(e, exc_info=e)
def on_connected(self) -> None:
"""
Called when a Websocket connection was made
"""
logger.info("connected to watson")
try:
closure = self.closure
closure["callback_status"]["on_connected"] = True
except Exception as e:
logger.error(e, exc_info=e)
def on_error(self, error: str) -> None:
"""
Called when there is an error in the Websocket connection.
"""
logger.error(f"get error {error}", exc_info=error)
closure = self.closure
closure["callback_status"]["on_error"] = True
closure["results"]["error"] = error
raise WatsonException(error)
def on_inactivity_timeout(self, error: str) -> None:
"""
Called when there is an inactivity timeout.
"""
logger.info("inactivity time out")
try:
closure = self.closure
closure["callback_status"]["on_inactivity_timeout"] = True
closure["results"]["error"] = error
except Exception as e:
logger.error(f"timeout error {e}", exc_info=e)
def on_listening(self) -> None:
"""
Called when the service is listening for audio.
"""
logger.info("watson is listening")
try:
closure = self.closure
closure["callback_status"]["on_listening"] = True
except Exception as e:
logger.error(f"on listening error {e}", exc_info=e)
def on_hypothesis(self, hypothesis: str) -> None:
"""
Called when an interim result is received.
"""
logger.info(f"on hypothesis {hypothesis}")
try:
closure = self.closure
            closure["callback_status"]["on_hypothesis"] = True
            # also store the interim hypothesis alongside the other results
            closure["results"]["hypothesis"].append(hypothesis)
except Exception as e:
logger.error(f"on hypothesis error {e}", exc_info=e)
def on_data(self, data: Dict) -> None:
"""
Called when the service returns results. The data is returned unparsed.
"""
logger.info(f"watson returned the results")
try:
closure = self.closure
closure["callback_status"]["on_data"] = True
closure["results"]["data"].append(data)
except Exception as e:
logger.error(f"on data error {e}", exc_info=e)
def on_close(self) -> None:
"""
Called when the Websocket connection is closed
"""
logger.info("on close")
try:
closure = self.closure
closure["callback_status"]["on_close"] = True
except Exception as e:
logger.error(f"on close error {e}", exc_info=e)
def _init_closure(self) -> Dict:
return {
"callback_status": {
"on_transcription": False,
"on_connected": False,
"on_error": False,
"on_inactivity_timeout": False,
"on_listening": False,
"on_hypothesis": False,
"on_data": False,
"on_close": False,
},
"results": {
"error": None,
"transcript": list(),
"hypothesis": list(),
"data": list(),
},
} | PypiClean |
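# --- Sketch of wiring CustomWatsonCallbacks into an IBM Watson STT websocket
# session. The SpeechToTextV1/AudioSource usage below follows the public
# ibm-watson SDK, but the exact call signature, the API key placeholder and
# the audio file path are assumptions -- check the SDK docs before relying on them.
from ibm_watson import SpeechToTextV1
from ibm_watson.websocket import AudioSource
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
stt = SpeechToTextV1(authenticator=IAMAuthenticator('YOUR_APIKEY'))  # placeholder key
callbacks = CustomWatsonCallbacks()
with open('utterance.wav', 'rb') as audio_file:  # hypothetical audio file
    stt.recognize_using_websocket(audio=AudioSource(audio_file),
                                  content_type='audio/wav',
                                  recognize_callback=callbacks)
results = callbacks.get_results()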
/FiPy-3.4.4.tar.gz/FiPy-3.4.4/fipy/terms/advectionTerm.py | from __future__ import division
from __future__ import unicode_literals
__docformat__ = 'restructuredtext'
__all__ = ["AdvectionTerm"]
from future.utils import text_to_native_str
__all__ = [text_to_native_str(n) for n in __all__]
from fipy.tools.numerix import MA
from fipy.tools import numerix
from fipy.terms.firstOrderAdvectionTerm import FirstOrderAdvectionTerm
class AdvectionTerm(FirstOrderAdvectionTerm):
r"""
The `AdvectionTerm` object constructs the `b` vector contribution for
the advection term given by
.. math::
u \abs{\nabla \phi}
from the advection equation given by:
.. math::
\frac{\partial \phi}{\partial t} + u \abs{\nabla \phi} = 0
The construction of the gradient magnitude term requires upwinding as in the standard
`FirstOrderAdvectionTerm`. The higher order terms are incorporated as follows.
The formula used here is given by:
.. math::
u_P \abs{\nabla \phi}_P = \max \left( u_P , 0 \right) \left[ \sum_A \min \left( D_{AP}, 0 \right)^2 \right]^{1/2} + \min \left( u_P , 0 \right) \left[ \sum_A \max \left( D_{AP}, 0 \right)^2 \right]^{1/2}
where,
.. math::
D_{AP} = \frac{ \phi_A - \phi_P } { d_{AP}} - \frac{ d_{AP} } {2} m \left(L_A, L_P \right)
and
.. math::
m\left(x, y\right) &= x \qquad \text{if $\abs{x} \le \abs{y} \forall xy \ge 0$} \\
m\left(x, y\right) &= y \qquad \text{if $\abs{x} > \abs{y} \forall xy \ge 0$} \\
m\left(x, y\right) &= 0 \qquad \text{if $xy < 0$}
also,
.. math::
L_A &= \frac{\phi_{AA} + \phi_P - 2 \phi_A}{d_{AP}^2} \\
L_P &= \frac{\phi_{A} + \phi_{PP} - 2 \phi_P}{d_{AP}^2}
Here are some simple test cases for this problem:
>>> from fipy.meshes import Grid1D
>>> from fipy.solvers import *
>>> SparseMatrix = LinearPCGSolver()._matrixClass
>>> mesh = Grid1D(dx = 1., nx = 3)
Trivial test:
>>> from fipy.variables.cellVariable import CellVariable
>>> coeff = CellVariable(mesh = mesh, value = numerix.zeros(3, 'd'))
>>> v, L, b = AdvectionTerm(0.)._buildMatrix(coeff, SparseMatrix)
>>> print(numerix.allclose(b, numerix.zeros(3, 'd'), atol = 1e-10)) # doctest: +PROCESSOR_0
True
Less trivial test:
>>> coeff = CellVariable(mesh = mesh, value = numerix.arange(3))
>>> v, L, b = AdvectionTerm(1.)._buildMatrix(coeff, SparseMatrix)
>>> print(numerix.allclose(b, numerix.array((0., -1., -1.)), atol = 1e-10)) # doctest: +PROCESSOR_0
True
Even less trivial
>>> coeff = CellVariable(mesh = mesh, value = numerix.arange(3))
>>> v, L, b = AdvectionTerm(-1.)._buildMatrix(coeff, SparseMatrix)
>>> print(numerix.allclose(b, numerix.array((1., 1., 0.)), atol = 1e-10)) # doctest: +PROCESSOR_0
True
Another trivial test case (more trivial than a trivial test case
standing on a harpsichord singing "trivial test cases are here again")
>>> vel = numerix.array((-1, 2, -3))
>>> coeff = CellVariable(mesh = mesh, value = numerix.array((4, 6, 1)))
>>> v, L, b = AdvectionTerm(vel)._buildMatrix(coeff, SparseMatrix)
>>> print(numerix.allclose(b, -vel * numerix.array((2, numerix.sqrt(5**2 + 2**2), 5)), atol = 1e-10)) # doctest: +PROCESSOR_0
True
Somewhat less trivial test case:
>>> from fipy.meshes import Grid2D
>>> mesh = Grid2D(dx = 1., dy = 1., nx = 2, ny = 2)
>>> vel = numerix.array((3, -5, -6, -3))
>>> coeff = CellVariable(mesh = mesh, value = numerix.array((3, 1, 6, 7)))
>>> v, L, b = AdvectionTerm(vel)._buildMatrix(coeff, SparseMatrix)
>>> answer = -vel * numerix.array((2, numerix.sqrt(2**2 + 6**2), 1, 0))
>>> print(numerix.allclose(b, answer, atol = 1e-10)) # doctest: +PROCESSOR_0
True
    For the above test cases the `AdvectionTerm` gives the
    same result as the `FirstOrderAdvectionTerm`. The following test imposes a quadratic
field. The higher order term can resolve this field correctly.
.. math::
\phi = x^2
The returned vector ``b`` should have the value:
.. math::
-\abs{\nabla \phi} = -\left|\frac{\partial \phi}{\partial x}\right| = - 2 \abs{x}
Build the test case in the following way,
>>> mesh = Grid1D(dx = 1., nx = 5)
>>> vel = 1.
>>> coeff = CellVariable(mesh = mesh, value = mesh.cellCenters[0]**2)
>>> v, L, b = __AdvectionTerm(vel)._buildMatrix(coeff, SparseMatrix)
The first order term is not accurate. The first and last element are ignored because they
don't have any neighbors for higher order evaluation
>>> print(numerix.allclose(CellVariable(mesh=mesh,
... value=b).globalValue[1:-1], -2 * mesh.cellCenters.globalValue[0][1:-1]))
False
The higher order term is spot on.
>>> v, L, b = AdvectionTerm(vel)._buildMatrix(coeff, SparseMatrix)
>>> print(numerix.allclose(CellVariable(mesh=mesh,
... value=b).globalValue[1:-1], -2 * mesh.cellCenters.globalValue[0][1:-1]))
True
The `AdvectionTerm` will also resolve a circular field with
more accuracy,
.. math::
\phi = \left( x^2 + y^2 \right)^{1/2}
Build the test case in the following way,
>>> mesh = Grid2D(dx = 1., dy = 1., nx = 10, ny = 10)
>>> vel = 1.
>>> x, y = mesh.cellCenters
>>> r = numerix.sqrt(x**2 + y**2)
>>> coeff = CellVariable(mesh = mesh, value = r)
>>> v, L, b = __AdvectionTerm(1.)._buildMatrix(coeff, SparseMatrix)
>>> error = CellVariable(mesh=mesh, value=b + 1)
>>> ans = CellVariable(mesh=mesh, value=b + 1)
>>> ans[(x > 2) & (x < 8) & (y > 2) & (y < 8)] = 0.123105625618
>>> print((error <= ans).all())
True
The maximum error is large (about 12 %) for the first order advection.
>>> v, L, b = AdvectionTerm(1.)._buildMatrix(coeff, SparseMatrix)
>>> error = CellVariable(mesh=mesh, value=b + 1)
>>> ans = CellVariable(mesh=mesh, value=b + 1)
>>> ans[(x > 2) & (x < 8) & (y > 2) & (y < 8)] = 0.0201715476598
>>> print((error <= ans).all())
True
The maximum error is 2 % when using a higher order contribution.
"""
def _getDifferences(self, adjacentValues, cellValues, oldArray, cellToCellIDs, mesh):
dAP = mesh._cellToCellDistances
## adjacentGradient = numerix.take(oldArray.grad, cellToCellIDs)
adjacentGradient = numerix.take(oldArray.grad, mesh._cellToCellIDs, axis=-1)
adjacentNormalGradient = numerix.dot(adjacentGradient, mesh._cellNormals)
adjacentUpValues = cellValues + 2 * dAP * adjacentNormalGradient
cellIDs = numerix.repeat(numerix.arange(mesh.numberOfCells)[numerix.newaxis, ...],
mesh._maxFacesPerCell, axis=0)
cellIDs = MA.masked_array(cellIDs, mask = MA.getmask(mesh._cellToCellIDs))
cellGradient = numerix.take(oldArray.grad, cellIDs, axis=-1)
cellNormalGradient = numerix.dot(cellGradient, mesh._cellNormals)
cellUpValues = adjacentValues - 2 * dAP * cellNormalGradient
cellLaplacian = (cellUpValues + adjacentValues - 2 * cellValues) / dAP**2
adjacentLaplacian = (adjacentUpValues + cellValues - 2 * adjacentValues) / dAP**2
adjacentLaplacian = adjacentLaplacian.filled(0)
cellLaplacian = cellLaplacian.filled(0)
mm = numerix.where(cellLaplacian * adjacentLaplacian < 0.,
0.,
numerix.where(abs(cellLaplacian) > abs(adjacentLaplacian),
adjacentLaplacian,
cellLaplacian))
return FirstOrderAdvectionTerm._getDifferences(self, adjacentValues, cellValues, oldArray, cellToCellIDs, mesh) - mm * dAP / 2.
class __AdvectionTerm(FirstOrderAdvectionTerm):
"""
Dummy subclass for tests
"""
pass
def _test():
import fipy.tests.doctestPlus
return fipy.tests.doctestPlus.testmod()
if __name__ == "__main__":
_test() | PypiClean |
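# --- Minimal sketch of advecting a level-set field with AdvectionTerm,
# following the usual FiPy pattern (mesh size, velocity and time step are
# made-up example values).
from fipy import CellVariable, Grid1D, TransientTerm
from fipy.terms.advectionTerm import AdvectionTerm
mesh = Grid1D(dx=1., nx=50)
phi = CellVariable(mesh=mesh, value=mesh.cellCenters[0] - 25.)  # signed distance field
eq = TransientTerm() + AdvectionTerm(1.)
for _ in range(10):
    eq.solve(var=phi, dt=0.5)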
/MishMash-0.3.3.tar.gz/MishMash-0.3.3/README.rst | ========
MishMash
========
|Build Status| |License| |PyPI| |Python versions| |Coverage| |Status|
Music database and web interface.
Features
--------
* MishMash is a music database using `Python`_ and `SQLAlchemy`_.
* A command-line tool for building and managing a music database.
* Web browser interface (using `Pyramid`_) for browsing your music library.
* Uses `eyeD3`_ for reading MP3s and ID3 metadata.
* Support and tested with Python 3.6 and Postgresql. SQLite is periodically
tested with, but future features may not be supported (e.g. full text
search).
* Free software: GNU GPL v3.0 license
.. _Python: https://www.python.org/
.. _SQLAlchemy: http://www.sqlalchemy.org/
.. _eyeD3: http://eyeD3.nicfit.net/
.. _Pyramid: https://trypyramid.com/
Getting Started
----------------
::
$ mishmash info
/\/\_____ .__ .__ _____ .__ /\/\
\(\( \ |__| _____| |__ / \ _____ _____| |__\(\(
/ \ / \| |/ ___/ | \ / \ / \\__ \ / ___/ | \
/ Y \ |\___ \| Y \/ Y \/ __ \_\___ \| Y \
\____|__ /__/____ >___| /\____|__ (____ /____ >___| /
\/ \/ \/ \/ \/ \/ \/
Version : 0.3
Database URL : sqlite:////~/mishmash.db
Database version : 0.3
Last sync : Never
Configuration files : <default>
=== Music library ===
0 music tracks
0 music artists
0 music albums
0 music tags
Surprise, you now have an empty sqlite database in the current directory.
Let's leave it here for now; it can be located elsewhere, or a different
database can be used, via command line arguments and/or environment variables. Pretty
useless without any music.::
$ mishmash sync ~/Music/Melvins
Syncing library 'Music': paths=['~/Music/Melvins/']
Syncing directory: ~/Music/Melvins/
Syncing directory: ~/Music/Melvins/1984 - Mangled Demos
Adding artist: Melvins
Syncing directory: ~/Music/Melvins/1986 - 10 Songs
Adding album: 10 Songs
Adding track: ~/Music/Melvins/1986 - 10 Songs/Melvins - 01 - Easy As It Was.mp3
Updating album: 10 Songs
...
== Library 'Music' sync'd [ 8.73s time (45.9 files/s) ] ==
401 files sync'd
401 tracks added
0 tracks modified
0 orphaned tracks deleted
0 orphaned artists deleted
0 orphaned albums deleted
Use your database as you wish. Browse it with `mishmash web`, or use one of its
management commands.
Check out the `Unsonic`_ project for streaming capabilities.
.. _Unsonic: https://github.com/redshodan/unsonic
.. |Build Status| image:: https://travis-ci.org/nicfit/MishMash.svg?branch=master
:target: https://travis-ci.org/nicfit/MishMash
:alt: Build Status
.. |PyPI| image:: https://img.shields.io/pypi/v/MishMash.svg
:target: https://pypi.python.org/pypi/MishMash/
:alt: Latest Version
.. |Python versions| image:: https://img.shields.io/pypi/pyversions/MishMash.svg
:target: https://pypi.python.org/pypi/MishMash/
:alt: Supported Python versions
.. |License| image:: https://img.shields.io/pypi/l/MishMash.svg
:target: https://pypi.python.org/pypi/MishMash/
:alt: License
.. |Status| image:: https://img.shields.io/pypi/status/MishMash.svg
:target: https://pypi.python.org/pypi/MishMash/
:alt: Project Status
.. |Coverage| image:: https://coveralls.io/repos/nicfit/MishMash/badge.svg
:target: https://coveralls.io/r/nicfit/MishMash
:alt: Coverage Status
| PypiClean |
/NeuroRuler-1.7.tar.gz/NeuroRuler-1.7/src/GUI/helpers.py | import platform
import string
from typing import Union
import SimpleITK as sitk
import numpy as np
from PyQt6.QtGui import QImage, QColor, QPixmap, QIcon, QFont
from PyQt6 import QtWidgets
from PyQt6.QtWidgets import (
QApplication,
QDialog,
QLabel,
QMainWindow,
QFileDialog,
QMenu,
QVBoxLayout,
QWidget,
QMessageBox,
)
from PyQt6.QtCore import QSize
from PyQt6.QtCore import Qt
import qimage2ndarray
import src.utils.exceptions as exceptions
import src.utils.user_settings as user_settings
from src.utils.constants import deprecated
MACOS: bool = "macOS" in platform.platform()
WINDOW_TITLE_PADDING: int = 12
"""Used in InformationDialog to add width to the dialog to prevent the window title from being truncated."""
# tl;dr QColor can have alpha (e.g., if we wanted contour color to be transparent)
# but we don't have a need for it so don't support it.
# Call hasAlphaChannel() on many of the QImage's we're working with results in False.
# qimage2ndarray supports scalar/gray + alpha and RGB + alpha, but perhaps the numpy arrays
# we get from sitk.Image don't have alpha. We don't need to go to the effort of adding alpha.
def string_to_QColor(name_or_hex: str) -> QColor:
"""Convert a name (e.g. red) or 6-hexit rrggbb string to a `QColor`.
:param name_or_hex: name of color (e.g. blue) or rrggbb (hexits)
:type name_or_hex: str
:return: QColor
:rtype: QColor
:raise: exceptions.InvalidColor if `name_or_hex` not in specified formats"""
if name_or_hex.isalpha():
return QColor(name_or_hex)
if not all(char in string.hexdigits for char in name_or_hex):
raise exceptions.InvalidColor(name_or_hex)
channels: bytes = bytes.fromhex(name_or_hex)
if len(channels) == 3:
return QColor(channels[0], channels[1], channels[2])
else:
raise exceptions.InvalidColor(name_or_hex)
def mask_QImage(q_img: QImage, binary_mask: np.ndarray, color: QColor) -> None:
"""Given 2D `q_img` and 2D `binary_mask` of the same shape, apply `binary_mask` on `q_img`
to change `q_img` pixels corresponding to `binary_mask`=1 to `color`. Mutates `q_img`.
QImage and numpy use [reversed w,h order](https://stackoverflow.com/a/68220805/18479243).
This function checks that
`q_img.size().width() == binary_mask.shape[0]` and `q_img.size().height() == binary_mask.shape[1]`.
:param q_img:
:type q_img: QImage
:param binary_mask: 0|1 elements
:type binary_mask: np.ndarray
:param color:
:type color: QColor
:raise: exceptions.ArraysDifferentShape if the arrays are of different shape
:return: None
:rtype: None"""
if (
q_img.size().width() != binary_mask.shape[0]
or q_img.size().height() != binary_mask.shape[1]
):
raise exceptions.ArraysDifferentShape
for i in range(binary_mask.shape[0]):
for j in range(binary_mask.shape[1]):
if binary_mask[i][j]:
q_img.setPixelColor(i, j, color)
def sitk_slice_to_qimage(sitk_slice: sitk.Image) -> QImage:
"""Convert a 2D sitk.Image slice to a QImage.
This function calls sitk.GetArrayFromImage, which returns the transpose.
It also calls qimage2ndarray.array2qimage with normalize=True, normalizing
the pixels to 0..255.
:param sitk_slice: 2D slice
:type sitk_slice: sitk.Image
:return: 0..255 normalized QImage
:rtype: QImage"""
slice_np: np.ndarray = sitk.GetArrayFromImage(sitk_slice)
return qimage2ndarray.array2qimage(slice_np, normalize=True)
class ErrorMessageBox(QMessageBox):
def __init__(self, message: str):
""":param message: Error message
:type message: str"""
super().__init__()
# Window title is ignored on macOS.
# QDialog's window title is not ignored on macOS, but I'm pretty sure QDialog doesn't
# support icons.
self.setWindowTitle("Error")
self.setIconPixmap(
QPixmap(f":/{user_settings.THEME_NAME}/message_critical.svg")
)
self.setText(message)
# adjustSize adjusts size based on window content, not window title (+ window buttons)
# So for some menu options, the window title would be truncated if some width isn't added
# However, QDialog does show window title on macOS (unlike QMessageBox)
class InformationDialog(QDialog):
def __init__(self, title: str, message: str):
""":param title: Title of window
:type title: str
:param message: Informational message
:type message: str"""
super().__init__()
self.setWindowTitle(title)
layout: QVBoxLayout = QVBoxLayout()
message_label: QLabel = QLabel(message)
layout.addWidget(message_label)
self.setLayout(layout)
self.adjustSize()
# Add width to prevent truncation of the window title
self.setFixedSize(
self.size().width() + WINDOW_TITLE_PADDING * len(title),
self.size().height(),
)
# Deprecated because QMessageBox's window title doesn't show up on macOS
# However, QMessageBox can display an icon, whereas QDialog can't (I think)
# The icon provides some additional width that will keep the window title from being truncated, unlike QDialog
@deprecated
class InformationMessageBox(QMessageBox):
def __init__(self, title: str, message: str):
""":param title: Title of window (on macOS, QMessageBox window title doesn't show up so is instead prepended to the message)
:type title: str
:param message: Informational message
:type message: str"""
super().__init__()
self.setWindowTitle(title)
self.setIconPixmap(
QPixmap(f":/{user_settings.THEME_NAME}/message_information.svg")
)
if MACOS:
title += "\n\n"
self.setText(f"{title if MACOS else ''} {message}") | PypiClean |
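# --- Illustrative pipeline for the helpers above (a sketch: the file path is
# made up, the threshold is a toy value, and a running QApplication is
# assumed before any widgets are shown).
import SimpleITK as sitk
img = sitk.ReadImage('/tmp/brain.nii.gz')  # hypothetical input volume
slice_2d = img[:, :, img.GetSize()[2] // 2]  # middle axial slice
q_img = sitk_slice_to_qimage(slice_2d)
binary = sitk.GetArrayFromImage(slice_2d > 100)  # toy threshold; numpy array is (h, w)
mask_QImage(q_img, binary.transpose(), string_to_QColor('ff0000'))  # QImage wants (w, h)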
/Ngoto-0.0.39-py3-none-any.whl/ngoto/core/util/rich/status.py | from types import TracebackType
from typing import Optional, Type
from .console import Console, RenderableType
from .jupyter import JupyterMixin
from .live import Live
from .spinner import Spinner
from .style import StyleType
class Status(JupyterMixin):
"""Displays a status indicator with a 'spinner' animation.
Args:
status (RenderableType): A status renderable (str or Text typically).
console (Console, optional): Console instance to use, or None for global console. Defaults to None.
spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
"""
def __init__(
self,
status: RenderableType,
*,
console: Optional[Console] = None,
spinner: str = "dots",
spinner_style: StyleType = "status.spinner",
speed: float = 1.0,
refresh_per_second: float = 12.5,
):
self.status = status
self.spinner_style = spinner_style
self.speed = speed
self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed)
self._live = Live(
self.renderable,
console=console,
refresh_per_second=refresh_per_second,
transient=True,
)
@property
def renderable(self) -> Spinner:
return self._spinner
@property
def console(self) -> "Console":
"""Get the Console used by the Status objects."""
return self._live.console
def update(
self,
status: Optional[RenderableType] = None,
*,
spinner: Optional[str] = None,
spinner_style: Optional[StyleType] = None,
speed: Optional[float] = None,
) -> None:
"""Update status.
Args:
status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None.
spinner (Optional[str], optional): New spinner or None for no change. Defaults to None.
spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None.
speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None.
"""
if status is not None:
self.status = status
if spinner_style is not None:
self.spinner_style = spinner_style
if speed is not None:
self.speed = speed
if spinner is not None:
self._spinner = Spinner(
spinner, text=self.status, style=self.spinner_style, speed=self.speed
)
self._live.update(self.renderable, refresh=True)
else:
self._spinner.update(
text=self.status, style=self.spinner_style, speed=self.speed
)
def start(self) -> None:
"""Start the status animation."""
self._live.start()
def stop(self) -> None:
"""Stop the spinner animation."""
self._live.stop()
def __rich__(self) -> RenderableType:
return self.renderable
def __enter__(self) -> "Status":
self.start()
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
self.stop()
if __name__ == "__main__": # pragma: no cover
from time import sleep
from .console import Console
console = Console()
with console.status("[magenta]Covid detector booting up") as status:
sleep(3)
console.log("Importing advanced AI")
sleep(3)
console.log("Advanced Covid AI Ready")
sleep(3)
status.update(status="[bold blue] Scanning for Covid", spinner="earth")
sleep(3)
console.log("Found 10,000,000,000 copies of Covid32.exe")
sleep(3)
status.update(
status="[bold red]Moving Covid32.exe to Trash",
spinner="bouncingBall",
spinner_style="yellow",
)
sleep(5)
console.print("[bold green]Covid deleted successfully") | PypiClean |
/OccuPy-0.1.13.tar.gz/OccuPy-0.1.13/docs/Tutorials/gui.md | # GUI overview
The GUI of OccuPy allows you to open maps and view them as sliced 2D images. OccuPy is not meant to visualize the
map in any great detail, this is for you to make appropriate consistency checks. For fine analysis, the GUI will
call out to open ChimeraX, a much more sophisticated visualization tool.
<br><br>
The GUI automatically calculates and adjusts kernel settings per the users direction, and permits interactive
evaluation of map modification.
<br><br>
The GUI also exposes tools to generate masks based on the estimated scale.
<br><br>

---
## Input map
The map to be used as input. When you run OccuPy through the GUI, it will use the currently selected map.
OccuPy supports cubic .map and .mrc files. Occasionally, .ccp4 files cause issues.
<div class="admonition attention">
<p class="admonition-title">OccuPy wants raw input</p>
<p>
OccuPy was designed to expect unsharpened maps with solvent noise, so it's best to not
mask or post-process your map. In most cases its fine to do this too, but it's not recommended.
</p>
</div>
<div class="admonition error">
<p class="admonition-title">AI maps try too hard</p>
<p>
Machine-learning tools for post-processing alter the map in ways that OccuPy was not designed to anticipate. If
you provide a map that has been altered by machine-learning methods, the output should be considered very unreliable.
</p>
</div>
There is also an "emdb" button to fetch and unzip the main map of any EMD entry.
---
## Scale Kernel Settings
When you provide an input map, OccuPy checks the box dimensions and voxel size. Based on this, it calculates
suggested parameters to estimate the local scale with accuracy and confidence.
If you change these parameters, parameters below may be updated. Changing some of these parameters will alter the
local scale estimate, so leave them unchanged until you have gone through one of the specific tutorials and
understand what they do.
<div class="admonition hint">
<p class="admonition-title">Details depend on use</p>
<p>
More detailed specifications of the scale kernel settings are given in the tutorials that describe estimation of
occupancy and relative resolution, since the settings influence each case slightly differently.
</p>
</div>
---
## Modification options
In some cases you may want to change the input map based on the scale estimate. This is where you do that.
<br><br>
If the scale you have selected (below the viewer) is in "occupancy" mode (has 'occ' in its name), OccuPy will
let you use it to modify maps. The active modification will be interactively approximated in the "preview" and "plot"
tab of the viewer.
<br><br>
If the scale you have selected (below the viewer) has 'res' in its name, OccuPy will *not*
let you use it to modify maps, and it will deactivate the "preview" tab of the viewer and the "Modify Map" button.
<div class="admonition hint">
<p class="admonition-title">It's easier done than said</p>
<p>
If you try it out and follow one or more tutorials, this will make more sense than any explanation. More detailed
specifications of the modification options are given, for example, in the tutorial on map modification.
</p>
</div>
---
## Optional/extra
These are options that you would normally not use, but that you might want to play with in certain cases. You can
skip this section for now, leaving them at default values.
### Limit box size
OccuPy down-samples the input map to this size for all internal calculations, using a Fourier-crop (low-pass)
procedure. This makes OccuPy use less memory and run faster. Any modified output is zero-padded in Fourier space to
match the input dimension.
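For intuition, Fourier-cropping keeps only the central (low-frequency) region of the map's Fourier transform. A minimal numpy sketch of the idea (not OccuPy's actual implementation; even box sizes assumed):

```python
import numpy as np

def fourier_crop(vol, new_size):
    """Low-pass a cubic volume by cropping its centered Fourier transform."""
    ft = np.fft.fftshift(np.fft.fftn(vol))
    c, h = vol.shape[0] // 2, new_size // 2
    ft_small = ft[c - h:c + h, c - h:c + h, c - h:c + h]
    # rescale so the grey values stay comparable to the input
    ft_small *= (new_size / vol.shape[0]) ** 3
    return np.real(np.fft.ifftn(np.fft.ifftshift(ft_small)))
```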
### Output lowpass
This is only relevant for modified output. Beyond the limited box-size described above, OccuPy applies any
modification on the full-resolution input, not the low-passed input. So if the output should be low-passed, this
must be specified.
### Naive normalization
This is an option to ignore the tile size specified in the kernel settings. In this case, the scale estimation
becomes very sensitive to the Tau percentile (also a kernel setting), which might be desirable in some very special
circumstances.
### Histogram-match to input
Histogram-matching is a procedure to make the greyscale and contrast of two images as equal as possible. In some
cases this might be desirable to enforce, but most of the time the greyscale of the output will very closely match
the input anyway.
---
## The viewer
### Input
The map currently selected in the "input map" drop-down menu above.
### Scale
The map currently selected in the "scale map" drop-down menu **below** the viewer. You can either add files to this
drop-down
by browsing or by running OccuPy.
### Conf.
The estimated solvent confidence from the previous time OccuPy was run. This is based on the estimated solvent model,
which can be viewed on the tab adjacent to the output log. The confidence is used to restrict any modification, so
that solvent isn't amplified or attenuated.
### Sol.def
The map currently selected in the "solvent def" drop-down menu below. The solvent definition restricts the
estimation of a solvent model. This is not typically required.
### Preview
If you have an input and occupancy-mode scale selected, this will show a preview of the modification. **This does
not account for confidence and solvent compensation, and will look worse than the actual output**.
---
## The output log
This will document what is happening, for our convenience. But everything is also documented in the full log, which
you can access either through the menu or by double-clicking the output log *tab*.
<div class="admonition attention">
<p class="admonition-title">Some clutter during testing</p>
<p>
OccuPy is currently being alpha-tested, so there's a bunch of extra output to make it easier to respond to user
issues. These lines begin with "AT: " and should be colored green. You can safely ignore them.
</p>
</div>
---
## The Menu
Control over where OccuPy writes output, and other conventional options.
| PypiClean |
/Finance-Ultron-1.0.8.1.tar.gz/Finance-Ultron-1.0.8.1/ultron/optimize/model/modelbase.py | import abc, importlib
import warnings
from distutils.version import LooseVersion
import arrow
import numpy as np
import pandas as pd
try:
from sklearn import __version__ as sklearn_version
from sklearn import metrics
except ImportError:
    warnings.warn("not installed sklearn, please pip install --upgrade sklearn")
try:
    from xgboost import __version__ as xgboost_version
except ImportError:
    warnings.warn("not installed xgboost, please pip install --upgrade xgboost")
try:
from lightgbm import __version__ as lightgbm_version
except ImportError:
    warnings.warn(
        "not installed lightgbm, please pip install --upgrade lightgbm")
try:
from mlxtend import __version__ as mlxtend_version
except ImportError:
warnings.warn(
"not installed lightgbm,please pip install --upgrade mlxtend")
from ultron.factor.utilities import list_eq
from ultron.factor.utilities import encode
from ultron.factor.utilities import decode
from ultron.factor.data.transformer import Transformer
class ModelBase(metaclass=abc.ABCMeta):
def __init__(self, features=None, fit_target=None):
if features is not None:
self.formulas = Transformer(features)
self.features = self.formulas.names
else:
self.features = None
if fit_target is not None:
self.fit_target = Transformer(fit_target)
else:
self.fit_target = None
self.impl = None
self.trained_time = None
def model_encode(self):
return encode(self.impl)
@classmethod
def model_decode(cls, model_desc):
return decode(model_desc)
def __eq__(self, rhs):
return self.model_encode() == rhs.model_encode() \
and self.trained_time == rhs.trained_time \
and list_eq(self.features, rhs.features) \
and encode(self.formulas) == encode(rhs.formulas) \
and encode(self.fit_target) == encode(rhs.fit_target)
def fit(self, x: pd.DataFrame, y: np.ndarray):
self.impl.fit(x[self.features].values, y.flatten())
self.trained_time = arrow.now().format("YYYY-MM-DD HH:mm:ss")
def predict(self, x: pd.DataFrame) -> np.ndarray:
return self.impl.predict(x[self.features].values)
def score(self, x: pd.DataFrame, y: np.ndarray) -> float:
return self.impl.score(x[self.features].values, y)
def ic(self, x: pd.DataFrame, y: np.ndarray) -> float:
predict_y = self.impl.predict(x[self.features].values)
return np.corrcoef(predict_y, y)[0, 1]
def accuracy(self, x: pd.DataFrame, y: np.ndarray) -> float:
predict_y = self.impl.predict(x[self.features].values)
return metrics.accuracy_score(y, predict_y)
def precision(self,
x: pd.DataFrame,
y: np.ndarray,
average='weighted') -> float:
predict_y = self.impl.predict(x[self.features].values)
return metrics.precision_score(y, predict_y, average=average)
def recall(self,
x: pd.DataFrame,
y: np.ndarray,
average='weighted') -> float:
predict_y = self.impl.predict(x[self.features].values)
return metrics.recall_score(y, predict_y, average=average)
def roc_auc(self,
x: pd.DataFrame,
y: np.ndarray,
average='weighted') -> float:
predict_y = self.impl.predict(x[self.features].values)
return metrics.roc_auc_score(y, predict_y, average=average)
def evs(self, x: pd.DataFrame, y: np.ndarray) -> float:
predict_y = self.impl.predict(x[self.features].values)
        return metrics.explained_variance_score(y, predict_y)
def mae(self, x: pd.DataFrame, y: np.ndarray) -> float:
predict_y = self.impl.predict(x[self.features].values)
return metrics.mean_absolute_error(y, predict_y)
def mse(self, x: pd.DataFrame, y: np.ndarray) -> float:
predict_y = self.impl.predict(x[self.features].values)
return metrics.mean_squared_error(y, predict_y)
def r2_score(self,
x: pd.DataFrame,
y: np.ndarray,
multioutput='uniform_average') -> float:
predict_y = self.impl.predict(x[self.features].values)
return metrics.r2_score(y, predict_y, multioutput=multioutput)
@property
def device(self):
return self.impl
@abc.abstractmethod
def save(self) -> dict:
if self.__class__.__module__ == '__main__':
warnings.warn(
"model is defined in a main module. The model_name may not be correct."
)
model_desc = dict(model_name=self.__class__.__module__ + "." +
self.__class__.__name__,
language='python',
saved_time=arrow.now().format("YYYY-MM-DD HH:mm:ss"),
features=list(self.features),
trained_time=self.trained_time,
desc=self.model_encode(),
formulas=encode(self.formulas),
fit_target=encode(self.fit_target),
internal_model=self.impl.__class__.__module__ + "." +
self.impl.__class__.__name__)
return model_desc
@classmethod
@abc.abstractmethod
def load(cls, model_desc: dict):
obj_layout = cls()
obj_layout.features = model_desc['features']
obj_layout.formulas = decode(model_desc['formulas'])
obj_layout.trained_time = model_desc['trained_time']
obj_layout.impl = cls.model_decode(model_desc['desc'])
if 'fit_target' in model_desc:
obj_layout.fit_target = decode(model_desc['fit_target'])
else:
obj_layout.fit_target = None
return obj_layout
def create_model_base(party_name=None):
if not party_name:
return ModelBase
else:
class ExternalLibBase(ModelBase):
_lib_name = party_name
def save(self) -> dict:
model_desc = super().save()
if self._lib_name == 'sklearn':
model_desc[self._lib_name + "_version"] = sklearn_version
elif self._lib_name == 'xgboost':
                model_desc[self._lib_name + "_version"] = xgboost_version
elif self._lib_name == 'lightgbm':
model_desc[self._lib_name + "_version"] = lightgbm_version
elif self._lib_name == 'mlxtend':
model_desc[self._lib_name + "_version"] = mlxtend_version
else:
raise ValueError(
"3rd party lib name ({0}) is not recognized".format(
self._lib_name))
return model_desc
@classmethod
def load(cls, model_desc: dict):
obj_layout = super().load(model_desc)
if cls._lib_name == 'sklearn':
current_version = sklearn_version
elif cls._lib_name == 'xgboost':
                current_version = xgboost_version
elif cls._lib_name == 'lightgbm':
current_version = lightgbm_version
elif cls._lib_name == 'mlxtend':
current_version = mlxtend_version
else:
raise ValueError(
"3rd party lib name ({0}) is not recognized".format(
cls._lib_name))
if LooseVersion(current_version) < LooseVersion(
model_desc[cls._lib_name + "_version"]):
warnings.warn(
'Current {2} version {0} is lower than the model version {1}. '
'Loaded model may work incorrectly.'.format(
                            current_version, model_desc[cls._lib_name + "_version"],
cls._lib_name))
return obj_layout
return ExternalLibBase
def load_module(name):
for ml in ['treemodel', 'linearmodel']:
module_name = 'ultron.optimize.model.{0}'.format(ml)
module = importlib.import_module(module_name)
if name in module.__dict__:
return importlib.import_module(module_name).__getattribute__(name)
raise ValueError("{0} not in model".format(name)) | PypiClean |
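# --- Sketch of round-tripping a model through save()/load(). The model class
# name 'XGBRegressor' and the DataFrame `train_df` are assumptions for
# illustration; substitute whatever your installation actually provides.
XGBRegressor = load_module('XGBRegressor')
model = XGBRegressor(features=['f1', 'f2'], fit_target='ret')
model.fit(train_df, train_df['ret'].values)  # train_df: user-supplied DataFrame
desc = model.save()  # a plain dict, safe to persist
restored = XGBRegressor.load(desc)
assert restored == model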
/Neodroid-0.4.9-py36-none-any.whl/neodroid/utilities/launcher/environment_launcher.py |
__author__ = "Christian Heider Nielsen"
def launch_environment(
environment_name,
*,
path_to_executables_directory,
ip="127.0.0.1",
port=5252,
headless=False,
):
"""
:param environment_name:
:param path_to_executables_directory:
:param ip:
:param port:
:param headless:
:return:
"""
import logging
import pathlib
from neodroid.utilities.launcher.download_utilities.download_environment import (
download_environment,
)
import os
import shlex
import struct
import subprocess
import sys
import stat
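    # Resolve the executable: use environment_name directly when it points to an
    # existing path; otherwise pick a platform/headless-specific build and
    # download it on demand into path_to_executables_directory.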
environment_name = pathlib.Path(environment_name)
if pathlib.Path.exists(environment_name):
path_to_executable = environment_name
else:
system_arch = struct.calcsize("P") * 8
logging.info(f"System Architecture: {system_arch}")
variation_name = (
f"{environment_name}" if not headless else f"{environment_name}_headless"
)
if sys.platform == "win32":
variation_name = f"{variation_name}_win"
elif sys.platform == "darwin":
variation_name = f"{variation_name}_mac"
else:
variation_name = f"{variation_name}_linux"
base_name = pathlib.Path(path_to_executables_directory) / environment_name
path = base_name / variation_name
if not base_name.exists():
            old_mask = os.umask(0)
try:
base_name.mkdir(0o777, parents=True, exist_ok=True)
finally:
os.umask(old_mask)
if not path.exists():
download_environment(
variation_name, path_to_executables_directory=base_name
)
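        # default to the Windows binary name; replaced below for non-Windows platforms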
path_to_executable = path / "Neodroid.exe"
if sys.platform != "win32":
if system_arch == 32:
path_to_executable = path / f"{environment_name}.x86"
else:
path_to_executable = path / f"{environment_name}.x86_64"
"""
cwd = os.getcwd()
file_name = (file_name.strip()
.replace('.app', '').replace('.exe', '').replace('.x86_64', '').replace('.x86', ''))
true_filename = os.path.basename(os.path.normpath(file_name))
launch_string = None
if platform == 'linux' or platform == 'linux2':
candidates = glob.glob(pathlib.Path.joinpath(cwd, file_name) + '.x86_64')
if len(candidates) == 0:
candidates = glob.glob(pathlib.Path.joinpath(cwd, file_name) + '.x86')
if len(candidates) == 0:
candidates = glob.glob(file_name + '.x86_64')
if len(candidates) == 0:
candidates = glob.glob(file_name + '.x86')
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == 'darwin':
candidates = glob.glob(pathlib.Path.joinpath(cwd, file_name + '.app', 'Contents', 'MacOS',
true_filename))
if len(candidates) == 0:
candidates = glob.glob(pathlib.Path.joinpath(file_name + '.app', 'Contents', 'MacOS',
true_filename))
if len(candidates) == 0:
candidates = glob.glob(pathlib.Path.joinpath(cwd, file_name + '.app', 'Contents', 'MacOS', '*'))
if len(candidates) == 0:
candidates = glob.glob(pathlib.Path.joinpath(file_name + '.app', 'Contents', 'MacOS', '*'))
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == 'win32':
candidates = glob.glob(pathlib.Path.joinpath(cwd, file_name + '.exe'))
if len(candidates) == 0:
candidates = glob.glob(file_name + '.exe')
if len(candidates) > 0:
launch_string = candidates[0]
"""
st = path_to_executable.stat() # Ensure file is executable
path_to_executable.chmod(st.st_mode | stat.S_IEXEC)
# new_env = os.environ.copy()
# new_env['vblank_mode'] = '0'
# pre_args = ['vblank_mode=0','optirun']
post_args = shlex.split(
f" -ip {ip}"
f" -port {port}"
# f' -batchmode'
# f' -nographics'
)
# cmd= pre_args+[path_to_executable] + post_args
cmd = [path_to_executable] + post_args
logging.info(cmd)
return subprocess.Popen(
cmd
# ,env=new_env
) | PypiClean |
/Lenovo%20Ai%20Client-1.0.tar.gz/Lenovo Ai Client-1.0/aiClient/NaturalLanguageProcessingClient.py | from aiClient.AiBaseClient import AiBase
from aiClient.utils.NLPRequestProcess import process_request_sentiment,process_request_standford
from aiClient.utils.ApiUrl import AiUrl
class NaturalLanguageProcessing(AiBase):
"""
"""
def chinese_sentiment_analysis(self, text):
"""
"""
data = process_request_sentiment(text)
chinese_sentiment_analysis_url = AiUrl.chinese_sentiment_analysis
return self._request(chinese_sentiment_analysis_url, data)
def word_segmentation(self, text):
"""
"""
data = process_request_standford(text)
word_segmentation_url = AiUrl.word_segmentation
return self._request(word_segmentation_url, data)
def part_of_speech_tagging(self, text):
"""
"""
data = process_request_standford(text)
part_of_speech_tagging_url = AiUrl.part_of_speech_tagging
return self._request(part_of_speech_tagging_url, data)
def lemmatization(self, text):
"""
"""
data = process_request_standford(text)
lemmatization_url = AiUrl.lemmatization
return self._request(lemmatization_url, data)
def named_entity_recognition(self, text):
"""
"""
data = process_request_standford(text)
named_entity_recognition_url = AiUrl.named_entity_recognition
return self._request(named_entity_recognition_url, data)
def parsing(self, text):
"""
"""
data = process_request_standford(text)
parsing_url = AiUrl.parsing
return self._request(parsing_url, data)
def relationship_extraction(self, text):
"""
"""
data = process_request_standford(text)
relationship_extraction_url = AiUrl.relationship_extraction
return self._request(relationship_extraction_url, data)
def coreference_resolution(self, text):
"""
"""
data = process_request_standford(text)
coreference_resolution_url = AiUrl.conference_resolution
return self._request(coreference_resolution_url, data) | PypiClean |
/MindsDB-23.8.3.0.tar.gz/MindsDB-23.8.3.0/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/handshake_packet.py | from mindsdb.api.mysql.mysql_proxy.data_types.mysql_packet import Packet
from mindsdb.api.mysql.mysql_proxy.libs.constants.mysql import (
DEFAULT_AUTH_METHOD,
DEFAULT_COALLITION_ID,
FILLER_FOR_WIRESHARK_DUMP,
SERVER_STATUS_AUTOCOMMIT
)
from mindsdb.api.mysql.mysql_proxy.classes.server_capabilities import server_capabilities
from mindsdb.api.mysql.mysql_proxy.data_types.mysql_datum import Datum
class HandshakePacket(Packet):
'''
Implementation based on:
https://mariadb.com/kb/en/library/1-connecting-connecting/#initial-handshake-packet
'''
def setup(self):
capabilities = server_capabilities.value
self.protocol_version = Datum('int<1>', 10)
self.server_version = Datum('string<NUL>', '5.7.1-MindsDB-1.0')
self.connection_id = Datum('int<4>', self.proxy.connection_id)
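        # the scramble (salt) is sent in two parts per the handshake layout:
        # the first 8 bytes here, the remainder further down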
self.scramble_1st_part = Datum('string<8>', self.proxy.salt[:8])
self.reserved_byte = Datum('string<1>', '')
self.server_capabilities_1st_part = Datum('int<2>', capabilities)
self.server_default_collation = Datum('int<1>', DEFAULT_COALLITION_ID)
self.status_flags = Datum('int<2>', SERVER_STATUS_AUTOCOMMIT)
self.server_capabilities_2nd_part = Datum('int<2>', capabilities >> 16)
self.wireshark_filler = Datum('int<1>', FILLER_FOR_WIRESHARK_DUMP)
# self.wireshark_filler = Datum('int<1>', len(self.proxy.salt))
self.reserved_filler1 = Datum('string<6>', '')
self.reserved_filler2 = Datum('string<4>', '')
self.scramble_2nd_part = Datum('string<NUL>', self.proxy.salt[8:])
self.null_close = Datum('string<NUL>', DEFAULT_AUTH_METHOD)
@property
def body(self):
order = [
'protocol_version',
'server_version',
'connection_id',
'scramble_1st_part',
'reserved_byte',
'server_capabilities_1st_part',
'server_default_collation',
'status_flags',
'server_capabilities_2nd_part',
'wireshark_filler',
'reserved_filler1',
'reserved_filler2',
'scramble_2nd_part',
'null_close'
]
string = b''
for key in order:
string += getattr(self, key).toStringPacket()
self.setBody(string)
return self._body
@staticmethod
def test():
import pprint
pprint.pprint(str(HandshakePacket().get_packet_string()))
# run the test only when this file is executed directly
if __name__ == "__main__":
HandshakePacket.test() | PypiClean |
/Flask-COMBO-JSONAPI-1.1.0.tar.gz/Flask-COMBO-JSONAPI-1.1.0/flask_combo_jsonapi/data_layers/alchemy.py | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from sqlalchemy.orm import Session as SessionType
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.collections import InstrumentedList
from sqlalchemy.inspection import inspect
from sqlalchemy.orm.attributes import QueryableAttribute
from sqlalchemy.orm import joinedload, ColumnProperty, RelationshipProperty
from marshmallow import class_registry
from marshmallow.base import SchemaABC
from flask_combo_jsonapi.data_layers.base import BaseDataLayer
from flask_combo_jsonapi.data_layers.sorting.alchemy import create_sorts
from flask_combo_jsonapi.exceptions import (
RelationNotFound,
RelatedObjectNotFound,
JsonApiException,
ObjectNotFound,
InvalidInclude,
InvalidType,
PluginMethodNotImplementedError,
)
from flask_combo_jsonapi.data_layers.filtering.alchemy import create_filters
from flask_combo_jsonapi.schema import (
get_model_field,
get_related_schema,
get_relationships,
get_nested_fields,
get_schema_field,
)
from flask_combo_jsonapi.utils import SPLIT_REL
class SqlalchemyDataLayer(BaseDataLayer):
"""Sqlalchemy data layer"""
if TYPE_CHECKING:
session: "SessionType"
def __init__(self, kwargs):
"""Initialize an instance of SqlalchemyDataLayer
:param dict kwargs: initialization parameters of an SqlalchemyDataLayer instance
"""
super().__init__(kwargs)
if not hasattr(self, "session"):
raise Exception(
f"You must provide a session in data_layer_kwargs to use sqlalchemy data layer in {self.resource.__name__}"
)
if not hasattr(self, "model"):
raise Exception(
f"You must provide a model in data_layer_kwargs to use sqlalchemy data layer in {self.resource.__name__}"
)
self.disable_collection_count: bool = False
self.default_collection_count: int = -1
def post_init(self):
"""
Checking some props here
:return:
"""
if self.resource is None:
# if working outside the resource, it's not assigned here
return
if not hasattr(self.resource, "disable_collection_count") or self.resource.disable_collection_count is False:
return
params = self.resource.disable_collection_count
if isinstance(params, (bool, int)):
self.disable_collection_count = bool(params)
if isinstance(params, (tuple, list)):
try:
self.disable_collection_count, self.default_collection_count = params
except ValueError:
raise ValueError(
"Resource's attribute `disable_collection_count` "
"has to be bool or list/tuple with exactly 2 values!\n"
"For example `disable_collection_count = (True, 999)`"
)
# just ignoring other types, we don't know how to process them
def create_object(self, data, view_kwargs):
"""Create an object through sqlalchemy
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy
"""
for i_plugins in self.resource.plugins:
try:
i_plugins.data_layer_before_create_object(data=data, view_kwargs=view_kwargs, self_json_api=self)
except PluginMethodNotImplementedError:
pass
self.before_create_object(data, view_kwargs)
relationship_fields = get_relationships(self.resource.schema, model_field=True)
nested_fields = get_nested_fields(self.resource.schema, model_field=True)
join_fields = relationship_fields + nested_fields
for i_plugins in self.resource.plugins:
try:
data = i_plugins.data_layer_create_object_clean_data(
data=data, view_kwargs=view_kwargs, join_fields=join_fields, self_json_api=self,
)
except PluginMethodNotImplementedError:
pass
obj = self.model(**{key: value for (key, value) in data.items() if key not in join_fields})
self.apply_relationships(data, obj)
self.apply_nested_fields(data, obj)
for i_plugins in self.resource.plugins:
try:
i_plugins.data_layer_after_create_object(
data=data, view_kwargs=view_kwargs, obj=obj, self_json_api=self,
)
except PluginMethodNotImplementedError:
pass
self.session.add(obj)
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException(f"Object creation error: {e}", source={"pointer": "/data"})
self.after_create_object(obj, data, view_kwargs)
return obj
def get_object(self, view_kwargs, qs=None):
"""Retrieve an object through sqlalchemy
        :param dict view_kwargs: kwargs from the resource view
:return DeclarativeMeta: an object from sqlalchemy
"""
        # Expire sqlalchemy's cached objects, otherwise fresh data (e.g. about current_user) cannot be loaded
self.session.expire_all()
self.before_get_object(view_kwargs)
id_field = getattr(self, "id_field", inspect(self.model).primary_key[0].key)
try:
filter_field = getattr(self.model, id_field)
except Exception:
raise Exception(f"{self.model.__name__} has no attribute {id_field}")
url_field = getattr(self, "url_field", "id")
filter_value = view_kwargs[url_field]
query = self.retrieve_object_query(view_kwargs, filter_field, filter_value)
if self.resource is not None:
for i_plugins in self.resource.plugins:
try:
query = i_plugins.data_layer_get_object_update_query(
query=query, qs=qs, view_kwargs=view_kwargs, self_json_api=self,
)
except PluginMethodNotImplementedError:
pass
if qs is not None:
query = self.eagerload_includes(query, qs)
try:
obj = query.one()
except NoResultFound:
obj = None
self.after_get_object(obj, view_kwargs)
return obj
def get_collection_count(self, query, qs, view_kwargs) -> int:
"""
:param query: SQLAlchemy query
:param qs: QueryString
:param view_kwargs: view kwargs
:return:
"""
if self.disable_collection_count is True:
return self.default_collection_count
return query.count()
def get_collection(self, qs, view_kwargs):
"""Retrieve a collection of objects through sqlalchemy
:param QueryStringManager qs: a querystring manager to retrieve information from url
:param dict view_kwargs: kwargs from the resource view
        :return tuple: the number of objects and the list of objects
"""
        # Expire sqlalchemy's cached objects, otherwise fresh data (e.g. about current_user) cannot be loaded
self.session.expire_all()
self.before_get_collection(qs, view_kwargs)
query = self.query(view_kwargs)
for i_plugins in self.resource.plugins:
try:
query = i_plugins.data_layer_get_collection_update_query(
query=query, qs=qs, view_kwargs=view_kwargs, self_json_api=self,
)
except PluginMethodNotImplementedError:
pass
if qs.filters:
query = self.filter_query(query, qs.filters, self.model)
if qs.sorting:
query = self.sort_query(query, qs.sorting)
objects_count = self.get_collection_count(query, qs, view_kwargs)
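        # "eagerload_includes" is read as an attribute flag here; since a method
        # of the same name exists on this class, this is truthy unless a
        # data-layer kwarg overrides it with a falsy value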
if getattr(self, "eagerload_includes", True):
query = self.eagerload_includes(query, qs)
query = self.paginate_query(query, qs.pagination)
collection = query.all()
collection = self.after_get_collection(collection, qs, view_kwargs)
return objects_count, collection
def update_object(self, obj, data, view_kwargs):
"""Update an object through sqlalchemy
:param DeclarativeMeta obj: an object from sqlalchemy
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the object has changed else False
"""
if obj is None:
url_field = getattr(self, "url_field", "id")
filter_value = view_kwargs[url_field]
raise ObjectNotFound(f"{self.model.__name__}: {filter_value} not found", source={"parameter": url_field})
self.before_update_object(obj, data, view_kwargs)
relationship_fields = get_relationships(self.resource.schema, model_field=True)
nested_fields = get_nested_fields(self.resource.schema, model_field=True)
join_fields = relationship_fields + nested_fields
for i_plugins in self.resource.plugins:
try:
data = i_plugins.data_layer_update_object_clean_data(
data=data, obj=obj, view_kwargs=view_kwargs, join_fields=join_fields, self_json_api=self,
)
except PluginMethodNotImplementedError:
pass
for key, value in data.items():
if hasattr(obj, key) and key not in join_fields:
setattr(obj, key, value)
self.apply_relationships(data, obj)
self.apply_nested_fields(data, obj)
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
orig_e = getattr(e, "orig", object)
message = getattr(orig_e, "args", [])
message = message[0] if message else None
e = message if message else e
raise JsonApiException("Update object error: " + str(e), source={"pointer": "/data"})
self.after_update_object(obj, data, view_kwargs)
def delete_object(self, obj, view_kwargs):
"""Delete an object through sqlalchemy
        :param DeclarativeMeta obj: an object from sqlalchemy
:param dict view_kwargs: kwargs from the resource view
"""
if obj is None:
url_field = getattr(self, "url_field", "id")
filter_value = view_kwargs[url_field]
raise ObjectNotFound(f"{self.model.__name__}: {filter_value} not found", source={"parameter": url_field})
self.before_delete_object(obj, view_kwargs)
for i_plugins in self.resource.plugins:
try:
i_plugins.data_layer_delete_object_clean_data(obj=obj, view_kwargs=view_kwargs, self_json_api=self)
except PluginMethodNotImplementedError:
pass
self.session.delete(obj)
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Delete object error: " + str(e))
self.after_delete_object(obj, view_kwargs)
def create_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Create a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the relationship has changed else False
"""
self.before_create_relationship(json_data, relationship_field, related_id_field, view_kwargs)
obj = self.get_object(view_kwargs)
if obj is None:
url_field = getattr(self, "url_field", "id")
filter_value = view_kwargs[url_field]
raise ObjectNotFound(f"{self.model.__name__}: {filter_value} not found", source={"parameter": url_field})
if not hasattr(obj, relationship_field):
raise RelationNotFound(f"{obj.__class__.__name__} has no attribute {relationship_field}")
related_model = getattr(obj.__class__, relationship_field).property.mapper.class_
updated = False
if isinstance(json_data["data"], list):
obj_ids = {str(getattr(obj__, related_id_field)) for obj__ in getattr(obj, relationship_field)}
for obj_ in json_data["data"]:
if obj_["id"] not in obj_ids:
getattr(obj, relationship_field).append(
self.get_related_object(related_model, related_id_field, obj_)
)
updated = True
else:
related_object = None
if json_data["data"] is not None:
related_object = self.get_related_object(related_model, related_id_field, json_data["data"])
obj_id = getattr(getattr(obj, relationship_field), related_id_field, None)
new_obj_id = getattr(related_object, related_id_field, None)
if obj_id != new_obj_id:
setattr(obj, relationship_field, related_object)
updated = True
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Create relationship error: " + str(e))
self.after_create_relationship(obj, updated, json_data, relationship_field, related_id_field, view_kwargs)
return obj, updated
def get_relationship(self, relationship_field, related_type_, related_id_field, view_kwargs):
"""Get a relationship
:param str relationship_field: the model attribute used for relationship
:param str related_type_: the related resource type
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return tuple: the object and related object(s)
"""
self.before_get_relationship(relationship_field, related_type_, related_id_field, view_kwargs)
obj = self.get_object(view_kwargs)
if obj is None:
url_field = getattr(self, "url_field", "id")
filter_value = view_kwargs[url_field]
raise ObjectNotFound(f"{self.model.__name__}: {filter_value} not found", source={"parameter": url_field})
if not hasattr(obj, relationship_field):
raise RelationNotFound(f"{obj.__class__.__name__} has no attribute {relationship_field}")
related_objects = getattr(obj, relationship_field)
if related_objects is None:
return obj, related_objects
self.after_get_relationship(
obj, related_objects, relationship_field, related_type_, related_id_field, view_kwargs,
)
if isinstance(related_objects, InstrumentedList):
return obj, [{"type": related_type_, "id": getattr(obj_, related_id_field)} for obj_ in related_objects]
else:
return obj, {"type": related_type_, "id": getattr(related_objects, related_id_field)}
def update_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Update a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the relationship has changed else False
"""
self.before_update_relationship(json_data, relationship_field, related_id_field, view_kwargs)
obj = self.get_object(view_kwargs)
if obj is None:
url_field = getattr(self, "url_field", "id")
filter_value = view_kwargs[url_field]
raise ObjectNotFound(f"{self.model.__name__}: {filter_value} not found", source={"parameter": url_field})
if not hasattr(obj, relationship_field):
raise RelationNotFound(f"{obj.__class__.__name__} has no attribute {relationship_field}")
related_model = getattr(obj.__class__, relationship_field).property.mapper.class_
updated = False
if isinstance(json_data["data"], list):
related_objects = []
for obj_ in json_data["data"]:
related_objects.append(self.get_related_object(related_model, related_id_field, obj_))
obj_ids = {getattr(obj__, related_id_field) for obj__ in getattr(obj, relationship_field)}
new_obj_ids = {getattr(related_object, related_id_field) for related_object in related_objects}
if obj_ids != new_obj_ids:
setattr(obj, relationship_field, related_objects)
updated = True
else:
related_object = None
if json_data["data"] is not None:
related_object = self.get_related_object(related_model, related_id_field, json_data["data"])
obj_id = getattr(getattr(obj, relationship_field), related_id_field, None)
new_obj_id = getattr(related_object, related_id_field, None)
if obj_id != new_obj_id:
setattr(obj, relationship_field, related_object)
updated = True
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Update relationship error: " + str(e))
self.after_update_relationship(obj, updated, json_data, relationship_field, related_id_field, view_kwargs)
return obj, updated
def delete_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Delete a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
"""
self.before_delete_relationship(json_data, relationship_field, related_id_field, view_kwargs)
obj = self.get_object(view_kwargs)
if obj is None:
url_field = getattr(self, "url_field", "id")
filter_value = view_kwargs[url_field]
raise ObjectNotFound(f"{self.model.__name__}: {filter_value} not found", source={"parameter": url_field})
if not hasattr(obj, relationship_field):
raise RelationNotFound(f"{obj.__class__.__name__} has no attribute {relationship_field}")
related_model = getattr(obj.__class__, relationship_field).property.mapper.class_
updated = False
if isinstance(json_data["data"], list):
obj_ids = {str(getattr(obj__, related_id_field)) for obj__ in getattr(obj, relationship_field)}
for obj_ in json_data["data"]:
if obj_["id"] in obj_ids:
getattr(obj, relationship_field).remove(
self.get_related_object(related_model, related_id_field, obj_)
)
updated = True
else:
setattr(obj, relationship_field, None)
updated = True
try:
self.session.commit()
except JsonApiException as e:
self.session.rollback()
raise e
except Exception as e:
self.session.rollback()
raise JsonApiException("Delete relationship error: " + str(e))
self.after_delete_relationship(obj, updated, json_data, relationship_field, related_id_field, view_kwargs)
return obj, updated
def get_related_object(self, related_model, related_id_field, obj):
"""Get a related object
:param Model related_model: an sqlalchemy model
:param str related_id_field: the identifier field of the related model
        :param dict obj: dict carrying the identifier of the related object, e.g. {"id": ...}
:return DeclarativeMeta: a related object
"""
try:
related_object = (
self.session.query(related_model).filter(getattr(related_model, related_id_field) == obj["id"]).one()
)
except NoResultFound:
raise RelatedObjectNotFound(f"{related_model.__name__}.{related_id_field}: {obj['id']} not found")
return related_object
def apply_relationships(self, data, obj):
"""Apply relationship provided by data to obj
:param dict data: data provided by the client
:param DeclarativeMeta obj: the sqlalchemy object to plug relationships to
        :return boolean: True if the relationship has changed else False
"""
relationships_to_apply = []
relationship_fields = get_relationships(self.resource.schema, model_field=True)
for key, value in data.items():
if key in relationship_fields:
related_model = getattr(obj.__class__, key).property.mapper.class_
schema_field = get_schema_field(self.resource.schema, key)
related_id_field = self.resource.schema._declared_fields[schema_field].id_field
if isinstance(value, list):
related_objects = []
for identifier in value:
related_object = self.get_related_object(related_model, related_id_field, {"id": identifier})
related_objects.append(related_object)
relationships_to_apply.append({"field": key, "value": related_objects})
else:
related_object = None
if value is not None:
related_object = self.get_related_object(related_model, related_id_field, {"id": value})
relationships_to_apply.append({"field": key, "value": related_object})
for relationship in relationships_to_apply:
setattr(obj, relationship["field"], relationship["value"])
def apply_nested_fields(self, data, obj):
nested_fields_to_apply = []
nested_fields = get_nested_fields(self.resource.schema, model_field=True)
for key, value in data.items():
if key in nested_fields:
nested_field_inspection = inspect(getattr(obj.__class__, key))
if not isinstance(nested_field_inspection, QueryableAttribute):
raise InvalidType("Unrecognized nested field type: not a queryable attribute.")
if isinstance(nested_field_inspection.property, RelationshipProperty):
nested_model = getattr(obj.__class__, key).property.mapper.class_
if isinstance(value, list):
nested_objects = []
for identifier in value:
nested_object = nested_model(**identifier)
nested_objects.append(nested_object)
nested_fields_to_apply.append({"field": key, "value": nested_objects})
else:
nested_field = getattr(obj, key)
if nested_field:
for attribute, new_value in value.items():
setattr(nested_field, attribute, new_value)
else:
nested_fields_to_apply.append({"field": key, "value": nested_model(**value)})
elif isinstance(nested_field_inspection.property, ColumnProperty):
nested_fields_to_apply.append({"field": key, "value": value})
else:
raise InvalidType("Unrecognized nested field type: not a RelationshipProperty or ColumnProperty.")
for nested_field in nested_fields_to_apply:
setattr(obj, nested_field["field"], nested_field["value"])
def filter_query(self, query, filter_info, model):
"""Filter query according to jsonapi 1.0
        :param Query query: sqlalchemy query to filter
:param filter_info: filter information
:type filter_info: dict or None
:param DeclarativeMeta model: an sqlalchemy model
        :return Query: the filtered query
"""
if filter_info:
filters, joins = create_filters(model, filter_info, self.resource)
for i_join in joins:
query = query.join(*i_join)
query = query.filter(*filters)
return query
def sort_query(self, query, sort_info):
"""Sort query according to jsonapi 1.0
:param Query query: sqlalchemy query to sort
:param list sort_info: sort information
:return Query: the sorted query
"""
if sort_info:
sorts, joins = create_sorts(self.model, sort_info, self.resource if hasattr(self, "resource") else None)
for i_join in joins:
query = query.join(*i_join)
for i_sort in sorts:
query = query.order_by(i_sort)
return query
def paginate_query(self, query, paginate_info):
"""Paginate query according to jsonapi 1.0
:param Query query: sqlalchemy queryset
:param dict paginate_info: pagination information
:return Query: the paginated query
"""
if paginate_info.get("size") == 0:
return query
page_size = paginate_info.get("size")
query = query.limit(page_size)
if paginate_info.get("number"):
query = query.offset((paginate_info["number"] - 1) * page_size)
return query
def eagerload_includes(self, query, qs):
"""Use eagerload feature of sqlalchemy to optimize data retrieval for include querystring parameter
:param Query query: sqlalchemy queryset
:param QueryStringManager qs: a querystring manager to retrieve information from url
:return Query: the query with includes eagerloaded
"""
for include in qs.include:
joinload_object = None
if SPLIT_REL in include:
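                # nested include like "author.comments": walk the path, chaining
                # joinedload() and hopping to the related schema at each step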
current_schema = self.resource.schema
for obj in include.split(SPLIT_REL):
try:
field = get_model_field(current_schema, obj)
except Exception as e:
raise InvalidInclude(str(e))
if joinload_object is None:
joinload_object = joinedload(field)
else:
joinload_object = joinload_object.joinedload(field)
related_schema_cls = get_related_schema(current_schema, obj)
if isinstance(related_schema_cls, SchemaABC):
related_schema_cls = related_schema_cls.__class__
else:
related_schema_cls = class_registry.get_class(related_schema_cls)
current_schema = related_schema_cls
else:
try:
field = get_model_field(self.resource.schema, include)
except Exception as e:
raise InvalidInclude(str(e))
joinload_object = joinedload(field)
query = query.options(joinload_object)
return query
def retrieve_object_query(self, view_kwargs, filter_field, filter_value):
"""Build query to retrieve object
:param dict view_kwargs: kwargs from the resource view
        :param filter_field: the sqlalchemy field to filter on
        :param filter_value: the value to filter with
:return sqlalchemy query: a query from sqlalchemy
"""
return self.session.query(self.model).filter(filter_field == filter_value)
def query(self, view_kwargs):
"""Construct the base query to retrieve wanted data
:param dict view_kwargs: kwargs from the resource view
"""
return self.session.query(self.model)
def before_create_object(self, data, view_kwargs):
"""Provide additional data before object creation
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
"""
pass
def after_create_object(self, obj, data, view_kwargs):
"""Provide additional data after object creation
:param obj: an object from data layer
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
"""
pass
def before_get_object(self, view_kwargs):
"""Make work before to retrieve an object
:param dict view_kwargs: kwargs from the resource view
"""
pass
def after_get_object(self, obj, view_kwargs):
"""Make work after to retrieve an object
:param obj: an object from data layer
:param dict view_kwargs: kwargs from the resource view
"""
pass
def before_get_collection(self, qs, view_kwargs):
"""Make work before to retrieve a collection of objects
:param QueryStringManager qs: a querystring manager to retrieve information from url
:param dict view_kwargs: kwargs from the resource view
"""
pass
def after_get_collection(self, collection, qs, view_kwargs):
"""Make work after to retrieve a collection of objects
:param iterable collection: the collection of objects
:param QueryStringManager qs: a querystring manager to retrieve information from url
:param dict view_kwargs: kwargs from the resource view
"""
return collection
def before_update_object(self, obj, data, view_kwargs):
"""Make checks or provide additional data before update object
:param obj: an object from data layer
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
"""
pass
def after_update_object(self, obj, data, view_kwargs):
"""Make work after update object
:param obj: an object from data layer
:param dict data: the data validated by marshmallow
:param dict view_kwargs: kwargs from the resource view
"""
pass
def before_delete_object(self, obj, view_kwargs):
"""Make checks before delete object
:param obj: an object from data layer
:param dict view_kwargs: kwargs from the resource view
"""
pass
def after_delete_object(self, obj, view_kwargs):
"""Make work after delete object
:param obj: an object from data layer
:param dict view_kwargs: kwargs from the resource view
"""
pass
def before_create_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work before to create a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the relationship has changed else False
"""
pass
def after_create_relationship(self, obj, updated, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work after to create a relationship
:param obj: an object from data layer
:param bool updated: True if object was updated else False
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the relationship has changed else False
"""
pass
def before_get_relationship(self, relationship_field, related_type_, related_id_field, view_kwargs):
"""Make work before to get information about a relationship
:param str relationship_field: the model attribute used for relationship
:param str related_type_: the related resource type
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return tuple: the object and related object(s)
"""
pass
def after_get_relationship(
self, obj, related_objects, relationship_field, related_type_, related_id_field, view_kwargs,
):
"""Make work after to get information about a relationship
:param obj: an object from data layer
:param iterable related_objects: related objects of the object
:param str relationship_field: the model attribute used for relationship
:param str related_type_: the related resource type
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
:return tuple: the object and related object(s)
"""
pass
def before_update_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work before to update a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the relationship has changed else False
"""
pass
def after_update_relationship(self, obj, updated, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work after to update a relationship
:param obj: an object from data layer
:param bool updated: True if object was updated else False
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
        :return boolean: True if the relationship has changed else False
"""
pass
def before_delete_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work before to delete a relationship
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
"""
pass
def after_delete_relationship(self, obj, updated, json_data, relationship_field, related_id_field, view_kwargs):
"""Make work after to delete a relationship
:param obj: an object from data layer
:param bool updated: True if object was updated else False
:param dict json_data: the request params
:param str relationship_field: the model attribute used for relationship
:param str related_id_field: the identifier field of the related model
:param dict view_kwargs: kwargs from the resource view
"""
pass | PypiClean |
/Autoneuro-0.0.1.tar.gz/Autoneuro-0.0.1/application/fileOperations/file_methods.py | import pickle
import os
import shutil
from application.logger.appLogger import AppLogger
class FileOperation:
"""
This class shall be used to save the model after training
and load the saved model for prediction.
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
def __init__(self):
self.file_object = open('logs/fileOperationLogs.txt', 'a+')
self.logger_object = AppLogger()
self.model_directory = 'models/'
def save_model(self, model, filename):
"""
Method Name: save_model
Description: Save the model file to directory
Outcome: File gets saved
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object, 'Entered the save_model method of the File_Operation class')
try:
path = os.path.join(self.model_directory, filename) # create seperate directory for each cluster
if os.path.isdir(path): # remove previously existing models for each clusters
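                # NOTE: this wipes the whole model_directory (every saved model),
                # not just this model's folder, before recreating it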
shutil.rmtree(self.model_directory)
os.makedirs(path)
else:
os.makedirs(path) #
with open(path + '/' + filename + '.sav',
'wb') as f:
pickle.dump(model, f) # save the model to file
self.logger_object.log(self.file_object,
'Model File ' + filename + ' saved. Exited the save_model method of the Model_Finder class')
return 'success'
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in save_model method of the Model_Finder class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'Model File ' + filename + ' could not be saved. Exited the save_model method of the Model_Finder class')
raise Exception()
def load_model(self, filename):
"""
Method Name: load_model
Description: load the model file to memory
Output: The Model file loaded in memory
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object, 'Entered the load_model method of the File_Operation class')
try:
with open(self.model_directory + filename + '/' + filename + '.sav',
'rb') as f:
self.logger_object.log(self.file_object,
'Model File ' + filename + ' loaded. Exited the load_model method of the Model_Finder class')
return pickle.load(f)
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in load_model method of the Model_Finder class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'Model File ' + filename + ' could not be saved. Exited the load_model method of the Model_Finder class')
raise Exception()
def find_correct_model_file(self, cluster_number):
"""
Method Name: find_correct_model_file
Description: Select the correct model based on cluster number
Output: The Model file
On Failure: Raise Exception
Written By: iNeuron Intelligence
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object,
'Entered the find_correct_model_file method of the File_Operation class')
try:
self.cluster_number = cluster_number
self.folder_name = self.model_directory
self.list_of_model_files = []
self.list_of_files = os.listdir(self.folder_name)
            for self.file in self.list_of_files:
                # str.index() raises ValueError instead of returning -1, so use a
                # plain membership test to match the file for this cluster number
                if str(self.cluster_number) in self.file:
                    self.model_name = self.file
self.model_name = self.model_name.split('.')[0]
self.logger_object.log(self.file_object,
'Exited the find_correct_model_file method of the Model_Finder class.')
return self.model_name
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in find_correct_model_file method of the Model_Finder class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'Exited the find_correct_model_file method of the Model_Finder class with Failure')
raise Exception() | PypiClean |
/HavNegpy-1.2.tar.gz/HavNegpy-1.2/docs/_build/doctrees/nbsphinx/_build/html/_build/doctrees/nbsphinx/_build/html/_build/html/_build/html/hn_module_tutorial.ipynb | # Tutorial for the HN module of HavNegpy package
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import HavNegpy as dd
%matplotlib qt
os.chdir(r'M:\Marshall_Data\mohamed_data\mohamed_data\n44')
def create_dataframe(f):
col_names = ['Freq', 'T', 'Eps1', 'Eps2']
#f = input(str("Enter the filename:"))
df = pd.read_csv(f, sep=r"\s+",index_col=False,usecols = [0,1,2,3],names=col_names,header=None,skiprows=4,encoding='unicode_escape',engine='python')
col1 = ['log f']
for start in range(0, len(df), 63):
name = df['T'][start]
#print(name)
col1.append(name)
df2 = pd.DataFrame()
f1 = df['Freq'][0:63].values
x1 = np.log10((f1))
e = pd.DataFrame(x1)
df2['log f'] = pd.concat([e],axis=1,ignore_index=True)
global Cooling,Heating
for start in range(0, len(df), 63):
f = df['Eps2'][start:start+63].values
ep = np.log10(f)
d = pd.DataFrame(ep)
df2[start] = pd.concat([d],axis=1,ignore_index=True)
df2.columns = col1
'''
a = int(len(col1)/3)
b = 2*a
c = int(len(col1)) - b
Heating1 = df2.iloc[8:,0:a+1]
Cooling = df2.iloc[8:,a+1:b+1]
Heating2 = df2.iloc[8:,b+1:]
heat1_col = col1[0:a+1]
cool_col = col1[a+1:b+1]
heat2_col = col1[b+1:]
Cooling.columns = cool_col
Heating1.columns = heat1_col
Heating2.columns = heat2_col
f2 = df['Freq'][8:59].values
x2 = np.log10((f2))
Cooling['Freq'] = x2
Heating1['Freq'] = x2
Heating2['Freq'] = x2
'''
Cooling = df2.iloc[:,0:25]
Heating = df2.iloc[:,25:]
return df,df2,Cooling,Heating #Heating2
df,df2,cool,heat = create_dataframe('EPS.TXT')
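# pick one temperature column (here 40) of the heating data; [9:] skips the first low-frequency points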
x,y = df2['log f'][9:], heat[40][9:]
plt.figure()
plt.scatter(x,y,label='data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('Example for HN fitting')
```
image of the plot we are using in this tutorial

```
''' instantiate the HN module from HavNegpy'''
hn = dd.HN()
''' select range to perform hn fitting'''
''' the select_range function pops up a separate window and lets you click twice to select the region of interest (ROI)'''
''' In this tutorial, I'll plot the ROI and append it as an image in the next cell'''
x1,y1 = hn.select_range(x,y)
''' view the data from select range'''
plt.scatter(x1,y1,label = 'Data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('ROI selected from HN module')
```
image of the ROI from HN module
```
''' dump the initial guess parameters using the dump_parameters method (varies for each fit function), which writes the parameters to a json file'''
''' this is required before performing the first fitting as it takes the initial guess from the json file created'''
hn.dump_parameters_hn()
''' view the initial guess for the ROI using initial_view method'''
''' I'll append the image in the next cell'''
hn.initial_view_hn(x1,y1)
```
image of the initial guess
```
''' perform least-squares fitting'''
''' The image of the curve fit is added in the next cell '''
hn.fit(x1,y1)
```
Example of the fit performed using a single HN function. The procedure is similar for double HN and HN with conductivity.

```
'''create a file to save fit results using the create_analysis_file method'''
''' before saving fit results an analysis file has to be created '''
hn.create_analysis_file()
''' save the fit results using save_fit method of the corresponding fit function'''
''' takes one argument; read more in the documentation'''
hn.save_fit_hn(1)
```
| PypiClean |
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/search/da.py | from typing import Dict
import snowballstemmer
from sphinx.search import SearchLanguage, parse_stop_word
danish_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/danish/stop.txt
og | and
i | in
jeg | I
det | that (dem. pronoun)/it (pers. pronoun)
at | that (in front of a sentence)/to (with infinitive)
en | a/an
den | it (pers. pronoun)/that (dem. pronoun)
til | to/at/for/until/against/by/of/into, more
er | present tense of "to be"
som | who, as
på | on/upon/in/on/at/to/after/of/with/for, on
de | they
med | with/by/in, along
han | he
af | of/by/from/off/for/in/with/on, off
for | at/for/to/from/by/of/ago, in front/before, because
ikke | not
der | who/which, there/those
var | past tense of "to be"
mig | me/myself
sig | oneself/himself/herself/itself/themselves
men | but
et | a/an/one, one (number), someone/somebody/one
har | present tense of "to have"
om | round/about/for/in/a, about/around/down, if
vi | we
min | my
havde | past tense of "to have"
ham | him
hun | she
nu | now
over | over/above/across/by/beyond/past/on/about, over/past
da | then, when/as/since
fra | from/off/since, off, since
du | you
ud | out
sin | his/her/its/one's
dem | them
os | us/ourselves
op | up
man | you/one
hans | his
hvor | where
eller | or
hvad | what
skal | must/shall etc.
selv | myself/youself/herself/ourselves etc., even
her | here
alle | all/everyone/everybody etc.
vil | will (verb)
blev | past tense of "to stay/to remain/to get/to become"
kunne | could
ind | in
når | when
være | present tense of "to be"
dog | however/yet/after all
noget | something
ville | would
jo | you know/you see (adv), yes
deres | their/theirs
efter | after/behind/according to/for/by/from, later/afterwards
ned | down
skulle | should
denne | this
end | than
dette | this
mit | my/mine
også | also
under | under/beneath/below/during, below/underneath
have | have
dig | you
anden | other
hende | her
mine | my
alt | everything
meget | much/very, plenty of
sit | his, her, its, one's
sine | his, her, its, one's
vor | our
mod | against
disse | these
hvis | if
din | your/yours
nogle | some
hos | by/at
blive | be/become
mange | many
ad | by/through
bliver | present tense of "to be/to become"
hendes | her/hers
været | be
thi | for (conj)
jer | you
sådan | such, like this/like that
''')
js_stemmer = """
var JSX={};(function(g){function j(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function I(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function i(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function J(a,b,c){return a[b]=a[b]/c|0}var E=parseInt;var D=parseFloat;function K(a){return a!==a}var A=isFinite;var z=encodeURIComponent;var y=decodeURIComponent;var x=encodeURI;var w=decodeURI;var u=Object.prototype.toString;var C=Object.prototype.hasOwnProperty;function f(){}g.require=function(b){var a=p[b];return a!==undefined?a:null};g.profilerIsRunning=function(){return f.getResults!=null};g.getProfileResults=function(){return(f.getResults||function(){return{}})()};g.postProfileResults=function(a,b){if(f.postResults==null)throw new Error('profiler has not been turned on');return f.postResults(a,b)};g.resetProfileResults=function(){if(f.resetResults==null)throw new Error('profiler has not been turned on');return f.resetResults()};g.DEBUG=false;function t(){};j([t],Error);function b(a,b,c){this.G=a.length;this.S=a;this.V=b;this.J=c;this.I=null;this.W=null};j([b],Object);function l(){};j([l],Object);function d(){var a;var b;var c;this.F={};a=this.D='';b=this._=0;c=this.A=a.length;this.B=0;this.C=b;this.E=c};j([d],l);function v(a,b){a.D=b.D;a._=b._;a.A=b.A;a.B=b.B;a.C=b.C;a.E=b.E};function n(b,d,c,e){var a;if(b._>=b.A){return false}a=b.D.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function m(b,d,c,e){var a;if(b._<=b.B){return false}a=b.D.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function r(a,d,c,e){var b;if(a._>=a.A){return false}b=a.D.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function q(a,d,c,e){var b;if(a._<=a.B){return false}b=a.D.charCodeAt(a._-1);if(b>e||b<c){a._--;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function h(a,b,d){var c;if(a._-a.B<b){return false}if(a.D.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function e(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.B;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.G-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.D.charCodeAt(e-1-c)-a.S.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.G){d._=e-a.G|0;if(a.I==null){return a.J}o=a.I(d);d._=e-a.G|0;if(o){return a.J}}b=a.V;if(b<0){return 0}}return-1};function s(a,b,d,e){var c;c=e.length-(d-b);a.D=a.D.slice(0,b)+e+a.D.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.C)<0||c>(d=a.E)||d>(e=a.A)||e>a.D.length?false:true){s(a,a.C,a.E,f);b=true}return b};function o(a,f){var b;var c;var d;var e;b='';if((c=a.C)<0||c>(d=a.E)||d>(e=a.A)||e>a.D.length?false:true){b=a.D.slice(a.C,a.E)}return b};d.prototype.H=function(){return false};d.prototype.T=function(b){var a;var c;var d;var e;a=this.F['.'+b];if(a==null){c=this.D=b;d=this._=0;e=this.A=c.length;this.B=0;this.C=d;this.E=e;this.H();a=this.D;this.F['.'+b]=a}return a};d.prototype.stemWord=d.prototype.T;d.prototype.U=function(e){var d;var b;var c;var 
a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.F['.'+c];if(a==null){f=this.D=c;g=this._=0;h=this.A=f.length;this.B=0;this.C=g;this.E=h;this.H();a=this.D;this.F['.'+c]=a}d.push(a)}return d};d.prototype.stemWords=d.prototype.U;function a(){d.call(this);this.I_x=0;this.I_p1=0;this.S_ch=''};j([a],d);a.prototype.K=function(a){this.I_x=a.I_x;this.I_p1=a.I_p1;this.S_ch=a.S_ch;v(this,a)};a.prototype.copy_from=a.prototype.K;a.prototype.P=function(){var g;var d;var b;var e;var c;var f;var i;var j;var k;var h;this.I_p1=j=this.A;g=i=this._;b=i+3|0;if(0>b||b>j){return false}h=this._=b;this.I_x=h;this._=g;a:while(true){d=this._;e=true;b:while(e===true){e=false;if(!n(this,a.g_v,97,248)){break b}this._=d;break a}k=this._=d;if(k>=this.A){return false}this._++}a:while(true){c=true;b:while(c===true){c=false;if(!r(this,a.g_v,97,248)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p1=this._;f=true;a:while(f===true){f=false;if(!(this.I_p1<this.I_x)){break a}this.I_p1=this.I_x}return true};a.prototype.r_mark_regions=a.prototype.P;function G(b){var h;var e;var c;var f;var d;var g;var j;var k;var l;var i;b.I_p1=k=b.A;h=j=b._;c=j+3|0;if(0>c||c>k){return false}i=b._=c;b.I_x=i;b._=h;a:while(true){e=b._;f=true;b:while(f===true){f=false;if(!n(b,a.g_v,97,248)){break b}b._=e;break a}l=b._=e;if(l>=b.A){return false}b._++}a:while(true){d=true;b:while(d===true){d=false;if(!r(b,a.g_v,97,248)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p1=b._;g=true;a:while(g===true){g=false;if(!(b.I_p1<b.I_x)){break a}b.I_p1=b.I_x}return true};a.prototype.O=function(){var b;var f;var d;var g;var h;var i;f=this.A-(g=this._);if(g<this.I_p1){return false}h=this._=this.I_p1;d=this.B;this.B=h;i=this._=this.A-f;this.E=i;b=e(this,a.a_0,32);if(b===0){this.B=d;return false}this.C=this._;this.B=d;switch(b){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!m(this,a.g_s_ending,97,229)){return false}if(!c(this,'')){return false}break}return true};a.prototype.r_main_suffix=a.prototype.O;function H(b){var d;var g;var f;var h;var i;var j;g=b.A-(h=b._);if(h<b.I_p1){return false}i=b._=b.I_p1;f=b.B;b.B=i;j=b._=b.A-g;b.E=j;d=e(b,a.a_0,32);if(d===0){b.B=f;return false}b.C=b._;b.B=f;switch(d){case 0:return false;case 1:if(!c(b,'')){return false}break;case 2:if(!m(b,a.g_s_ending,97,229)){return false}if(!c(b,'')){return false}break}return true};a.prototype.N=function(){var f;var g;var b;var h;var d;var i;var j;var k;var l;f=(h=this.A)-(d=this._);g=h-d;if(d<this.I_p1){return false}i=this._=this.I_p1;b=this.B;this.B=i;j=this._=this.A-g;this.E=j;if(e(this,a.a_1,4)===0){this.B=b;return false}this.C=this._;l=this.B=b;k=this._=this.A-f;if(k<=l){return false}this._--;this.C=this._;return!c(this,'')?false:true};a.prototype.r_consonant_pair=a.prototype.N;function k(b){var i;var j;var d;var g;var f;var k;var l;var m;var h;i=(g=b.A)-(f=b._);j=g-f;if(f<b.I_p1){return false}k=b._=b.I_p1;d=b.B;b.B=k;l=b._=b.A-j;b.E=l;if(e(b,a.a_1,4)===0){b.B=d;return false}b.C=b._;h=b.B=d;m=b._=b.A-i;if(m<=h){return false}b._--;b.C=b._;return!c(b,'')?false:true};a.prototype.Q=function(){var f;var l;var m;var d;var j;var b;var g;var n;var i;var p;var o;l=this.A-this._;b=true;a:while(b===true){b=false;this.E=this._;if(!h(this,2,'st')){break a}this.C=this._;if(!h(this,2,'ig')){break a}if(!c(this,'')){return false}}i=this._=(n=this.A)-l;m=n-i;if(i<this.I_p1){return false}p=this._=this.I_p1;d=this.B;this.B=p;o=this._=this.A-m;this.E=o;f=e(this,a.a_2,5);if(f===0){this.B=d;return false}this.C=this._;this.B=d;switch(f){case 0:return 
false;case 1:if(!c(this,'')){return false}j=this.A-this._;g=true;a:while(g===true){g=false;if(!k(this)){break a}}this._=this.A-j;break;case 2:if(!c(this,'løs')){return false}break}return true};a.prototype.r_other_suffix=a.prototype.Q;function F(b){var d;var p;var m;var f;var l;var g;var i;var o;var j;var q;var n;p=b.A-b._;g=true;a:while(g===true){g=false;b.E=b._;if(!h(b,2,'st')){break a}b.C=b._;if(!h(b,2,'ig')){break a}if(!c(b,'')){return false}}j=b._=(o=b.A)-p;m=o-j;if(j<b.I_p1){return false}q=b._=b.I_p1;f=b.B;b.B=q;n=b._=b.A-m;b.E=n;d=e(b,a.a_2,5);if(d===0){b.B=f;return false}b.C=b._;b.B=f;switch(d){case 0:return false;case 1:if(!c(b,'')){return false}l=b.A-b._;i=true;a:while(i===true){i=false;if(!k(b)){break a}}b._=b.A-l;break;case 2:if(!c(b,'løs')){return false}break}return true};a.prototype.R=function(){var e;var b;var d;var f;var g;var i;var j;e=this.A-(f=this._);if(f<this.I_p1){return false}g=this._=this.I_p1;b=this.B;this.B=g;i=this._=this.A-e;this.E=i;if(!q(this,a.g_v,97,248)){this.B=b;return false}this.C=this._;j=this.S_ch=o(this,this.S_ch);if(j===''){return false}this.B=b;return!(d=this.S_ch,h(this,d.length,d))?false:!c(this,'')?false:true};a.prototype.r_undouble=a.prototype.R;function B(b){var f;var d;var e;var g;var i;var j;var k;f=b.A-(g=b._);if(g<b.I_p1){return false}i=b._=b.I_p1;d=b.B;b.B=i;j=b._=b.A-f;b.E=j;if(!q(b,a.g_v,97,248)){b.B=d;return false}b.C=b._;k=b.S_ch=o(b,b.S_ch);if(k===''){return false}b.B=d;return!(e=b.S_ch,h(b,e.length,e))?false:!c(b,'')?false:true};a.prototype.H=function(){var i;var g;var h;var j;var b;var c;var d;var a;var e;var l;var m;var n;var o;var p;var q;var f;i=this._;b=true;a:while(b===true){b=false;if(!G(this)){break a}}l=this._=i;this.B=l;n=this._=m=this.A;g=m-n;c=true;a:while(c===true){c=false;if(!H(this)){break a}}p=this._=(o=this.A)-g;h=o-p;d=true;a:while(d===true){d=false;if(!k(this)){break a}}f=this._=(q=this.A)-h;j=q-f;a=true;a:while(a===true){a=false;if(!F(this)){break a}}this._=this.A-j;e=true;a:while(e===true){e=false;if(!B(this)){break a}}this._=this.B;return true};a.prototype.stem=a.prototype.H;a.prototype.L=function(b){return b instanceof a};a.prototype.equals=a.prototype.L;a.prototype.M=function(){var c;var a;var b;var d;c='DanishStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.M;a.serialVersionUID=1;i(a,'methodObject',function(){return new a});i(a,'a_0',function(){return[new b('hed',-1,1),new b('ethed',0,1),new b('ered',-1,1),new b('e',-1,1),new b('erede',3,1),new b('ende',3,1),new b('erende',5,1),new b('ene',3,1),new b('erne',3,1),new b('ere',3,1),new b('en',-1,1),new b('heden',10,1),new b('eren',10,1),new b('er',-1,1),new b('heder',13,1),new b('erer',13,1),new b('s',-1,2),new b('heds',16,1),new b('es',16,1),new b('endes',18,1),new b('erendes',19,1),new b('enes',18,1),new b('ernes',18,1),new b('eres',18,1),new b('ens',16,1),new b('hedens',24,1),new b('erens',24,1),new b('ers',16,1),new b('ets',16,1),new b('erets',28,1),new b('et',-1,1),new b('eret',30,1)]});i(a,'a_1',function(){return[new b('gd',-1,-1),new b('dt',-1,-1),new b('gt',-1,-1),new b('kt',-1,-1)]});i(a,'a_2',function(){return[new b('ig',-1,1),new b('lig',0,1),new b('elig',1,1),new b('els',-1,1),new b('løst',-1,2)]});i(a,'g_v',function(){return[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128]});i(a,'g_s_ending',function(){return[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16]});var p={'src/stemmer.jsx':{Stemmer:l},'src/danish-stemmer.jsx':{DanishStemmer:a}}}(JSX))
var Stemmer = JSX.require("src/danish-stemmer.jsx").DanishStemmer;
"""
class SearchDanish(SearchLanguage):
lang = 'da'
language_name = 'Danish'
js_stemmer_rawcode = 'danish-stemmer.js'
js_stemmer_code = js_stemmer
stopwords = danish_stopwords
def init(self, options: Dict) -> None:
self.stemmer = snowballstemmer.stemmer('danish')
def stem(self, word: str) -> str:
return self.stemmer.stemWord(word.lower())
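if __name__ == "__main__":
    # Hypothetical smoke test (not part of the original file): the Python-side
    # snowball stemmer used by SearchDanish.stem should mirror the minified JS
    # stemmer embedded above. The word and its stem are illustrative.
    import snowballstemmer
    print(snowballstemmer.stemmer('danish').stemWord('kagerne'))  # expected: 'kag'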
# /NeuroTorch-0.0.1b2.tar.gz/NeuroTorch-0.0.1b2/src/neurotorch/learning_algorithms/rls.py
from collections import defaultdict
from typing import Optional, Sequence, Union, Dict, Callable, List, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from .learning_algorithm import LearningAlgorithm
from ..learning_algorithms.tbptt import TBPTT
from ..transforms.base import ToDevice
from ..utils import compute_jacobian, list_insert_replace_at
class RLS(TBPTT):
r"""
Apply the recursive least squares algorithm to the given model. Different strategies are available to update the
parameters of the model. The strategy is defined by the :attr:`strategy` attribute of the class. The following
strategies are available:
- `inputs`: The parameters are updated using the inputs of the model.
- `outputs`: The parameters are updated using the outputs of the model. This one is inspired by the work of
Perich et al. :cite:t:`perich_inferring_2021` with the CURBD algorithm.
- `grad`: The parameters are updated using the gradients of the model. This one is inspired by the work of
Zhang et al. :cite:t:`zhang_revisiting_2021`.
- `jacobian`: The parameters are updated using the Jacobian of the model. This one is inspired by the work of
Al-Batah et al. :cite:t:`al-batah_modified_2010`.
- `scaled_jacobian`: The parameters are updated using the scaled Jacobian of the model.
.. note::
The `inputs` and `outputs` strategies are limited to optimizing a single parameter. The other
strategies can be used with multiple parameters. Unfortunately, those strategies do not work as expected
at the moment. If you want to help with the development of those strategies, please open an issue on
GitHub.
"""
CHECKPOINT_OPTIMIZER_STATE_DICT_KEY: str = "optimizer_state_dict"
CHECKPOINT_P_STATES_DICT_KEY: str = "P_list"
def __init__(
self,
*,
params: Optional[Sequence[torch.nn.Parameter]] = None,
layers: Optional[Union[Sequence[torch.nn.Module], torch.nn.Module]] = None,
criterion: Optional[Union[Dict[str, Union[torch.nn.Module, Callable]], torch.nn.Module, Callable]] = None,
backward_time_steps: Optional[int] = None,
is_recurrent: bool = False,
**kwargs
):
"""
Constructor for RLS class.
:param params: The parameters to optimize. If None, the parameters of the model's trainer will be used.
:type params: Optional[Sequence[torch.nn.Parameter]]
:param layers: The layers to optimize. If not None the parameters of the layers will be added to the
parameters to optimize.
:type layers: Optional[Union[Sequence[torch.nn.Module], torch.nn.Module]]
:param criterion: The criterion to use. If not provided, torch.nn.MSELoss is used.
:type criterion: Optional[Union[Dict[str, Union[torch.nn.Module, Callable]], torch.nn.Module, Callable]]
:param backward_time_steps: The frequency of parameter optimisation. If None, the number of
time steps of the data will be used.
:type backward_time_steps: Optional[int]
:param is_recurrent: If True, the model is recurrent. If False, the model is not recurrent.
:type is_recurrent: bool
:param kwargs: The keyword arguments to pass to the BaseCallback.
:keyword bool save_state: Whether to save the state of the optimizer. Defaults to True.
:keyword bool load_state: Whether to load the state of the optimizer. Defaults to True.
"""
kwargs.setdefault("auto_backward_time_steps_ratio", 0)
kwargs.setdefault("save_state", True)
kwargs.setdefault("load_state", True)
super().__init__(
params=params,
layers=layers,
criterion=criterion,
backward_time_steps=backward_time_steps,
optimizer=None,
optim_time_steps=None,
**kwargs
)
# RLS attributes
self.P_list = None
self.delta = kwargs.get("delta", 1.0)
self.Lambda = kwargs.get("Lambda", 1.0)
self.kappa = kwargs.get("kappa", 1.0)
self._device = kwargs.get("device", None)
self.to_cpu_transform = ToDevice(device=torch.device("cpu"))
self.to_device_transform = None
self._other_dims_as_batch = kwargs.get("other_dims_as_batch", False)
self._is_recurrent = is_recurrent
self.strategy = kwargs.get("strategy", "inputs").lower()
self.strategy_to_mth = {
"inputs": self.inputs_mth_step,
"outputs": self.outputs_mth_step,
"grad": self.grad_mth_step,
"jacobian": self.jacobian_mth_step,
"scaled_jacobian": self.scaled_jacobian_mth_step,
}
self.kwargs = kwargs
self._asserts()
self._last_layers_buffer = defaultdict(list)
def _asserts(self):
assert 0.0 < self.Lambda <= 1.0, "Lambda must be between 0 and 1"
assert self.strategy in self.strategy_to_mth, f"Strategy must be one of {list(self.strategy_to_mth.keys())}"
def __repr__(self):
repr_str = f"{self.name}: ("
repr_str += f"priority={self.priority}, "
repr_str += f"save_state={self.save_state}, "
repr_str += f"load_state={self.load_state}, "
repr_str += f"strategy={self.strategy}, "
repr_str += f"delta={self.delta}, "
repr_str += f"Lambda={self.Lambda}, "
repr_str += f"kappa={self.kappa}"
repr_str += f")"
return repr_str
def initialize_P_list(self, m=None):
self.P_list = [
self.delta * torch.eye(param.numel() if m is None else m, dtype=torch.float32, device=torch.device("cpu"))
for param in self.params
]
def _maybe_update_time_steps(self):
if self._auto_set_backward_time_steps:
self.backward_time_steps = max(1, int(self._auto_backward_time_steps_ratio * self._data_n_time_steps))
def _decorate_forward(self, forward, layer_name: str):
def _forward(*args, **kwargs):
out = forward(*args, **kwargs)
t = kwargs.get("t", None)
if t is None:
return out
out_tensor = self._get_out_tensor(out)
if t == 0: # Hotfix for the first time step TODO: fix this
ready = bool(self._layers_buffer[layer_name])
else:
ready = True
list_insert_replace_at(self._layers_buffer[layer_name], t % self.backward_time_steps, out_tensor)
if len(self._layers_buffer[layer_name]) == self.backward_time_steps and ready:
self._backward_at_t(t, self.backward_time_steps, layer_name)
if self.strategy in ["grad", "jacobian", "scaled_jacobian"]:
out = self._detach_out(out)
return out
return _forward
def _backward_at_t(self, t: int, backward_t: int, layer_name: str):
if self._last_layers_buffer[layer_name]:
x_batch = self._get_x_batch_from_buffer(layer_name)
else:
x_batch = self._get_x_batch_slice_from_trainer(0, backward_t, layer_name)
y_batch = self._get_y_batch_slice_from_trainer((t + 1) - backward_t, t + 1, layer_name)
pred_batch = self._get_pred_batch_from_buffer(layer_name)
self.optimization_step(x_batch, pred_batch, y_batch)
self._last_layers_buffer[layer_name] = self._layers_buffer[layer_name].copy()
self._layers_buffer[layer_name].clear()
def _get_x_batch_slice_from_trainer(self, t_first: int, t_last: int, layer_name: str = None):
x_batch = self.trainer.current_training_state.x_batch
if isinstance(x_batch, dict):
if layer_name is None:
x_batch = {
key: val[:, t_first:t_last]
for key, val in x_batch.items()
}
else:
x_batch = x_batch[layer_name][:, t_first:t_last]
else:
x_batch = x_batch[:, t_first:t_last]
return x_batch.clone()
def _get_x_batch_from_buffer(self, layer_name: str):
pred_batch = torch.stack(self._last_layers_buffer[layer_name], dim=1)
return pred_batch
def load_checkpoint_state(self, trainer, checkpoint: dict, **kwargs):
if self.save_state:
state = checkpoint.get(self.name, {})
self.P_list = state.get(self.CHECKPOINT_P_STATES_DICT_KEY, None)
def get_checkpoint_state(self, trainer, **kwargs) -> object:
if self.save_state:
return {
self.CHECKPOINT_P_STATES_DICT_KEY: self.P_list,
}
return None
def _try_put_on_device(self, trainer):
try:
self.P_list = [self.to_device_transform(p) for p in self.P_list]
except Exception as e:
trainer.model = self.to_cpu_transform(trainer.model)
self.P_list = [self.to_device_transform(p) for p in self.P_list]
def _put_on_cpu(self):
self.P_list = [self.to_cpu_transform(p) for p in self.P_list]
def start(self, trainer, **kwargs):
LearningAlgorithm.start(self, trainer, **kwargs)
if self.params and self.optimizer is None:
self.optimizer = torch.optim.SGD(self.params, lr=self.kwargs.get("lr", 1.0))
elif not self.params and self.optimizer is not None:
self.params.extend(
[
param
for i in range(len(self.optimizer.param_groups))
for param in self.optimizer.param_groups[i]["params"]
]
)
else:
self.params = list(trainer.model.parameters())
self.optimizer = torch.optim.SGD(self.params, lr=self.kwargs.get("lr", 1.0))
if self.criterion is None and trainer.criterion is not None:
self.criterion = trainer.criterion
# filter params to get only the ones that require gradients
self.params = [param for param in self.params if param.requires_grad]
if self._device is None:
self._device = trainer.model.device
self.to_device_transform = ToDevice(device=self._device)
self.output_layers: torch.nn.ModuleDict = trainer.model.output_layers
self._initialize_original_forwards()
def on_batch_begin(self, trainer, **kwargs):
LearningAlgorithm.on_batch_begin(self, trainer, **kwargs)
self.trainer = trainer
if self._is_recurrent:
self._data_n_time_steps = self._get_data_time_steps_from_y_batch(trainer.current_training_state.y_batch)
self._maybe_update_time_steps()
self.decorate_forwards()
def on_batch_end(self, trainer, **kwargs):
super().on_batch_end(trainer)
self.undecorate_forwards()
self._layers_buffer.clear()
def on_optimization_begin(self, trainer, **kwargs):
x_batch = trainer.current_training_state.x_batch
y_batch = trainer.current_training_state.y_batch
pred_batch = trainer.format_pred_batch(trainer.current_training_state.pred_batch, y_batch)
if self._is_recurrent:
for layer_name in self._layers_buffer:
backward_t = len(self._layers_buffer[layer_name])
if backward_t > 0:
self._backward_at_t(self._data_n_time_steps - 1, backward_t, layer_name)
else:
self.optimization_step(x_batch, pred_batch, y_batch)
trainer.update_state_(batch_loss=self.apply_criterion(pred_batch, y_batch).detach_())
def optimization_step(self, x_batch: torch.Tensor, pred_batch: torch.Tensor, y_batch: torch.Tensor):
if self.strategy not in self.strategy_to_mth:
raise ValueError(f"Invalid strategy: {self.strategy}")
return self.strategy_to_mth[self.strategy](x_batch, pred_batch, y_batch)
def scaled_jacobian_mth_step(self, x_batch: torch.Tensor, pred_batch: torch.Tensor, y_batch: torch.Tensor):
"""
This method is inspired by the work of Al-Batah et al. :cite:t:`al-batah_modified_2010`. Unfortunately, this
method does not seem to work with the current implementation.
TODO: Make it work.
x.shape = [B, f_in]
y.shape = [B, f_out]
error.shape = [B, f_out]
P.shape = [f_out, f_out]
theta.shape = [ell, 1]
epsilon = mean[B](error[B, f_out]) -> [1, f_out]
phi = mean[B](y[B, f_out]) -> [1, f_out]
psi = jacobian[theta](phi[1, f_out]) -> [f_out, ell]
K = P[f_out, f_out] @ psi[f_out, ell] -> [f_out, ell]
h = 1 / (lambda[1] + kappa[1] * psi.T[ell, f_out] @ K[f_out, ell]) -> [ell, ell]
grad = (K[f_out, ell] @ h[ell, ell]).T[ell, f_out] @ epsilon.T[f_out, 1] -> [ell, 1]
P = lambda[1] * P[f_out, f_out] - kappa[1] * K[f_out, ell] @ h[ell, ell] @ K[f_out, ell].T -> [f_out, f_out]
In this case f_in must be equal to N_in.
:param x_batch: inputs of the layer
:param pred_batch: outputs of the layer
:param y_batch: targets of the layer
"""
model_device = self.trainer.model.device
assert isinstance(x_batch, torch.Tensor), "x_batch must be a torch.Tensor"
assert isinstance(pred_batch, torch.Tensor), "pred_batch must be a torch.Tensor"
assert isinstance(y_batch, torch.Tensor), "y_batch must be a torch.Tensor"
self.optimizer.zero_grad()
x_batch_view = x_batch.view(-1, x_batch.shape[-1]) # [B, f_in]
pred_batch_view = pred_batch.view(-1, pred_batch.shape[-1]) # [B, f_out]
y_batch_view = y_batch.view(-1, y_batch.shape[-1]) # [B, f_out]
error = self.to_device_transform(pred_batch_view - y_batch_view) # [B, f_out]
if self.P_list is None:
self.initialize_P_list(m=pred_batch_view.shape[-1])
self.P_list = self.to_device_transform(self.P_list)
self.params = self.to_device_transform(self.params)
epsilon = error.mean(dim=0).view(1, -1) # [1, f_out]
phi = pred_batch_view.mean(dim=0).view(1, -1) # [1, f_out]
psi_list = compute_jacobian(params=self.params, y=phi.view(-1), strategy="slow") # [f_out, ell]
K_list = [torch.matmul(P, psi) for P, psi in zip(self.P_list, psi_list)] # [f_out, f_out] @ [f_out, ell] -> [f_out, ell]
h_list = [torch.linalg.pinv(self.Lambda + self.kappa * torch.matmul(psi.T, K)) for psi, K in zip(psi_list, K_list)] # [ell, f_out] @ [f_out, ell] -> [ell, ell]
for p, K, h in zip(self.params, K_list, h_list):
p.grad = torch.matmul(torch.matmul(K, h).T, epsilon.T).view(p.shape).clone() # ([f_out, ell] @ [ell, ell]).T @ [f_out, 1] -> [ell, 1]
self.optimizer.step()
self.P_list = [
self.Lambda * P - self.kappa * torch.matmul(torch.matmul(K, h), K.T)
for P, h, K in zip(self.P_list, h_list, K_list)
] # [f_out, f_out] - [f_out, ell] @ [ell, ell] @ [ell, f_out] -> [f_out, f_out]
self._put_on_cpu()
self.trainer.model.to(model_device, non_blocking=True)
def jacobian_mth_step(self, x_batch: torch.Tensor, pred_batch: torch.Tensor, y_batch: torch.Tensor):
"""
This method is inspired by the work of Al-Batah et al. :cite:t:`al-batah_modified_2010`. Unfortunately, this
method does not seem to work with the current implementation.
TODO: Make it work.
x.shape = [B, f_in]
y.shape = [B, f_out]
error.shape = [B, f_out]
P.shape = [f_out, f_out]
theta.shape = [L, 1]
epsilon = mean[B](error[B, f_out]) -> [1, f_out]
phi = mean[B](y[B, f_out]) -> [1, f_out]
psi = jacobian[theta](phi[1, f_out]) -> [f_out, L]
K = P[f_out, f_out] @ psi[f_out, L] -> [f_out, L]
grad = K.T[L, f_out] @ epsilon.T[f_out, 1] -> [L, 1]
P = lambda[1] * P[f_out, f_out] - kappa[1] * K[f_out, L] @ K[f_out, L].T -> [f_out, f_out]
In this case f_in must be equal to N_in.
:param x_batch: inputs of the layer
:param pred_batch: outputs of the layer
:param y_batch: targets of the layer
"""
model_device = self.trainer.model.device
assert isinstance(x_batch, torch.Tensor), "x_batch must be a torch.Tensor"
assert isinstance(pred_batch, torch.Tensor), "pred_batch must be a torch.Tensor"
assert isinstance(y_batch, torch.Tensor), "y_batch must be a torch.Tensor"
self.optimizer.zero_grad()
x_batch_view = x_batch.view(-1, x_batch.shape[-1]) # [B, f_in]
pred_batch_view = pred_batch.view(-1, pred_batch.shape[-1]) # [B, f_out]
y_batch_view = y_batch.view(-1, y_batch.shape[-1]) # [B, f_out]
error = self.to_device_transform(pred_batch_view - y_batch_view) # [B, f_out]
if self.P_list is None:
self.initialize_P_list(m=pred_batch_view.shape[-1])
self.P_list = self.to_device_transform(self.P_list)
self.params = self.to_device_transform(self.params)
epsilon = error.mean(dim=0).view(1, -1) # [1, f_out]
phi = pred_batch_view.mean(dim=0).view(1, -1) # [1, f_out]
psi_list = compute_jacobian(params=self.params, y=phi.view(-1), strategy="slow") # [f_out, L]
K_list = [torch.matmul(P, psi) for P, psi in zip(self.P_list, psi_list)] # [f_out, f_out] @ [f_out, L] -> [f_out, L]
for p, K in zip(self.params, K_list):
p.grad = torch.matmul(K.T, epsilon.T).view(p.shape).clone() # [L, f_out] @ [f_out, 1] -> [L, 1]
self.optimizer.step()
self.P_list = [
self.Lambda * P - self.kappa * torch.matmul(K, K.T)
for P, K in zip(self.P_list, K_list)
] # [f_out, f_out] - [f_out, L] @ [L, f_out] -> [f_out, f_out]
self._put_on_cpu()
self.trainer.model.to(model_device, non_blocking=True)
def grad_mth_step(self, x_batch: torch.Tensor, pred_batch: torch.Tensor, y_batch: torch.Tensor):
"""
This method is inspired by the work of Zhang et al. :cite:t:`zhang_revisiting_2021`. Unfortunately, this
method does not seem to work with the current implementation.
TODO: Make it work.
x.shape = [B, f_in]
y.shape = [B, f_out]
error.shape = [B, f_out]
P.shape = [f_in, f_in]
epsilon = mean[B](error[B, f_out]) -> [1, f_out]
phi = mean[B](x[B, f_in]) -> [1, f_in]
K = P[f_in, f_in] @ phi.T[f_in, 1] -> [f_in, 1]
h = 1 / (lambda[1] + kappa[1] * phi[1, f_in] @ K[f_in, 1]) -> [1]
grad = h[1] * P[f_in, f_in] @ grad[N_in, N_out] -> [N_in, N_out]
P = lambda[1] * P[f_in, f_in] - h[1] * kappa[1] * K[f_in, 1] @ K.T[1, f_in] -> [f_in, f_in]
In this case f_in must be equal to N_in.
:param x_batch: inputs of the layer
:param pred_batch: outputs of the layer
:param y_batch: targets of the layer
"""
model_device = self.trainer.model.device
assert isinstance(x_batch, torch.Tensor), "x_batch must be a torch.Tensor"
assert isinstance(pred_batch, torch.Tensor), "pred_batch must be a torch.Tensor"
assert isinstance(y_batch, torch.Tensor), "y_batch must be a torch.Tensor"
self.optimizer.zero_grad()
mse_loss = F.mse_loss(pred_batch, y_batch)
mse_loss.backward()
x_batch_view = x_batch.view(-1, x_batch.shape[-1]) # [B, f_in]
pred_batch_view = pred_batch.view(-1, pred_batch.shape[-1]) # [B, f_out]
y_batch_view = y_batch.view(-1, y_batch.shape[-1]) # [B, f_out]
error = self.to_device_transform(pred_batch_view - y_batch_view) # [B, f_out]
if self.P_list is None:
self.initialize_P_list(m=x_batch_view.shape[-1])
for p in self.params:
# making sure that f_in = N_in.
if p.shape[0] != x_batch_view.shape[-1]:
raise ValueError(
f"For inputs of shape [B, f_in], the first dimension of the parameters must be f_in, "
f"got {p.shape[0]} instead of {x_batch_view.shape[-1]}."
)
self.P_list = self.to_device_transform(self.P_list)
self.params = self.to_device_transform(self.params)
epsilon = error.mean(dim=0).view(1, -1) # [1, f_out]
phi = x_batch_view.mean(dim=0).view(1, -1).detach().clone() # [1, f_in]
K_list = [torch.matmul(P, phi.T) for P in self.P_list] # [f_in, f_in] @ [f_in, 1] -> [f_in, 1]
h_list = [1.0 / (self.Lambda + self.kappa * torch.matmul(phi, K)).item() for K in K_list] # [1, f_in] @ [f_in, 1] -> [1]
for p, P, h in zip(self.params, self.P_list, h_list):
p.grad = h * torch.matmul(P, p.grad) # [f_in, f_in] @ [N_in, N_out] -> [N_in, N_out]
self.optimizer.step()
self.P_list = [
self.Lambda * P - h * self.kappa * torch.matmul(K, K.T)
for P, h, K in zip(self.P_list, h_list, K_list)
] # [f_in, 1] @ [1, f_in] -> [f_in, f_in]
self._put_on_cpu()
self.trainer.model.to(model_device, non_blocking=True)
def inputs_mth_step(self, x_batch: torch.Tensor, pred_batch: torch.Tensor, y_batch: torch.Tensor):
"""
x.shape = [B, f_in]
y.shape = [B, f_out]
error.shape = [B, f_out]
P.shape = [f_in, f_in]
epsilon = mean[B](error[B, f_out]) -> [1, f_out]
phi = mean[B](x[B, f_in]) -> [1, f_in]
K = P[f_in, f_in] @ phi.T[f_in, 1] -> [f_in, 1]
h = 1 / (lambda[1] + kappa[1] * phi[1, f_in] @ K[f_in, 1]) -> [1]
P = lambda[1] * P[f_in, f_in] - h[1] * kappa[1] * K[f_in, 1] @ K.T[1, f_in] -> [f_in, f_in]
grad = h[1] * K[f_in, 1] @ epsilon[1, f_out] -> [N_in, N_out]
In this case [N_in, N_out] must be equal to [f_in, f_out].
:param x_batch: inputs of the layer
:param pred_batch: outputs of the layer
:param y_batch: targets of the layer
"""
model_device = self.trainer.model.device
assert isinstance(x_batch, torch.Tensor), "x_batch must be a torch.Tensor"
assert isinstance(pred_batch, torch.Tensor), "pred_batch must be a torch.Tensor"
assert isinstance(y_batch, torch.Tensor), "y_batch must be a torch.Tensor"
self.optimizer.zero_grad()
x_batch_view = x_batch.view(-1, x_batch.shape[-1]) # [B, f_in]
pred_batch_view = pred_batch.view(-1, pred_batch.shape[-1]) # [B, f_out]
y_batch_view = y_batch.view(-1, y_batch.shape[-1]) # [B, f_out]
error = self.to_device_transform(pred_batch_view - y_batch_view) # [B, f_out]
if self.P_list is None:
self.initialize_P_list(m=x_batch_view.shape[-1])
for p in self.params:
# making sure that f_in = N_in.
if p.shape[0] != x_batch_view.shape[-1]:
raise ValueError(
f"For inputs of shape [B, f_in], the first dimension of the parameters must be f_in, "
f"got {p.shape[0]} instead of {x_batch_view.shape[-1]}."
)
# making sure that f_out = N_out.
if p.shape[1] != y_batch_view.shape[-1]:
raise ValueError(
f"For targets of shape [B, f_out], the second dimension of the parameters must be f_out, "
f"got {p.shape[1]} instead of {y_batch_view.shape[-1]}."
)
self.P_list = self.to_device_transform(self.P_list)
self.params = self.to_device_transform(self.params)
epsilon = error.mean(dim=0).view(1, -1) # [1, f_out]
phi = x_batch_view.mean(dim=0).view(1, -1).detach().clone() # [1, f_in]
K_list = [torch.matmul(P, phi.T) for P in self.P_list] # [f_in, f_in] @ [f_in, 1] -> [f_in, 1]
h_list = [1.0 / (self.Lambda + self.kappa * torch.matmul(phi, K)).item() for K in K_list] # [1, f_in] @ [f_in, 1] -> [1]
for p, K, h in zip(self.params, K_list, h_list):
p.grad = h * torch.outer(K.view(-1), epsilon.view(-1)) # [f_in, 1] @ [1, f_out] -> [N_in, N_out]
self.optimizer.step()
self.P_list = [
self.Lambda * P - h * self.kappa * torch.matmul(K, K.T)
for P, h, K in zip(self.P_list, h_list, K_list)
] # [f_in, 1] @ [1, f_in] -> [f_in, f_in]
self._put_on_cpu()
self.trainer.model.to(model_device, non_blocking=True)
def outputs_mth_step(self, x_batch: torch.Tensor, pred_batch: torch.Tensor, y_batch: torch.Tensor):
"""
This method is inspired by the work of Perich et al. :cite:t:`perich_inferring_2021` with
the CURBD algorithm.
x.shape = [B, f_in]
y.shape = [B, f_out]
error.shape = [B, f_out]
epsilon = mean[B](error[B, f_out]) -> [1, f_out]
phi = mean[B](y[B, f_out]) -> [1, f_out]
P.shape = [f_out, f_out]
K = P[f_out, f_out] @ phi.T[f_out, 1] -> [f_out, 1]
h = 1 / (lambda[1] + kappa[1] * phi[1, f_out] @ K[f_out, 1]) -> [1]
P = lambda[1] * P[f_out, f_out] - h[1] * kappa[1] * K[f_out, 1] @ K.T[1, f_out] -> [f_out, f_out]
grad = h[1] * K[f_out, 1] @ epsilon[1, f_out] -> [N_in, N_out]
In this case [N_in, N_out] must be equal to [f_out, f_out].
:param x_batch: inputs of the layer
:param pred_batch: outputs of the layer
:param y_batch: targets of the layer
"""
model_device = self.trainer.model.device
assert isinstance(x_batch, torch.Tensor), "x_batch must be a torch.Tensor"
assert isinstance(pred_batch, torch.Tensor), "pred_batch must be a torch.Tensor"
assert isinstance(y_batch, torch.Tensor), "y_batch must be a torch.Tensor"
self.optimizer.zero_grad()
x_batch_view = x_batch.view(-1, x_batch.shape[-1]) # [B, f_in]
pred_batch_view = pred_batch.view(-1, pred_batch.shape[-1]) # [B, f_out]
y_batch_view = y_batch.view(-1, y_batch.shape[-1]) # [B, f_out]
error = self.to_device_transform(pred_batch_view - y_batch_view) # [B, f_out]
if self.P_list is None:
self.initialize_P_list(m=pred_batch_view.shape[-1])
for p in self.params:
# making sure that f_out = N_in.
if p.shape[0] != pred_batch_view.shape[-1]:
raise ValueError(
f"For outputs of shape [B, f_out], the first dimension of the parameters must be f_out, "
f"got {p.shape[0]} instead of {pred_batch_view.shape[-1]}."
)
# making sure that f_out = N_out.
if p.shape[1] != pred_batch_view.shape[-1]:
raise ValueError(
f"For targets of shape [B, f_out], the second dimension of the parameters must be f_out, "
f"got {p.shape[1]} instead of {y_batch_view.shape[-1]}."
)
self.P_list = self.to_device_transform(self.P_list)
self.params = self.to_device_transform(self.params)
epsilon = error.mean(dim=0).view(1, -1) # [1, f_out]
phi = pred_batch_view.mean(dim=0).view(1, -1).detach().clone() # [1, f_out]
K_list = [torch.matmul(P, phi.T) for P in self.P_list] # [f_out, f_out] @ [f_out, 1] -> [f_out, 1]
h_list = [1.0 / (self.Lambda + self.kappa * torch.matmul(phi, K)).item() for K in K_list] # [1, f_out] @ [f_out, 1] -> [1]
for p, K, h in zip(self.params, K_list, h_list):
p.grad = h * torch.outer(K.view(-1), epsilon.view(-1)) # [f_out, 1] @ [1, f_out] -> [N_in, N_out]
self.optimizer.step()
self.P_list = [
self.Lambda * P - h * self.kappa * torch.matmul(K, K.T)
for P, h, K in zip(self.P_list, h_list, K_list)
] # [f_out, 1] @ [1, f_out] -> [f_out, f_out]
self._put_on_cpu()
self.trainer.model.to(model_device, non_blocking=True)
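# ---------------------------------------------------------------------------
# Standalone sketch (not part of NeuroTorch) of the "inputs" RLS update that
# inputs_mth_step implements above, rewritten in plain NumPy so the shape
# algebra from the docstrings can be checked in isolation. delta, Lambda and
# kappa mirror the attributes of the RLS class; W_true and the data are made
# up for the demo.
import numpy as np

rng = np.random.default_rng(0)
f_in, f_out = 4, 2
delta, lam, kappa, lr = 1.0, 1.0, 1.0, 1.0

W_true = rng.normal(size=(f_in, f_out))   # target linear map
W = np.zeros((f_in, f_out))               # parameter being learned
P = delta * np.eye(f_in)                  # inverse-correlation estimate

for _ in range(200):
    x = rng.normal(size=(8, f_in))                  # [B, f_in]
    y = x @ W_true                                  # [B, f_out] targets
    error = x @ W - y                               # [B, f_out]
    epsilon = error.mean(axis=0, keepdims=True)     # [1, f_out]
    phi = x.mean(axis=0, keepdims=True)             # [1, f_in]
    K = P @ phi.T                                   # [f_in, 1]
    h = 1.0 / (lam + kappa * (phi @ K).item())      # scalar gain
    W -= lr * h * K @ epsilon                       # gradient-style update
    P = lam * P - h * kappa * K @ K.T               # rank-1 downdate of P

print("mean abs weight error:", np.abs(W - W_true).mean())
# ---------------------------------------------------------------------------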
# /JsOnXmLSe814Rializer749-1.0.tar.gz/JsOnXmLSe814Rializer749-1.0/Lab_3/serializers/jsonserializer.py
import re
from typing import Iterator, Tuple
from ..helpers.constants import JSON, BOOL_TYPE, TYPE_MAPPING, JSON_TYPE, PRIMITIVE_TYPES
from ..helpers.functions import get_items, get_key, to_number, create_object, type_from_str
class JSONSerializer:
def dumps(self, obj) -> str:
if type(obj) == str:
return f'"{obj}"'
if type(obj) in (int, float, complex):
return str(obj)
if type(obj) in [bool, type(None)]:
return BOOL_TYPE[obj]
return JSON.format(
type=type(obj) if type(obj) in TYPE_MAPPING.values() else object,
id=id(obj),
items=self.__load_to_json(get_items(obj))
)
def loads(self, json: str):
if not len(json):
return
if json == ' ':
return ...
if json.startswith('"'):
return json.strip('"')
if json in BOOL_TYPE.values():
return get_key(json, BOOL_TYPE)
if to_number(json) is not None:
return to_number(json)
return create_object(
type_from_str(json, JSON_TYPE),
self.__load_from_json(json)
)
def __load_to_json(self, obj: dict) -> str:
json_format = ""
for k, v in obj.items():
if type(v) in PRIMITIVE_TYPES:
json_format += f"\t{self.dumps(k)}: {self.dumps(v)},\n"
continue
json_format += f"\t{self.dumps(k)}: {{\n"
for line in self.dumps(v).split("\n")[1:]:
json_format += f"\t{line}\n"
return json_format
def __load_from_json(self, template: str) -> dict:
obj: dict = {}
lines: list[str] = template.split("\n")
it: Iterator[Tuple[int, str]] = enumerate(lines)
for i, line in it:
match = re.search(r'\s*(.+):\s*([^,]*)', line)
if not match:
continue
key, value = match.groups()
if value != "{":
obj[self.loads(key)] = self.loads(value)
elif value == "{" and "<class" not in key:
brackets = 1
start = i + 1
while brackets and i < len(lines) - 1:
i, line = next(it, None)
brackets += ("{" in lines[i]) - ("}" in lines[i])
obj[self.loads(key)] = self.loads('\n'.join(lines[start:i]))
return obj
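# ---------------------------------------------------------------------------
# Standalone sketch (not part of the package above) of the bracket-counting
# scan that JSONSerializer.__load_from_json uses to pull one nested
# "key: { ... }" block out of a line-oriented template. The input is made up.
def balanced_block(lines, start):
    """Return the lines strictly inside the block opened at lines[start]."""
    brackets = 1
    i = start
    while brackets and i < len(lines) - 1:
        i += 1
        brackets += ("{" in lines[i]) - ("}" in lines[i])
    return lines[start + 1:i]

template = ['"outer": {', '\t"a": 1,', '\t"b": {', '\t\t"c": 2,', '\t}', '}']
print(balanced_block(template, 0))
# -> ['\t"a": 1,', '\t"b": {', '\t\t"c": 2,', '\t}']
# ---------------------------------------------------------------------------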
# /McsPyDataTools-0.4.3.tar.gz/McsPyDataTools-0.4.3/McsPy/McsCMOSMEA.py
import h5py
from builtins import IndexError
import datetime
import math
import uuid
import collections
import numpy as np
import pandas as pd
from numpy import rec
import itertools
from numbers import Number
from inspect import signature
import re
from typing import Dict
from . import ureg, McsHdf5Types, McsHdf5Protocols
from .McsData import RawData
from pint import UndefinedUnitError
MCS_TICK = 1 * ureg.us
CLR_TICK = 100 * ureg.ns
# day -> number of clr ticks (100 ns)
DAY_TO_CLR_TIME_TICK = 24 * 60 * 60 * (10**7)
VERBOSE = False
def dprint(n, *args):
if VERBOSE:
print(n, args)
class DictProperty_for_Classes(object):
"""
"""
class _proxy(object):
def __init__(self, obj, fget, fset, fdel):
self._obj = obj
self._fget = fget
self._fset = fset
self._fdel = fdel
def __getitem__(self, key):
if self._fget is None:
raise TypeError("Cannot read item.")
return self._fget(self._obj, key)
def __setitem__(self, key, value):
if self._fset is None:
raise TypeError("Cannot set item.")
self._fset(self._obj, key, value)
def __delitem__(self, key):
if self._fdel is None:
raise TypeError("Cannot delete item.")
self._fdel(self._obj, key)
def __init__(self, fget=None, fset=None, fdel=None):
self._fget = fget
self._fset = fset
self._fdel = fdel
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self._proxy(obj, self._fget, self._fset, self._fdel)
class _property(object):
class _proxy(object):
def __init__(self, obj, fget, fset, fdel):
self._obj = obj
self._fget = fget
self._fset = fset
self._fdel = fdel
def __getitem__(self,key):
if self._fget is None:
raise TypeError("Cannot read item.")#
return self._fget(self._obj, key)
def __setitem__(self,key,value):
if self._fset is None:
raise TypeError("Cannot set item.")
self._fset(self._obj, key, value)
def __delitem__(self, key):
if self._fdel is None:
raise TypeError("Cannot delete item.")
self._fdel(self._obj, key)
def __init__(self, fget=None, fset=None, fdel=None):
self._fget = fget
self._fset = fset
self._fdel = fdel
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self._proxy(obj, self._fget, self._fset, self._fdel)
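# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module) of how the _property
# descriptor above is meant to be used: attribute access returns a proxy whose
# item access is routed through getter/setter functions of the owning class.
class _PropertyDemo:
    def __init__(self):
        self._store = {'a': 1}

    def _get_item(self, key):
        return self._store[key]

    items = _property(_get_item)

# _PropertyDemo().items['a'] evaluates to 1; assigning items['x'] raises
# TypeError because no setter was supplied.
# ---------------------------------------------------------------------------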
class _list_property(object):
"""
Creates helper class which is a list subclass. It is used to hand lists of streams to the McsPy user.
:param list: list of streams
"""
class McsProxy(collections.UserList):
def __init__(self, initlist=None, obj=None, fget=None, fset=None, fdel=None):
"""
ATTENTION! The collections.UserList documentation requires the init method of collections.UserList subclasses to accept zero or one argument!
"""
super().__init__(initlist)
self._obj = obj
self._fget = fget
self._fset = fset
self._fdel = fdel
def __getitem__(self,key):
if self._fget is None:
raise TypeError("Cannot read item.")
if isinstance(key, int):
return self._fget([self.data[key][1]])
selection = self.data[key]
return self._fget([id_set.mcs_instanceid for id_set in selection])
def __setitem__(self,key,value):
if self._fset is None:
raise TypeError("Cannot set item.")
self._fset(self._obj, key, value)
def __delitem__(self, key):
if self._fdel is None:
raise TypeError("Cannot delete item.")
self._fdel(self._obj, key)
def __str__(self):
stream_types = dict()
column_width = 35
line = '-'*(column_width*3+4)+'\n'
bold_line = '='*(column_width*3+4)+'\n'
out = '|'+'{:^{}}'.format('Subtype', column_width)+'|'+'{:^{}}'.format('McsPy name', column_width)+'|'+'{:^{}}'.format('HDF5 name', column_width)+'|\n'
out += bold_line
for id_set in self.data:
stream_type = self._obj[id_set.h5py].attrs['ID.Type'].decode('UTF-8')
subtype = self._obj[id_set.h5py].attrs['SubType'].decode('UTF-8')
if stream_type not in stream_types:
stream_types[stream_type] = list()
stream_types[stream_type].append('|'+'{:^{}}'.format(subtype, column_width)+'|'+'{:^{}}'.format(id_set.mcspy, column_width)+'|'+'{:^{}}'.format(id_set.h5py, column_width)+'|\n')
for stream_type in stream_types:
out += stream_type +':\n'
out += ''.join(stream_types[stream_type])
out += line
return out
def __init__(self, content, owner_instance, fget=None, fset=None, fdel=None):
self._content = content
self._owner_instance = owner_instance
self._fget = fget
self._fset = fset
self._fdel = fdel
def __get__(self, obj, objtype=None):
#if obj is None:
# return self
return self.McsProxy(self._content, obj=obj, fget=self._fget, fset=self._fset, fdel=self._fdel)
def __str__(self):
return self.McsProxy(self._content, obj=self._owner_instance).__str__()
class McsHDF5(object):
"""
Container class that provides common structures for an Mcs HDF5 file
"""
def __init__(self, hdf5_object):
"""
Initializes the HDF5 container class from an HDF5 object
"""
self._hdf5_attributes = None
self._h5py_object = hdf5_object
if hasattr(self._h5py_object,'attrs'):
self._mcs_type = hdf5_object.attrs['ID.Type'].decode('UTF-8')
self._mcs_typeid = hdf5_object.attrs['ID.TypeID'].decode('UTF-8')
self._mcs_instance = hdf5_object.attrs['ID.Instance'].decode('UTF-8')
self._mcs_instanceid = hdf5_object.attrs['ID.InstanceID'].decode('UTF-8')
def _get_attributes(self):
"Read and convert all attributes of the HDF5 group for easy access"
if hasattr(self._h5py_object,'attrs'):
hdf5_attributes = self._h5py_object.attrs.items()
self._hdf5_attributes = {}
for (name, value) in hdf5_attributes:
if hasattr(value, "decode"):
try:
self._hdf5_attributes[name] = value.decode('utf-8').rstrip()
except:
self._hdf5_attributes[name] = value
else:
self._hdf5_attributes[name] = value[0]
else:
raise AttributeError('No Attributes')
def _get_mcspy_instance(self, h5py_object, mcspy_parent=None):
"""
takes a h5py object and returns an appropriate mcspy object
:param hdf5_object.
"""
typeID = h5py_object.attrs['ID.TypeID'].decode('utf-8').rstrip()
cls = McsHdf5Types.get_mcs_class_name(typeID)
if cls is None:
return h5py_object
elif isinstance(h5py_object, h5py.Dataset):
if isinstance(mcspy_parent, McsGroup) and 'mcspy_parent' in signature(cls.__init__).parameters and h5py_object.name.split('/')[-1] in mcspy_parent:
return cls(h5py_object, mcspy_parent=mcspy_parent)
return cls(h5py_object)
@staticmethod
def get_attributes(hdf5_object):
"Read and convert all attributes of the HDF5 group for easy access"
if hasattr(hdf5_object,'attrs'):
hdf5_attributes = hdf5_object.attrs.items()
hdf5_attributes_decoded = {}
for (name, value) in hdf5_attributes:
if hasattr(value, "decode"):
hdf5_attributes_decoded[name] = value.decode('utf-8').rstrip()
else:
hdf5_attributes_decoded[name] = value
return hdf5_attributes_decoded
else:
raise AttributeError('No Attributes')
def hdf5_to_mcspy(self, hdf5_names):
"""
receives a hdf5_name as string in Mcs CMOS MEA file system style
and converts it to python toolbox equivalent.
"""
#weird_mcf_hdf5_file_name = ["channeldata", "sensordata", "high-pass"]
if isinstance(hdf5_names,str):
return hdf5_names.strip().replace(":","").replace("(","").replace(")","").replace(" ","_").replace('@','at').replace('.','_').replace(',','_')
else:
raise TypeError("Pass a 'str' object")
@property
def attributes(self):
if self._hdf5_attributes is None:
try:
self._get_attributes()
except AttributeError as err:
print(err)
return self._hdf5_attributes
@property
def h5py_object(self):
return self._h5py_object
class McsGroup(h5py.Group, McsHDF5):
"""
this class subclasses the h5py.Group object and extends it with McsPy toolbox functionality
"""
IDSetGroup = collections.namedtuple('IDSetGroup', ['h5py', 'mcs_instanceid', 'mcspy', 'mcs_typeid'])
IDSetDataset = collections.namedtuple('IDSetDataset', ['h5py', 'mcs_instanceid', 'mcspy', 'mcs_typeid'])
def __init__(self, h5py_group_object):
if isinstance(h5py_group_object, h5py.Group):
h5py.Group.__init__(self, h5py_group_object.id)
McsHDF5.__init__(self, h5py_group_object)
self._child_storage = dict()
self._child_inventory = list()
for child in h5py_group_object:
try:
mcs_instanceid = h5py_group_object[child].attrs['ID.InstanceID'].decode('UTF-8')
mcs_typeid = h5py_group_object[child].attrs['ID.TypeID'].decode('UTF-8').rstrip()
mcspy_child_name = self.hdf5_to_mcspy(child)
if isinstance(self._h5py_object[child], h5py.Dataset):
self._child_inventory.append(McsGroup.IDSetDataset(h5py=child,
mcs_instanceid=mcs_instanceid,
mcspy=mcspy_child_name,
mcs_typeid=mcs_typeid)) # (h5py key/name, mcs instance id, mcs py key/name, mcs_typeid)
if isinstance(self._h5py_object[child], h5py.Group):
self._child_inventory.append(McsGroup.IDSetGroup(h5py=child,
mcs_instanceid=mcs_instanceid,
mcspy=mcspy_child_name,
mcs_typeid=mcs_typeid)) # (h5py key/name, mcs instance id, mcs py key/name, mcs_typeid)
except Exception as e:
print("Error opening group " + child + ": " + str(e))
else:
raise TypeError('The h5py_group_object \'{}\' is not an instance of the h5py.Group class.'.format(h5py_group_object.name))
def __repr__(self):
return '<McsGroup object at '+str(hex(id(self)))+'>'
def __str__(self):
column_width = 25
bold_line = '='*(column_width*3+4)+'\n'
line = '-'*(column_width*3+4)+'\n'
out = line + 'Parent Group: <'+str(type(self)).strip('<>')+' object at '+str(hex(id(self)))+'>\n'
header = '|'+'{:^{}}'.format('Mcs Type', column_width)+'|'+'{:^{}}'.format('HDF5 name', column_width)+'|'+'{:^{}}'.format('McsPy name', column_width)+'|\n'
dataset = 'Datasets:\n'
group = 'Groups:\n'
for child in self._child_inventory:
#h5py_key, mcs_typeid, mcspy_key, mcs_typeid = child
mcs_type = self._h5py_object[child.h5py].attrs['ID.Type'].decode('utf-8')
if isinstance(child, McsGroup.IDSetGroup):
group += '|'+'{:^{}}'.format(mcs_type, column_width)+'|'+'{:^{}}'.format(child.h5py, column_width)+'|'+'{:^{}}'.format(child.mcspy, column_width)+'|\n'
if isinstance(child, McsGroup.IDSetDataset):
dataset += '|'+'{:^{}}'.format(mcs_type, column_width)+'|'+'{:^{}}'.format(child.h5py, column_width)+'|'+'{:^{}}'.format(child.mcspy, column_width)+'|\n'
if group.count('\n') == 1:
group += ' '*4+'None\n'
if dataset.count('\n') == 1:
dataset += ' '*4+'None\n'
out += line + '\n\n' + header + bold_line + group + line + dataset
return out
def __getattr__(self, name):
id_set = self.ischild(name)
if not id_set:
raise AttributeError('There is no instance with name {} within this group'.format(name))
return self._children[id_set.mcs_instanceid]
def __dir__(self):
return super().__dir__() + [s.mcspy for s in self._child_inventory]
def ischild(self, id):
"""
Takes an identifier and checks if it is a valid identifier for a child of this group:
:param id: mcs instanceid, h5py name , mcspy name as instance of 'str'
:return: False if id is not valid, set of identifiers of the child
"""
if not isinstance(id, str):
return False
return next((set for set in self._child_inventory if id in set[0:3]), False)
def _get_child(self, key):
"""
Retrieves a child from the dictionary self._child_storage:
:param key: mcs_instanceid which indentifies a subgroup of self._h5py_object
"""
child_id_set = self.ischild(key)
if not child_id_set:
raise KeyError('key \'{}\' is not valid. Pass an instance of \'str\', which identifies a child of this group.'.format(key))
if not child_id_set.mcs_instanceid in self._child_storage.keys():
self._read_child(child_id_set)
return self._child_storage[child_id_set.mcs_instanceid]
def _get_children(self, key):
"""
Retrieves a set of children from the dictionary self._child_storage:
:param key: list or tuple with mcs_instanceid which indentify a subgroup of self._h5py_object respectively
"""
if isinstance(key, (list, tuple)):
if len(key) == 1:
return self._get_child(key[0])
out = list()
for id in key:
try:
out.append(self._get_child(id))
except KeyError as err:
print(err)
return out
_children = _property(_get_child, None, None)
def _set_child(self, key, value):
pass
def _del_child(self, key):
pass
def _read_children_of_type(self, child_typeid, store_parents=True):
"""
reads all children with given typeID
:param child_typeid: mcs type id for a specific mcs hdf5 structure
"""
for id_set in self._child_inventory:
if child_typeid == id_set[3] and id_set[1] not in self._child_storage.keys():
self._read_child(id_set, store_parents)
def _read_child(self, id_set, store_parent=True):
"""
read given child
:param id_set: id_set must be a valid id_set identifiying a child of this group
"""
if store_parent:
self._child_storage[id_set.mcs_instanceid] = self._get_mcspy_instance(self._h5py_object[id_set.h5py], self)
else:
self._child_storage[id_set.mcs_instanceid] = self._get_mcspy_instance(self._h5py_object[id_set.h5py])
def tree(self, name='mcspy', mcs_type=False, max_level=None):
"""
builds the hdf5 hierarchy beginning with the current group then traversing all subentities depth first as a string
:param name: cfg variable for the type of name that is to be printed for each entity in the
h5py group, default: 'h5py', options: 'mcspy'
:param mcs_type: cfg variable to show mcs type in the tree, default: False
:param max_level: cfg variable to limit the number of tree levels shown, default: None (show all)
"""
if not hasattr(self, '_tree_string'):
self._tree_string = ''
if not hasattr(self, '_tree_mcs_type'):
self._tree_mcs_type = ''
if not hasattr(self, '_tree_names'):
self._tree_names = ''
if not hasattr(self, '_tree_level'):
self._tree_level = None
if self._tree_string == '' or mcs_type != self._tree_mcs_type or self._tree_names != name or self._tree_level != max_level:
self._tree_string = ''
self._tree_mcs_type = mcs_type
self._tree_names = name
self._tree_level = max_level
if self.name == '/':
print(self.name)
else:
print(self.name.split('/')[-1])
name_width = 35
base_level = self.name.count('/')
if self._tree_names == 'mcspy':
def _print_mcspy_tree(name):
level = name.count('/')+1
if max_level is None or level - base_level < max_level:
mcstype = ''
if 'ID.Type' in self[name].attrs and mcs_type:
mcstype += ' - '+self[name].attrs['ID.Type'].decode('UTF-8')
name = self.hdf5_to_mcspy(name.split('/')[-1])
self._tree_string +=' '*4*level+name.ljust(name_width)+mcstype+'\n'
self.visit(_print_mcspy_tree)
elif self._tree_names == 'h5py':
def _print_h5py_tree(name):
level = name.count('/')+1
if max_level is None or level - base_level < max_level:
mcstype = ''
if 'ID.Type' in self[name].attrs and mcs_type:
mcstype += ' - '+self[name].attrs['ID.Type'].decode('UTF-8')
name = name.split('/')[-1]
self._tree_string +=' '*4*level+name.ljust(name_width)+mcstype+'\n'
self.visit(_print_h5py_tree)
else:
raise ValueError('name \'{}\' is not a valid argument. Pass \'h5py\' or \'mcspy\''.format(name))
return self._tree_string
class McsDataset(h5py.Dataset, McsHDF5):
"""
This class subclasses the h5py.Dataset object and extends it with McsPy toolbox functionality
"""
def __init__(self, h5py_dataset_object):
h5py.Dataset.__init__(self, h5py_dataset_object.id)
McsHDF5.__init__(self, h5py_dataset_object)
self._compound_dataset_names = None #compound dataset names in mcs python syntax
if self.dtype.names:
self._compound_dataset_names = [ self.hdf5_to_mcspy(name) for name in self.dtype.names ]
def __getattr__(self, name):
if self._compound_dataset_names:
if name in self._compound_dataset_names:
name = self.dtype.names[self._compound_dataset_names.index(name)]
if name in list(self.dtype.names):
if hasattr(self._h5py_object[name], "decode"):
return self._h5py_object[name].decode('utf-8').rstrip()
else:
return self[name]
else:
raise AttributeError('\'{}\' is not a valid attribute for: {}!'.format(name,self.__repr__()))
else:
raise AttributeError('\'{}\' is not a valid attribute for: {}!'.format(name,self.__repr__()))
def iscompound(self):
"""
Determines whether Dataset is a Compound Dataset
:return Boolean: True if Dataset object represents h5py Compound Dataset, False otherwise
"""
if self._compound_dataset_names:
return True
return False
def __repr__(self):
if self.iscompound():
return '<McsDataset object representing a compound dataset at '+str(hex(id(self)))+'>'
return '<McsDataset object at '+str(hex(id(self)))+'>'
def __str__(self):
first_col_width = 25
if self.iscompound():
out = 'Compound McsDataset '+self.name.split("/")[-1]+'\n\n'
else:
out = 'McsDataset '+self.name.split("/")[-1].ljust(first_col_width)+'\n\n'
out += 'location in hdf5 file:'.ljust(first_col_width)+self.name+'\n'
out += 'shape:'.ljust(first_col_width)+'{}'.format(self.shape)+'\n'
out += 'dtype:'.ljust(first_col_width)+'{}'.format(self.dtype)+'\n'
return out
def to_pdDataFrame(self):
"""
Returns the data set as a pandas DataFrame
"""
return pd.DataFrame(self[()])
class McsStreamList(collections.UserList):
"""
Creates helper class which is a list subclass. It is used to hand lists of streams to the McsPy user.
:param list: list of streams
"""
def __str__(self):
stream_types = dict()
column_width = 35
line = '-'*(column_width*3+4)+'\n'
bold_line = '='*(column_width*3+4)+'\n'
out = '|'+'{:^{}}'.format('HDF5 name', column_width)+'|'+'{:^{}}'.format('McsPy name', column_width)+'|'+'{:^{}}'.format('Stream Subtype', column_width)+'|\n'
out += bold_line
for stream in self:
if not stream.attributes['ID.Type'] in stream_types:
stream_types[stream.attributes['ID.Type']] = list()
if 'SubType' in stream.attributes:
stream_types[stream.attributes['ID.Type']].append((stream.name.rsplit('/',1)[1], stream.hdf5_to_mcspy(stream.name.rsplit('/',1)[1]), stream.attributes['SubType'])) #hdf5_name, mcspy_name, subtype
else:
stream_types[stream.attributes['ID.Type']].append((stream.name.rsplit('/',1)[1], stream.hdf5_to_mcspy(stream.name.rsplit('/',1)[1]), '')) #hdf5_name, mcspy_name, subtype
for stream_type in stream_types:
out += stream_type +':\n'
for stream in stream_types[stream_type]:
out += '|'+'{:^{}}'.format(stream[0], column_width)+'|'+'{:^{}}'.format(stream[1], column_width)+'|'+'{:^{}}'.format(stream[2], column_width)+'|\n'
out += line
return out
class McsData(object):
"""
Dummy class that provides access to all types of Mcs files by returning an instance of the class that corresponds to the file type
"""
def __new__(cls, file_path):
"""
Creates a Data object this includes checking the validity of the passed HDF5 file and the return of a
an object that matches the MCS file type.
:param file_path: path to a HDF5 file that contains data encoded in a supported MCS-HDF5 format version
"""
h5_file = h5py.File(file_path, 'r')
try:
mcs_hdf5_protocol_type, _ = McsData.validate_mcs_hdf5_version(h5_file)
except IOError as err:
print(err)
h5_file.close()
raise  # re-raise: without a valid protocol type the file cannot be dispatched below
h5_file.close()
if mcs_hdf5_protocol_type == 'CMOS_MEA':
return McsCMOSMEAData(file_path)
elif mcs_hdf5_protocol_type == 'RawData':
return RawData(file_path)
@staticmethod
def validate_mcs_hdf5_version(mcs_h5_file_obj):
"Check if the MCS-HDF5 protocol type and version of the file is supported by this class"
root_grp = mcs_h5_file_obj['/']
if 'McsHdf5ProtocolType' in root_grp.attrs: #check for old file type
mcs_hdf5_protocol_type = root_grp.attrs['McsHdf5ProtocolType'].decode('UTF-8')
if mcs_hdf5_protocol_type == "RawData":
mcs_hdf5_protocol_type_version = root_grp.attrs['McsHdf5ProtocolVersion']
McsHdf5Protocols.check_hdf5_protocol_version(mcs_hdf5_protocol_type, mcs_hdf5_protocol_type_version)
else:
raise IOError("The root group of this HDF5 file has no 'McsHdf5ProtocolVersion' attribute -> so it could't be checked if the version is supported!")
elif 'ID.Type' in root_grp.attrs: #check for CMOS MEA file type
mcs_hdf5_protocol_type = "CMOS_MEA"
if 'FileVersion' in root_grp.attrs:
mcs_hdf5_protocol_type_version = root_grp.attrs['FileVersion']
McsHdf5Protocols.check_hdf5_protocol_version(mcs_hdf5_protocol_type, mcs_hdf5_protocol_type_version)
else:
raise IOError("The root group of this HDF5 file has no 'FileID' attribute -> so it could't be checked if the version is supported!")
else:
raise IOError("The root group of this HDF5 file has no attribute that can be associated to a MCS HDF5 file type -> this file is not supported by McsPy!")
return list((mcs_hdf5_protocol_type, mcs_hdf5_protocol_type_version))
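# ---------------------------------------------------------------------------
# Standalone sketch (not from the original file) of the __new__-based factory
# dispatch McsData uses above: the class inspects its argument and returns an
# instance of a *different* class, so "McsData(...)" never yields a McsData.
class _FactoryDemo:
    def __new__(cls, kind):
        return {"cmos": _CmosDemo, "raw": _RawDemo}[kind]()

class _CmosDemo:
    pass

class _RawDemo:
    pass

# type(_FactoryDemo("cmos")) is _CmosDemo -> True
# ---------------------------------------------------------------------------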
class McsCMOSMEAData(McsGroup):
"""
This class holds the information of a complete MCS CMOS-MEA data file system
"""
sensorWidth: int = 65
sensorHeight: int = 65
def __init__(self, cmos_data_path):
"""
Creates and initializes a McsCMOSMEAData object that provides access to the content of the given MCS-HDF5 file
:param cmos_data_path: path to a HDF5 file that contains raw data encoded in a supported MCS-HDF5 format version
"""
self.h5_file = h5py.File(cmos_data_path, 'r')
super().__init__(self.h5_file)
self.mcs_hdf5_protocol_type, self.mcs_hdf5_protocol_type_version = McsData.validate_mcs_hdf5_version(self.h5_file)
#self._get_session_info()
#self._acquisition = None
#self._filter_tool = None
#self._sta_explorer = None
#self._spike_explorer = None
#self._spike_sorter = None
def __del__(self):
self.h5_file.close()
def __repr__(self):
return '<McsCMOSMEAData filename=' + self.attributes['ID.Instance'] + '>'
def __str__(self):
out: str = '<McsCMOSMEAData instance at '+str(hex(id(self)))+'>\n\n'
out += 'This object represents the Mcs CMOS MEA file:\n'
#out += ''*4+'Path:'.ljust(12)+'\\'.join(self.attributes['ID.Instance'].split('\\')[:-1])+'\n'
out += ''*4+'Filename:'.ljust(12)+self.attributes['ID.Instance'].split('\\')[-1]+'\n\n'
out += 'Date'.ljust(21)+'Program'.ljust(28)+'Version'.ljust(12)+'\n'
out += '-'*19+' '*2+'-'*26+' '*2+'-'*10+'\n'
out += self.attributes['DateTime'].ljust(21) + self.attributes['ProgramName'].ljust(28)+self.attributes['ProgramVersion'].ljust(12)+'\n\n'
mcs_group_string = super().__str__().split('\n')
return out+'\nContent:\n'+'\n'.join(mcs_group_string[4:])
#def _get_session_info(self):
# "Read all session metadata/root group atributes of the Cmos mea file"
# root_grp_attributes = self.h5_file['/'].attrs.items()
# self.session_info = {}
# for (name, value) in root_grp_attributes:
# #print(name, value)
# if hasattr(value, "decode"):
# self.session_info[name] = value.decode('utf-8').rstrip()
# else:
# self.session_info[name] = value
def __read_acquisition(self):
"Read aquisition group"
if 'Acquisition' in list(self.h5_file.keys()):
acquisition_folder = self.h5_file['Acquisition']
#acquisition_attributes = self.h5_file['Acquisition'].attrs.items()
if len(acquisition_folder)>0:
self._acquisition = Acquisition(acquisition_folder)
for (name, value) in acquisition_folder.items():
dprint(name, value)
else:
raise AttributeError("The HDF5 file does not contain a group 'Acquisition'.")
def __read_sta_explorer(self):
if 'STA Explorer' in list(self.h5_file.keys()):
"Read sta explorer group"
network_explorer_folder = self.h5_file['STA Explorer']
#sta_explorer_attributes = self.h5_file['STA Explorer'].attrs.items()
if len(network_explorer_folder)>0:
self._sta_explorer = NetworkExplorer(network_explorer_folder)
for (name, value) in network_explorer_folder.items():
dprint(name, value)
elif 'Network Explorer' in list(self.h5_file.keys()):
"Read network explorer group"
network_explorer_folder = self.h5_file['Network Explorer']
#sta_explorer_attributes = self.h5_file['STA Explorer'].attrs.items()
if len(network_explorer_folder)>0:
self._sta_explorer = NetworkExplorer(network_explorer_folder)
for (name, value) in network_explorer_folder.items():
dprint(name, value)
else:
raise AttributeError("The HDF5 file does not contain a group 'STA Explorer' or 'Network Explorer'.")
def __read_filter_tool(self):
if 'Filter Tool' in list(self.h5_file.keys()):
pass
else:
raise AttributeError("The HDF5 file does not contain a group 'Filter Tool'.")
def __read_spike_explorer(self):
if 'Spike Explorer' in list(self.h5_file.keys()):
pass
else:
raise AttributeError("The HDF5 file does not contain a group 'Spike Explorer'.")
def __read_spike_sorter(self):
if 'Spike Sorter' in list(self.h5_file.keys()):
pass
else:
raise AttributeError("The HDF5 file does not contain a group 'Spike Sorter'.")
@classmethod
def sensorID_to_coordinates(cls, sensorID):
"Computes the [x,y] chip coordinates of a sensor. Note: both, sensor IDs and coordinates are base 1"
if 0<sensorID and sensorID<=cls.sensorWidth*cls.sensorHeight:
sensorID -= 1
return np.array([(sensorID % cls.sensorHeight)+1,(sensorID // cls.sensorHeight)+1])
else:
raise KeyError('Sensor ID out of range!')
@classmethod
def coordinates_to_sensorID(cls, row: int, col: int) -> int:
"Computes the sensor ID for row and column coordinates. Note: sensor IDs and rows and columns are base 1"
if 0<row and row<=cls.sensorHeight and 0<col and col<=cls.sensorWidth:
return cls.sensorHeight*(col-1)+row
else:
raise KeyError('Coordinates out of range!')
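# ---------------------------------------------------------------------------
# Hypothetical self-check (not part of the original module) for the
# column-major, base-1 mapping implemented by the two classmethods above:
# converting an ID to (row, col) and back must round-trip across the chip.
if __name__ == "__main__":
    for sensor_id in (1, 65, 66, McsCMOSMEAData.sensorWidth * McsCMOSMEAData.sensorHeight):
        row, col = McsCMOSMEAData.sensorID_to_coordinates(sensor_id)
        assert McsCMOSMEAData.coordinates_to_sensorID(row, col) == sensor_id
# ---------------------------------------------------------------------------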
class Acquisition(McsGroup):
"""
Container class for acquisition data.
Acquisition Group can hold different types of streams: Analog Streams, Event Streams, Timestamp Streams, Segment Streams, Spike Streams
"""
"holds allowed stream types in TypeID:Type pairs"
_stream_types = {"AnalogStream" : "9217aeb4-59a0-4d7f-bdcd-0371c9fd66eb",
"FrameStream" : "15e5a1fe-df2f-421b-8b60-23eeb2213c45",
"SegmentStream" : "35f15fa5-8427-4d07-8460-b77a7e9b7f8d",
"TimeStampStream" : "425ce2e0-f1d6-4604-8ab4-6a2facbb2c3e",
"SpikeStream" : "26efe891-c075-409b-94f8-eb3a7dd68c94",
"EventStream" : "09f288a5-6286-4bed-a05c-02859baea8e3"}
def __init__(self, acquisition_group):
super().__init__(acquisition_group)
setattr(Acquisition, 'ChannelStreams', _list_property([id_set for id_set in self._child_inventory if id_set.mcs_typeid == Acquisition._stream_types["AnalogStream"]], self, fget=self._get_children, fset=None, fdel=None))
setattr(Acquisition, 'SensorStreams', _list_property([id_set for id_set in self._child_inventory if id_set.mcs_typeid == Acquisition._stream_types["FrameStream"]], self, fget=self._get_children, fset=None, fdel=None))
setattr(Acquisition, 'SegmentStreams', _list_property([id_set for id_set in self._child_inventory if id_set.mcs_typeid == Acquisition._stream_types["SegmentStream"]], self, fget=self._get_children, fset=None, fdel=None))
setattr(Acquisition, 'SpikeStreams', _list_property([id_set for id_set in self._child_inventory if id_set.mcs_typeid == Acquisition._stream_types["SpikeStream"]], self, fget=self._get_children, fset=None, fdel=None))
setattr(Acquisition, 'EventStreams', _list_property([id_set for id_set in self._child_inventory if id_set.mcs_typeid == Acquisition._stream_types["EventStream"]], self, fget=self._get_children, fset=None, fdel=None))
def __str__(self) -> str:
if self._child_inventory:
column_width: int = 25
bold_line: str = '='*(column_width*3+4)+'\n'
line: str = '-'*(column_width*3+4)+'\n'
out: str = line + 'Parent Group: <'+str(type(self)).strip('<>')+' object at '+str(hex(id(self)))+'>\n\n'
header: str = '|'+'{:^{}}'.format('Subtype', column_width)+'|'+'{:^{}}'.format('HDF5 name', column_width)+'|'+'{:^{}}'.format('McsPy name', column_width)+'|\n'
stream_types: Dict[str, str] = dict()
for child in self._child_inventory:
#h5py_key, mcs_typeid, mcspy_key, mcs_typeid = child
stream_type = self._h5py_object[child.h5py].attrs['ID.Type'].decode('utf-8')
stream_subtype = self._h5py_object[child.h5py].attrs['SubType'].decode('utf-8')
if not stream_type in stream_types:
stream_types[stream_type] = ""
stream_types[stream_type] += '|'+'{:^{}}'.format(stream_subtype, column_width)+'|'+'{:^{}}'.format(child.h5py, column_width)+'|'+'{:^{}}'.format(child.mcspy, column_width)+'|\n'
out += line + '\n\n' + header + bold_line
for stream_type in stream_types:
out += stream_type+'\n'+stream_types[stream_type] + line
else:
out = "No streams found"
return out
def __repr__(self):
return '<Acquisition object at '+str(hex(id(self)))+', ChannelStreams='+str(len(self.ChannelStreams))+', SensorStreams='+str(len(self.SensorStreams))+', SegmentStreams='+str(len(self.SegmentStreams))+', SpikeStreams='+str(len(self.SpikeStreams))+', EventStreams='+str(len(self.EventStreams))+'>'
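# ---------------------------------------------------------------------------
# Hypothetical usage sketch (the file name "recording.cmcr" is an assumption,
# not something shipped with the package): once a CMOS-MEA file is open, the
# Acquisition group exposes its streams through the typed _list_property
# collections wired up in Acquisition.__init__ above.
if __name__ == "__main__":
    data = McsCMOSMEAData("recording.cmcr")
    print(data.Acquisition)                        # overview table of streams
    if len(data.Acquisition.ChannelStreams) > 0:
        print(data.Acquisition.ChannelStreams[0])  # first analog stream
# ---------------------------------------------------------------------------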
class McsInfo(McsDataset):
"""
Container class for Stream Meta Data
"""
def __init__(self, meta_data_set):
"""
Initializes a Meta object from a provided HDF5 dataset
"""
super().__init__(meta_data_set)
class McsStream(McsGroup):
"""
Base class for all stream types
"""
def __init__(self, stream_grp, data_typeid, meta_typeid, *args):
"""
Initializes a stream object with its associated h5py group object
:param stream_grp: group object correponding to a folder in the HDF5 file. It contains the data of this stream
:param data_typeid: mcs type id of the data stored in the stream
:param meta_typeid: mcs type id of the meta data stored in the stream
"""
super().__init__(stream_grp)
self._data_typeid = data_typeid
self._meta_typeid = meta_typeid
self._entities = None
def _get_data_headers(self):
"""
retrieves all headers present in a dataset
return headers: all headers native to the data datasets in a certain stream instance
"""
headers = list()
try:
data_name = next(child for child in self._h5py_object if self._h5py_object[child].attrs['ID.TypeID'].decode('UTF-8') == self._data_typeid)
except StopIteration:
return list()
if hasattr(self._h5py_object[data_name].dtype, 'names'):
headers = list(self._h5py_object[data_name].dtype.names)
return headers
def _get_meta_headers(self):
"""
retrieves all headers of the meta data
return headers: all headers native to the meta datasets in a certain stream instance
"""
headers = list()
try:
meta_name = next(child for child in self._h5py_object if self._h5py_object[child].attrs['ID.TypeID'].decode('UTF-8') == self._meta_typeid)
except StopIteration:
pass
if hasattr(self._h5py_object[meta_name].dtype, 'names'):
headers = self._h5py_object[meta_name].dtype.names
return headers
@property
def Data(self):
"Access all datasets - collection of McsDataset objects"
return McsStreamList([self._children[id_set.mcs_instanceid] for id_set in self._child_inventory if id_set.mcs_typeid == self._data_typeid])
@property
def Meta(self):
"Access meta data"
return McsStreamList([self._children[id_set.mcs_instanceid] for id_set in self._child_inventory if id_set.mcs_typeid == self._meta_typeid])
Stream_Types = ["Analog Stream", "Event Stream", "Segment Stream", "TimeStamp Stream", "Frame Stream", "Spike Stream"]
class McsStreamEntity(object):
"""
Base Class for a McsStreamEntity object
"""
def __init__(self, parent, id):
self.mcspy_parent = parent
self._entity_id = id
class McsChannelStream(McsStream):
"""
Container class for one analog stream of several channels.
"""
channel_data_typeid = "5efe7932-dcfe-49ff-ba53-25accff5d622"
channel_meta_typeid = "9e8ac9cd-5571-4ee5-bbfa-8e9d9c436daa"
def __init__(self, channel_stream_grp):
"""
Initializes a channel stream object containing several sweeps of channels over time
:param channel_stream_grp: folder of the HDF5 file that contains the data of this analog stream
"""
super().__init__(channel_stream_grp, McsChannelStream.channel_data_typeid, McsChannelStream.channel_meta_typeid)
def __repr__(self):
return '<McsChannelStream object at '+str(hex(id(self)))+'>'
def _get_channel_sweeps_by_number(self, key):
"""
retrieves all dataset that belong to sweep number 'key'
:param key: key as int that identifies a sweep in the channel stream
:return: list of id set that correlates with sweeps with number 'key' in a channel stream
"""
if isinstance(key, int):
out = list()
for child in self._h5py_object.keys():
sweep_number = [int(s) for s in child if s.isdigit()]
try:
if sweep_number[0] == key and self._data_typeid == self.h5py_object[child].attrs["ID.TypeID"].decode('UTF-8'):
out.append(next(id_set for id_set in self._child_inventory if child in id_set))
except IndexError:
pass
return out
raise KeyError('{} must be an instance of int!'.format(key))
@property
def DataChunk(self):
"""
The continuous data segments in the stream
"""
sweep_numbers = np.unique(self.ChannelMeta.GroupID).tolist()
out = {}
for sweep_number in sweep_numbers:
out[sweep_number] = _list_property.McsProxy(self._get_channel_sweeps_by_number(sweep_number), obj=self, fget=self._get_children, fset=None, fdel=None)
return out
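    # Usage sketch (hypothetical sweep number): DataChunk maps each GroupID found in
    # the channel meta table to a proxy over the datasets of that sweep, so a single
    # sweep can be read without touching the others:
    #   >>> chunks = channel_stream.DataChunk   # dict: sweep number -> McsProxy
    #   >>> first_sweep = chunks[0]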
class McsChannelEntity(McsDataset, McsStreamEntity):
"""
Container class for one ChannelStream Entity.
"""
def __init__(self, channel_stream_entity_dataset, mcspy_parent):
"""
initializes a new McsChannelEntity from a h5py_dataset of a hdf5 ChannelData entity
:param channel_stream_entity_dataset: h5py_dataset of a channel
"""
id = int(channel_stream_entity_dataset.name.split()[-1]) #_entity_id is Group ID
McsDataset.__init__(self, channel_stream_entity_dataset)
McsStreamEntity.__init__(self, mcspy_parent, id)
self.dimensions = '[ \'number of channels\' x \'samples\' ]'
def __repr__(self):
return '<McsChannelEntity object at '+str(hex(id(self)))+', channels='+str(self.shape[0])+', samples='+str(self.shape[1])+'>'
@property
def Meta(self):
"""
reads the subset of Meta data that belongs to the channels
"""
index = tuple(np.where(self.mcspy_parent.Meta[0].GroupID == self._entity_id)[0])
return self.mcspy_parent.Meta[0][index,]
class McsEventStream(McsStream):
"""
Container class for one Event Stream.
"""
event_data_typeid = "abca7b0c-b6ce-49fa-ad74-a20c352fe4a7"
event_meta_typeid = "8f58017a-1279-4d0f-80b0-78f2d80402b4"
def __init__(self, event_stream_grp):
"""
Initializes an event stream object
:param event_stream_grp: folder of the HDF5 file that contains the data of this event stream
"""
super().__init__(event_stream_grp, McsEventStream.event_data_typeid, McsEventStream.event_meta_typeid)
def __repr__(self):
return '<McsEventStream object at '+str(hex(id(self)))+', EventEntities='+str(len(self.EventEntity))+'>'
def _read_entities(self, entity_class_name):
"""
reads event stream entities into entity type associated objects
:param entity_class_name: class name of the associated stream entity
"""
try:
cls = globals()[entity_class_name] #getattr(__name__, entity_class_name)
        except KeyError as err:
            print(err)
            raise  # cls would be undefined below; surface the unknown class name instead of a NameError
self._entities = list()
for entity_type in np.unique(self.EventData.EventID):
self._entities.append(cls(self, entity_type))
@property
def EventData(self):
"""
All events of all event entities in the stream
"""
return self.Data[0]
@property
def EventMeta(self):
"""
The meta data for all event entities
"""
return self.Meta[0]
@property
def EventEntity(self):
"""
All event entities in the stream
"""
        if self._entities is None:
self._read_entities('McsEventEntity')
return self._entities
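    # Usage sketch (hypothetical stream object): EventEntity lazily builds one
    # McsEventEntity per unique EventID on first access, so all event types can be
    # inspected like this:
    #   >>> for entity in event_stream.EventEntity:
    #   ...     print(entity)   # label plus available data/meta headers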
class McsEventEntity(McsStreamEntity):
"""
Container class for Event Entity object
"""
def __init__(self, parent, event_id):
"""
Initializes an Mcs EventEntity Object
:param parent: parent McsEventStream instances
:param event_id: identifier of the event entity (the type of event)
"""
super().__init__(parent, event_id)
def _get_data_by_header(self, header):
index = list(np.where(self.mcspy_parent.data[0]['EventID'] == self._entity_id)[0])
return self.mcspy_parent.data[0][index,header]
def _get_meta_by_header(self, header):
index = list(np.where(self.mcspy_parent.meta[0]['EventID'] == self._entity_id)[0])
return self.mcspy_parent.meta[0][index,header]
def __getattr__(self, name):
if name in self.mcspy_parent._get_data_headers():
return self._get_data_by_header(name)
if name in self.mcspy_parent._get_meta_headers():
return self._get_meta_by_header(name)
raise AttributeError('{} is not a valid event attribute'.format(name))
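    # Note on the lookup above: any column name present in the parent stream's data
    # or meta table (e.g. 'EventID' or 'Label') can be read directly as an attribute
    # of this entity; the returned values are filtered down to this entity's id.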
def __str__(self):
return 'Event Entity \"' + self.meta['Label'][0].decode('UTF-8') + '\" Headers:\n'+'Event Data Headers: '+', '.join(self.mcspy_parent._get_data_headers())+'\nEvent Meta Headers: '+', '.join(self.mcspy_parent._get_meta_headers())
def __repr__(self):
return '<McsEventEntity object at '+str(hex(id(self)))+', Label='+ self.meta['Label'][0].decode('UTF-8') +', events='+str(len(self.data))+'>'
@property
def events(self):
"""
        The ids, timestamps and durations of the occurrences of the event entity
"""
index = list(np.where(self.mcspy_parent.EventData['EventID'] == self._entity_id)[0])
return self.mcspy_parent.EventData[index]
@property
def meta(self):
"""
The meta data for an event entity
"""
index = list(np.where(self.mcspy_parent.EventMeta['EventID'] == self._entity_id)[0])
return self.mcspy_parent.EventMeta[index]
class McsSensorStream(McsStream):
"""
    Container class for one Sensor Stream.
"""
sensor_data_typeid = "49da47df-f397-4121-b5da-35317a93e705"
sensor_meta_typeid = "ab2aa189-2e72-4148-a2ef-978119223412"
def __init__(self, sensor_stream_grp):
"""
        Initializes a sensor stream object
:param sensor_stream_grp: folder of the HDF5 file that contains the data of this sensor stream
"""
super().__init__(sensor_stream_grp, McsSensorStream.sensor_data_typeid, McsSensorStream.sensor_meta_typeid)
def __repr__(self):
return '<McsSensorStream object at '+str(hex(id(self)))+'>'
    def _read_entities(self, entity_class_name):
        """
        reads sensor stream entities into entity type associated objects
        :param entity_class_name: class name of the associated stream entity
        """
        try:
            cls = globals()[entity_class_name]
        except KeyError as err:
            print(err)
            raise
        self._entities = list()
        # NOTE: kept from the original; this mirrors McsEventStream._read_entities and
        # still iterates over self.EventData.EventID, which McsSensorStream does not
        # define. A sensor-specific id column would be needed for this to work.
        for entity_type in np.unique(self.EventData.EventID):
            self._entities.append(cls(self, entity_type))
def _get_sensor_sweeps_by_number(self, key):
"""
        retrieves all datasets that belong to sweep number 'key' in a sensor stream
:param key: key as int that identifies a sweep in the sensor stream
:return: list of id set that correlates with sweeps with number 'key'
"""
if isinstance(key, int):
out = list()
for child in self._h5py_object.keys():
sweep_number = [int(s) for s in child if s.isdigit()]
try:
                    if sweep_number[1] == key and self._data_typeid == self._h5py_object[child].attrs["ID.TypeID"].decode('UTF-8'):
out.append(next(id_set for id_set in self._child_inventory if child in id_set))
except IndexError:
pass
return out
raise KeyError('{} must be an instance of int!'.format(key))
def _get_sensor_rois_by_number(self, key):
"""
        retrieves all datasets that belong to roi number 'key' in a sensor stream
:param key: key as int that identifies a roi in the sensor stream
:return: list of id set that correlates with roi with number 'key'
"""
if isinstance(key, int):
out = list()
for child in self._h5py_object.keys():
roi_number = [int(s) for s in child if s.isdigit()]
try:
                    if roi_number[0] == key and self._data_typeid == self._h5py_object[child].attrs["ID.TypeID"].decode('UTF-8'):
out.append(next(id_set for id_set in self._child_inventory if child in id_set))
except IndexError:
pass
return out
raise KeyError('{} must be an instance of int!'.format(key))
@property
def DataChunk(self):
"""
The groups of data that have been acquired. Intended for acquisition of multiple time windows
"""
sweep_numbers = np.unique(self.SensorMeta.GroupID).tolist()
out = dict()
for sweep_number in sweep_numbers:
out[sweep_number] = _list_property.McsProxy(self._get_sensor_sweeps_by_number(sweep_number), obj=self, fget=self._get_children, fset=None, fdel=None)
return out
@property
def Regions(self):
"""
The regions of interest (ROI) on the sensor for which data has been acquired, usually from a rectangular subset of the sensors
"""
roi_numbers = np.unique(self.SensorMeta.RegionID).tolist()
out = dict()
for roi_number in roi_numbers:
out[roi_number] = _list_property.McsProxy(self._get_sensor_rois_by_number(roi_number), obj=self, fget=self._get_children, fset=None, fdel=None)
return out
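    # Usage sketch (hypothetical ROI number): Regions works like DataChunk, but keys
    # the proxies by RegionID instead of GroupID:
    #   >>> rois = sensor_stream.Regions
    #   >>> first_roi = rois[1]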
@property
def SensorData(self):
"""
The sensor data as a numpy array of shape (frames x sensors_Y x sensors_X)
"""
return self.Data
@property
def SensorMeta(self):
"""
The meta data for the acquired sensor data
"""
return self.Meta[0]
class McsSensorEntity(McsDataset, McsStreamEntity):
"""
Container class for one McsSensorEntity - a sensor stream entity.
"""
def __init__(self, sensor_stream_entity_dataset, mcspy_parent):
"""
        initializes a new McsSensorEntity from a h5py_dataset of a hdf5 SensorData entity
        :param sensor_stream_entity_dataset: h5py_dataset of a sensor data entity
"""
id = re.findall(r'\d+', sensor_stream_entity_dataset.name.split('/')[-1] )
id = tuple(map(int, id))
McsDataset.__init__(self, sensor_stream_entity_dataset)
McsStreamEntity.__init__( self, mcspy_parent, id )
self.dimensions = '[ \'frames\' x \'region height\' x \'region width\' ]'
def __repr__(self):
return '<McsSensorEntity object at '+str(hex(id(self)))+', frames='+str(self.shape[0])+', height='+str(self.shape[1])+', width='+str(self.shape[2])+'>'
class McsSpikeStream(McsStream):
"""
Container class for one Spike Stream.
"""
spike_data_typeid = "3e8aaacc-268b-4057-b0bb-45d7dc9ec73b"
spike_meta_typeid = "e1d7616f-621c-4a26-8f60-a7e63a9030b7"
def __init__(self, spike_stream_grp, spike_data_typeid="3e8aaacc-268b-4057-b0bb-45d7dc9ec73b"):
"""
        Initializes a spike stream object
:param spike_stream_grp: folder of the HDF5 file that contains the data of this spike stream
"""
super().__init__(spike_stream_grp, spike_data_typeid, McsSpikeStream.spike_meta_typeid)
def __repr__(self):
return '<McsSpikeStream object at '+str(hex(id(self)))+'>'
def get_spikes_at_sensor(self, sensor_id):
"""
        retrieves all spikes that occurred at the sensor with id sensor_id
:param sensor_id: valid identifier for a sensor on the MCS CMOS chip as int: 1 <= sensor_id <= 65*65
:return: numpy structured array of all spikes that have been detected on the sensor with id sensor_id
"""
if not isinstance(sensor_id, int):
raise TypeError('The given sensor id \'{}\' must be of type \'int\'.'.format(sensor_id))
if not sensor_id in range(1,65**2+1):
            raise ValueError('The given sensor id \'{}\' must satisfy 1 <= sensor_id <= 65*65'.format(sensor_id))
row_numbers = np.where(self.SpikeData['SensorID'] == sensor_id)[0]
return self.SpikeData[tuple(row_numbers),]
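    # Usage sketch (hypothetical sensor id; the 1..65*65 range comes from the check
    # above):
    #   >>> spikes = spike_stream.get_spikes_at_sensor(1245)
    #   >>> spikes['TimeStamp']   # spike timestamps in microseconds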
def get_spikes_in_interval(self, interval):
"""
        Retrieves all spikes that occurred in a given time interval. Intervals exceeding the time range of the dataset will throw a warning,
and retrieval of maximally sized subset of the interval is attempted.
:param interval: interval in s as instance of
- list(start,stop) of length 2
- tuple(start,stop) of length 2
start must be a number, stop must be a number or the keyword 'end', start and stop must satisfy start < stop
        :result: numpy structured array which includes all spikes occurring in the given interval
"""
if not isinstance(interval, (list,tuple)):
raise TypeError('The given interval \'{}\' must be an instance of list(start,stop) or tuple(start,stop)'.format(interval))
if not len(interval) == 2:
raise ValueError('The given interval \'{}\' must provide a start and a stop value'.format(interval))
if not isinstance(interval[0], Number):
raise TypeError('start \'{}\' must be a number'.format(interval[0]))
        if not (isinstance(interval[1], Number) or interval[1]=='end'):
            raise TypeError('stop \'{}\' must be a number or the keyword \'end\''.format(interval[1]))
        interval = list(interval)  # work on a copy so tuples are accepted and the caller's object is not mutated
        if interval[1]=='end':
            interval[1] = self.SpikeData.TimeStamp[-1]*(10**-6)
        if interval[0]>=interval[1]:
            raise ValueError('start={} and stop={} do not satisfy start < stop'.format(interval[0], interval[1]))
        interval[0] *= (10**6)
        interval[1] *= (10**6)
row_numbers = np.logical_and(interval[0] <= self.SpikeData['TimeStamp'], self.SpikeData['TimeStamp'] <= interval[1])
return self.SpikeData[row_numbers,]
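    # Usage sketch: intervals are given in seconds and converted internally to the
    # microsecond timestamps stored in SpikeData (factor 10**6 above):
    #   >>> early = spike_stream.get_spikes_in_interval([0, 0.5])
    #   >>> everything = spike_stream.get_spikes_in_interval([0, 'end'])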
def get_spike_timestamps_at_sensors(self, sensor_ids):
"""
Retrieves all spike timestamps for all given sensors as a dictionary
:param sensor_ids: valid identifiers for sensors on the MCS CMOS chip as int: 1 <= sensor_id <= 65*65
:return: dictionary of all spike timestamps that have been detected on the given sensors. Key: sensor_id, value: spike timestamps
"""
if isinstance(sensor_ids, Number):
sensor_ids = [sensor_ids]
spike_dict = {}
for sensor in sensor_ids:
spikes = self.get_spikes_at_sensor(sensor)
timestamps = [t[1] for t in spikes]
spike_dict[sensor] = timestamps
return spike_dict
def get_spike_cutouts_at_sensor(self, sensor_id):
"""
Retrieves the spike cutouts for all spikes for the given sensor_id
:param sensor_id: valid identifier for a sensor on the MCS CMOS chip as int: 1 <= sensor_id <= 65*65
:return: Numpy array spikes x samples of the spike cutouts
"""
spikes = self.get_spikes_at_sensor(sensor_id)
cutouts = [list(s)[2:] for s in spikes]
return np.array(cutouts)
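    # Usage sketch: the first two columns of a spike row are SensorID and TimeStamp,
    # so the slice [2:] above keeps only the cutout samples:
    #   >>> cutouts = spike_stream.get_spike_cutouts_at_sensor(1245)  # hypothetical id
    #   >>> cutouts.shape   # (number of spikes, samples per cutout)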
@property
def SpikeStreamEntity(self):
return self.Data
@property
def SpikeData(self):
"""
The detected spikes, each with a sensor ID, a timestamp and (optionally) with a cutout
"""
return self.Data[0]
@property
def SpikeMeta(self):
"""
        The meta data for spike detection, e.g. pre- and post-interval
"""
return self.Meta[0]
class McsSpikeEntity(McsDataset, McsStreamEntity):
"""
Container class for one SpikeStream Entity.
"""
def __init__(self, spike_stream_entity_dataset, mcspy_parent):
"""
initializes a new McsSpikeEntity from a h5py_dataset of a hdf5 SpikeData entity
        :param spike_stream_entity_dataset: h5py_dataset of a spike data entity
"""
McsDataset.__init__(self, spike_stream_entity_dataset)
McsStreamEntity.__init__(self, mcspy_parent, 0)
self.dimensions = '[ \'# of spikes\' x \'SensorID + Timestamp + n cutout values\' ]'
def __repr__(self):
return '<McsSpikeEntity object at '+str(hex(id(self)))+', spikes='+str(self.shape[0])+'>'
class McsSegmentStream(McsStream):
"""
Container class for one segment stream of different segment entities
"""
    def __init__(self, segment_stream_grp):
        # NOTE: the original passed `self` as the h5py group (super().__init__(self, ...)),
        # which cannot work. No segment type ids are defined in this module, so None is
        # passed for the data/meta type ids (assumption); header lookups then simply
        # return empty results.
        super().__init__(segment_stream_grp, None, None)
def __repr__(self):
return '<McsSegmentStream object at '+str(hex(id(self)))+'>'
class McsSegmentStreamEntity(object):
"""
Segment entity class,
"""
pass
class McsTimeStampStream(McsStream):
"""
Container class for one TimeStamp stream
"""
    def __init__(self, timestamp_stream_grp):
        # NOTE: same fix as in McsSegmentStream; see the comment there.
        super().__init__(timestamp_stream_grp, None, None)
def __repr__(self):
return '<McsTimeStampStream object at '+str(hex(id(self)))+'>'
class McsTimeStampStreamEntity(object):
"""
TimeStamp stream entity class
"""
pass
class NetworkExplorer(McsGroup):
"""
Container class for a NetworkExplorer object
"""
def __init__(self, network_explorer_group):
self.__network_explorer_group = network_explorer_group
self._sta_key_type = self.get_sta_entity_by_sourceID
self._map_sensorID_to_sourceID = {}
self._sta_entity = None
super().__init__(network_explorer_group)
def __str__(self):
"""
provides a string method that prepares the object attributes for printing
"""
        if self.__network_explorer_group:
            out = 'The NetworkExplorer object holds the following information:\n'
out += 'Attributes:\n'
for (name, value) in self.__network_explorer_group.attrs.items():
if hasattr(value, "decode"):
out += ("\t"+name.ljust(20)+"\t"+value.decode('UTF-8')+"\n")
else:
out += ("\t"+name.ljust(20)+"\t"+str(value).strip('[]')+"\n")
out += '------------------------------------------\nSubgroups\n'
out += '------------------------------------------\nDatasets\n'
for (name, value) in self.__network_explorer_group.items():
if hasattr(value, "decode"):
out += ("\t"+name.ljust(20)+"\t"+value.decode('UTF-8')+"\n")
else:
out += ("\t"+name.ljust(20)+"\t"+str(value).strip('[]')+"\n")
return out
def __repr__(self):
if self._sta_entity is None:
return '<NetworkExplorer object at '+str(hex(id(self)))+'>'
else:
return '<NetworkExplorer object at '+str(hex(id(self)))+', entities='+str(len(self._sta_entity))+'>'
def _read_sta_entities(self):
"""
Retrieves all stored sta_entities and saves them in a dictionary with special access methods
"""
self._sta_entity = {}
self._neural_network = {}
entity_dict = {}
sta_type = b'442b7514-fe3a-4c66-8ae9-4f249ef48f2f'
spikes_type = b'1b4e0b8b-6af1-4b55-a685-a6d28a922eb3'
stddev_type = b'a056832a-013d-4215-b8a6-cb1debeb1c56'
network_type = b'235c3c9c-1e94-40ca-8d4b-c5db5b079f16'
for (name, _) in self.__network_explorer_group.items():
type_id = self.__network_explorer_group[name].attrs['ID.TypeID']
if type_id in [sta_type, spikes_type, stddev_type]:
source_id = int(self.__network_explorer_group[name].attrs['SourceID'])
if not source_id in entity_dict.keys():
entity_dict[source_id] = {}
entity_dict[source_id][type_id] = name
elif type_id == network_type:
self._read_neural_network(self.__network_explorer_group[name])
for source_id in entity_dict.keys():
new_sta_entity = STAEntity(self.__network_explorer_group,entity_dict[source_id][sta_type],
entity_dict[source_id].get(spikes_type, None), entity_dict[source_id].get(stddev_type, None),
self.get_axon_for_entity_by_sourceID(source_id))
self._sta_entity[new_sta_entity._sta_entity_sourceID] = new_sta_entity
self._map_sensorID_to_sourceID[new_sta_entity._sta_entity_sensorID] = new_sta_entity._sta_entity_sourceID
def _read_neural_network(self, group):
for entry in group:
unit_id = int(entry['UnitID'])
axon_id = int(entry['AxonID'])
segment_id = int(entry['SegmentID'])
if not unit_id in self._neural_network.keys():
self._neural_network[unit_id] = {}
if axon_id != -1 and not axon_id in self._neural_network[unit_id].keys():
self._neural_network[unit_id][axon_id] = {}
if segment_id != -1 and not segment_id in self._neural_network[unit_id][axon_id].keys():
self._neural_network[unit_id][axon_id][segment_id] = []
if axon_id != -1 and segment_id != -1:
self._neural_network[unit_id][axon_id][segment_id].append((entry['PosX'], entry['PosY']))
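    # The method above builds a nested mapping
    #   {unit_id: {axon_id: {segment_id: [(PosX, PosY), ...]}}}
    # where an axon or segment id of -1 marks units without a reconstructed axon,
    # so those units end up with empty entries instead of coordinate lists.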
def get_sta_entity_by_sourceID(self, key):
"""
Retrieve the STA Entity for the given source ID.
:param key: A valid source ID. See the sourceIDs attribute for a list of valid source IDs
:return: The STA Entity for the given source ID
"""
if self._sta_entity is None:
self._read_sta_entities()
try:
return self._sta_entity[key]
except KeyError:
print("Oops! That was not a valid sourceID. For a list of all available sourceIDs use My_sta_explorer_object.sourceIDs ")
except TypeError as err:
print(err)
def get_sta_entity_by_sensorID(self, key):
"""
Retrieve the STA Entity for the given sensor ID.
:param key: A valid sensor ID. See the sensorIDs attribute for a list of valid sensor IDs
:return: The STA Entity for the given sensor ID
"""
if self._sta_entity is None:
self._read_sta_entities()
try:
return self._sta_entity[self._map_sensorID_to_sourceID[key]]
except KeyError:
print("Oops! That was not a valid sensorID. For a list of all available sensorIDs use My_sta_explorer_object.sensorIDs ")
except TypeError as err:
print(err)
def get_sta_entity(self, key):
"""
Retrieve the STA Entity for the given key.
:param key: A valid key, either a sensor or a source ID, depending on the sta_key_type attribute
:return: The STA Entity for the given key
"""
if self._sta_entity is None:
self._read_sta_entities()
return self._sta_key_type(key)
#if self.sta_key_type == 'sensorID':
# return self._sta_entity[self._map_sensorID_to_sourceID[key]].data
#return self._sta_entity[key].data
def set_sta_entity(self, key, value):
"""
Sets an entity to a value
"""
dprint("Setting _sta_entity[",key,"] to ",value)
self._sta_entity[key]=value
def del_sta_entity(self, key):
"""
Deletes an entity
"""
dprint("Deleting _sta_entity[",key,"]")
del self._sta_entity[key]
def get_axon_for_entity_by_sourceID(self, key, axon=1, segment=1):
"""
Retrieve the path of the axon for a given sensor or source ID.
:param key: A valid key, either a sensor or a source ID, depending on the sta_key_type attribute
:param axon: A valid axon ID, in case multiple axons have been found for a unit. Default: 1
:param segment: A valid axon ID, in case multiple segments have been found for an axon. Default: 1
:return: The axon path as a list of (X,Y) tuples in sensor coordinates. Returns None if no axon is found
"""
if self._sta_entity is None:
self._read_sta_entities()
if not key in self._neural_network.keys():
return None
if not axon in self._neural_network[key] or not segment in self._neural_network[key][axon]:
return None
return self._neural_network[key][axon][segment]
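    # Usage sketch (hypothetical source id): the returned path is a list of (X, Y)
    # tuples in sensor coordinates and can be unpacked for plotting:
    #   >>> path = explorer.get_axon_for_entity_by_sourceID(42)
    #   >>> if path is not None:
    #   ...     xs, ys = zip(*path)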
sta_entity = DictProperty_for_Classes(get_sta_entity, set_sta_entity, del_sta_entity)
@property
def sta_key_type(self):
"""
The type of key used in the access functions. Either 'sourceID' or 'sensorID'
"""
if self._sta_key_type == self.get_sta_entity_by_sourceID:
return 'sourceID'
elif self._sta_key_type == self.get_sta_entity_by_sensorID:
return 'sensorID'
else:
return None
@sta_key_type.setter
def sta_key_type(self, value):
        if value=='sourceID':
            print("All STA entity retrievals are now by "+value)
            self._sta_key_type = self.get_sta_entity_by_sourceID
        elif value=='sensorID':
            print("All STA entity retrievals are now by "+value)
            self._sta_key_type = self.get_sta_entity_by_sensorID
else:
print("Oops! That is not a valid way of selecting STA entities. Try 'sourceID' or 'sensorID'")
@property
def sourceIDs(self):
"""
A list of valid source IDs
"""
if self._sta_entity is None:
self._read_sta_entities()
return list(self._map_sensorID_to_sourceID.values())
@property
def sensorIDs(self):
"""
A list of valid sensor IDs
"""
if self._sta_entity is None:
self._read_sta_entities()
return list(self._map_sensorID_to_sourceID.keys())
@property
def attributes(self):
        return self.__network_explorer_group.attrs.items()
class STAEntity(object):
"""
Container Class for a STAEntity object
"""
def __init__(self, sta_explorer, sta_entity, spikes_entity=None, stastddev_entity=None, axon=None):
self._sta_explorer = sta_explorer
self._sta_entity_string = sta_entity
self._sta_entity_sourceID = int(sta_explorer[sta_entity].attrs['SourceID'])
self._sta_entity_sensorID = int(sta_explorer[sta_entity].attrs['SensorID'])
x,y = McsCMOSMEAData.sensorID_to_coordinates(self._sta_entity_sensorID)
self._sta_entity_coordinates = np.array([int(x),int(y)])
self._spikes_entity = spikes_entity
self._stastddev_entity = stastddev_entity
self._axon = axon
def __repr__(self):
return '<STAEntity object at '+str(hex(id(self)))+'>'
@property
def data(self):
"""
        The STA data as a numpy array of shape (frames x sensors_Y x sensors_X)
"""
return self._sta_explorer[self._sta_entity_string]
@property
def spikes(self):
"""
Detected spikes in the STA
"""
if self._spikes_entity is None:
return None
return self._sta_explorer[self._spikes_entity]
@property
def sta_stddev(self):
"""
Returns the standard deviation for each channel in the STA. Used for spike detection on the STA
"""
if self._stastddev_entity is None:
return None
return self._sta_explorer[self._stastddev_entity]
@property
def sensor_coordinates(self):
"""
Returns the STA source coordinates on the chip as [X,Y]. Note: X and Y are 1-based
"""
return self._sta_entity_coordinates
@property
def axon(self):
"""
Returns the axon path as a list of (X,Y) tuples in sensor coordinates. None if no axon has been found
"""
return self._axon
class SpikeExplorer(McsSpikeStream):
"""
    Container Class for a SpikeExplorer object
"""
def __init__(self, spike_explorer_group):
self._spike_explorer_group = spike_explorer_group
super().__init__(spike_explorer_group, spike_data_typeid='1b4e0b8b-6af1-4b55-a685-a6d28a922eb3')
def __repr__(self):
return '<SpikeExplorer object at '+str(hex(id(self)))+'>'
class SpikeSorter(McsGroup):
"""
Container for SpikeSorter object
"""
def __init__(self, spike_sorter_group):
self._spike_sorter_group = spike_sorter_group
self._units = {}
super().__init__(spike_sorter_group)
unit_type = b'0e5a97df-9de0-4a22-ab8c-54845c1ff3b9'
for (name, _) in self._spike_sorter_group.items():
type_id = self._spike_sorter_group[name].attrs['ID.TypeID']
if type_id == unit_type:
unit_id = int(self._spike_sorter_group[name].attrs['UnitID'])
child = self.ischild(name)
self._units[unit_id] = getattr(self, child.mcspy)
def __repr__(self):
return '<SpikeSorter object at '+str(hex(id(self)))+'>'
def get_unit(self, unit_id):
"""
Retrieves a single unit by its UnitID
:param unit_id: A valid unit ID.
"""
return self._units[unit_id]
def get_units_by_id(self):
"""
Returns a list of units sorted by unit ID
"""
unit_ids = list(self._units.keys())
unit_ids.sort()
return [self._units[i] for i in unit_ids]
def get_units_by_measure(self, measure, descending=True):
"""
Returns a list of units ordered by the given quality measure.
:param measure: The name of a quality measure. See get_unit_measures() for a list of valid quality measure names.
:param descending: The ordering of the list. Default: True (=descending order)
"""
if not measure in self.get_unit_measures():
raise ValueError(measure + " is not a valid measure. See get_unit_measures() for valid parameters")
m = self.Units[measure]
idx = np.argsort(m)
ids = self.Units['UnitID'][idx]
if descending:
ids = ids[::-1]
return [self._units[i] for i in ids]
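    # Usage sketch (assumption: the measure name is one of the columns reported by
    # get_unit_measures(); 'Separability' here is only an illustration):
    #   >>> best = spike_sorter.get_units_by_measure('Separability')
    #   >>> best[0]   # unit with the highest value of that measure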
def get_unit_measures(self):
"""
Returns a list of the available unit quality measure names
"""
lf = list(self.Units.dtype.fields)
return lf[5:]
class SpikeSorterUnitEntity(McsGroup):
"""
Container for Spike Sorter Units
"""
def __init__(self, unit_group):
self._unit_group = unit_group
self._unit_entity_unitID = int(unit_group.attrs['UnitID'])
self._unit_entity_sensorID = int(unit_group.attrs['SensorID'])
x,y = McsCMOSMEAData.sensorID_to_coordinates(self._unit_entity_sensorID)
self._unit_entity_coordinates = np.array([int(x),int(y)])
self._included_peaks = None
super().__init__(unit_group)
def __repr__(self):
return '<SpikeSorterUnitEntity object at '+str(hex(id(self)))+', id='+str(self._unit_entity_unitID)+', sensor='+str(self._unit_entity_coordinates)+'>'
def get_peaks(self):
"""
Retrieves all peaks in the source signal where the 'IncludePeak' flag is set.
"""
if self._included_peaks is None:
self._included_peaks = self.Peaks['IncludePeak'] == 1
return self.Peaks[self._included_peaks]
def get_peaks_timestamps(self):
"""
Retrieves the timestamps for all peaks in the source signal where the 'IncludePeak' flag is set.
"""
return self.get_peaks()['Timestamp']
def get_peaks_amplitudes(self):
"""
Retrieves the peak amplitudes for all peaks in the source signal where the 'IncludePeak' flag is set.
"""
return self.get_peaks()['PeakAmplitude']
def get_peaks_cutouts(self):
"""
Retrieves the cutouts for all peaks in the source signal where the 'IncludePeak' flag is set.
"""
peaks = self.get_peaks()
cutouts = [list(p)[3:] for p in peaks]
return np.stack(cutouts)
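    # Usage sketch: cutout samples start after the Timestamp, IncludePeak and
    # PeakAmplitude columns, hence the slice [3:] above:
    #   >>> waveforms = unit.get_peaks_cutouts()
    #   >>> waveforms.mean(axis=0)   # average spike waveform of this unit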
def get_measures(self):
"""
Gets a list of valid unit quality measures names
"""
lf = list(self.Unit_Info.dtype.fields)
return lf[5:]
def get_measure(self, measure):
"""
Gets a quality measure for this unit
:param measure: The name of a quality measure. See get_measures() for a list of valid quality measure names.
"""
if not measure in self.get_measures():
raise ValueError(measure + " is not a valid measure. See get_measures() for valid parameters")
return self.Unit_Info[measure][0]
class FilterTool(McsGroup):
"""
Container for FilterTool object
"""
def __init__(self, filter_tool):
self._filter_tool = filter_tool
super().__init__(filter_tool)
def __repr__(self):
        return '<FilterTool object at '+str(hex(id(self)))+'>'
class ActivitySummary(McsGroup):
"""
Container for ActivitySummary object
"""
def __init__(self, activity_summary):
self._activity_summary = activity_summary
super().__init__(activity_summary)
def __repr__(self):
return '<ActivitySummary object at '+str(hex(id(self)))+'>' | PypiClean |
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/lang/sk.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['sk']={"editor":"Editor formátovaného textu","editorPanel":"Panel editora formátovaného textu","common":{"editorHelp":"Stlačením ALT 0 spustiť pomocníka","browseServer":"Prehliadať server","url":"URL","protocol":"Protokol","upload":"Odoslať","uploadSubmit":"Odoslať na server","image":"Obrázok","flash":"Flash","form":"Formulár","checkbox":"Zaškrtávacie pole","radio":"Prepínač","textField":"Textové pole","textarea":"Textová oblasť","hiddenField":"Skryté pole","button":"Tlačidlo","select":"Rozbaľovací zoznam","imageButton":"Obrázkové tlačidlo","notSet":"<nenastavené>","id":"Id","name":"Meno","langDir":"Orientácia jazyka","langDirLtr":"Zľava doprava (LTR)","langDirRtl":"Sprava doľava (RTL)","langCode":"Kód jazyka","longDescr":"Dlhý popis URL","cssClass":"Trieda štýlu","advisoryTitle":"Pomocný titulok","cssStyle":"Štýl","ok":"OK","cancel":"Zrušiť","close":"Zatvoriť","preview":"Náhľad","resize":"Zmeniť veľkosť","generalTab":"Hlavné","advancedTab":"Rozšírené","validateNumberFailed":"Hodnota nie je číslo.","confirmNewPage":"Prajete si načítat novú stránku? Všetky neuložené zmeny budú stratené. ","confirmCancel":"Niektore možnosti boli zmenené. Naozaj chcete zavrieť okno?","options":"Možnosti","target":"Cieľ","targetNew":"Nové okno (_blank)","targetTop":"Najvrchnejšie okno (_top)","targetSelf":"To isté okno (_self)","targetParent":"Rodičovské okno (_parent)","langDirLTR":"Zľava doprava (LTR)","langDirRTL":"Sprava doľava (RTL)","styles":"Štýl","cssClasses":"Triedy štýlu","width":"Šírka","height":"Výška","align":"Zarovnanie","left":"Vľavo","right":"Vpravo","center":"Na stred","justify":"Do bloku","alignLeft":"Zarovnať vľavo","alignRight":"Zarovnať vpravo","alignCenter":"Zarovnať na stred","alignTop":"Nahor","alignMiddle":"Na stred","alignBottom":"Dole","alignNone":"Žiadne","invalidValue":"Neplatná hodnota.","invalidHeight":"Výška musí byť číslo.","invalidWidth":"Šírka musí byť číslo.","invalidLength":"Hodnota uvedená v poli \"%1\" musí byť kladné číslo a s platnou mernou jednotkou (%2), alebo bez nej.","invalidCssLength":"Špecifikovaná hodnota pre pole \"%1\" musí byť kladné číslo s alebo bez platnej CSS mernej jednotky (px, %, in, cm, mm, em, ex, pt alebo pc).","invalidHtmlLength":"Špecifikovaná hodnota pre pole \"%1\" musí byť kladné číslo s alebo bez platnej HTML mernej jednotky (px alebo %).","invalidInlineStyle":"Zadaná hodnota pre inline štýl musí pozostávať s jedného, alebo viac dvojíc formátu \"názov: hodnota\", oddelených bodkočiarkou.","cssLengthTooltip":"Vložte číslo pre hodnotu v pixeloch alebo číslo so správnou CSS jednotou (px, %, in, cm, mm, em, ex, pt alebo pc).","unavailable":"%1<span class=\"cke_accessibility\">, nedostupný</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Medzerník","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Klávesová skratka","optionDefault":"Predvolený"},"about":{"copy":"Copyright © $1. 
Všetky práva vyhradené.","dlgTitle":"O aplikácii CKEditor 4","moreInfo":"Pre informácie o licenciách, prosíme, navštívte našu web stránku:"},"basicstyles":{"bold":"Tučné","italic":"Kurzíva","strike":"Prečiarknuté","subscript":"Dolný index","superscript":"Horný index","underline":"Podčiarknuté"},"bidi":{"ltr":"Smer textu zľava doprava","rtl":"Smer textu sprava doľava"},"blockquote":{"toolbar":"Citácia"},"notification":{"closed":"Notifikácia zatvorená."},"toolbar":{"toolbarCollapse":"Zbaliť lištu nástrojov","toolbarExpand":"Rozbaliť lištu nástrojov","toolbarGroups":{"document":"Dokument","clipboard":"Schránka pre kopírovanie/Späť","editing":"Upravovanie","forms":"Formuláre","basicstyles":"Základné štýly","paragraph":"Odsek","links":"Odkazy","insert":"Vložiť","styles":"Štýly","colors":"Farby","tools":"Nástroje"},"toolbars":"Lišty nástrojov editora"},"clipboard":{"copy":"Kopírovať","copyError":"Bezpečnostné nastavenia vášho prehliadača nedovoľujú editoru automaticky spustiť operáciu kopírovania. Použite na to klávesnicu (Ctrl/Cmd+C).","cut":"Vystrihnúť","cutError":"Bezpečnostné nastavenia vášho prehliadača nedovoľujú editoru automaticky spustiť operáciu vystrihnutia. Použite na to klávesnicu (Ctrl/Cmd+X).","paste":"Vložiť","pasteNotification":"Stlačte %1 na vloženie. Váš prehliadač nepodporuje vloženie prostredníctvom tlačidla v nástrojovej lište alebo voľby v kontextovom menu.","pasteArea":"Miesto pre vloženie","pasteMsg":"Vložte svoj obsah do nasledujúcej oblasti a stlačte OK."},"colorbutton":{"auto":"Automaticky","bgColorTitle":"Farba pozadia","colors":{"000":"Čierna","800000":"Gaštanová","8B4513":"Sedlová hnedá","2F4F4F":"Tmavo bridlicovo sivá","008080":"Modrozelená","000080":"Tmavomodrá","4B0082":"Indigo","696969":"Tmavá sivá","B22222":"Ohňová tehlová","A52A2A":"Hnedá","DAA520":"Zlatobyľ","006400":"Tmavá zelená","40E0D0":"Tyrkysová","0000CD":"Stredná modrá","800080":"Purpurová","808080":"Sivá","F00":"Červená","FF8C00":"Tmavá oranžová","FFD700":"Zlatá","008000":"Zelená","0FF":"Azúrová","00F":"Modrá","EE82EE":"Fialová","A9A9A9":"Tmavá sivá","FFA07A":"Svetlá lososová","FFA500":"Oranžová","FFFF00":"Žltá","00FF00":"Vápenná","AFEEEE":"Svetlá tyrkysová","ADD8E6":"Svetlá modrá","DDA0DD":"Slivková","D3D3D3":"Svetlá sivá","FFF0F5":"Levanduľovo červená","FAEBD7":"Antická biela","FFFFE0":"Svetlá žltá","F0FFF0":"Medová","F0FFFF":"Azúrová","F0F8FF":"Alicovo modrá","E6E6FA":"Levanduľová","FFF":"Biela","1ABC9C":"Silno tyrkysová","2ECC71":"Smaragdová","3498DB":"Svetlo modrá","9B59B6":"Ametystová","4E5F70":"Sivo modrá","F1C40F":"Sýto žltá","16A085":"Tmavo tyrkysová","27AE60":"Tmavo smaragdová","2980B9":"Silno modrá","8E44AD":"Tmavo fialová","2C3E50":"Nesýto modrá","F39C12":"Oranžová","E67E22":"Mrkvová","E74C3C":"Bledo červená","ECF0F1":"Svetlá bronzová","95A5A6":"Svetlá sivo-tyrkysová","DDD":"Svetlo sivá","D35400":"Tekvicová","C0392B":"Silno červená","BDC3C7":"Strieborná","7F8C8D":"Sivo tyrkysová","999":"Tmavo sivá"},"more":"Viac farieb...","panelTitle":"Farby","textColorTitle":"Farba textu"},"colordialog":{"clear":"Vyčistiť","highlight":"Zvýrazniť","options":"Možnosti farby","selected":"Vybraná farba","title":"Vybrať farbu"},"templates":{"button":"Šablóny","emptyListMsg":"(Žiadne šablóny nedefinované)","insertOption":"Nahradiť aktuálny obsah","options":"Možnosti šablóny","selectPromptMsg":"Prosím vyberte šablónu na otvorenie v editore","title":"Šablóny obsahu"},"contextmenu":{"options":"Možnosti kontextového menu"},"copyformatting":{"label":"Copy Formatting","notification":{"copied":"Formatting 
copied","applied":"Formatting applied","canceled":"Formatting canceled","failed":"Formatting failed. You cannot apply styles without copying them first."}},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Pomocný titulok","cssClassInputLabel":"Triedy štýlu","edit":"Upraviť Div","inlineStyleInputLabel":"Inline štýl","langDirLTRLabel":"Zľava doprava (LTR)","langDirLabel":"Smer jazyka","langDirRTLLabel":"Zprava doľava (RTL)","languageCodeInputLabel":"Kód jazyka","remove":"Odstrániť Div","styleSelectLabel":"Štýl","title":"Vytvoriť Div kontajner","toolbar":"Vytvoriť Div kontajner"},"elementspath":{"eleLabel":"Cesta prvkov","eleTitle":"%1 prvok"},"filetools":{"loadError":"Počas čítania súboru nastala chyba.","networkError":"Počas nahrávania súboru nastala chyba siete.","httpError404":"Počas nahrávania súboru nastala HTTP chyba (404: Súbor nebol nájdený).","httpError403":"Počas nahrávania súboru nastala HTTP chyba (403: Zakázaný).","httpError":"Počas nahrávania súboru nastala HTTP chyba (error status: %1).","noUrlError":"URL nahrávania nie je definovaný.","responseError":"Nesprávna odpoveď servera."},"find":{"find":"Vyhľadať","findOptions":"Možnosti vyhľadávania","findWhat":"Čo hľadať:","matchCase":"Rozlišovať malé a veľké písmená","matchCyclic":"Po dosiahnutí konca pokračovať od začiatku","matchWord":"Len celé slová","notFoundMsg":"Hľadaný text nebol nájdený.","replace":"Nahradiť","replaceAll":"Nahradiť všetko","replaceSuccessMsg":"%1 výskyt(ov) nahradených.","replaceWith":"Čím nahradiť:","title":"Vyhľadať a nahradiť"},"fakeobjects":{"anchor":"Kotva","flash":"Flash animácia","hiddenfield":"Skryté pole","iframe":"IFrame","unknown":"Neznámy objekt"},"flash":{"access":"Prístup skriptu","accessAlways":"Vždy","accessNever":"Nikdy","accessSameDomain":"Rovnaká doména","alignAbsBottom":"Úplne dole","alignAbsMiddle":"Do stredu","alignBaseline":"Na základnú čiaru","alignTextTop":"Na horný okraj textu","bgcolor":"Farba pozadia","chkFull":"Povoliť zobrazenie na celú obrazovku (fullscreen)","chkLoop":"Opakovanie","chkMenu":"Povoliť Flash Menu","chkPlay":"Automatické prehrávanie","flashvars":"Premenné pre Flash","hSpace":"H-medzera","properties":"Vlastnosti Flashu","propertiesTab":"Vlastnosti","quality":"Kvalita","qualityAutoHigh":"Automaticky vysoká","qualityAutoLow":"Automaticky nízka","qualityBest":"Najlepšia","qualityHigh":"Vysoká","qualityLow":"Nízka","qualityMedium":"Stredná","scale":"Mierka","scaleAll":"Zobraziť všetko","scaleFit":"Roztiahnuť, aby sedelo presne","scaleNoBorder":"Bez okrajov","title":"Vlastnosti Flashu","vSpace":"V-medzera","validateHSpace":"H-medzera musí byť číslo.","validateSrc":"URL nesmie byť prázdne.","validateVSpace":"V-medzera musí byť číslo","windowMode":"Mód okna","windowModeOpaque":"Nepriehľadný","windowModeTransparent":"Priehľadný","windowModeWindow":"Okno"},"font":{"fontSize":{"label":"Veľkosť","voiceLabel":"Veľkosť písma","panelTitle":"Veľkosť písma"},"label":"Písmo","panelTitle":"Názov písma","voiceLabel":"Písmo"},"forms":{"button":{"title":"Vlastnosti tlačidla","text":"Text (Hodnota)","type":"Typ","typeBtn":"Tlačidlo","typeSbm":"Odoslať","typeRst":"Resetovať"},"checkboxAndRadio":{"checkboxTitle":"Vlastnosti zaškrtávacieho políčka","radioTitle":"Vlastnosti prepínača (radio button)","value":"Hodnota","selected":"Vybrané (selected)","required":"Povinný"},"form":{"title":"Vlastnosti formulára","menu":"Vlastnosti formulára","action":"Akcia (action)","method":"Metóda (method)","encoding":"Kódovanie (encoding)"},"hidden":{"title":"Vlastnosti skrytého poľa","name":"Názov 
(name)","value":"Hodnota"},"select":{"title":"Vlastnosti rozbaľovacieho zoznamu","selectInfo":"Informácie o výbere","opAvail":"Dostupné možnosti","value":"Hodnota","size":"Veľkosť","lines":"riadkov","chkMulti":"Povoliť viacnásobný výber","required":"Povinný","opText":"Text","opValue":"Hodnota","btnAdd":"Pridať","btnModify":"Upraviť","btnUp":"Hore","btnDown":"Dole","btnSetValue":"Nastaviť ako vybranú hodnotu","btnDelete":"Vymazať"},"textarea":{"title":"Vlastnosti textovej oblasti (textarea)","cols":"Stĺpcov","rows":"Riadkov"},"textfield":{"title":"Vlastnosti textového poľa","name":"Názov (name)","value":"Hodnota","charWidth":"Šírka poľa (podľa znakov)","maxChars":"Maximálny počet znakov","required":"Povinný","type":"Typ","typeText":"Text","typePass":"Heslo","typeEmail":"Email","typeSearch":"Hľadať","typeTel":"Telefónne číslo","typeUrl":"URL"}},"format":{"label":"Formát","panelTitle":"Odsek","tag_address":"Adresa","tag_div":"Normálny (DIV)","tag_h1":"Nadpis 1","tag_h2":"Nadpis 2","tag_h3":"Nadpis 3","tag_h4":"Nadpis 4","tag_h5":"Nadpis 5","tag_h6":"Nadpis 6","tag_p":"Normálny","tag_pre":"Formátovaný"},"horizontalrule":{"toolbar":"Vložiť vodorovnú čiaru"},"iframe":{"border":"Zobraziť rám frame-u","noUrl":"Prosím, vložte URL iframe","scrolling":"Povoliť skrolovanie","title":"Vlastnosti IFrame","toolbar":"IFrame"},"image":{"alt":"Alternatívny text","border":"Rám (border)","btnUpload":"Odoslať to na server","button2Img":"Chcete zmeniť vybrané obrázkové tlačidlo na jednoduchý obrázok?","hSpace":"H-medzera","img2Button":"Chcete zmeniť vybraný obrázok na obrázkové tlačidlo?","infoTab":"Informácie o obrázku","linkTab":"Odkaz","lockRatio":"Pomer zámky","menu":"Vlastnosti obrázka","resetSize":"Pôvodná veľkosť","title":"Vlastnosti obrázka","titleButton":"Vlastnosti obrázkového tlačidla","upload":"Nahrať","urlMissing":"Chýba URL zdroja obrázka.","vSpace":"V-medzera","validateBorder":"Rám (border) musí byť celé číslo.","validateHSpace":"H-medzera musí byť celé číslo.","validateVSpace":"V-medzera musí byť celé číslo."},"indent":{"indent":"Zväčšiť odsadenie","outdent":"Zmenšiť odsadenie"},"smiley":{"options":"Možnosti smajlíkov","title":"Vložiť smajlíka","toolbar":"Smajlíky"},"language":{"button":"Nastaviť jazyk","remove":"Odstrániť jazyk"},"link":{"acccessKey":"Prístupový kľúč","advanced":"Rozšírené","advisoryContentType":"Pomocný typ obsahu","advisoryTitle":"Pomocný titulok","anchor":{"toolbar":"Kotva","menu":"Upraviť kotvu","title":"Vlastnosti kotvy","name":"Názov kotvy","errorName":"Zadajte prosím názov kotvy","remove":"Odstrániť kotvu"},"anchorId":"Podľa Id objektu","anchorName":"Podľa mena kotvy","charset":"Priradená znaková sada","cssClasses":"Triedy štýlu","download":"Vynútené sťahovanie.","displayText":"Zobraziť text","emailAddress":"E-Mailová adresa","emailBody":"Telo správy","emailSubject":"Predmet správy","id":"Id","info":"Informácie o odkaze","langCode":"Orientácia jazyka","langDir":"Orientácia jazyka","langDirLTR":"Zľava doprava (LTR)","langDirRTL":"Sprava doľava (RTL)","menu":"Upraviť odkaz","name":"Názov","noAnchors":"(V dokumente nie sú dostupné žiadne kotvy)","noEmail":"Zadajte prosím e-mailovú adresu","noUrl":"Zadajte prosím URL odkazu","noTel":"Zadajte prosím telefónne číslo","other":"<iný>","phoneNumber":"Telefónne číslo","popupDependent":"Závislosť (Netscape)","popupFeatures":"Vlastnosti vyskakovacieho okna","popupFullScreen":"Celá obrazovka (IE)","popupLeft":"Ľavý okraj","popupLocationBar":"Panel umiestnenia (location bar)","popupMenuBar":"Panel ponuky (menu 
bar)","popupResizable":"Meniteľná veľkosť (resizable)","popupScrollBars":"Posuvníky (scroll bars)","popupStatusBar":"Stavový riadok (status bar)","popupToolbar":"Panel nástrojov (toolbar)","popupTop":"Horný okraj","rel":"Vzťah (rel)","selectAnchor":"Vybrať kotvu","styles":"Štýl","tabIndex":"Poradie prvku (tab index)","target":"Cieľ","targetFrame":"<rámec>","targetFrameName":"Názov rámu cieľa","targetPopup":"<vyskakovacie okno>","targetPopupName":"Názov vyskakovacieho okna","title":"Odkaz","toAnchor":"Odkaz na kotvu v texte","toEmail":"E-mail","toUrl":"URL","toPhone":"Telefón","toolbar":"Odkaz","type":"Typ odkazu","unlink":"Odstrániť odkaz","upload":"Nahrať"},"list":{"bulletedlist":"Vložiť/odstrániť zoznam s odrážkami","numberedlist":"Vložiť/odstrániť číslovaný zoznam"},"liststyle":{"bulletedTitle":"Vlastnosti odrážkového zoznamu","circle":"Kruh","decimal":"Číselné (1, 2, 3, atď.)","disc":"Disk","lowerAlpha":"Malé latinské (a, b, c, d, e, atď.)","lowerRoman":"Malé rímske (i, ii, iii, iv, v, atď.)","none":"Nič","notset":"<nenastavené>","numberedTitle":"Vlastnosti číselného zoznamu","square":"Štvorec","start":"Začiatok","type":"Typ","upperAlpha":"Veľké latinské (A, B, C, D, E, atď.)","upperRoman":"Veľké rímske (I, II, III, IV, V, atď.)","validateStartNumber":"Začiatočné číslo číselného zoznamu musí byť celé číslo."},"magicline":{"title":"Odsek vložiť sem"},"maximize":{"maximize":"Maximalizovať","minimize":"Minimalizovať"},"newpage":{"toolbar":"Nová stránka"},"pagebreak":{"alt":"Zalomenie strany","toolbar":"Vložiť oddeľovač stránky pre tlač"},"pastetext":{"button":"Vložiť ako čistý text","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Vložiť ako čistý text"},"pastefromword":{"confirmCleanup":"Zdá sa, že vkladaný text pochádza z programu MS Word. 
Chcete ho pred vkladaním automaticky vyčistiť?","error":"Kvôli internej chybe nebolo možné vložené dáta vyčistiť","title":"Vložiť z Wordu","toolbar":"Vložiť z Wordu"},"preview":{"preview":"Náhľad"},"print":{"toolbar":"Tlač"},"removeformat":{"toolbar":"Odstrániť formátovanie"},"save":{"toolbar":"Uložiť"},"selectall":{"toolbar":"Vybrať všetko"},"showblocks":{"toolbar":"Ukázať bloky"},"sourcearea":{"toolbar":"Zdroj"},"specialchar":{"options":"Možnosti špeciálneho znaku","title":"Výber špeciálneho znaku","toolbar":"Vložiť špeciálny znak"},"scayt":{"btn_about":"O KPPP (Kontrola pravopisu počas písania)","btn_dictionaries":"Slovníky","btn_disable":"Zakázať KPPP (Kontrola pravopisu počas písania)","btn_enable":"Povoliť KPPP (Kontrola pravopisu počas písania)","btn_langs":"Jazyky","btn_options":"Možnosti","text_title":"Kontrola pravopisu počas písania"},"stylescombo":{"label":"Štýly","panelTitle":"Formátovanie štýlov","panelTitle1":"Štýly bloku","panelTitle2":"Znakové štýly","panelTitle3":"Štýly objektu"},"table":{"border":"Šírka orámovania","caption":"Popis","cell":{"menu":"Bunka","insertBefore":"Vložiť bunku pred","insertAfter":"Vložiť bunku za","deleteCell":"Vymazať bunky","merge":"Zlúčiť bunky","mergeRight":"Zlúčiť doprava","mergeDown":"Zlúčiť dole","splitHorizontal":"Rozdeliť bunky horizontálne","splitVertical":"Rozdeliť bunky vertikálne","title":"Vlastnosti bunky","cellType":"Typ bunky","rowSpan":"Rozsah riadkov","colSpan":"Rozsah stĺpcov","wordWrap":"Zalamovanie riadkov","hAlign":"Horizontálne zarovnanie","vAlign":"Vertikálne zarovnanie","alignBaseline":"Základná čiara (baseline)","bgColor":"Farba pozadia","borderColor":"Farba orámovania","data":"Dáta","header":"Hlavička","yes":"Áno","no":"Nie","invalidWidth":"Šírka bunky musí byť číslo.","invalidHeight":"Výška bunky musí byť číslo.","invalidRowSpan":"Rozsah riadkov musí byť celé číslo.","invalidColSpan":"Rozsah stĺpcov musí byť celé číslo.","chooseColor":"Vybrať"},"cellPad":"Odsadenie obsahu (cell padding)","cellSpace":"Vzdialenosť buniek (cell spacing)","column":{"menu":"Stĺpec","insertBefore":"Vložiť stĺpec pred","insertAfter":"Vložiť stĺpec po","deleteColumn":"Zmazať stĺpce"},"columns":"Stĺpce","deleteTable":"Vymazať tabuľku","headers":"Hlavička","headersBoth":"Obe","headersColumn":"Prvý stĺpec","headersNone":"Žiadne","headersRow":"Prvý riadok","heightUnit":"height unit","invalidBorder":"Šírka orámovania musí byť číslo.","invalidCellPadding":"Odsadenie v bunkách (cell padding) musí byť kladné číslo.","invalidCellSpacing":"Medzera mädzi bunkami (cell spacing) musí byť kladné číslo.","invalidCols":"Počet stĺpcov musí byť číslo väčšie ako 0.","invalidHeight":"Výška tabuľky musí byť číslo.","invalidRows":"Počet riadkov musí byť číslo väčšie ako 0.","invalidWidth":"Širka tabuľky musí byť číslo.","menu":"Vlastnosti tabuľky","row":{"menu":"Riadok","insertBefore":"Vložiť riadok pred","insertAfter":"Vložiť riadok po","deleteRow":"Vymazať riadky"},"rows":"Riadky","summary":"Prehľad","title":"Vlastnosti tabuľky","toolbar":"Tabuľka","widthPc":"percent","widthPx":"pixelov","widthUnit":"jednotka šírky"},"undo":{"redo":"Znovu","undo":"Späť"},"widget":{"move":"Kliknite a potiahnite pre presunutie","label":"%1 widget"},"uploadwidget":{"abort":"Nahrávanie zrušené používateľom.","doneOne":"Súbor úspešne nahraný.","doneMany":"Úspešne nahraných %1 súborov.","uploadOne":"Nahrávanie súboru ({percentage}%)...","uploadMany":"Nahrávanie súborov, {current} z {max} hotovo ({percentage}%)..."},"wsc":{"btnIgnore":"Ignorovať","btnIgnoreAll":"Ignorovať 
všetko","btnReplace":"Prepísat","btnReplaceAll":"Prepísat všetko","btnUndo":"Späť","changeTo":"Zmeniť na","errorLoading":"Chyba pri načítaní slovníka z adresy: %s.","ieSpellDownload":"Kontrola pravopisu nie je naištalovaná. Chcete ju teraz stiahnuť?","manyChanges":"Kontrola pravopisu dokončená: Bolo zmenených %1 slov","noChanges":"Kontrola pravopisu dokončená: Neboli zmenené žiadne slová","noMispell":"Kontrola pravopisu dokončená: Neboli nájdené žiadne chyby pravopisu","noSuggestions":"- Žiadny návrh -","notAvailable":"Prepáčte, ale služba je momentálne nedostupná.","notInDic":"Nie je v slovníku","oneChange":"Kontrola pravopisu dokončená: Bolo zmenené jedno slovo","progress":"Prebieha kontrola pravopisu...","title":"Skontrolovať pravopis","toolbar":"Kontrola pravopisu"}}; | PypiClean |
/FORD-6.2.5-py3-none-any.whl/ford/tipuesearch/tipuesearch.min.js | (function($){$.fn.tipuesearch=function(options){var set=$.extend({"show":7,"newWindow":false,"showURL":true,"minimumLength":3,"descriptiveWords":25,"highlightTerms":true,"highlightEveryTerm":false,"mode":"static","liveDescription":"*","liveContent":"*","contentLocation":"tipuesearch/tipuesearch_content.json"},options);return this.each(function(){var tipuesearch_in={pages:[]};$.ajaxSetup({async:false});if(set.mode=="live")for(var i=0;i<tipuesearch_pages.length;i++)$.get(tipuesearch_pages[i],"",function(html){var cont=
$(set.liveContent,html).text();cont=cont.replace(/\s+/g," ");var desc=$(set.liveDescription,html).text();desc=desc.replace(/\s+/g," ");var t_1=html.toLowerCase().indexOf("<title>");var t_2=html.toLowerCase().indexOf("</title>",t_1+7);if(t_1!=-1&&t_2!=-1)var tit=html.slice(t_1+7,t_2);else var tit="No title";tipuesearch_in.pages.push({"title":tit,"text":desc,"tags":cont,"loc":tipuesearch_pages[i]})});if(set.mode=="json")$.getJSON(set.contentLocation,function(json){tipuesearch_in=$.extend({},json)});
if(set.mode=="static")tipuesearch_in=$.extend({},tipuesearch);var tipue_search_w="";if(set.newWindow)tipue_search_w=' target="_blank"';function getURLP(name){return decodeURIComponent(((new RegExp("[?|&]"+name+"="+"([^&;]+?)(&|#|;|$)")).exec(location.search)||[,""])[1].replace(/\+/g,"%20"))||null}if(getURLP("q")){$("#tipue_search_input").val(getURLP("q"));getTipueSearch(0,true)}$(this).keyup(function(event){if(event.keyCode=="13")getTipueSearch(0,true)});function getTipueSearch(start,replace){$("#tipue_search_content").hide();
var out="";var results="";var show_replace=false;var show_stop=false;var standard=true;var c=0;found=new Array;var d=$("#tipue_search_input").val().toLowerCase();d=$.trim(d);if(d.match('^"')&&d.match('"$')||d.match("^'")&&d.match("'$"))standard=false;if(standard){var d_w=d.split(" ");d="";for(var i=0;i<d_w.length;i++){var a_w=true;for(var f=0;f<tipuesearch_stop_words.length;f++)if(d_w[i]==tipuesearch_stop_words[f]){a_w=false;show_stop=true}if(a_w)d=d+" "+d_w[i]}d=$.trim(d);d_w=d.split(" ")}else d=
d.substring(1,d.length-1);if(d.length>=set.minimumLength){if(standard){if(replace){var d_r=d;for(var i=0;i<d_w.length;i++)for(var f=0;f<tipuesearch_replace.words.length;f++)if(d_w[i]==tipuesearch_replace.words[f].word){d=d.replace(d_w[i],tipuesearch_replace.words[f].replace_with);show_replace=true}d_w=d.split(" ")}var d_t=d;for(var i=0;i<d_w.length;i++)for(var f=0;f<tipuesearch_stem.words.length;f++)if(d_w[i]==tipuesearch_stem.words[f].word)d_t=d_t+" "+tipuesearch_stem.words[f].stem;d_w=d_t.split(" ");
for(var i=0;i<tipuesearch_in.pages.length;i++){var score=1E9;var s_t=tipuesearch_in.pages[i].text;for(var f=0;f<d_w.length;f++){var pat=new RegExp(d_w[f],"i");if(tipuesearch_in.pages[i].title.search(pat)!=-1)score-=2E5-i;if(tipuesearch_in.pages[i].text.search(pat)!=-1)score-=15E4-i;if(set.highlightTerms){if(set.highlightEveryTerm)var patr=new RegExp("("+d_w[f]+")","gi");else var patr=new RegExp("("+d_w[f]+")","i");s_t=s_t.replace(patr,'<span class="h01">$1</span>')}if(tipuesearch_in.pages[i].tags.search(pat)!=
-1)score-=1E5-i;if(d_w[f].match("^-")){pat=new RegExp(d_w[f].substring(1),"i");if(tipuesearch_in.pages[i].title.search(pat)!=-1||tipuesearch_in.pages[i].text.search(pat)!=-1||tipuesearch_in.pages[i].tags.search(pat)!=-1)score=1E9}}if(score<1E9)found[c++]=score+"^"+tipuesearch_in.pages[i].title+"^"+s_t+"^"+tipuesearch_in.pages[i].loc}}else for(var i=0;i<tipuesearch_in.pages.length;i++){var score=1E9;var s_t=tipuesearch_in.pages[i].text;var pat=new RegExp(d,"i");if(tipuesearch_in.pages[i].title.search(pat)!=
-1)score-=2E5-i;if(tipuesearch_in.pages[i].text.search(pat)!=-1)score-=15E4-i;if(set.highlightTerms){if(set.highlightEveryTerm)var patr=new RegExp("("+d+")","gi");else var patr=new RegExp("("+d+")","i");s_t=s_t.replace(patr,'<span class="h01">$1</span>')}if(tipuesearch_in.pages[i].tags.search(pat)!=-1)score-=1E5-i;if(score<1E9)found[c++]=score+"^"+tipuesearch_in.pages[i].title+"^"+s_t+"^"+tipuesearch_in.pages[i].loc}if(c!=0){if(show_replace==1){out+='<div id="tipue_search_warning_head">Showing results for '+
d+"</div>";out+='<div id="tipue_search_warning">Search instead for <a href="javascript:void(0)" id="tipue_search_replaced">'+d_r+"</a></div>"}if(c==1)out+='<div id="tipue_search_results_count">1 result</div>';else{c_c=c.toString().replace(/\B(?=(\d{3})+(?!\d))/g,",");out+='<div id="tipue_search_results_count">'+c_c+" results</div>"}found.sort();var l_o=0;for(var i=0;i<found.length;i++){var fo=found[i].split("^");if(l_o>=start&&l_o<set.show+start){out+='<div class="tipue_search_content_title"><a href="'+
fo[3]+'"'+tipue_search_w+">"+fo[1]+"</a></div>";if(set.showURL)out+='<div class="tipue_search_content_url"><a href="'+fo[3]+'"'+tipue_search_w+">"+fo[3]+"</a></div>";var t=fo[2];var t_d="";var t_w=t.split(" ");if(t_w.length<set.descriptiveWords)t_d=t;else for(var f=0;f<set.descriptiveWords;f++)t_d+=t_w[f]+" ";t_d=$.trim(t_d);if(t_d.charAt(t_d.length-1)!=".")t_d+=" ...";out+='<div class="tipue_search_content_text">'+t_d+"</div>"}l_o++}if(c>set.show){var pages=Math.ceil(c/set.show);var page=start/set.show;
out+='<div id="tipue_search_foot"><ul id="tipue_search_foot_boxes">';if(start>0)out+='<li><a href="javascript:void(0)" class="tipue_search_foot_box" id="'+(start-set.show)+"_"+replace+'">Prev</a></li>';if(page<=2){var p_b=pages;if(pages>3)p_b=3;for(var f=0;f<p_b;f++)if(f==page)out+='<li class="current">'+(f+1)+"</li>";else out+='<li><a href="javascript:void(0)" class="tipue_search_foot_box" id="'+f*set.show+"_"+replace+'">'+(f+1)+"</a></li>"}else{var p_b=page+2;if(p_b>pages)p_b=pages;for(var f=page-
1;f<p_b;f++)if(f==page)out+='<li class="current">'+(f+1)+"</li>";else out+='<li><a href="javascript:void(0)" class="tipue_search_foot_box" id="'+f*set.show+"_"+replace+'">'+(f+1)+"</a></li>"}if(page+1!=pages)out+='<li><a href="javascript:void(0)" class="tipue_search_foot_box" id="'+(start+set.show)+"_"+replace+'">Next</a></li>';out+="</ul></div>"}}else out+='<div id="tipue_search_warning_head">Nothing found</div>'}else if(show_stop)out+='<div id="tipue_search_warning_head">Nothing found</div><div id="tipue_search_warning">Common words are largely ignored</div>';
else{out+='<div id="tipue_search_warning_head">Search too short</div>';if(set.minimumLength==1)out+='<div id="tipue_search_warning">Should be one character or more</div>';else out+='<div id="tipue_search_warning">Should be '+set.minimumLength+" characters or more</div>"}$("#tipue_search_content").html(out);$("#tipue_search_content").slideDown(200);$("#tipue_search_replaced").click(function(){getTipueSearch(0,false)});$(".tipue_search_foot_box").click(function(){var id_v=$(this).attr("id");var id_a=
id_v.split("_");getTipueSearch(parseInt(id_a[0]),id_a[1])})}})}})(jQuery); | PypiClean |
/Halocoin-0.1.0.4.tar.gz/Halocoin-0.1.0.4/halocoin/static/dashboard/js/arrive.min.js | var Arrive=function(e,t,n){"use strict";function r(e,t,n){l.addMethod(t,n,e.unbindEvent),l.addMethod(t,n,e.unbindEventWithSelectorOrCallback),l.addMethod(t,n,e.unbindEventWithSelectorAndCallback)}function i(e){e.arrive=f.bindEvent,r(f,e,"unbindArrive"),e.leave=d.bindEvent,r(d,e,"unbindLeave")}if(e.MutationObserver&&"undefined"!=typeof HTMLElement){var o=0,l=function(){var t=HTMLElement.prototype.matches||HTMLElement.prototype.webkitMatchesSelector||HTMLElement.prototype.mozMatchesSelector||HTMLElement.prototype.msMatchesSelector;return{matchesSelector:function(e,n){return e instanceof HTMLElement&&t.call(e,n)},addMethod:function(e,t,r){var i=e[t];e[t]=function(){return r.length==arguments.length?r.apply(this,arguments):"function"==typeof i?i.apply(this,arguments):n}},callCallbacks:function(e,t){t&&t.options.onceOnly&&1==t.firedElems.length&&(e=[e[0]]);for(var n,r=0;n=e[r];r++)n&&n.callback&&n.callback.call(n.elem,n.elem);t&&t.options.onceOnly&&1==t.firedElems.length&&t.me.unbindEventWithSelectorAndCallback.call(t.target,t.selector,t.callback)},checkChildNodesRecursively:function(e,t,n,r){for(var i,o=0;i=e[o];o++)n(i,t,r)&&r.push({callback:t.callback,elem:i}),i.childNodes.length>0&&l.checkChildNodesRecursively(i.childNodes,t,n,r)},mergeArrays:function(e,t){var n,r={};for(n in e)e.hasOwnProperty(n)&&(r[n]=e[n]);for(n in t)t.hasOwnProperty(n)&&(r[n]=t[n]);return r},toElementsArray:function(t){return n===t||"number"==typeof t.length&&t!==e||(t=[t]),t}}}(),c=function(){var e=function(){this._eventsBucket=[],this._beforeAdding=null,this._beforeRemoving=null};return e.prototype.addEvent=function(e,t,n,r){var i={target:e,selector:t,options:n,callback:r,firedElems:[]};return this._beforeAdding&&this._beforeAdding(i),this._eventsBucket.push(i),i},e.prototype.removeEvent=function(e){for(var t,n=this._eventsBucket.length-1;t=this._eventsBucket[n];n--)if(e(t)){this._beforeRemoving&&this._beforeRemoving(t);var r=this._eventsBucket.splice(n,1);r&&r.length&&(r[0].callback=null)}},e.prototype.beforeAdding=function(e){this._beforeAdding=e},e.prototype.beforeRemoving=function(e){this._beforeRemoving=e},e}(),a=function(t,r){var i=new c,o=this,a={fireOnAttributesModification:!1};return i.beforeAdding(function(n){var i,l=n.target;(l===e.document||l===e)&&(l=document.getElementsByTagName("html")[0]),i=new MutationObserver(function(e){r.call(this,e,n)});var c=t(n.options);i.observe(l,c),n.observer=i,n.me=o}),i.beforeRemoving(function(e){e.observer.disconnect()}),this.bindEvent=function(e,t,n){t=l.mergeArrays(a,t);for(var r=l.toElementsArray(this),o=0;o<r.length;o++)i.addEvent(r[o],e,t,n)},this.unbindEvent=function(){var e=l.toElementsArray(this);i.removeEvent(function(t){for(var r=0;r<e.length;r++)if(this===n||t.target===e[r])return!0;return!1})},this.unbindEventWithSelectorOrCallback=function(e){var t,r=l.toElementsArray(this),o=e;t="function"==typeof e?function(e){for(var t=0;t<r.length;t++)if((this===n||e.target===r[t])&&e.callback===o)return!0;return!1}:function(t){for(var i=0;i<r.length;i++)if((this===n||t.target===r[i])&&t.selector===e)return!0;return!1},i.removeEvent(t)},this.unbindEventWithSelectorAndCallback=function(e,t){var r=l.toElementsArray(this);i.removeEvent(function(i){for(var o=0;o<r.length;o++)if((this===n||i.target===r[o])&&i.selector===e&&i.callback===t)return!0;return!1})},this},s=function(){function e(e){var t={attributes:!1,childList:!0,subtree:!0};return 
e.fireOnAttributesModification&&(t.attributes=!0),t}function t(e,t){e.forEach(function(e){var n=e.addedNodes,i=e.target,o=[];null!==n&&n.length>0?l.checkChildNodesRecursively(n,t,r,o):"attributes"===e.type&&r(i,t,o)&&o.push({callback:t.callback,elem:i}),l.callCallbacks(o,t)})}function r(e,t){return l.matchesSelector(e,t.selector)&&(e._id===n&&(e._id=o++),-1==t.firedElems.indexOf(e._id))?(t.firedElems.push(e._id),!0):!1}var i={fireOnAttributesModification:!1,onceOnly:!1,existing:!1};f=new a(e,t);var c=f.bindEvent;return f.bindEvent=function(e,t,r){n===r?(r=t,t=i):t=l.mergeArrays(i,t);var o=l.toElementsArray(this);if(t.existing){for(var a=[],s=0;s<o.length;s++)for(var u=o[s].querySelectorAll(e),f=0;f<u.length;f++)a.push({callback:r,elem:u[f]});if(t.onceOnly&&a.length)return r.call(a[0].elem,a[0].elem);setTimeout(l.callCallbacks,1,a)}c.call(this,e,t,r)},f},u=function(){function e(){var e={childList:!0,subtree:!0};return e}function t(e,t){e.forEach(function(e){var n=e.removedNodes,i=[];null!==n&&n.length>0&&l.checkChildNodesRecursively(n,t,r,i),l.callCallbacks(i,t)})}function r(e,t){return l.matchesSelector(e,t.selector)}var i={};d=new a(e,t);var o=d.bindEvent;return d.bindEvent=function(e,t,r){n===r?(r=t,t=i):t=l.mergeArrays(i,t),o.call(this,e,t,r)},d},f=new s,d=new u;t&&i(t.fn),i(HTMLElement.prototype),i(NodeList.prototype),i(HTMLCollection.prototype),i(HTMLDocument.prototype),i(Window.prototype);var h={};return r(f,h,"unbindAllArrive"),r(d,h,"unbindAllLeave"),h}}(window,"undefined"==typeof jQuery?null:jQuery,void 0); | PypiClean |
/AdyanUtils-0.7.2.tar.gz/AdyanUtils-0.7.2/Utils/crawler/middleware.py |
import logging
import random
import time
from requests import sessions
from scrapy import signals
from scrapy.core.downloader.handlers.http11 import TunnelError, TimeoutError
from scrapy.http import TextResponse
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.error import ConnectionRefusedError
from w3lib.http import basic_auth_header
from Utils.crawler.proxy import GetProxy
class Proxy(object):
def __init__(self, settings, spider):
self.settings = settings
self.ip_list = []
try:
self.proxy = spider.proxy
if self.proxy.get("name"):
self.proxies = GetProxy(self.proxy)
except:
self.proxy = {}
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings, crawler.spider)
def process_response(self, request, response, spider):
if self.settings.getbool('PROXY', False):
start_time = request.meta.get('_start_time', time.time())
if self.settings.getbool('LOGGER_PROXY', False):
logging.info(
                    f'[proxy {request.meta["proxy"][8:]} took {time.time() - start_time}s] {request.url}'
)
del request.meta["proxy"]
return response
def process_request(self, request, spider):
if spider.proxy.get("name") and self.settings.getbool('PROXY', False):
request.meta.update({'_start_time': time.time()})
if isinstance(self.ip_list, list):
if len(self.ip_list) < 5:
while True:
proxies = self.proxies.get_proxies()
if proxies:
break
self.ip_list = proxies
request.meta['download_timeout'] = 5
ip_raw = random.choice(self.ip_list)
self.ip_list.remove(ip_raw)
request.meta["proxy"] = ip_raw
else:
                logging.info('proxy list is empty')
if spider.proxy.get("username") and self.settings.getbool('PROXY', False):
request.meta['proxy'] = f"http://{self.proxy.get('proxies')}"
request.headers['Proxy-Authorization'] = basic_auth_header(
self.proxy.get("username"),
self.proxy.get("password")
)
def process_exception(self, request, exception, spider):
if isinstance(exception, (TunnelError, TimeoutError, ConnectionRefusedError)):
return request
class RequestsDownloader(object):
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
@defer.inlineCallbacks
def process_request(self, request, spider):
kwargs = request.meta.get("params")
if kwargs:
container = []
out = defer.Deferred()
reactor.callInThread(self._get_res, request, container, out, kwargs)
yield out
if len(container) > 0:
defer.returnValue(container[0])
def _get_res(self, request, container, out, kwargs):
try:
url = request.url
method = kwargs.pop('method')
r = sessions.Session().request(method=method, url=url, **kwargs)
r.encoding = request.encoding
text = r.content
encoding = None
response = TextResponse(url=r.url, encoding=encoding, body=text, request=request)
container.append(response)
reactor.callFromThread(out.callback, response)
except Exception as e:
err = str(type(e)) + ' ' + str(e)
reactor.callFromThread(out.errback, ValueError(err))
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name) | PypiClean |
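A minimal wiring sketch for the two downloader middlewares above. The PROXY/LOGGER_PROXY settings and the spider-level `proxy` dict are the switches the middleware code actually reads; the module path, the priority numbers, and the pool name are assumptions for illustration only.

# settings.py of a hypothetical Scrapy project
PROXY = True         # enables Proxy.process_request / process_response
LOGGER_PROXY = True  # logs per-request proxy timing

DOWNLOADER_MIDDLEWARES = {
    "Utils.crawler.middleware.Proxy": 543,              # assumed import path
    "Utils.crawler.middleware.RequestsDownloader": 544,
}

# spider.py -- Proxy reads spider.proxy: "name" selects the GetProxy pool,
# while "username"/"password" select the authenticated-proxy branch instead.
import scrapy

class DemoSpider(scrapy.Spider):
    name = "demo"
    proxy = {"name": "some_pool"}  # hypothetical pool identifier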
/FAdo3-1.0.tar.gz/FAdo3-1.0/FAdo/comboperations.py | from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import *
from .fa import *
from .common import *
def starConcat(fa1, fa2, strict=False):
""" Star of concatenation of two languages: (L1.L2)*
:param fa1: first automaton
:param fa2: second automaton
    :param strict: should the alphabets necessarily be equal?
:type strict: Boolean
:type fa1: DFA
:type fa2: DFA
:rtype: DFA
.. seealso::
Yuan Gao, Kai Salomaa, and Sheng Yu. 'The state complexity of two combined operations: Star of catenation and
star of reversal'. Fundamenta Informaticae, 83:75–89, Jan 2008."""
if strict and fa1.Sigma != fa2.Sigma:
raise DFAdifferentSigma
NSigma = fa1.Sigma.union(fa2.Sigma)
d1, d2 = fa1.dup(), fa2.dup()
d1.setSigma(NSigma)
d2.setSigma(NSigma)
d1.complete()
d2.complete()
if len(d1.States) > 1 and len(d2.States) == 1:
if d2.finalP(d2.Initial):
new = d1.addState()
iold = d1.Initial
d1.setInitial(new)
d1.addFinal(new)
for sym in d1.Sigma:
d1.addTransition(new, sym, d1.delta[iold][sym])
for s in d1.Final:
d1.delta[s][sym] = s
return d1
c = DFA()
c.setSigma(d1.Sigma)
s0, s1 = c.addState(0), c.addState(1)
c.setInitial(s0)
c.addFinal(s0)
for sym in c.Sigma:
c.addTransition(s0, sym, s1)
c.addTransition(s1, sym, s1)
return c
if len(d2.States) > 1 and len(d1.States) == 1:
if d1.finalP(d1.Initial):
c = NFA()
c.setSigma(d2.Sigma)
c.States = d2.States[:]
p1 = c.addState(0)
p2 = c.addState(1)
c.addInitial(p1)
c.setFinal(list(d2.Final))
for sym in c.Sigma:
c.addTransition(p1, sym, d2.delta[d2.Initial][sym])
c.addTransition(p1, sym, p2)
c.addTransition(p2, sym, d2.delta[d2.Initial][sym])
c.addTransition(p2, sym, p2)
for s in d2.States:
c.addTransition(s, sym, d2.delta[s][sym])
return c.toDFA()
# Epsilon automaton
c = DFA()
c.setSigma(d1.Sigma)
s0, s1 = c.addState(0), c.addState(1)
c.setInitial(s0)
c.addFinal(s0)
for sym in c.Sigma:
c.addTransition(s0, sym, s1)
c.addTransition(s1, sym, s1)
return c
c = DFA()
c.setSigma(d1.Sigma)
lStates = []
i = c.addState("initial")
c.setInitial(i)
c.addFinal(i)
lStates.append(i)
for sym in c.Sigma:
s1 = {d1.evalSymbol(d1.Initial, sym)}
s2 = set([])
#Z1
if s1 & d1.Final != set([]):
s2.add(d2.Initial)
if d2.finalP(d2.Initial):
s1.add(d1.Initial)
if d1.finalP(d1.Initial):
s2.add(d2.evalSymbol(d2.Initial, sym))
#Z2
if s2 & d2.Final != set([]):
s1.add(d1.Initial)
s2.add(d2.Initial)
stn = (s1, s2)
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
if stn[1] & d2.Final != set([]):
c.addFinal(new)
else:
new = c.stateIndex(stn)
c.addTransition(i, sym, new)
if len(c.States) == 1:
return c
j = 1
while True:
stu = lStates[j]
s = c.stateIndex(stu)
for sym in c.Sigma:
stn = (d1.evalSymbolL(stu[0], sym),
d2.evalSymbolL(stu[1], sym))
if stn[0] & d1.Final != set([]):
stn[1].add(d2.Initial)
if d2.finalP(d2.Initial):
stn[0].add(d1.Initial)
if stn[1] & d2.Final != set([]):
stn[0].add(d1.Initial)
if d1.finalP(d1.Initial):
stn[1].add(d2.Initial)
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
if stn[1] & d2.Final != set([]):
c.addFinal(new)
else:
new = c.stateIndex(stn)
c.addTransition(s, sym, new)
if j == len(lStates) - 1:
break
else:
j += 1
return c
def concatWStar(fa1, fa2, strict=False):
"""Concatenation combined with star: (L1.L2*)
:param fa1: first automaton
:param fa2: second automaton
    :param strict: should the alphabets necessarily be equal?
:type strict: bool
:type fa1: DFA
:type fa2: DFA
:rtype: DFA
.. seealso::
Bo Cui, Yuan Gao, Lila Kari, and Sheng Yu. 'State complexity of two combined operations: Reversal-catenation
and star-catenation'. CoRR, abs/1006.4646, 2010."""
if len(fa1.States) == 0 or len(fa1.Final) == 0 or len(fa2.States) == 0 or len(fa2.Final) == 0 or \
(len(fa1.States) == 1 and len(fa2.States) > 0):
return fa1
if strict and fa1.Sigma != fa2.Sigma:
raise DFAdifferentSigma
NSigma = fa1.Sigma.union(fa2.Sigma)
d1, d2 = fa1.dup(), fa2.dup()
d1.setSigma(NSigma)
d2.setSigma(NSigma)
d1.complete()
d2.complete()
if len(d2.Final) == 1 and d2.finalP(d2.Initial):
return d1.concat(d2)
c = DFA()
c.setSigma(d1.Sigma)
lStates = []
if d1.finalP(d1.Initial):
s2 = 1
else:
s2 = 0
i = (d1.Initial, s2, set([]))
lStates.append(i)
j = c.addState(i)
c.setInitial(j)
if s2 == 1:
c.addFinal(j)
F0 = d2.Final - {d2.Initial}
while True:
stu = lStates[j]
s = c.stateIndex(stu)
for sym in c.Sigma:
s1 = d1.evalSymbol(stu[0], sym)
if d1.finalP(s1):
s2 = 1
else:
s2 = 0
if stu[1] == 1:
s3 = {d2.evalSymbol(d2.Initial, sym)}
# correction
if s3 & F0 != set([]):
s3.add(d2.Initial)
else:
s3 = set([])
s4 = d2.evalSymbolL(stu[2], sym)
if s4 & F0 != set([]):
s4.add(d2.Initial)
stn = (s1, s2, s3.union(s4))
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
if stn[1] == 1 or (d2.Final & stn[2] != set([])):
c.addFinal(new)
else:
new = c.stateIndex(stn)
c.addTransition(s, sym, new)
if j == len(lStates) - 1:
break
else:
j += 1
return c
def starWConcat(fa1, fa2, strict=False):
"""Star combined with concatenation: (L1*.L2)
:param fa1: first automaton
:param fa2: second automaton
    :param strict: should the alphabets necessarily be equal?
:type strict: Boolean
:type fa1: DFA
:type fa2: DFA
:rtype: DFA
.. seealso::
Bo Cui, Yuan Gao, Lila Kari, and Sheng Yu. 'State complexity of catenation combined with star and reversal'.
CoRR, abs/1008.1648, 2010"""
if len(fa1.States) == 0 or len(fa1.Final) == 0 or len(fa2.States) == 0 or len(fa2.Final) == 0 \
or (len(fa2.States) == 1 and len(fa1.States) > 0):
return fa2
if strict and fa1.Sigma != fa2.Sigma:
raise DFAdifferentSigma
NSigma = fa1.Sigma.union(fa2.Sigma)
d1, d2 = fa1.dup(), fa2.dup()
d1.setSigma(NSigma)
d2.setSigma(NSigma)
d1.complete()
d2.complete()
c = DFA()
c.setSigma(d1.Sigma)
if len(d1.Final) == 1 and d1.finalP(d1.Initial):
i = (d1.Initial, {d2.Initial})
j = c.addState(i)
c.setInitial(j)
if i[1] & d2.Final != set([]):
c.addFinal(j)
while True:
s = c.States[j]
for sym in c.Sigma:
stn = (d1.evalSymbol(s[0], sym), d2.evalSymbolL(s[1], sym))
if d1.initialP(s[0]):
stn[1].add(d2.Initial)
try:
new = c.addState(stn)
if stn[1] & d2.Final != set([]):
c.addFinal(new)
except DuplicateName:
new = c.stateIndex(stn)
c.addTransition(s, sym, new)
if j == len(c.States) - 1:
break
else:
j += 1
return c
# |Final1|>1
j = c.addState(({d1.Initial}, {d2.Initial}))
c.setInitial(j)
if d2.finalP(d2.Initial):
c.addFinal(j)
while True:
s = c.States[j]
for sym in c.Sigma:
stn = (d1.evalSymbolL(s[0], sym), d2.evalSymbolL(s[1], sym))
if stn[0] & d1.Final != set([]):
stn[1].add(d2.Initial)
stn[0].add(d1.Initial)
try:
new = c.addState(stn)
if stn[1] & d2.Final != set([]):
c.addFinal(new)
except DuplicateName:
new = c.stateIndex(stn)
c.addTransition(j, sym, new)
if j == len(c.States) - 1:
break
else:
j += 1
return c
def starDisj(fa1, fa2, strict=False):
"""Star of Union of two DFAs: (L1 + L2)*
:param fa1: first automaton
:param fa2: second automaton
    :param strict: should the alphabets necessarily be equal?
:type strict: Boolean
:type fa1: DFA
:type fa2: DFA
:rtype: DFA
.. seealso::
Arto Salomaa, Kai Salomaa, and Sheng Yu. 'State complexity of combined operations'. Theor. Comput. Sci.,
383(2-3):140–152, 2007."""
if strict and fa1.Sigma != fa2.Sigma:
raise DFAdifferentSigma
NSigma = fa1.Sigma.union(fa2.Sigma)
d1, d2 = fa1.dup(), fa2.dup()
d1.setSigma(NSigma)
d2.setSigma(NSigma)
d1.complete()
d2.complete()
c = DFA()
c.setSigma(NSigma)
lStates = []
if d1.Initial in d1.Final or d2.Initial in d2.Final:
i = ({d1.Initial}, {d2.Initial})
else:
i = "initial"
lStates.append(i)
j = c.addState(i)
c.setInitial(j)
c.addFinal(j)
for sym in c.Sigma:
stn = ({d1.evalSymbol(d1.Initial, sym)}, {d2.evalSymbol(d2.Initial, sym)})
if stn[0] & d1.Final or stn[1] & d2.Final:
stn[0].add(d1.Initial)
stn[1].add(d2.Initial)
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
if stn[0] & d1.Final or stn[1] & d2.Final:
c.addFinal(new)
else:
new = c.stateIndex(stn)
c.addTransition(j, sym, new)
if len(lStates) < 2:
return c
j = 1
while True:
stu = lStates[j]
s = c.stateIndex(stu)
for sym in c.Sigma:
stn = (d1.evalSymbolL(stu[0], sym), d2.evalSymbolL(stu[1], sym))
if stn[0] & d1.Final or stn[1] & d2.Final:
stn[0].add(d1.Initial)
stn[1].add(d2.Initial)
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
if stn[0] & d1.Final or stn[1] & d2.Final:
c.addFinal(new)
else:
new = c.stateIndex(stn)
c.addTransition(s, sym, new)
if j == len(lStates) - 1:
break
else:
j += 1
return c
def starInter0(fa1, fa2, strict=False):
"""Star of Intersection of two DFAs: (L1 & L2)*
:param fa1: first automaton
:param fa2: second automaton
    :param strict: should the alphabets necessarily be equal?
:type strict: Boolean
:type fa1: DFA
:type fa2: DFA
:rtype: DFA
.. seealso::
Arto Salomaa, Kai Salomaa, and Sheng Yu. 'State complexity of combined operations'. Theor. Comput. Sci.,
383(2-3):140–152, 2007."""
if strict and fa1.Sigma != fa2.Sigma:
raise DFAdifferentSigma
NSigma = fa1.Sigma.union(fa2.Sigma)
d1, d2 = fa1.dup(), fa2.dup()
d1.setSigma(NSigma)
d2.setSigma(NSigma)
d1.complete()
d2.complete()
c = DFA()
c.setSigma(NSigma)
lStates = []
if d1.finalP(d1.Initial) and d2.finalP(d2.Initial):
i = ({d1.Initial}, {d2.Initial})
else:
i = "initial"
lStates.append(i)
j = c.addState(i)
c.setInitial(j)
c.addFinal(j)
for sym in c.Sigma:
stn = ({d1.evalSymbol(d1.Initial, sym)}, {d2.evalSymbol(d2.Initial, sym)})
if stn[0] & d1.Final and stn[1] & d2.Final:
stn[0].add(d1.Initial)
stn[1].add(d2.Initial)
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
if stn[0] & d1.Final and stn[1] & d2.Final:
c.addFinal(new)
else:
new = c.stateIndex(stn)
c.addTransition(j, sym, new)
if len(lStates) < 2:
return c
j = 1
while True:
stu = lStates[j]
s = c.stateIndex(stu)
for sym in c.Sigma:
stn = (d1.evalSymbolL(stu[0], sym), d2.evalSymbolL(stu[1], sym))
if stn[0] & d1.Final and stn[1] & d2.Final:
stn[0].add(d1.Initial)
stn[1].add(d2.Initial)
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
if stn[0] & d1.Final and stn[1] & d2.Final:
c.addFinal(new)
else:
new = c.stateIndex(stn)
c.addTransition(s, sym, new)
if j == len(lStates) - 1:
break
else:
j += 1
return c
def starInter(fa1, fa2, strict=False):
"""Star of Intersection of two DFAs: (L1 & L2)*
:param fa1: first automaton
:param fa2: second automaton
    :param strict: should the alphabets necessarily be equal?
:type strict: Boolean
:type fa1: DFA
:type fa2: DFA
:rtype: DFA """
if strict and fa1.Sigma != fa2.Sigma:
raise DFAdifferentSigma
NSigma = fa1.Sigma.union(fa2.Sigma)
d1, d2 = fa1.dup(), fa2.dup()
d1.setSigma(NSigma)
d2.setSigma(NSigma)
d1.complete()
d2.complete()
c = DFA()
c.setSigma(NSigma)
lStates = []
if d1.finalP(d1.Initial) and d2.finalP(d2.Initial):
i = {(d1.Initial, d2.Initial)}
else:
i = "initial"
lStates.append(i)
j = c.addState(i)
c.setInitial(j)
c.addFinal(j)
for sym in c.Sigma:
stn = {(d1.evalSymbol(d1.Initial, sym), d2.evalSymbol(d2.Initial, sym))}
for sub in stn:
if d1.finalP(sub[0]) and d2.finalP(sub[1]):
stn.add((d1.Initial, d2.Initial))
break
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
for sub in stn:
if d1.finalP(sub[0]) and d2.finalP(sub[1]):
c.addFinal(new)
break
else:
new = c.stateIndex(stn)
c.addTransition(j, sym, new)
if len(lStates) < 2:
return c
j = 1
while True:
stu = lStates[j]
s = c.stateIndex(stu)
for sym in c.Sigma:
stn = set([])
flag = 1
for sub in stu:
one = (d1.evalSymbol(sub[0], sym), d2.evalSymbol(sub[1], sym))
stn.add(one)
if flag == 1 and d1.finalP(one[0]) and d2.finalP(one[1]):
stn.add((d1.Initial, d2.Initial))
flag = 0
if stn not in lStates:
lStates.append(stn)
new = c.addState(stn)
for sub in stn:
if d1.finalP(sub[0]) and d2.finalP(sub[1]):
c.addFinal(new)
break
else:
new = c.stateIndex(stn)
c.addTransition(s, sym, new)
if j == len(lStates) - 1:
break
else:
j += 1
return c
def disjWStar(f1, f2, strict=True):
"""Union with star: (L1 + L2*)
:param f1: first automaton
:param f2: second automaton
    :param strict: should the alphabets necessarily be equal?
:type strict: Boolean
:type f1: DFA
:type f2: DFA
:rtype: DFA
.. seealso::
Yuan Gao and Sheng Yu. 'State complexity of union and intersection combined with star and reversal'. CoRR,
abs/1006.3755, 2010."""
if strict and f1.Sigma != f2.Sigma:
raise DFAdifferentSigma
return f1.star() | f2
def interWStar(f1, f2, strict=True):
"""Intersection with star: (L1 & L2*)
:param f1: first automaton
:param f2: second automaton
    :param strict: should the alphabets necessarily be equal?
:type strict: Boolean
:type f1: DFA
:type f2: DFA
:rtype: DFA
.. seealso::
Yuan Gao and Sheng Yu. 'State complexity of union and intersection combined with star and reversal'. CoRR,
abs/1006.3755, 2010."""
if strict and f1.Sigma != f2.Sigma:
raise DFAdifferentSigma
return f1.star() & f2 | PypiClean |
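A usage sketch for the combined operations above, relying only on the DFA construction methods this module itself calls (setSigma, addState, setInitial, addFinal, addTransition); evalWordP for the final check is assumed from FAdo's DFA API.

# Sketch: L1 = {"a"}, L2 = {"b"}; starConcat(d1, d2) should accept (ab)*.
from FAdo.fa import DFA
from FAdo.comboperations import starConcat

d1 = DFA()
d1.setSigma({"a", "b"})
s0, s1 = d1.addState(0), d1.addState(1)
d1.setInitial(s0)
d1.addFinal(s1)
d1.addTransition(s0, "a", s1)

d2 = DFA()
d2.setSigma({"a", "b"})
t0, t1 = d2.addState(0), d2.addState(1)
d2.setInitial(t0)
d2.addFinal(t1)
d2.addTransition(t0, "b", t1)

c = starConcat(d1, d2)          # automaton for (L1.L2)*
print(c.evalWordP("abab"))      # expected True
print(c.evalWordP("aba"))       # expected False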
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/prism/components/prism-haml.js | (function(Prism) {
Prism.languages.haml = {
// Multiline stuff should appear before the rest
'multiline-comment': {
pattern: /((?:^|\r?\n|\r)([\t ]*))(?:\/|-#).*(?:(?:\r?\n|\r)\2[\t ]+.+)*/,
lookbehind: true,
alias: 'comment'
},
'multiline-code': [
{
pattern: /((?:^|\r?\n|\r)([\t ]*)(?:[~-]|[&!]?=)).*,[\t ]*(?:(?:\r?\n|\r)\2[\t ]+.*,[\t ]*)*(?:(?:\r?\n|\r)\2[\t ]+.+)/,
lookbehind: true,
inside: {
rest: Prism.languages.ruby
}
},
{
pattern: /((?:^|\r?\n|\r)([\t ]*)(?:[~-]|[&!]?=)).*\|[\t ]*(?:(?:\r?\n|\r)\2[\t ]+.*\|[\t ]*)*/,
lookbehind: true,
inside: {
rest: Prism.languages.ruby
}
}
],
// See at the end of the file for known filters
'filter': {
pattern: /((?:^|\r?\n|\r)([\t ]*)):[\w-]+(?:(?:\r?\n|\r)(?:\2[\t ]+.+|\s*?(?=\r?\n|\r)))+/,
lookbehind: true,
inside: {
'filter-name': {
pattern: /^:[\w-]+/,
alias: 'variable'
}
}
},
'markup': {
pattern: /((?:^|\r?\n|\r)[\t ]*)<.+/,
lookbehind: true,
inside: {
rest: Prism.languages.markup
}
},
'doctype': {
pattern: /((?:^|\r?\n|\r)[\t ]*)!!!(?: .+)?/,
lookbehind: true
},
'tag': {
// Allows for one nested group of braces
pattern: /((?:^|\r?\n|\r)[\t ]*)[%.#][\w\-#.]*[\w\-](?:\([^)]+\)|\{(?:\{[^}]+\}|[^}])+\}|\[[^\]]+\])*[\/<>]*/,
lookbehind: true,
inside: {
'attributes': [
{
// Lookbehind tries to prevent interpolations from breaking it all
// Allows for one nested group of braces
pattern: /(^|[^#])\{(?:\{[^}]+\}|[^}])+\}/,
lookbehind: true,
inside: {
rest: Prism.languages.ruby
}
},
{
pattern: /\([^)]+\)/,
inside: {
'attr-value': {
pattern: /(=\s*)(?:"(?:\\.|[^\\"\r\n])*"|[^)\s]+)/,
lookbehind: true
},
'attr-name': /[\w:-]+(?=\s*!?=|\s*[,)])/,
'punctuation': /[=(),]/
}
},
{
pattern: /\[[^\]]+\]/,
inside: {
rest: Prism.languages.ruby
}
}
],
'punctuation': /[<>]/
}
},
'code': {
pattern: /((?:^|\r?\n|\r)[\t ]*(?:[~-]|[&!]?=)).+/,
lookbehind: true,
inside: {
rest: Prism.languages.ruby
}
},
// Interpolations in plain text
'interpolation': {
pattern: /#\{[^}]+\}/,
inside: {
'delimiter': {
pattern: /^#\{|\}$/,
alias: 'punctuation'
},
rest: Prism.languages.ruby
}
},
'punctuation': {
pattern: /((?:^|\r?\n|\r)[\t ]*)[~=\-&!]+/,
lookbehind: true
}
};
var filter_pattern = '((?:^|\\r?\\n|\\r)([\\t ]*)):{{filter_name}}(?:(?:\\r?\\n|\\r)(?:\\2[\\t ]+.+|\\s*?(?=\\r?\\n|\\r)))+';
	// Non-exhaustive list of available filters and associated languages
var filters = [
'css',
{filter:'coffee',language:'coffeescript'},
'erb',
'javascript',
'less',
'markdown',
'ruby',
'scss',
'textile'
];
var all_filters = {};
for (var i = 0, l = filters.length; i < l; i++) {
var filter = filters[i];
filter = typeof filter === 'string' ? {filter: filter, language: filter} : filter;
if (Prism.languages[filter.language]) {
all_filters['filter-' + filter.filter] = {
pattern: RegExp(filter_pattern.replace('{{filter_name}}', filter.filter)),
lookbehind: true,
inside: {
'filter-name': {
pattern: /^:[\w-]+/,
alias: 'variable'
},
rest: Prism.languages[filter.language]
}
}
}
}
Prism.languages.insertBefore('haml', 'filter', all_filters);
}(Prism)); | PypiClean |
/maxbot-0.3.0b2-py3-none-any.whl/maxbot/dialog_manager.py | import logging
from .context import RpcContext, RpcRequest, TurnContext, get_utc_time_default
from .flows.dialog_flow import DialogFlow
from .resources import InlineResources
from .rpc import RpcManager
from .schemas import CommandSchema, DialogSchema, MessageSchema
logger = logging.getLogger(__name__)
class DialogManager:
"""Orchestrates the flow of the conversation."""
def __init__(
self,
nlu=None,
dialog_flow=None,
rpc=None,
dialog_schema=None,
message_schema=None,
command_schema=None,
):
"""Create new class instance.
:param Nlu nlu: NLU component.
:param RpcManager rpc: RPC manager.
        :param type dialog_schema: A schema class for dialog information.
:param type message_schema: A schema class for user message.
:param type command_schema: A schema class for response commands.
"""
self.DialogSchema = dialog_schema or DialogSchema
self.MessageSchema = message_schema or MessageSchema
self.CommandSchema = command_schema or CommandSchema
self._nlu = nlu # the default value is initialized lazily
self.dialog_flow = dialog_flow or DialogFlow(context={"schema": self.CommandSchema})
self.rpc = rpc or RpcManager()
self._journal_logger = logging.getLogger("maxbot.journal")
self._journal = self.default_journal
self.utc_time_provider = get_utc_time_default
        self._dialog_is_ready = False
@property
def nlu(self):
"""NLU component used to recognize intent and entities from user's utterance."""
if self._nlu is None:
# lazy import to speed up load time
from .nlu import Nlu
self._nlu = Nlu()
return self._nlu
def load_resources(self, resources):
"""Load dialog resources.
:param Resources resources: Bot resources.
"""
self._dialog_is_ready = False
self.rpc.load_resources(resources)
self.dialog_flow.load_resources(resources)
if hasattr(self.nlu, "load_resources"):
self.nlu.load_resources(resources)
self._dialog_is_ready = True
def load_inline_resources(self, source):
"""Load dialog resources from YAML-string.
:param str source: A YAML-string with resources.
"""
self.load_resources(InlineResources(source))
async def process_message(self, message, dialog, state):
"""Process user message.
:param dict message: A message received from the user.
:param dict dialog: Information about the dialog from which the message was received.
:param StateVariables state: A container for state variables.
:raise BotError: Something went wrong with the bot.
:return List[dict]: A list of commands to respond to the user.
"""
if not self._dialog_is_ready:
logger.warning(
"The dialog is not ready, messages is skipped until you load the resources."
)
return []
logger.debug("process message %s, %s", message, dialog)
message = self.MessageSchema().load(message)
dialog = self.DialogSchema().load(dialog)
utc_time = self.utc_time_provider()
intents, entities = await self.nlu(message, utc_time=utc_time)
ctx = TurnContext(
dialog,
state,
utc_time,
message=message,
intents=intents,
entities=entities,
command_schema=self.CommandSchema(many=True),
)
await self.dialog_flow.turn(ctx)
self._journal(ctx)
return ctx.commands
async def process_rpc(self, request, dialog, state):
"""Process RPC request.
:param dict request: A request received from the RPC client.
:param dict dialog: Information about the dialog from which the message was received.
:param StateVariables state: A container for state variables (optional).
:raise BotError: Something went wrong with the bot.
:return List[dict]: A list of commands to send to the user.
"""
if not self._dialog_is_ready:
logger.warning(
"The dialog is not ready, rpc requests is skipped until you load the resources."
)
return []
logger.debug("process rpc %s, %s", request, dialog)
dialog = self.DialogSchema().load(dialog)
request = self.rpc.parse_request(request)
ctx = TurnContext(
dialog,
state,
self.utc_time_provider(),
rpc=RpcContext(RpcRequest(**request)),
command_schema=self.CommandSchema(many=True),
)
await self.dialog_flow.turn(ctx)
self._journal(ctx)
return ctx.commands
def default_journal(self, ctx):
"""Get the default implementaton of journal.
:param TurnContext ctx: Turn context.
"""
for event in ctx.journal_events:
level, message = TurnContext.extract_log_event(event)
if isinstance(logging.getLevelName(level), int):
level = getattr(logging, level)
if self._journal_logger.isEnabledFor(level):
self._journal_logger.log(level, message)
if ctx.error:
raise ctx.error
def journal(self, fn):
"""Register the journal callback.
:param callable fn: The journal callback.
"""
self._journal = fn
return fn | PypiClean |
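A rough driving sketch for the manager above, using the default schemas. The dialog-tree YAML, the fields of the `dialog` dict, and the state object are illustrative assumptions -- the real state container is maxbot's StateVariables, which is defined elsewhere and not reproduced here.

# Sketch only; field names in `dialog` depend on DialogSchema.
from maxbot.dialog_manager import DialogManager

dm = DialogManager()
dm.load_inline_resources("""
    dialog:
      - condition: message.text
        response: "You said {{ message.text }}"
""")

async def demo(state):
    commands = await dm.process_message(
        message={"text": "hello"},
        dialog={"channel_name": "cli", "user_id": "1"},
        state=state,
    )
    print(commands)

# run with: asyncio.run(demo(state))  -- state: a maxbot StateVariables instance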
/Flask-Boto3-0.3.2.tar.gz/Flask-Boto3-0.3.2/flask_boto3/__init__.py | import boto3
from botocore.exceptions import UnknownServiceError
from flask import _app_ctx_stack as stack
from flask import current_app
class Boto3(object):
"""Stores a bunch of boto3 conectors inside Flask's application context
for easier handling inside view functions.
All connectors are stored inside the dict `boto3_cns` where the keys are
the name of the services and the values their associated boto3 client.
"""
def __init__(self, app=None):
self.app = app
if self.app is not None:
self.init_app(app)
def init_app(self, app):
app.teardown_appcontext(self.teardown)
def connect(self):
"""Iterate through the application configuration and instantiate
the services.
"""
requested_services = set(
svc.lower() for svc in current_app.config.get('BOTO3_SERVICES', [])
)
region = current_app.config.get('BOTO3_REGION')
sess_params = {
'aws_access_key_id': current_app.config.get('BOTO3_ACCESS_KEY'),
'aws_secret_access_key': current_app.config.get('BOTO3_SECRET_KEY'),
'profile_name': current_app.config.get('BOTO3_PROFILE'),
'region_name': region
}
sess = boto3.session.Session(**sess_params)
try:
cns = {}
for svc in requested_services:
# Check for optional parameters
params = current_app.config.get(
'BOTO3_OPTIONAL_PARAMS', {}
).get(svc, {})
# Get session params and override them with kwargs
# `profile_name` cannot be passed to clients and resources
kwargs = sess_params.copy()
kwargs.update(params.get('kwargs', {}))
del kwargs['profile_name']
# Override the region if one is defined as an argument
args = params.get('args', [])
if len(args) >= 1:
del kwargs['region_name']
if not(isinstance(args, list) or isinstance(args, tuple)):
args = [args]
# Create resource or client
if svc in sess.get_available_resources():
cns.update({svc: sess.resource(svc, *args, **kwargs)})
else:
cns.update({svc: sess.client(svc, *args, **kwargs)})
except UnknownServiceError:
raise
return cns
def teardown(self, exception):
ctx = stack.top
if hasattr(ctx, 'boto3_cns'):
for c in ctx.boto3_cns:
con = ctx.boto3_cns[c]
if hasattr(con, 'close') and callable(con.close):
ctx.boto3_cns[c].close()
@property
    def resources(self):
        """
        Get all boto3 resources (connections that expose a meta client)
        """
        c = self.connections
        return {k: v for k, v in c.items() if hasattr(c[k].meta, 'client')}
@property
def clients(self):
"""
Get all clients (with and without associated resources)
"""
clients = {}
for k, v in self.connections.items():
if hasattr(v.meta, 'client'): # has boto3 resource
clients[k] = v.meta.client
else: # no boto3 resource
clients[k] = v
return clients
@property
def connections(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'boto3_cns'):
ctx.boto3_cns = self.connect()
return ctx.boto3_cns | PypiClean |
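A minimal usage sketch; the BOTO3_* config keys and the clients/resources/connections properties are exactly the ones defined above, while the region and the route are illustrative (returning a dict from a view assumes Flask >= 1.1).

from flask import Flask
from flask_boto3 import Boto3

app = Flask(__name__)
app.config["BOTO3_REGION"] = "eu-west-1"
app.config["BOTO3_SERVICES"] = ["s3", "sqs"]
boto = Boto3(app)

@app.route("/buckets")
def buckets():
    # Connections are created lazily per application context and closed
    # by the registered teardown handler.
    s3 = boto.clients["s3"]
    return {"buckets": [b["Name"] for b in s3.list_buckets()["Buckets"]]}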
/ImSwitch-2.0.0.tar.gz/ImSwitch-2.0.0/imswitch/imcommon/view/CheckUpdatesDialog.py | from qtpy import QtCore, QtWidgets
import imswitch
class CheckUpdatesDialog(QtWidgets.QDialog):
""" Dialog for checking for ImSwitch updates. """
def __init__(self, parent=None, *args, **kwargs):
super().__init__(parent, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint,
*args, **kwargs)
self.setWindowTitle('Check for updates')
self.setMinimumWidth(540)
self.informationLabel = QtWidgets.QLabel()
self.informationLabel.setWordWrap(True)
self.informationLabel.setStyleSheet('font-size: 10pt')
self.linkLabel = QtWidgets.QLabel()
self.linkLabel.setWordWrap(True)
self.linkLabel.setTextFormat(QtCore.Qt.RichText)
self.linkLabel.setOpenExternalLinks(True)
self.linkLabel.setVisible(False)
self.linkLabel.setStyleSheet('font-size: 10pt')
self.buttons = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok,
QtCore.Qt.Horizontal,
self
)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
layout = QtWidgets.QVBoxLayout()
layout.setSpacing(16)
layout.addWidget(self.informationLabel)
layout.addWidget(self.linkLabel)
layout.addWidget(self.buttons)
self.setLayout(layout)
def resetUpdateInfo(self):
self.informationLabel.setText('Checking for updates, please wait…')
self.linkLabel.setText('')
self.linkLabel.setVisible(False)
def showFailed(self):
self.informationLabel.setText('Failed to check for updates.')
self.linkLabel.setText('')
self.linkLabel.setVisible(False)
def showNoUpdateAvailable(self):
self.informationLabel.setText('No updates available.')
self.linkLabel.setText('')
self.linkLabel.setVisible(False)
def showPyInstallerUpdate(self, newVersion):
self.informationLabel.setText(
f'ImSwitch {newVersion} is now available. '
f' Your current version is {imswitch.__version__}.'
f'\n\nTo update, download the new version archive from the link below and extract it'
f' into a new folder. Do NOT overwrite your current installation; instead, delete it'
f' after you have updated.'
)
self.linkLabel.setText(
'The new version may be downloaded from '
'<a href="https://github.com/kasasxav/ImSwitch/releases" style="color: orange">'
'the GitHub releases page'
'</a>'
'.'
)
self.linkLabel.setVisible(True)
def showPyPIUpdate(self, newVersion):
self.informationLabel.setText(
f'ImSwitch {newVersion} is now available. '
f' Your current version is {imswitch.__version__}.'
f'\n\nTo update, run the command: pip install --upgrade imswitch'
)
self.linkLabel.setText(
'The changelog is available '
'<a href="https://imswitch.readthedocs.io/en/stable/changelog.html"'
'style="color: orange">'
'here'
'</a>'
'.'
)
self.linkLabel.setVisible(True)
# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. | PypiClean |
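A short manual-test sketch for the dialog above, assuming a desktop session where a Qt event loop can run; the version string is illustrative.

from qtpy import QtWidgets
from imswitch.imcommon.view.CheckUpdatesDialog import CheckUpdatesDialog

app = QtWidgets.QApplication([])
dialog = CheckUpdatesDialog()
dialog.resetUpdateInfo()        # "Checking for updates, please wait…"
dialog.showPyPIUpdate("2.1.0")  # pretend a newer release was found
dialog.show()
app.exec_()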
/Hello-World-Package-0.1.3.tar.gz/Hello-World-Package-0.1.3/CONTRIBUTING.rst | ============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/granwilliams/hello-world/issues.
If you are reporting a bug, please include:
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Hello World Package could always use more documentation, whether
as part of the official Hello World Package docs, in docstrings,
or even on the web in blog posts, articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/granwilliams/hello-world/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `Hello-World-Package` for local development.
1. Fork the `hello-world` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/hello-world.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv hello-world
$ cd hello-world/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox::
$ flake8 hello_world tests
$ python setup.py test
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.7, 3.3, 3.4, 3.5 and for PyPy. Check
https://travis-ci.org/granwilliams/hello-world/pull_requests
and make sure that the tests pass for all supported Python versions.
| PypiClean |
/FlaskBB-2.0.2.tar.gz/FlaskBB-2.0.2/flaskbb/core/tokens.py | from abc import abstractmethod
import attr
from flask_babelplus import gettext as _
from .._compat import ABC
from .exceptions import BaseFlaskBBError
class TokenError(BaseFlaskBBError):
"""
Raised when there is an issue with deserializing
a token. Has helper classmethods to ensure
consistent verbiage.
:param str reason: An explanation of why the token is invalid
"""
def __init__(self, reason):
self.reason = reason
super(TokenError, self).__init__(reason)
@classmethod
def invalid(cls):
"""
Used to raise an exception about a token that is invalid
due to being signed incorrectly, has been tampered with,
is unparsable or contains an inappropriate action.
"""
return cls(_('Token is invalid'))
@classmethod
def expired(cls):
"""
Used to raise an exception about a token that has expired and is
no longer usable.
"""
return cls(_('Token is expired'))
# in theory this would never be raised
# but it's provided for a generic catchall
# when processing goes horribly wrong
@classmethod # pragma: no cover
def bad(cls):
return cls(_('Token cannot be processed'))
# holder for token actions
# not an enum so plugins can add to it
class TokenActions:
"""
Collection of token actions.
.. note::
This is just a class rather than an enum because enums cannot be
extended at runtime which would limit the number of token actions
to the ones implemented by FlaskBB itself and block extension of
tokens by plugins.
"""
RESET_PASSWORD = 'reset_password'
ACTIVATE_ACCOUNT = 'activate_account'
@attr.s(frozen=True, cmp=True, hash=True)
class Token(object):
"""
:param int user_id:
:param str operation: An operation taken from
:class:`TokenActions<flaskbb.core.tokens.TokenActions>`
"""
user_id = attr.ib()
operation = attr.ib()
class TokenSerializer(ABC):
"""
"""
@abstractmethod
def dumps(self, token):
"""
This method is abstract.
Used to transform a token into a string representation of it.
:param token:
:type token: :class:`Token<flaskbb.core.tokens.Token>`
:returns str:
"""
pass
@abstractmethod
def loads(self, raw_token):
"""
This method is abstract
Used to transform a string representation of a token into an
actual :class:`Token<flaskbb.core.tokens.Token>` instance
:param str raw_token:
:returns token: The parsed token
:rtype: :class:`Token<flaskbb.core.tokens.Token`>
"""
pass
class TokenVerifier(ABC):
"""
Used to verify the validatity of tokens post
deserialization, such as an email matching the
user id in the provided token.
Should raise a
:class:`ValidationError<flaskbb.core.exceptions.ValidationError>`
if verification fails.
"""
@abstractmethod
def verify_token(self, token, **kwargs):
"""
This method is abstract.
:param token: The parsed token to verify
:param kwargs: Arbitrary context for validation of the token
:type token: :class:`Token<flaskbb.core.tokens.Token>`
"""
pass
def __call__(self, token, **kwargs):
return self.verify_token(token, **kwargs) | PypiClean |
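One way to satisfy the TokenSerializer contract above -- a sketch built on itsdangerous; this is an illustration, not necessarily FlaskBB's shipped serializer.

from itsdangerous import (
    BadSignature,
    SignatureExpired,
    URLSafeTimedSerializer,
)

class ItsdangerousTokenSerializer(TokenSerializer):
    """Illustrative serializer; expiry is enforced at load time."""

    def __init__(self, secret_key, expiry=3600):
        self._serializer = URLSafeTimedSerializer(secret_key)
        self._expiry = expiry  # seconds

    def dumps(self, token):
        return self._serializer.dumps([token.user_id, token.operation])

    def loads(self, raw_token):
        try:
            user_id, operation = self._serializer.loads(
                raw_token, max_age=self._expiry
            )
        except SignatureExpired:  # must come first: subclass of BadSignature
            raise TokenError.expired()
        except BadSignature:
            raise TokenError.invalid()
        return Token(user_id=user_id, operation=operation)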
/EsiPy-1.2.3.tar.gz/EsiPy-1.2.3/README.rst | EsiPy |PyPI Version| |PyPI Python Version|
==========================================
|Build Status| |Coverage Status| |Code Health|
| **Full Documentation can be found here:**
| |Documentation Status|
What is EsiPy
-------------
EsiPy is a python swagger client, taking advantages of `pyswagger`_
while rewriting some parts of it to be better used with EVE Online ESI
API.
Example using EsiPy
-------------------
If you need it, you can find here an example of webapp using `Flask+EsiPy`_
Contacts
--------
- Kyria
- Github: @Kyria
- `TweetFleet Slack <https://www.fuzzwork.co.uk/tweetfleet-slack-invites/>`_: @althalus
- Reddit: /u/Karlyna
.. _pyswagger: https://github.com/mission-liao/pyswagger
.. _Flask+EsiPy: https://github.com/Kyria/flask-esipy-example
.. |PyPI Version| image:: https://img.shields.io/pypi/v/EsiPy.svg
:target: https://pypi.python.org/pypi/EsiPy
.. |PyPI Python Version| image:: https://img.shields.io/pypi/pyversions/EsiPy.svg
:target: https://pypi.python.org/pypi/EsiPy
.. |Documentation Status| image:: https://img.shields.io/badge/Documentation-GitHub%20Page-lightgrey.svg
:target: https://kyria.github.io/EsiPy/
.. |Build Status| image:: https://travis-ci.org/Kyria/EsiPy.svg?branch=master
:target: https://travis-ci.org/Kyria/EsiPy
.. |Coverage Status| image:: https://coveralls.io/repos/github/Kyria/EsiPy/badge.svg
:target: https://coveralls.io/github/Kyria/EsiPy
.. |Code Health| image:: https://landscape.io/github/Kyria/EsiPy/master/landscape.svg?style=flat
:target: https://landscape.io/github/Kyria/EsiPy/master
| PypiClean |
/ClueBin-0.2.3.tar.gz/ClueBin-0.2.3/src/cluebin/pastebin.py | import webob
from StringIO import StringIO
import pygments
from pygments import lexers
from pygments import formatters
from pygments import util
from xml.sax import saxutils
from cluebin import paste as pastebase
from cluebin import utils
class PasteBinApp(object):
"""WSGI app representing a pastebin.
>>> app = PasteBinApp()
"""
COOKIE_LANGUAGE = 'cluebin.last_lang'
COOKIE_AUTHOR = 'cluebin.last_author'
def __init__(self, display_tag_line=True):
self.pmanager = pastebase.PasteManager()
self.display_tag_line = display_tag_line
def __call__(self, environ, start_response):
request = webob.Request(environ)
response = webob.Response(content_type='text/html')
out = StringIO()
handler = self.index
pieces = [x for x in environ['PATH_INFO'].split('/') if x]
if pieces and hasattr(self, pieces[0]):
handler = getattr(self, pieces[0])
handler(request, response, out, *pieces[1:])
if response.status_int != 200:
return response(environ, start_response)
# Lazy man's templating
version = '0.2.2'
tag_line = ''
if self.display_tag_line:
tag_line = 'ClueBin v%s by ' \
'<a href="http://www.serverzen.com">' \
'ServerZen Software</a>' % version
top = u'''
<html>
<head>
<title>PasteBin</title>
<style>
PRE { margin: 0; }
.code, .linenos { font-size: 90%; }
.source { border: 1px #999 dashed; margin: 0; padding: 1em }
.left { width: 70%; float: left; }
.right { margin-left: 2em; width: 20%; float: left; }
.field { margin-bottom: 1em; }
.field LABEL { font-weight: bold; width: 20%; display: block; float: left; }
.field INPUT { width: 80% }
.field TEXTAREA { width: 100%; height: 10em }
.previous_paste DD { margin-left: 0; }
.clear { display: block; clear: both; }
.header { font-size: 90%; float: right; }
</style>
</head>'''
top += u'<body><div id="main"><div class="header">%s</div>' % tag_line
footer = ''
bottom = u'<div class="footer">%s</div><div class="clear"><!-- --></div></div></body></html>' % footer
response.unicode_body = top + out.getvalue() + bottom
return response(environ, start_response)
def paste_listing(self, request, response, out):
print >> out, u'<fieldset><legend>Previous Pastes</legend><ul>'
for pobj in self.pmanager.get_pastes():
if pobj.date is not None:
pdate = pobj.date.strftime('%x at %X')
else:
pdate = 'UNKNOWN'
print >> out, u'<li><a href="%s">Post by: %s on %s</a></li>' % \
(utils.url(request, 'pasted/%i' % pobj.pasteid),
pobj.author_name, pdate)
print >> out, u'</ul></fieldset>'
def preferred_author(self, request):
author_name = request.params.get('author_name', u'')
if not author_name:
author_name = request.cookies.get(self.COOKIE_AUTHOR, u'')
if isinstance(author_name, str):
author_name = unicode(author_name, 'utf-8')
return author_name
    def preferred_language(self, request):
        language = request.cookies.get(self.COOKIE_LANGUAGE, u'')
        if isinstance(language, str):
            language = unicode(language, 'utf-8')
        return language
def index(self, request, response, out, msg=u'', paste_obj=None):
if msg:
msg = u'<div class="message">%s</div>' % msg
paste = u''
language = self.preferred_language(request)
if paste_obj is not None:
paste = paste_obj.paste or u''
try:
if paste_obj.language:
l = lexers.get_lexer_by_name(paste_obj.language)
else:
l = lexers.guess_lexer(paste_obj.paste)
language = l.aliases[0]
except util.ClassNotFound, err:
# couldn't guess lexer
l = lexers.TextLexer()
formatter = formatters.HtmlFormatter(linenos=True, cssclass="source")
formatted_paste = pygments.highlight(paste, l, formatter)
print >> out, u'''
<style>%s</style>
<dl class="previous_paste">
<dt>Previous Paste</dt>
<dd>Format: %s</dd>
<dd>%s</dd>
</dl>
''' % (formatter.get_style_defs(), l.name, formatted_paste)
lexer_options = u'<option value="">-- Auto-detect --</option>'
all = [x for x in lexers.get_all_lexers()]
all.sort()
for name, aliases, filetypes, mimetypes_ in all:
selected = u''
if language == aliases[0]:
selected = u' selected'
lexer_options += u'<option value="%s"%s>%s</option>' % (aliases[0],
selected,
name)
print >> out, u'''
%s
<div class="left">
''' % msg
print >> out, u'''
<form action="%(action)s" method="POST">
<fieldset>
<legend>Paste Info</legend>
<div class="field">
<label for="author_name">Name</label>
<input type="text" name="author_name" value="%(author_name)s" />
</div>
<div class="field">
<label for="language">Language</label>
<select name="language">
%(lexers)s
</select>
</div>
<div class="field">
<label for="paste">Paste Text</label>
<textarea name="paste">%(paste)s</textarea>
</div>
<input type="submit" />
</fieldset>
</form>
</div>
''' % {'action': utils.url(request, 'paste'),
'paste': saxutils.escape(paste),
'lexers': lexer_options,
'author_name': self.preferred_author(request)}
print >> out, u'<div class="right">'
self.paste_listing(request, response, out)
print >> out, u'</div>'
def pasted(self, request, response, out, *args):
pobj = self.pmanager.get_paste(args[0])
self.index(request, response, out, paste_obj=pobj)
def paste(self, request, response, out):
if not request.params.get('paste', None):
self.index(request, response, out, msg=u"* You did not fill in body")
else:
paste = request.params['paste']
author_name = request.params['author_name']
language = request.params['language']
response.set_cookie(self.COOKIE_AUTHOR, author_name)
response.set_cookie(self.COOKIE_LANGUAGE, language)
if isinstance(author_name, str):
author_name = unicode(author_name, 'utf-8')
if isinstance(language, str):
language = unicode(language, 'utf-8')
if isinstance(paste, str):
paste = unicode(paste, 'utf-8')
pobj = self.pmanager.save_paste(author_name, paste, language)
newurl = utils.url(request, 'pasted/%s' % str(pobj.pasteid))
response.status = '301 Moved Permanently'
response.headers['Location'] = newurl
def make_app(global_config, datastore=None):
app = PasteBinApp()
if datastore is not None:
app.pmanager.datastore = datastore
return app
def build_datastore(datastore_name, *datastore_args):
f = utils.importattr(datastore_name)
return f(*datastore_args)
def main(cmdargs=None):
from wsgiref import simple_server
import sys
import optparse
logger = utils.setup_logger()
if cmdargs is None:
cmdargs = sys.argv[1:]
storages = ['cluebin.googledata.GooglePasteDataStore',
'cluebin.sqldata.SqlPasteDataStore']
parser = optparse.OptionParser()
parser.add_option('-i', '--interface', dest='interface',
default='0.0.0.0',
help='Interface to listen on (by default it is '
'0.0.0.0 which '
'is shorthand for all interfaces)')
parser.add_option('-p', '--port', dest='port',
default='8080',
help='Port to listen on (by default 8080)')
parser.add_option('-s', '--storage', dest='storage_name',
default='',
help='Storage to use for pastes (by default '
'non-persistent), cluebin-provided options are: %s'
% str(storages))
(opts, args) = parser.parse_args(cmdargs)
    datastore = None
    if opts.storage_name:
        datastore = build_datastore(opts.storage_name, *args)
        logger.info('Using storage: %s' % (opts.storage_name))
        logger.info('Using storage arguments: %s' % str(args))
    app = make_app({}, datastore)
    server = simple_server.make_server(opts.interface, int(opts.port), app)
    logger.info("ClueBin now listening on %s:%s"
                % (opts.interface, opts.port))
server.serve_forever()
return 0
if __name__ == '__main__':
import sys
sys.exit(main()) | PypiClean |
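For completeness, a sketch of serving the app without the CLI wrapper; this mirrors what main() does with its defaults and, like the module itself, is Python 2 code.

from wsgiref import simple_server
from cluebin.pastebin import make_app

app = make_app({})  # no datastore -> pastes are kept in memory only
server = simple_server.make_server('127.0.0.1', 8080, app)
print 'ClueBin on http://127.0.0.1:8080/'
server.serve_forever()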
/OBITools-1.2.13.tar.gz/OBITools-1.2.13/distutils.ext/obidistutils/serenity/pip/vcs/mercurial.py | import os
import tempfile
import re
import sys
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.log import logger
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip.backwardcompat import ConfigParser
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
bundle_file = 'hg-clone.txt'
guide = ('# This was a Mercurial repo; to make it a repo again run:\n'
'hg init\nhg pull %(url)s\nhg update -r %(rev)s\n')
def parse_vcs_bundle_file(self, content):
url = rev = None
for line in content.splitlines():
if not line.strip() or line.strip().startswith('#'):
continue
url_match = re.search(r'hg\s*pull\s*(.*)\s*', line)
if url_match:
url = url_match.group(1).strip()
rev_match = re.search(r'^hg\s*update\s*-r\s*(.*)\s*', line)
if rev_match:
rev = rev_match.group(1).strip()
if url and rev:
return url, rev
return None, None
def export(self, location):
"""Export the Hg repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
call_subprocess(
[self.cmd, 'archive', location],
filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = ConfigParser.SafeConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url)
config_file = open(repo_config, 'w')
config.write(config_file)
config_file.close()
except (OSError, ConfigParser.NoSectionError):
e = sys.exc_info()[1]
logger.warn(
'Could not switch Mercurial repository to %s: %s'
% (url, e))
else:
call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def update(self, dest, rev_options):
call_subprocess([self.cmd, 'pull', '-q'], cwd=dest)
call_subprocess(
[self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.notify('Cloning hg %s%s to %s'
% (url, rev_display, display_path(dest)))
call_subprocess([self.cmd, 'clone', '--noupdate', '-q', url, dest])
call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest)
def get_url(self, location):
url = call_subprocess(
[self.cmd, 'showconfig', 'paths.default'],
show_stdout=False, cwd=location).strip()
if self._is_local_repository(url):
url = path_to_url(url)
return url.strip()
def get_tag_revs(self, location):
tags = call_subprocess(
[self.cmd, 'tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
if "tip" != tag:
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_branch_revs(self, location):
branches = call_subprocess(
[self.cmd, 'branches'], show_stdout=False, cwd=location)
branch_revs = []
for line in branches.splitlines():
branches_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
if branches_match:
branch = branches_match.group(1)
rev = branches_match.group(2)
if "default" != branch:
branch_revs.append((rev.strip(), branch.strip()))
return dict(branch_revs)
def get_revision(self, location):
current_revision = call_subprocess(
[self.cmd, 'parents', '--template={rev}'],
show_stdout=False, cwd=location).strip()
return current_revision
def get_revision_hash(self, location):
current_rev_hash = call_subprocess(
[self.cmd, 'parents', '--template={node}'],
show_stdout=False, cwd=location).strip()
return current_rev_hash
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('hg:'):
repo = 'hg+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
current_rev_hash = self.get_revision_hash(location)
tag_revs = self.get_tag_revs(location)
branch_revs = self.get_branch_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
elif current_rev in branch_revs:
# It's the tip of a branch
full_egg_name = '%s-%s' % (egg_project_name, branch_revs[current_rev])
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev_hash, full_egg_name)
vcs.register(Mercurial) | PypiClean |
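The hg-clone.txt bundle file that parse_vcs_bundle_file consumes follows the `guide` template defined above; below is a sketch of the round trip, assuming VersionControl accepts the vcs URL as its first constructor argument (as in pip of this era). The URL and revision are illustrative.

content = (
    "# This was a Mercurial repo; to make it a repo again run:\n"
    "hg init\n"
    "hg pull https://example.org/repo\n"
    "hg update -r abc123\n"
)
backend = Mercurial("hg+https://example.org/repo")
url, rev = backend.parse_vcs_bundle_file(content)
assert (url, rev) == ("https://example.org/repo", "abc123")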
/Mathics3-6.0.2.tar.gz/Mathics3-6.0.2/mathics/builtin/arithfns/basic.py | from mathics.builtin.arithmetic import _MPMathFunction, create_infix
from mathics.builtin.base import BinaryOperator, Builtin, PrefixOperator, SympyFunction
from mathics.core.atoms import (
Complex,
Integer,
Integer1,
Integer3,
Integer310,
IntegerM1,
Number,
Rational,
RationalOneHalf,
Real,
String,
)
from mathics.core.attributes import (
A_FLAT,
A_LISTABLE,
A_NUMERIC_FUNCTION,
A_ONE_IDENTITY,
A_ORDERLESS,
A_PROTECTED,
A_READ_PROTECTED,
)
from mathics.core.convert.expression import to_expression
from mathics.core.convert.sympy import from_sympy
from mathics.core.expression import Expression
from mathics.core.list import ListExpression
from mathics.core.symbols import (
Symbol,
SymbolDivide,
SymbolHoldForm,
SymbolNull,
SymbolPower,
SymbolTimes,
)
from mathics.core.systemsymbols import (
SymbolBlank,
SymbolComplexInfinity,
SymbolIndeterminate,
SymbolInfix,
SymbolLeft,
SymbolMinus,
SymbolPattern,
SymbolSequence,
)
from mathics.eval.arithmetic import eval_Plus, eval_Times
from mathics.eval.nevaluator import eval_N
from mathics.eval.numerify import numerify
class CubeRoot(Builtin):
"""
<url>
:Cube root:
https://en.wikipedia.org/wiki/Cube_root</url> (<url> :WMA:
https://reference.wolfram.com/language/ref/CubeRoot.html</url>)
<dl>
<dt>'CubeRoot[$n$]'
<dd>finds the real-valued cube root of the given $n$.
</dl>
>> CubeRoot[16]
= 2 2 ^ (1 / 3)
#> CubeRoot[-5]
= -5 ^ (1 / 3)
#> CubeRoot[-510000]
= -10 510 ^ (1 / 3)
#> CubeRoot[-5.1]
= -1.7213
#> CubeRoot[b]
= b ^ (1 / 3)
#> CubeRoot[-0.5]
= -0.793701
#> CubeRoot[3 + 4 I]
: The parameter 3 + 4 I should be real valued.
= (3 + 4 I) ^ (1 / 3)
"""
attributes = A_LISTABLE | A_NUMERIC_FUNCTION | A_PROTECTED | A_READ_PROTECTED
messages = {
"preal": "The parameter `1` should be real valued.",
}
rules = {
"CubeRoot[n_?NumberQ]": "If[n > 0, Power[n, Divide[1, 3]], Times[-1, Power[Times[-1, n], Divide[1, 3]]]]",
"CubeRoot[n_]": "Power[n, Divide[1, 3]]",
"MakeBoxes[CubeRoot[x_], f:StandardForm|TraditionalForm]": (
"RadicalBox[MakeBoxes[x, f], 3]"
),
}
summary_text = "cube root"
def eval(self, n, evaluation):
"CubeRoot[n_Complex]"
evaluation.message("CubeRoot", "preal", n)
return Expression(
SymbolPower,
n,
Integer1 / Integer3,
)
class Divide(BinaryOperator):
"""
<url>
:Division:
https://en.wikipedia.org/wiki/Division_(mathematics)</url> (<url>
:WMA link:
https://reference.wolfram.com/language/ref/Divide.html</url>)
<dl>
<dt>'Divide[$a$, $b$]'
<dt>'$a$ / $b$'
<dd>represents the division of $a$ by $b$.
</dl>
>> 30 / 5
= 6
>> 1 / 8
= 1 / 8
>> Pi / 4
= Pi / 4
Use 'N' or a decimal point to force numeric evaluation:
>> Pi / 4.0
= 0.785398
>> 1 / 8
= 1 / 8
>> N[%]
= 0.125
Nested divisions:
>> a / b / c
= a / (b c)
>> a / (b / c)
= a c / b
>> a / b / (c / (d / e))
= a d / (b c e)
>> a / (b ^ 2 * c ^ 3 / e)
= a e / (b ^ 2 c ^ 3)
#> 1 / 4.0
= 0.25
#> 10 / 3 // FullForm
= Rational[10, 3]
#> a / b // FullForm
= Times[a, Power[b, -1]]
"""
attributes = A_LISTABLE | A_NUMERIC_FUNCTION | A_PROTECTED
default_formats = False
formats = {
(("InputForm", "OutputForm"), "Divide[x_, y_]"): (
'Infix[{HoldForm[x], HoldForm[y]}, "/", 400, Left]'
),
}
grouping = "Left"
operator = "/"
precedence = 470
rules = {
"Divide[x_, y_]": "Times[x, Power[y, -1]]",
"MakeBoxes[Divide[x_, y_], f:StandardForm|TraditionalForm]": (
"FractionBox[MakeBoxes[x, f], MakeBoxes[y, f]]"
),
}
summary_text = "divide"
class Minus(PrefixOperator):
"""
<url>
:Additive inverse:
https://en.wikipedia.org/wiki/Additive_inverse</url> (<url>
:WMA:
https://reference.wolfram.com/language/ref/Minus.html</url>)
<dl>
<dt>'Minus[$expr$]'
<dd> is the negation of $expr$.
</dl>
>> -a //FullForm
= Times[-1, a]
'Minus' automatically distributes:
>> -(x - 2/3)
= 2 / 3 - x
'Minus' threads over lists:
>> -Range[10]
= {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10}
"""
attributes = A_LISTABLE | A_NUMERIC_FUNCTION | A_PROTECTED
formats = {
"Minus[x_]": 'Prefix[{HoldForm[x]}, "-", 480]',
# don't put e.g. -2/3 in parentheses
"Minus[expr_Divide]": 'Prefix[{HoldForm[expr]}, "-", 399]',
"Minus[Infix[expr_, op_, 400, grouping_]]": (
'Prefix[{Infix[expr, op, 400, grouping]}, "-", 399]'
),
}
operator = "-"
precedence = 480
rules = {
"Minus[x_]": "Times[-1, x]",
}
summary_text = "arithmetic negate"
def eval_int(self, x: Integer, evaluation):
"Minus[x_Integer]"
return Integer(-x.value)
class Plus(BinaryOperator, SympyFunction):
"""
<url>
:Addition:
https://en.wikipedia.org/wiki/Addition</url> (<url>
:SymPy:
https://docs.sympy.org/latest/modules/core.html#id48</url>, <url>
:WMA:
https://reference.wolfram.com/language/ref/Plus.html</url>)
<dl>
<dt>'Plus[$a$, $b$, ...]'
<dt>$a$ + $b$ + ...
<dd>represents the sum of the terms $a$, $b$, ...
</dl>
>> 1 + 2
= 3
'Plus' performs basic simplification of terms:
>> a + b + a
= 2 a + b
>> a + a + 3 * a
= 5 a
>> a + b + 4.5 + a + b + a + 2 + 1.5 b
= 6.5 + 3 a + 3.5 b
Apply 'Plus' on a list to sum up its elements:
>> Plus @@ {2, 4, 6}
= 12
The sum of the first 1000 integers:
>> Plus @@ Range[1000]
= 500500
'Plus' has default value 0:
>> DefaultValues[Plus]
= {HoldPattern[Default[Plus]] :> 0}
>> a /. n_. + x_ :> {n, x}
= {0, a}
The sum of 2 red circles and 3 red circles is...
>> 2 Graphics[{Red,Disk[]}] + 3 Graphics[{Red,Disk[]}]
= 5 -Graphics-
#> -2a - 2b
= -2 a - 2 b
#> -4+2x+2*Sqrt[3]
= -4 + 2 Sqrt[3] + 2 x
#> 2a-3b-c
= 2 a - 3 b - c
#> 2a+5d-3b-2c-e
= 2 a - 3 b - 2 c + 5 d - e
#> 1 - I * Sqrt[3]
= 1 - I Sqrt[3]
#> Head[3 + 2 I]
= Complex
#> N[Pi, 30] + N[E, 30]
= 5.85987448204883847382293085463
#> % // Precision
= 30.
"""
attributes = (
A_FLAT
| A_LISTABLE
| A_NUMERIC_FUNCTION
| A_ONE_IDENTITY
| A_ORDERLESS
| A_PROTECTED
)
default_formats = False
defaults = {
None: "0",
}
operator = "+"
precedence = 310
summary_text = "add"
# FIXME Note this is deprecated in 1.11
# Remember to up sympy doc link when this is corrected
sympy_name = "Add"
def format_plus(self, items, evaluation):
"Plus[items__]"
def negate(item): # -> Expression (see FIXME below)
if item.has_form("Times", 1, None):
if isinstance(item.elements[0], Number):
neg = -item.elements[0]
if neg.sameQ(Integer1):
if len(item.elements) == 1:
return neg
else:
return Expression(SymbolTimes, *item.elements[1:])
else:
return Expression(SymbolTimes, neg, *item.elements[1:])
else:
return Expression(SymbolTimes, IntegerM1, *item.elements)
elif isinstance(item, Number):
return from_sympy(-item.to_sympy())
else:
return Expression(SymbolTimes, IntegerM1, item)
def is_negative(value) -> bool:
if isinstance(value, Complex):
real, imag = value.to_sympy().as_real_imag()
if real <= 0 and imag <= 0:
return True
elif isinstance(value, Number) and value.to_sympy() < 0:
return True
return False
elements = items.get_sequence()
values = [to_expression(SymbolHoldForm, element) for element in elements[:1]]
ops = []
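        # Join the remaining terms: negative ones are negated and attached
        # with "-", the rest with "+", so the sum prints as, e.g., a - b + c.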
for element in elements[1:]:
if (
element.has_form("Times", 1, None) and is_negative(element.elements[0])
) or is_negative(element):
element = negate(element)
op = "-"
else:
op = "+"
values.append(Expression(SymbolHoldForm, element))
ops.append(String(op))
return Expression(
SymbolInfix,
ListExpression(*values),
ListExpression(*ops),
Integer310,
SymbolLeft,
)
def eval(self, items, evaluation):
"Plus[items___]"
items_tuple = numerify(items, evaluation).get_sequence()
return eval_Plus(*items_tuple)
class Power(BinaryOperator, _MPMathFunction):
"""
<url>
:Exponentiation:
https://en.wikipedia.org/wiki/Exponentiation</url> (<url>
:SymPy:
https://docs.sympy.org/latest/modules/core.html#sympy.core.power.Pow</url>, <url>
:WMA:
https://reference.wolfram.com/language/ref/Power.html</url>)
<dl>
<dt>'Power[$a$, $b$]'
<dt>'$a$ ^ $b$'
<dd>represents $a$ raised to the power of $b$.
</dl>
>> 4 ^ (1/2)
= 2
>> 4 ^ (1/3)
= 2 ^ (2 / 3)
>> 3^123
= 48519278097689642681155855396759336072749841943521979872827
>> (y ^ 2) ^ (1/2)
= Sqrt[y ^ 2]
>> (y ^ 2) ^ 3
= y ^ 6
>> Plot[Evaluate[Table[x^y, {y, 1, 5}]], {x, -1.5, 1.5}, AspectRatio -> 1]
= -Graphics-
Use a decimal point to force numeric evaluation:
>> 4.0 ^ (1/3)
= 1.5874
'Power' has default value 1 for its second argument:
>> DefaultValues[Power]
= {HoldPattern[Default[Power, 2]] :> 1}
>> a /. x_ ^ n_. :> {x, n}
= {a, 1}
'Power' can be used with complex numbers:
>> (1.5 + 1.0 I) ^ 3.5
= -3.68294 + 6.95139 I
>> (1.5 + 1.0 I) ^ (3.5 + 1.5 I)
= -3.19182 + 0.645659 I
#> 1/0
: Infinite expression 1 / 0 encountered.
= ComplexInfinity
#> 0 ^ -2
: Infinite expression 1 / 0 ^ 2 encountered.
= ComplexInfinity
#> 0 ^ (-1/2)
: Infinite expression 1 / Sqrt[0] encountered.
= ComplexInfinity
#> 0 ^ -Pi
: Infinite expression 1 / 0 ^ 3.14159 encountered.
= ComplexInfinity
#> 0 ^ (2 I E)
: Indeterminate expression 0 ^ (0. + 5.43656 I) encountered.
= Indeterminate
#> 0 ^ - (Pi + 2 E I)
: Infinite expression 0 ^ (-3.14159 - 5.43656 I) encountered.
= ComplexInfinity
#> 0 ^ 0
: Indeterminate expression 0 ^ 0 encountered.
= Indeterminate
#> Sqrt[-3+2. I]
= 0.550251 + 1.81735 I
#> Sqrt[-3+2 I]
= Sqrt[-3 + 2 I]
#> (3/2+1/2I)^2
= 2 + 3 I / 2
#> I ^ I
= (-1) ^ (I / 2)
#> 2 ^ 2.0
= 4.
#> Pi ^ 4.
= 97.4091
#> a ^ b
= a ^ b
"""
attributes = A_LISTABLE | A_NUMERIC_FUNCTION | A_ONE_IDENTITY | A_PROTECTED
default_formats = False
defaults = {
2: "1",
}
formats = {
Expression(
SymbolPower,
Expression(SymbolPattern, Symbol("x"), Expression(SymbolBlank)),
RationalOneHalf,
): "HoldForm[Sqrt[x]]",
(("InputForm", "OutputForm"), "x_ ^ y_"): (
'Infix[{HoldForm[x], HoldForm[y]}, "^", 590, Right]'
),
("", "x_ ^ y_"): (
"PrecedenceForm[Superscript[PrecedenceForm[HoldForm[x], 590],"
" HoldForm[y]], 590]"
),
("", "x_ ^ y_?Negative"): (
"HoldForm[Divide[1, #]]&[If[y==-1, HoldForm[x], HoldForm[x]^-y]]"
),
("", "x_?Negative ^ y_"): (
'Infix[{HoldForm[(x)], HoldForm[y]},"^", 590, Right]'
),
}
grouping = "Right"
mpmath_name = "power"
messages = {
"infy": "Infinite expression `1` encountered.",
"indet": "Indeterminate expression `1` encountered.",
}
nargs = {2}
operator = "^"
precedence = 590
rules = {
"Power[]": "1",
"Power[x_]": "x",
}
summary_text = "exponentiate"
    # FIXME Note this is deprecated in 1.11
    # Remember to update the sympy doc link when this is corrected
sympy_name = "Pow"
def eval_check(self, x, y, evaluation):
"Power[x_, y_]"
# Power uses _MPMathFunction but does some error checking first
if isinstance(x, Number) and x.is_zero:
if isinstance(y, Number):
y_err = y
else:
y_err = eval_N(y, evaluation)
if isinstance(y_err, Number):
py_y = y_err.round_to_float(permit_complex=True).real
if py_y > 0:
return x
elif py_y == 0.0:
evaluation.message(
"Power", "indet", Expression(SymbolPower, x, y_err)
)
return SymbolIndeterminate
elif py_y < 0:
evaluation.message(
"Power", "infy", Expression(SymbolPower, x, y_err)
)
return SymbolComplexInfinity
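        # A purely imaginary base is rewritten via (b I) ^ y == b ^ y (-1) ^ (y / 2),
        # since I == (-1) ^ (1 / 2); `factor` below computes b ^ y.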
if isinstance(x, Complex) and x.real.is_zero:
yhalf = Expression(SymbolTimes, y, RationalOneHalf)
factor = self.eval(Expression(SymbolSequence, x.imag, y), evaluation)
return Expression(
SymbolTimes, factor, Expression(SymbolPower, IntegerM1, yhalf)
)
result = self.eval(Expression(SymbolSequence, x, y), evaluation)
if result is None or result != SymbolNull:
return result
class Sqrt(SympyFunction):
"""
<url>
:Square root:
https://en.wikipedia.org/wiki/Square_root</url> (<url>
:SymPy:
https://docs.sympy.org/latest/modules/codegen.html#sympy.codegen.cfunctions.Sqrt</url>, <url>
:WMA:
https://reference.wolfram.com/language/ref/Sqrt.html</url>)
<dl>
<dt>'Sqrt[$expr$]'
<dd>returns the square root of $expr$.
</dl>
>> Sqrt[4]
= 2
>> Sqrt[5]
= Sqrt[5]
>> Sqrt[5] // N
= 2.23607
>> Sqrt[a]^2
= a
Complex numbers:
>> Sqrt[-4]
= 2 I
>> I == Sqrt[-1]
= True
>> Plot[Sqrt[a^2], {a, -2, 2}]
= -Graphics-
#> N[Sqrt[2], 50]
= 1.4142135623730950488016887242096980785696718753769
"""
attributes = A_LISTABLE | A_NUMERIC_FUNCTION | A_PROTECTED
rules = {
"Sqrt[x_]": "x ^ (1/2)",
"MakeBoxes[Sqrt[x_], f:StandardForm|TraditionalForm]": (
"SqrtBox[MakeBoxes[x, f]]"
),
}
summary_text = "square root"
class Subtract(BinaryOperator):
"""
<url>
:Subtraction:
    https://en.wikipedia.org/wiki/Subtraction</url> (<url>:WMA:
https://reference.wolfram.com/language/ref/Subtract.html</url>)
<dl>
<dt>'Subtract[$a$, $b$]'
<dt>$a$ - $b$
<dd>represents the subtraction of $b$ from $a$.
</dl>
>> 5 - 3
= 2
>> a - b // FullForm
= Plus[a, Times[-1, b]]
>> a - b - c
= a - b - c
>> a - (b - c)
= a - b + c
"""
attributes = A_LISTABLE | A_NUMERIC_FUNCTION | A_PROTECTED
grouping = "Left"
operator = "-"
precedence = 310
precedence_parse = 311
rules = {
"Subtract[x_, y_]": "Plus[x, Times[-1, y]]",
}
summary_text = "subtract"
class Times(BinaryOperator, SympyFunction):
"""
<url>
:Multiplication:
https://en.wikipedia.org/wiki/Multiplication</url> (<url>
:SymPy:
https://docs.sympy.org/latest/modules/core.html#sympy.core.mul.Mul</url>, <url>
    :WMA:
    https://reference.wolfram.com/language/ref/Times.html</url>)
<dl>
<dt>'Times[$a$, $b$, ...]'
<dt>'$a$ * $b$ * ...'
<dt>'$a$ $b$ ...'
<dd>represents the product of the terms $a$, $b$, ...
</dl>
>> 10 * 2
= 20
>> 10 2
= 20
>> a * a
= a ^ 2
>> x ^ 10 * x ^ -2
= x ^ 8
>> {1, 2, 3} * 4
= {4, 8, 12}
>> Times @@ {1, 2, 3, 4}
= 24
>> IntegerLength[Times@@Range[5000]]
= 16326
'Times' has default value 1:
>> DefaultValues[Times]
= {HoldPattern[Default[Times]] :> 1}
>> a /. n_. * x_ :> {n, x}
= {1, a}
#> -a*b // FullForm
= Times[-1, a, b]
#> -(x - 2/3)
= 2 / 3 - x
#> -x*2
= -2 x
#> -(h/2) // FullForm
= Times[Rational[-1, 2], h]
#> x / x
= 1
#> 2x^2 / x^2
= 2
#> 3. Pi
= 9.42478
#> Head[3 * I]
= Complex
#> Head[Times[I, 1/2]]
= Complex
#> Head[Pi * I]
= Times
#> 3 * a //InputForm
= 3*a
#> 3 * a //OutputForm
= 3 a
#> -2.123456789 x
= -2.12346 x
#> -2.123456789 I
= 0. - 2.12346 I
#> N[Pi, 30] * I
= 3.14159265358979323846264338328 I
#> N[I Pi, 30]
= 3.14159265358979323846264338328 I
#> N[Pi * E, 30]
= 8.53973422267356706546355086955
#> N[Pi, 30] * N[E, 30]
= 8.53973422267356706546355086955
#> N[Pi, 30] * E
= 8.53973422267356706546355086955
#> % // Precision
= 30.
"""
attributes = (
A_FLAT
| A_LISTABLE
| A_NUMERIC_FUNCTION
| A_ONE_IDENTITY
| A_ORDERLESS
| A_PROTECTED
)
defaults = {
None: "1",
}
default_formats = False
formats = {}
operator = "*"
operator_display = " "
precedence = 400
rules = {}
    # FIXME Note this is deprecated in 1.11
    # Remember to update the sympy doc link when this is corrected
sympy_name = "Mul"
summary_text = "mutiply"
def format_times(self, items, evaluation, op="\u2062"):
"Times[items__]"
def inverse(item):
if item.has_form("Power", 2) and isinstance( # noqa
item.elements[1], (Integer, Rational, Real)
):
neg = -item.elements[1]
if neg.sameQ(Integer1):
return item.elements[0]
else:
return Expression(SymbolPower, item.elements[0], neg)
else:
return item
items = items.get_sequence()
positive = []
negative = []
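        # Partition the factors: negative-exponent powers and rational
        # denominators go below the fraction bar, everything else above it.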
for item in items:
if (
item.has_form("Power", 2)
and isinstance(item.elements[1], (Integer, Rational, Real))
and item.elements[1].to_sympy() < 0
): # nopep8
negative.append(inverse(item))
elif isinstance(item, Rational):
numerator = item.numerator()
if not numerator.sameQ(Integer1):
positive.append(numerator)
negative.append(item.denominator())
else:
positive.append(item)
if positive and positive[0].get_int_value() == -1:
del positive[0]
minus = True
else:
minus = False
positive = [Expression(SymbolHoldForm, item) for item in positive]
negative = [Expression(SymbolHoldForm, item) for item in negative]
if positive:
positive = create_infix(positive, op, 400, "None")
else:
positive = Integer1
if negative:
negative = create_infix(negative, op, 400, "None")
result = Expression(
SymbolDivide,
Expression(SymbolHoldForm, positive),
Expression(SymbolHoldForm, negative),
)
else:
result = positive
if minus:
result = Expression(
SymbolMinus, result
) # Expression('PrecedenceForm', result, 481))
result = Expression(SymbolHoldForm, result)
return result
def format_inputform(self, items, evaluation):
"InputForm: Times[items__]"
return self.format_times(items, evaluation, op="*")
def format_standardform(self, items, evaluation):
"StandardForm: Times[items__]"
return self.format_times(items, evaluation, op=" ")
def format_outputform(self, items, evaluation):
"OutputForm: Times[items__]"
return self.format_times(items, evaluation, op=" ")
def eval(self, items, evaluation):
"Times[items___]"
items = numerify(items, evaluation).get_sequence()
        return eval_Times(*items)
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/mathpnl.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def mathpnl(path):
"""mathpnl
Data loads lazily. Type data(mathpnl) into the console.
A data.frame with 3850 rows and 52 variables:
- distid. district identifier
- intid. intermediate school district
- lunch. percent eligible for free lunch
- enrol. school enrollment
- ptr. pupil/teacher: 1995-98
- found. foundation grant, $: 1995-98
- expp. expenditure per pupil
- revpp. revenue per pupil
- avgsal. average teacher salary
- drop. high school dropout rate, percent
- grad. high school grad. rate, percent
- math4. percent satisfactory, 4th grade math
- math7. percent satisfactory, 7th grade math
- choice. number choice students
- psa. # public school academy studs.
- year. 1992-1998
- staff. staff per 1000 students
- avgben. avg teacher fringe benefits
- y92. =1 if year == 1992
- y93. =1 if year == 1993
- y94. =1 if year == 1994
- y95. =1 if year == 1995
- y96. =1 if year == 1996
- y97. =1 if year == 1997
- y98. =1 if year == 1998
- lexpp. log(expp)
- lfound. log(found)
- lexpp\_1. lexpp[\_n-1]
- lfnd\_1. lfnd[\_n-1]
- lenrol. log(enrol)
- lenrolsq. lenrol^2
- lunchsq. lunch^2
- lfndsq. lfnd^2
- math4\_1. math4[\_n-1]
- cmath4. math4 - math4\_1
- gexpp. lexpp - lexpp\_1
  - gexpp\_1. gexpp[\_n-1]
- gfound. lfound - lfnd\_1
- gfnd\_1. gfound[\_n-1]
- clunch. lunch - lunch[\_n-1]
- clnchsq. lunchsq - lunchsq[\_n-1]
- genrol. lenrol - lenrol[\_n-1]
- genrolsq. genrol^2
- expp92. expp in 1992
- lexpp92. log(expp92)
- math4\_92. math4 in 1992
- cpi. consumer price index
- rexpp. real spending per pupil, 1997$
- lrexpp. log(rexpp)
- lrexpp\_1. lrexpp[\_n-1]
- grexpp. lrexpp - lrexpp\_1
- grexpp\_1. grexpp[\_n-1]
  https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `mathpnl.csv`.
Returns:
Tuple of np.ndarray `x_train` with 3850 rows and 52 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'mathpnl.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/wooldridge/mathpnl.csv'
maybe_download_and_extract(path, url,
save_file_name='mathpnl.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
  return x_train, metadata
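# Illustrative usage; the path below is hypothetical and the file is
# downloaded on the first call.
if __name__ == '__main__':
  x_train, metadata = mathpnl('~/data')
  print(x_train.shape)
  print(list(metadata['columns'])[:5])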
/AdaptivePELE-1.7.1.tar.gz/AdaptivePELE-1.7.1/README.rst
============
AdaptivePELE
============
|MIT license| |GitHub release| |PyPI release| |Conda release| |DOI|
AdaptivePELE is a Python module to perform enhanced sampling of molecular
simulations, built around the Protein Energy Landscape Exploration method (`PELE <https://pele.bsc.es/pele.wt>`_) developed in the Electronic and Atomic Protein Modelling group (`EAPM <https://www.bsc.es/discover-bsc/organisation/scientific-structure/electronic-and-atomic-protein-modeling-eapm>`_) at the Barcelona Supercomputing Center (`BSC <https://www.bsc.es>`_).
Usage
-----
AdaptivePELE is called with a control file as input
parameter. The control file is a json document that contains 4 sections:
general parameters, simulation parameters, clustering parameters and spawning
parameters. The first block refers to general parameters of the adaptive run,
while the other three blocks configure the three steps of an adaptive sampling
run, first run a propagation algorithm (simulation), then cluster the
trajectories obtained (clustering) and finally select the best point to start
the next iteration (spawning).
An example of usage::
python -m AdaptivePELE.adaptiveSampling controlFile.conf
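The control file itself mirrors those four blocks. The JSON skeleton below is
only illustrative; the block names and fields shown are assumptions, so consult
the documentation for the exact schema::

    {
        "generalParams": { "restart": false, "outputPath": "results" },
        "simulation": { "...": "propagation (e.g. PELE) parameters" },
        "clustering": { "...": "trajectory clustering parameters" },
        "spawning": { "...": "seed selection for the next iteration" }
    }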
Installation
------------
There are two methods to install AdaptivePELE, from repositories, either PyPI or Conda (recommended), or directly from source.
To install from PyPI simply run::
pip install AdaptivePELE
To install from Conda simply run::
conda install -c nostrumbiodiscovery -c conda-forge adaptive_pele
To install from source, you need to install and compile cython files in the base folder with::
git clone https://github.com/AdaptivePELE/AdaptivePELE.git
cd AdaptivePELE
python setup.py build_ext --inplace
Also, if AdaptivePELE was not installed in a typical library directory, a common option is to add it to your local PYTHONPATH::
export PYTHONPATH="/location/of/AdaptivePELE:$PYTHONPATH"
Documentation
-------------
The documentation for AdaptivePELE can be found `here <https://adaptivepele.github.io/AdaptivePELE/>`_
Contributors
------------
`Daniel Lecina <https://github.com/lecina>`_, `Joan Francesc Gilabert <https://github.com/cescgina>`_, `Oriol Gracia <https://github.com/OriolGraCar>`_, `Daniel Soler <https://github.com/danielSoler93>`_
Maintainer
----------
Joan Francesc Gilabert ([email protected])
Citation
--------
AdaptivePELE is research software. If you make use of AdaptivePELE in scientific publications, please cite it. The BibTeX reference is::
@article{Lecina2017,
author = {Lecina, Daniel and Gilabert, Joan Francesc and Guallar, Victor},
doi = {10.1038/s41598-017-08445-5},
issn = {2045-2322},
journal = {Scientific Reports},
number = {1},
pages = {8466},
pmid = {28814780},
title = {{Adaptive simulations, towards interactive protein-ligand modeling}},
url = {http://www.nature.com/articles/s41598-017-08445-5},
volume = {7},
year = {2017}
}
.. |MIT license| image:: https://img.shields.io/badge/License-MIT-blue.svg
:target: https://lbesson.mit-license.org/
.. |GitHub release| image:: https://img.shields.io/github/release/AdaptivePELE/AdaptivePELE.svg
:target: https://github.com/AdaptivePELE/AdaptivePELE/releases/
.. |PyPI release| image:: https://img.shields.io/pypi/v/AdaptivePELE.svg
:target: https://pypi.org/project/AdaptivePELE/
.. |DOI| image:: https://zenodo.org/badge/DOI/10.1038/s41598-017-08445-5.svg
:target: https://doi.org/10.1038/s41598-017-08445-5
.. |Conda release| image:: https://anaconda.org/nostrumbiodiscovery/adaptive_pele/badges/version.svg
:target: https://anaconda.org/NostrumBioDiscovery/adaptive_pele
/CallFlow-1.3.0.tar.gz/CallFlow-1.3.0/callflow/modules/gradients.py
import numpy as np
import pandas as pd
# TODO: Avoid the pandas PerformanceWarning in a future pass.
import warnings
import callflow
from callflow.utils.utils import histogram
from callflow.utils.df import df_unique
from callflow.datastructures.metrics import TIME_COLUMNS
from callflow.modules.histogram import Histogram
LOGGER = callflow.get_logger(__name__)
warnings.simplefilter(action="ignore", category=pd.errors.PerformanceWarning)
# ------------------------------------------------------------------------------
class Gradients:
"""
    Computes the ensemble gradients for a given node of the supergraph.
"""
def __init__(
self, sg, node, bins: int = 20, proxy_columns={}
):
"""
Constructor function for the class
        :param sg: SuperGraph whose ensemble dataframe holds the runs, one per dataset name.
:param node: Super node or node
:param bins: Number of bins to distribute the runtime information.
:param proxy_columns: Proxy columns
"""
assert isinstance(sg, callflow.SuperGraph)
assert node.get("type") in ["callsite", "module"]
assert isinstance(bins, int)
assert isinstance(proxy_columns, dict)
assert bins > 0
self.node = node
self.name = sg.get_name(node.get("id"), node.get("type"))
indexers = ["dataset"]
if node.get("type") == "callsite":
indexers.append("name")
elif node.get("type") == "module":
indexers.append("module")
        # TODO: Could be slow for large datasets.
self.df = sg.dataframe.set_index(indexers)
        # The gradient should be computed only for the ensemble dataframe,
        # i.e., when a dataframe column carries values from multiple runs.
self.datasets = list(self.df.index.levels[0])
assert len(self.datasets) >= 1
self.bins = bins
self.proxy_columns = proxy_columns
self.time_columns = [self.proxy_columns.get(_, _) for _ in TIME_COLUMNS]
self.max_ranks = max(df_unique(self.df, "rank"))
self.result = self.compute()
@staticmethod
def convert_dictmean_to_list(dictionary):
"""
Convert a dictionary by taking its mean and converting to a list.
:param dictionary: (dict) Input dictionary
:return: (list) mean of all values in the dictionary
"""
return [np.mean(np.array(list(dictionary[_].values()))) for _ in dictionary]
@staticmethod
def convert_dictmean_to_dict(dictionary):
"""
Convert a dictionary by taking its mean and converting to a list.
:param dictionary: (dict) Input dictionary
:return: (dict) Dictionary of mean values indexed by the keys in the
input dictionary.
"""
return {_: np.mean(np.array(list(dictionary[_].values()))) for _ in dictionary}
# --------------------------------------------------------------------------
@staticmethod
def map_datasets_to_bins(bins, dataset_dict={}):
"""
Map dataset information to the corresponding bins.
:param bins: (int) Bin size
:param dataset_dict: Dataset dictionary
:return: Mapping of the datases to the corresponding bins.
"""
# TODO: previously, this logic applied to bin edges
# but, now, we are working on bin_centers
binw = bins[1] - bins[0]
bin_edges = np.append(bins - 0.5 * binw, bins[-1] + 0.5 * binw)
# Map the datasets to their histogram indexes.
dataset_position_dict = {}
for dataset in dataset_dict:
mean = dataset_dict[dataset]
for idx, x in np.ndenumerate(bin_edges):
if x > float(mean):
if idx[0] != 0:
pos = idx[0] - 1
else:
pos = idx[0]
dataset_position_dict[dataset] = pos
break
if idx[0] == len(bin_edges) - 1:
dataset_position_dict[dataset] = len(bin_edges) - 2
return dataset_position_dict
# --------------------------------------------------------------------------
def compute(self):
"""
Compute the required results.
:return: (JSON) data
"""
dists = {tk: {} for tk, tv in zip(TIME_COLUMNS, self.time_columns)}
# Get the runtimes for all the runs.
levels = self.df.index.unique().tolist()
for idx, dataset in enumerate(self.datasets):
# If the level doesn't exist, it means this callsite is not present
# in the dataset.
if (dataset, self.node.get("id")) not in levels:
continue
node_df = self.df.xs((dataset, self.node.get("id")))
for tk, tv in zip(TIME_COLUMNS, self.time_columns):
if node_df.empty:
dists[tk][dataset] = dict(
(rank, 0) for rank in range(0, self.max_ranks)
)
else:
dists[tk][dataset] = dict(zip(node_df["rank"], node_df[tv]))
        # Use the configured number of bins; automatic selection (e.g., the
        # Freedman-Diaconis rule) is currently disabled.
num_of_bins = self.bins
# convert the dictionary of values to list of values.
results = {}
for tk, tv in zip(TIME_COLUMNS, self.time_columns):
dists_list = np.array(Gradients.convert_dictmean_to_list(dists[tk]))
            datasets_dict = Gradients.convert_dictmean_to_dict(dists[tk])
            dists_dict = datasets_dict  # same mapping, kept under both names
hist_grid = histogram(dists_list, bins=num_of_bins)
# kde_grid = kde(dists_list, gridsize=num_of_bins)
dataset_pos = Gradients.map_datasets_to_bins(hist_grid[0], datasets_dict)
pos_dataset = {bin: [] for bin in range(0, self.bins)}
for dataset in dataset_pos:
position = dataset_pos[dataset]
if dataset not in pos_dataset[position]:
pos_dataset[position].append(dataset)
results[tk] = {
"bins": num_of_bins,
"dataset": {"mean": dists_dict, "d2p": dataset_pos, "p2d": pos_dataset},
# "kde": Histogram._format_data(kde_grid),
"hist": Histogram._format_data(hist_grid),
}
return results
# ------------------------------------------------------------------------------
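# Illustrative smoke test of the static bin-mapping helper; the run names and
# mean values below are hypothetical.
if __name__ == "__main__":
    _centers = np.linspace(0.0, 10.0, 5)  # five bin centers
    _means = {"run-1": 1.2, "run-2": 9.7}
    # Maps each run to the histogram bin holding its mean: run-1 -> 0, run-2 -> 4.
    print(Gradients.map_datasets_to_bins(_centers, _means))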
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/dns/rdtypes/ANY/NSEC3.py
# Copyright (C) 2004-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import base64
import binascii
import string
import struct
import dns.exception
import dns.rdata
import dns.rdatatype
from dns._compat import xrange, text_type, PY3
# pylint: disable=deprecated-string-function
if PY3:
b32_hex_to_normal = bytes.maketrans(b'0123456789ABCDEFGHIJKLMNOPQRSTUV',
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
b32_normal_to_hex = bytes.maketrans(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
b'0123456789ABCDEFGHIJKLMNOPQRSTUV')
else:
b32_hex_to_normal = string.maketrans('0123456789ABCDEFGHIJKLMNOPQRSTUV',
'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567')
b32_normal_to_hex = string.maketrans('ABCDEFGHIJKLMNOPQRSTUVWXYZ234567',
'0123456789ABCDEFGHIJKLMNOPQRSTUV')
# pylint: enable=deprecated-string-function
# hash algorithm constants
SHA1 = 1
# flag constants
OPTOUT = 1
class NSEC3(dns.rdata.Rdata):
"""NSEC3 record
@ivar algorithm: the hash algorithm number
@type algorithm: int
@ivar flags: the flags
@type flags: int
@ivar iterations: the number of iterations
@type iterations: int
@ivar salt: the salt
@type salt: string
@ivar next: the next name hash
@type next: string
@ivar windows: the windowed bitmap list
@type windows: list of (window number, string) tuples"""
__slots__ = ['algorithm', 'flags', 'iterations', 'salt', 'next', 'windows']
def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt,
next, windows):
super(NSEC3, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.flags = flags
self.iterations = iterations
if isinstance(salt, text_type):
self.salt = salt.encode()
else:
self.salt = salt
self.next = next
self.windows = windows
def to_text(self, origin=None, relativize=True, **kw):
next = base64.b32encode(self.next).translate(
b32_normal_to_hex).lower().decode()
if self.salt == b'':
salt = '-'
else:
salt = binascii.hexlify(self.salt).decode()
text = u''
for (window, bitmap) in self.windows:
bits = []
for i in xrange(0, len(bitmap)):
byte = bitmap[i]
for j in xrange(0, 8):
if byte & (0x80 >> j):
bits.append(dns.rdatatype.to_text(window * 256 +
i * 8 + j))
text += (u' ' + u' '.join(bits))
return u'%u %u %u %s %s%s' % (self.algorithm, self.flags,
self.iterations, salt, next, text)
@classmethod
def from_text(cls, rdclass, rdtype, tok, origin=None, relativize=True):
algorithm = tok.get_uint8()
flags = tok.get_uint8()
iterations = tok.get_uint16()
salt = tok.get_string()
if salt == u'-':
salt = b''
else:
salt = binascii.unhexlify(salt.encode('ascii'))
next = tok.get_string().encode(
'ascii').upper().translate(b32_hex_to_normal)
next = base64.b32decode(next)
rdtypes = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
nrdtype = dns.rdatatype.from_text(token.value)
if nrdtype == 0:
raise dns.exception.SyntaxError("NSEC3 with bit 0")
if nrdtype > 65535:
raise dns.exception.SyntaxError("NSEC3 with bit > 65535")
rdtypes.append(nrdtype)
rdtypes.sort()
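        # RFC 4034/5155 type bitmap: types are grouped into windows of 256
        # (window = type // 256); each window stores up to 32 octets, and bit
        # (0x80 >> (type % 8)) of octet ((type % 256) // 8) marks a present
        # type.  Windows with no types set are omitted from the wire format.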
window = 0
octets = 0
prior_rdtype = 0
bitmap = bytearray(b'\0' * 32)
windows = []
for nrdtype in rdtypes:
if nrdtype == prior_rdtype:
continue
prior_rdtype = nrdtype
new_window = nrdtype // 256
if new_window != window:
if octets != 0:
windows.append((window, bitmap[0:octets]))
bitmap = bytearray(b'\0' * 32)
window = new_window
offset = nrdtype % 256
byte = offset // 8
bit = offset % 8
octets = byte + 1
bitmap[byte] = bitmap[byte] | (0x80 >> bit)
if octets != 0:
windows.append((window, bitmap[0:octets]))
return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next,
windows)
def to_wire(self, file, compress=None, origin=None):
l = len(self.salt)
file.write(struct.pack("!BBHB", self.algorithm, self.flags,
self.iterations, l))
file.write(self.salt)
l = len(self.next)
file.write(struct.pack("!B", l))
file.write(self.next)
for (window, bitmap) in self.windows:
file.write(struct.pack("!BB", window, len(bitmap)))
file.write(bitmap)
@classmethod
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin=None):
(algorithm, flags, iterations, slen) = \
struct.unpack('!BBHB', wire[current: current + 5])
current += 5
rdlen -= 5
salt = wire[current: current + slen].unwrap()
current += slen
rdlen -= slen
nlen = wire[current]
current += 1
rdlen -= 1
next = wire[current: current + nlen].unwrap()
current += nlen
rdlen -= nlen
windows = []
while rdlen > 0:
if rdlen < 3:
raise dns.exception.FormError("NSEC3 too short")
window = wire[current]
octets = wire[current + 1]
if octets == 0 or octets > 32:
raise dns.exception.FormError("bad NSEC3 octets")
current += 2
rdlen -= 2
if rdlen < octets:
raise dns.exception.FormError("bad NSEC3 bitmap length")
bitmap = bytearray(wire[current: current + octets].unwrap())
current += octets
rdlen -= octets
windows.append((window, bitmap))
return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next,
                   windows)
/Mesh-Client-2.1.2.tar.gz/Mesh-Client-2.1.2/SECURITY.md
# Security
NHS Digital takes security and the protection of private data extremely
seriously. If you believe you have found a vulnerability or other issue which
has compromised or could compromise the security of any of our systems and/or
private data managed by our systems, please do not hesitate to contact us using
the methods outlined below.
## Reporting a vulnerability
**PLEASE NOTE: Email and HackerOne are our preferred methods of receiving
reports.**
### Email
If you wish to notify us of a vulnerability via email, please include detailed
information on the nature of the vulnerability and any steps required to
reproduce it.
You can reach us at:
* [email protected]
* [email protected]
### HackerOne
If you are registered with HackerOne and have been admitted to the NHS
Programme, you can report directly to us at: https://hackerone.com/nhs
### NCSC
You can send your report to the National Cyber Security Centre, who will assess
your report and pass it on to NHS Digital if necessary.
You can report vulnerabilities here:
https://www.ncsc.gov.uk/information/vulnerability-reporting
### OpenBugBounty
We also accept bug reports via OpenBugBounty: https://www.openbugbounty.org/
## General Security Enquiries
If you have general enquiries regarding our cyber security, please reach out
to us at [email protected]
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
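# Usage sketch (standard MMDetection workflow, run from the repository root):
#   python tools/train.py configs/sabl/sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py
# The files in `_base_` supply defaults that this config overrides field by
# field (here: the ResNet-101 backbone and the SABL retina head).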
/Krakatau-noff-v0.20181212.tar.gz/Krakatau-noff-v0.20181212/Krakatau/graph_util.py
import itertools
def tarjanSCC(roots, getChildren):
"""Return a list of strongly connected components in a graph. If getParents is passed instead of getChildren, the result will be topologically sorted.
roots - list of root nodes to search from
getChildren - function which returns children of a given node
"""
sccs = []
indexCounter = itertools.count()
index = {}
lowlink = {}
removed = set()
subtree = []
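    # Tarjan invariants: index[] is the DFS discovery order and lowlink[] the
    # smallest index reachable from a node's subtree; a node roots an SCC
    # exactly when index == lowlink.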
# Use iterative version to avoid stack limits for large datasets
stack = [(node, 0) for node in roots]
while stack:
current, state = stack.pop()
if state == 0: # before recursing
if current not in index: # if it's in index, it was already visited (possibly earlier on the current search stack)
lowlink[current] = index[current] = next(indexCounter)
subtree.append(current)
stack.append((current, 1))
stack.extend((child, 0) for child in getChildren(current) if child not in removed)
else: # after recursing
children = [child for child in getChildren(current) if child not in removed]
for child in children:
if index[child] <= index[current]: # backedge (or selfedge)
lowlink[current] = min(lowlink[current], index[child])
else:
lowlink[current] = min(lowlink[current], lowlink[child])
assert lowlink[current] <= index[current]
if index[current] == lowlink[current]:
scc = []
while not scc or scc[-1] != current:
scc.append(subtree.pop())
sccs.append(tuple(scc))
removed.update(scc)
return sccs
def topologicalSort(roots, getParents):
"""Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node
"""
results = []
visited = set()
# Use iterative version to avoid stack limits for large datasets
stack = [(node,0) for node in roots]
while stack:
current, state = stack.pop()
if state == 0: # before recursing
if current not in visited:
visited.add(current)
stack.append((current,1))
stack.extend((parent,0) for parent in getParents(current))
else: # after recursing
assert current in visited
results.append(current)
    return results
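# Illustrative usage on hypothetical graphs given as adjacency dicts.
if __name__ == '__main__':
    # Cycle a -> b -> c -> a with a pendant node d.
    graph = {'a': ['b'], 'b': ['c'], 'c': ['a', 'd'], 'd': []}
    print(tarjanSCC(['a'], graph.get))  # the 3-cycle {a, b, c} is one SCC, {d} the other
    # Passing parent links yields dependencies-first order.
    parents = {'build': [], 'test': ['build'], 'deploy': ['test']}
    print(topologicalSort(['deploy'], parents.get))  # ['build', 'test', 'deploy']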
/NlvWxPython-4.2.0-cp37-cp37m-win_amd64.whl/wx/lib/ogl/drawn.py
import os.path
from .basic import AttachmentPoint, RectangleShape
from .oglmisc import *
METAFLAGS_OUTLINE = 1
METAFLAGS_ATTACHMENTS = 2
DRAWN_ANGLE_0 = 0
DRAWN_ANGLE_90 = 1
DRAWN_ANGLE_180 = 2
DRAWN_ANGLE_270 = 3
# Drawing operations
DRAWOP_SET_PEN = 1
DRAWOP_SET_BRUSH = 2
DRAWOP_SET_FONT = 3
DRAWOP_SET_TEXT_COLOUR = 4
DRAWOP_SET_BK_COLOUR = 5
DRAWOP_SET_BK_MODE = 6
DRAWOP_SET_CLIPPING_RECT = 7
DRAWOP_DESTROY_CLIPPING_RECT = 8
DRAWOP_DRAW_LINE = 20
DRAWOP_DRAW_POLYLINE = 21
DRAWOP_DRAW_POLYGON = 22
DRAWOP_DRAW_RECT = 23
DRAWOP_DRAW_ROUNDED_RECT = 24
DRAWOP_DRAW_ELLIPSE = 25
DRAWOP_DRAW_POINT = 26
DRAWOP_DRAW_ARC = 27
DRAWOP_DRAW_TEXT = 28
DRAWOP_DRAW_SPLINE = 29
DRAWOP_DRAW_ELLIPTIC_ARC = 30
class DrawOp(object):
def __init__(self, theOp):
self._op = theOp
def GetOp(self):
return self._op
def GetPerimeterPoint(self, x1, y1, x2, y2, xOffset, yOffset, attachmentMode):
return False
    def Scale(self, scaleX, scaleY):
        """No-op in this base class; overridden by ops that support scaling."""
        pass
    def Translate(self, x, y):
        """No-op in this base class; overridden by ops that support translation."""
        pass
    def Rotate(self, x, y, theta, sinTheta, cosTheta):
        """No-op in this base class; overridden by ops that support rotation."""
        pass
class OpSetGDI(DrawOp):
"""Set font, brush, text colour."""
def __init__(self, theOp, theImage, theGdiIndex, theMode = 0):
DrawOp.__init__(self, theOp)
self._gdiIndex = theGdiIndex
self._image = theImage
self._mode = theMode
def Do(self, dc, xoffset = 0, yoffset = 0):
if self._op == DRAWOP_SET_PEN:
# Check for overriding this operation for outline colour
if self._gdiIndex in self._image._outlineColours:
if self._image._outlinePen:
dc.SetPen(self._image._outlinePen)
else:
try:
dc.SetPen(self._image._gdiObjects[self._gdiIndex])
except IndexError:
pass
elif self._op == DRAWOP_SET_BRUSH:
# Check for overriding this operation for outline or fill colour
if self._gdiIndex in self._image._outlineColours:
# Need to construct a brush to match the outline pen's colour
if self._image._outlinePen:
br = wx.Brush(self._image._outlinePen, wx.BRUSHSTYLE_SOLID)
if br:
dc.SetBrush(br)
elif self._gdiIndex in self._image._fillColours:
if self._image._fillBrush:
dc.SetBrush(self._image._fillBrush)
else:
brush = self._image._gdiObjects[self._gdiIndex]
if brush:
dc.SetBrush(brush)
elif self._op == DRAWOP_SET_FONT:
try:
dc.SetFont(self._image._gdiObjects[self._gdiIndex])
except IndexError:
pass
elif self._op == DRAWOP_SET_TEXT_COLOUR:
dc.SetTextForeground(wx.Colour(self._r, self._g, self._b))
elif self._op == DRAWOP_SET_BK_COLOUR:
dc.SetTextBackground(wx.Colour(self._r, self._g, self._b))
elif self._op == DRAWOP_SET_BK_MODE:
dc.SetBackgroundMode(self._mode)
class OpSetClipping(DrawOp):
"""Set/destroy clipping."""
def __init__(self, theOp, theX1, theY1, theX2, theY2):
DrawOp.__init__(self, theOp)
self._x1 = theX1
self._y1 = theY1
self._x2 = theX2
self._y2 = theY2
def Do(self, dc, xoffset, yoffset):
if self._op == DRAWOP_SET_CLIPPING_RECT:
dc.SetClippingRegion(self._x1 + xoffset, self._y1 + yoffset, self._x2 + xoffset, self._y2 + yoffset)
elif self._op == DRAWOP_DESTROY_CLIPPING_RECT:
dc.DestroyClippingRegion()
def Scale(self, scaleX, scaleY):
self._x1 *= scaleX
self._y1 *= scaleY
self._x2 *= scaleX
self._y2 *= scaleY
def Translate(self, x, y):
self._x1 += x
self._y1 += y
class OpDraw(DrawOp):
"""Draw line, rectangle, rounded rectangle, ellipse, point, arc, text."""
def __init__(self, theOp, theX1, theY1, theX2, theY2, theRadius = 0.0, s = ""):
DrawOp.__init__(self, theOp)
self._x1 = theX1
self._y1 = theY1
self._x2 = theX2
self._y2 = theY2
self._x3 = 0.0
self._y3 = 0.0
self._radius = theRadius
self._textString = s
def Do(self, dc, xoffset, yoffset):
if self._op == DRAWOP_DRAW_LINE:
dc.DrawLine(self._x1 + xoffset, self._y1 + yoffset, self._x2 + xoffset, self._y2 + yoffset)
elif self._op == DRAWOP_DRAW_RECT:
dc.DrawRectangle(self._x1 + xoffset, self._y1 + yoffset, self._x2, self._y2)
elif self._op == DRAWOP_DRAW_ROUNDED_RECT:
dc.DrawRoundedRectangle(self._x1 + xoffset, self._y1 + yoffset, self._x2, self._y2, self._radius)
elif self._op == DRAWOP_DRAW_ELLIPSE:
dc.DrawEllipse(self._x1 + xoffset, self._y1 + yoffset, self._x2, self._y2)
elif self._op == DRAWOP_DRAW_ARC:
dc.DrawArc(self._x2 + xoffset, self._y2 + yoffset, self._x3 + xoffset, self._y3 + yoffset, self._x1 + xoffset, self._y1 + yoffset)
elif self._op == DRAWOP_DRAW_ELLIPTIC_ARC:
dc.DrawEllipticArc(self._x1 + xoffset, self._y1 + yoffset, self._x2, self._y2, self._x3 * 360 / (2 * math.pi), self._y3 * 360 / (2 * math.pi))
elif self._op == DRAWOP_DRAW_POINT:
dc.DrawPoint(self._x1 + xoffset, self._y1 + yoffset)
elif self._op == DRAWOP_DRAW_TEXT:
dc.DrawText(self._textString, self._x1 + xoffset, self._y1 + yoffset)
def Scale(self, scaleX, scaleY):
self._x1 *= scaleX
self._y1 *= scaleY
self._x2 *= scaleX
self._y2 *= scaleY
if self._op != DRAWOP_DRAW_ELLIPTIC_ARC:
self._x3 *= scaleX
self._y3 *= scaleY
self._radius *= scaleX
def Translate(self, x, y):
self._x1 += x
self._y1 += y
if self._op == DRAWOP_DRAW_LINE:
self._x2 += x
self._y2 += y
elif self._op == DRAWOP_DRAW_ARC:
self._x2 += x
self._y2 += y
self._x3 += x
self._y3 += y
def Rotate(self, x, y, theta, sinTheta, cosTheta):
newX1 = self._x1 * cosTheta + self._y1 * sinTheta + x * (1 - cosTheta) + y * sinTheta
newY1 = self._x1 * sinTheta + self._y1 * cosTheta + y * (1 - cosTheta) + x * sinTheta
if self._op == DRAWOP_DRAW_LINE:
newX2 = self._x2 * cosTheta - self._y2 * sinTheta + x * (1 - cosTheta) + y * sinTheta
newY2 = self._x2 * sinTheta + self._y2 * cosTheta + y * (1 - cosTheta) + x * sinTheta;
self._x1 = newX1
self._y1 = newY1
self._x2 = newX2
self._y2 = newY2
elif self._op in [DRAWOP_DRAW_RECT, DRAWOP_DRAW_ROUNDED_RECT, DRAWOP_DRAW_ELLIPTIC_ARC]:
# Assume only 0, 90, 180, 270 degree rotations.
# oldX1, oldY1 represents the top left corner. Find the
# bottom right, and rotate that. Then the width/height is
# the difference between x/y values.
oldBottomRightX = self._x1 + self._x2
oldBottomRightY = self._y1 + self._y2
newBottomRightX = oldBottomRightX * cosTheta - oldBottomRightY * sinTheta + x * (1 - cosTheta) + y * sinTheta
newBottomRightY = oldBottomRightX * sinTheta + oldBottomRightY * cosTheta + y * (1 - cosTheta) + x * sinTheta
# Now find the new top-left, bottom-right coordinates.
minX = min(newX1, newBottomRightX)
minY = min(newY1, newBottomRightY)
maxX = max(newX1, newBottomRightX)
maxY = max(newY1, newBottomRightY)
self._x1 = minX
self._y1 = minY
self._x2 = maxX - minX # width
self._y2 = maxY - minY # height
if self._op == DRAWOP_DRAW_ELLIPTIC_ARC:
# Add rotation to angles
self._x3 += theta
self._y3 += theta
elif self._op == DRAWOP_DRAW_ARC:
newX2 = self._x2 * cosTheta - self._y2 * sinTheta + x * (1 - cosTheta) + y * sinTheta
newY2 = self._x2 * sinTheta + self._y2 * cosTheta + y * (1 - cosTheta) + x * sinTheta
newX3 = self._x3 * cosTheta - self._y3 * sinTheta + x * (1 - cosTheta) + y * sinTheta
newY3 = self._x3 * sinTheta + self._y3 * cosTheta + y * (1 - cosTheta) + x * sinTheta
self._x1 = newX1
self._y1 = newY1
self._x2 = newX2
self._y2 = newY2
self._x3 = newX3
self._y3 = newY3
class OpPolyDraw(DrawOp):
"""Draw polygon, polyline, spline."""
def __init__(self, theOp, thePoints):
DrawOp.__init__(self, theOp)
self._noPoints = len(thePoints)
self._points = thePoints
def Do(self, dc, xoffset, yoffset):
if self._op == DRAWOP_DRAW_POLYLINE:
dc.DrawLines(self._points, xoffset, yoffset)
elif self._op == DRAWOP_DRAW_POLYGON:
dc.DrawPolygon(self._points, xoffset, yoffset)
elif self._op == DRAWOP_DRAW_SPLINE:
dc.DrawSpline(self._points) # no offsets in DrawSpline
def Scale(self, scaleX, scaleY):
for i in range(self._noPoints):
self._points[i] = wx.Point(self._points[i][0] * scaleX, self._points[i][1] * scaleY)
def Translate(self, x, y):
for i in range(self._noPoints):
self._points[i][0] += x
self._points[i][1] += y
def Rotate(self, x, y, theta, sinTheta, cosTheta):
for i in range(self._noPoints):
x1 = self._points[i][0]
y1 = self._points[i][1]
self._points[i] = x1 * cosTheta - y1 * sinTheta + x * (1 - cosTheta) + y * sinTheta, x1 * sinTheta + y1 * cosTheta + y * (1 - cosTheta) + x * sinTheta
def OnDrawOutline(self, dc, x, y, w, h, oldW, oldH):
dc.SetBrush(wx.TRANSPARENT_BRUSH)
# Multiply all points by proportion of new size to old size
x_proportion = abs(w / oldW)
y_proportion = abs(h / oldH)
dc.DrawPolygon([(x_proportion * x, y_proportion * y) for x, y in self._points], x, y)
def GetPerimeterPoint(self, x1, y1, x2, y2, xOffset, yOffset, attachmentMode):
# First check for situation where the line is vertical,
# and we would want to connect to a point on that vertical --
# oglFindEndForPolyline can't cope with this (the arrow
# gets drawn to the wrong place).
if attachmentMode == ATTACHMENT_MODE_NONE and x1 == x2:
# Look for the point we'd be connecting to. This is
# a heuristic...
for point in self._points:
if point[0] == 0:
if y2 > y1 and point[1] > 0:
return point[0]+xOffset, point[1]+yOffset
elif y2 < y1 and point[1] < 0:
return point[0]+xOffset, point[1]+yOffset
return FindEndForPolyline([ p[0] + xOffset for p in self._points ],
[ p[1] + yOffset for p in self._points ],
x1, y1, x2, y2)
class PseudoMetaFile(object):
"""
A simple metafile-like class which can load data from a Windows
metafile on all platforms.
"""
def __init__(self):
self._currentRotation = 0
self._rotateable = True
self._width = 0.0
self._height = 0.0
self._outlinePen = None
self._fillBrush = None
self._outlineOp = -1
self._ops = []
self._gdiObjects = []
self._outlineColours = []
self._fillColours = []
def Clear(self):
self._ops = []
self._gdiObjects = []
self._outlineColours = []
self._fillColours = []
        self._outlineOp = -1
def IsValid(self):
return self._ops != []
def GetOps(self):
return self._ops
def SetOutlineOp(self, op):
self._outlineOp = op
def GetOutlineOp(self):
return self._outlineOp
def SetOutlinePen(self, pen):
self._outlinePen = pen
    def GetOutlinePen(self):
return self._outlinePen
def SetFillBrush(self, brush):
self._fillBrush = brush
def GetFillBrush(self):
return self._fillBrush
def SetSize(self, w, h):
self._width = w
self._height = h
def SetRotateable(self, rot):
self._rotateable = rot
def GetRotateable(self):
return self._rotateable
def GetFillColours(self):
return self._fillColours
def GetOutlineColours(self):
return self._outlineColours
def Draw(self, dc, xoffset, yoffset):
for op in self._ops:
op.Do(dc, xoffset, yoffset)
def Scale(self, sx, sy):
for op in self._ops:
op.Scale(sx, sy)
self._width *= sx
self._height *= sy
def Translate(self, x, y):
for op in self._ops:
op.Translate(x, y)
def Rotate(self, x, y, theta):
theta1 = theta - self._currentRotation
if theta1 == 0:
return
cosTheta = math.cos(theta1)
sinTheta = math.sin(theta1)
for op in self._ops:
op.Rotate(x, y, theta, sinTheta, cosTheta)
self._currentRotation = theta
def LoadFromMetaFile(self, filename, rwidth, rheight):
        if not os.path.exists(filename):
return False
print("LoadFromMetaFile not implemented yet.")
return False # TODO
# Scale to fit size
def ScaleTo(self, w, h):
scaleX = w / self._width
scaleY = h / self._height
self.Scale(scaleX, scaleY)
def GetBounds(self):
maxX, maxY, minX, minY = -99999.9, -99999.9, 99999.9, 99999.9
for op in self._ops:
if op.GetOp() in [DRAWOP_DRAW_LINE, DRAWOP_DRAW_RECT, DRAWOP_DRAW_ROUNDED_RECT, DRAWOP_DRAW_ELLIPSE, DRAWOP_DRAW_POINT, DRAWOP_DRAW_TEXT]:
if op._x1 < minX:
minX = op._x1
if op._x1 > maxX:
maxX = op._x1
if op._y1 < minY:
minY = op._y1
if op._y1 > maxY:
maxY = op._y1
if op.GetOp() == DRAWOP_DRAW_LINE:
if op._x2 < minX:
minX = op._x2
if op._x2 > maxX:
maxX = op._x2
if op._y2 < minY:
minY = op._y2
if op._y2 > maxY:
maxY = op._y2
elif op.GetOp() in [ DRAWOP_DRAW_RECT, DRAWOP_DRAW_ROUNDED_RECT, DRAWOP_DRAW_ELLIPSE]:
if op._x1 + op._x2 < minX:
minX = op._x1 + op._x2
if op._x1 + op._x2 > maxX:
maxX = op._x1 + op._x2
if op._y1 + op._y2 < minY:
minY = op._y1 + op._y2
if op._y1 + op._y2 > maxX:
maxY = op._y1 + op._y2
elif op.GetOp() == DRAWOP_DRAW_ARC:
# TODO: don't yet know how to calculate the bounding box
# for an arc. So pretend it's a line; to get a correct
# bounding box, draw a blank rectangle first, of the
# correct size.
if op._x1 < minX:
minX = op._x1
if op._x1 > maxX:
maxX = op._x1
if op._y1 < minY:
minY = op._y1
if op._y1 > maxY:
maxY = op._y1
if op._x2 < minX:
minX = op._x2
if op._x2 > maxX:
maxX = op._x2
if op._y2 < minY:
minY = op._y2
if op._y2 > maxY:
maxY = op._y2
elif op.GetOp() in [DRAWOP_DRAW_POLYLINE, DRAWOP_DRAW_POLYGON, DRAWOP_DRAW_SPLINE]:
for point in op._points:
if point[0] < minX:
minX = point[0]
if point[0] > maxX:
maxX = point[0]
if point[1] < minY:
minY = point[1]
if point[1] > maxY:
maxY = point[1]
return [minX, minY, maxX, maxY]
# Calculate size from current operations
def CalculateSize(self, shape):
boundMinX, boundMinY, boundMaxX, boundMaxY = self.GetBounds()
# By Pierre Hjälm: This is NOT in the old version, which
# gets this totally wrong. Since the drawing is centered, we
# cannot get the width by measuring from left to right, we
# must instead make enough room to handle the largest
# coordinates
#self.SetSize(boundMaxX - boundMinX, boundMaxY - boundMinY)
w = max(abs(boundMinX), abs(boundMaxX)) * 2
h = max(abs(boundMinY), abs(boundMaxY)) * 2
self.SetSize(w, h)
if shape:
shape.SetWidth(self._width)
shape.SetHeight(self._height)
# Set of functions for drawing into a pseudo metafile
def DrawLine(self, pt1, pt2):
op = OpDraw(DRAWOP_DRAW_LINE, pt1[0], pt1[1], pt2[0], pt2[1])
self._ops.append(op)
def DrawRectangle(self, rect):
op = OpDraw(DRAWOP_DRAW_RECT, rect[0], rect[1], rect[2], rect[3])
self._ops.append(op)
def DrawRoundedRectangle(self, rect, radius):
op = OpDraw(DRAWOP_DRAW_ROUNDED_RECT, rect[0], rect[1], rect[2], rect[3])
op._radius = radius
self._ops.append(op)
def DrawEllipse(self, rect):
op = OpDraw(DRAWOP_DRAW_ELLIPSE, rect[0], rect[1], rect[2], rect[3])
self._ops.append(op)
def DrawArc(self, centrePt, startPt, endPt):
op = OpDraw(DRAWOP_DRAW_ARC, centrePt[0], centrePt[1], startPt[0], startPt[1])
op._x3, op._y3 = endPt
self._ops.append(op)
def DrawEllipticArc(self, rect, startAngle, endAngle):
startAngleRadians = startAngle * math.pi * 2 / 360
endAngleRadians = endAngle * math.pi * 2 / 360
op = OpDraw(DRAWOP_DRAW_ELLIPTIC_ARC, rect[0], rect[1], rect[2], rect[3])
op._x3 = startAngleRadians
op._y3 = endAngleRadians
self._ops.append(op)
def DrawPoint(self, pt):
op = OpDraw(DRAWOP_DRAW_POINT, pt[0], pt[1], 0, 0)
self._ops.append(op)
def DrawText(self, text, pt):
op = OpDraw(DRAWOP_DRAW_TEXT, pt[0], pt[1], 0, 0)
op._textString = text
self._ops.append(op)
def DrawLines(self, pts):
op = OpPolyDraw(DRAWOP_DRAW_POLYLINE, pts)
self._ops.append(op)
# flags:
# oglMETAFLAGS_OUTLINE: will be used for drawing the outline and
# also drawing lines/arrows at the circumference.
# oglMETAFLAGS_ATTACHMENTS: will be used for initialising attachment
# points at the vertices (perhaps a rare case...)
def DrawPolygon(self, pts, flags = 0):
op = OpPolyDraw(DRAWOP_DRAW_POLYGON, pts)
self._ops.append(op)
if flags & METAFLAGS_OUTLINE:
self._outlineOp = len(self._ops) - 1
def DrawSpline(self, pts):
op = OpPolyDraw(DRAWOP_DRAW_SPLINE, pts)
self._ops.append(op)
    def SetClippingRect(self, rect):
        op = OpSetClipping(DRAWOP_SET_CLIPPING_RECT, rect[0], rect[1], rect[2], rect[3])
        self._ops.append(op)
def DestroyClippingRect(self):
op = OpSetClipping(DRAWOP_DESTROY_CLIPPING_RECT, 0, 0, 0, 0)
self._ops.append(op)
def SetPen(self, pen, isOutline = False):
self._gdiObjects.append(pen)
op = OpSetGDI(DRAWOP_SET_PEN, self, len(self._gdiObjects) - 1)
self._ops.append(op)
if isOutline:
self._outlineColours.append(len(self._gdiObjects) - 1)
def SetBrush(self, brush, isFill = False):
self._gdiObjects.append(brush)
op = OpSetGDI(DRAWOP_SET_BRUSH, self, len(self._gdiObjects) - 1)
self._ops.append(op)
if isFill:
self._fillColours.append(len(self._gdiObjects) - 1)
def SetFont(self, font):
self._gdiObjects.append(font)
op = OpSetGDI(DRAWOP_SET_FONT, self, len(self._gdiObjects) - 1)
self._ops.append(op)
def SetTextColour(self, colour):
op = OpSetGDI(DRAWOP_SET_TEXT_COLOUR, self, 0)
op._r, op._g, op._b = colour.Red(), colour.Green(), colour.Blue()
self._ops.append(op)
def SetBackgroundColour(self, colour):
op = OpSetGDI(DRAWOP_SET_BK_COLOUR, self, 0)
op._r, op._g, op._b = colour.Red(), colour.Green(), colour.Blue()
self._ops.append(op)
    def SetBackgroundMode(self, mode):
        op = OpSetGDI(DRAWOP_SET_BK_MODE, self, 0, mode)
        self._ops.append(op)
class DrawnShape(RectangleShape):
"""
Draws a pseudo-metafile shape, which can be loaded from a simple
Windows metafile.
wxDrawnShape allows you to specify a different shape for each of four
orientations (North, West, South and East). It also provides a set of
drawing functions for programmatic drawing of a shape, so that during
construction of the shape you can draw into it as if it were a device
context.
Derived from:
RectangleShape
"""
def __init__(self):
RectangleShape.__init__(self, 100, 50)
self._saveToFile = True
self._currentAngle = DRAWN_ANGLE_0
self._metafiles=PseudoMetaFile(), PseudoMetaFile(), PseudoMetaFile(), PseudoMetaFile()
def OnDraw(self, dc):
# Pass pen and brush in case we have force outline
# and fill colours
if self._shadowMode != SHADOW_NONE:
if self._shadowBrush:
self._metafiles[self._currentAngle]._fillBrush = self._shadowBrush
self._metafiles[self._currentAngle]._outlinePen = wx.Pen(wx.WHITE, 1, wx.PENSTYLE_TRANSPARENT)
self._metafiles[self._currentAngle].Draw(dc, self._xpos + self._shadowOffsetX, self._ypos + self._shadowOffsetY)
self._metafiles[self._currentAngle]._outlinePen = self._pen
self._metafiles[self._currentAngle]._fillBrush = self._brush
self._metafiles[self._currentAngle].Draw(dc, self._xpos, self._ypos)
def SetSize(self, w, h, recursive = True):
self.SetAttachmentSize(w, h)
if self.GetWidth() == 0.0:
scaleX = 1
else:
scaleX = w / self.GetWidth()
if self.GetHeight() == 0.0:
scaleY = 1
else:
scaleY = h / self.GetHeight()
for i in range(4):
if self._metafiles[i].IsValid():
self._metafiles[i].Scale(scaleX, scaleY)
self._width = w
self._height = h
self.SetDefaultRegionSize()
def Scale(self, sx, sy):
"""Scale the shape by the given amount."""
for i in range(4):
if self._metafiles[i].IsValid():
self._metafiles[i].Scale(sx, sy)
self._metafiles[i].CalculateSize(self)
def Translate(self, x, y):
"""Translate the shape by the given amount."""
for i in range(4):
if self._metafiles[i].IsValid():
self._metafiles[i].Translate(x, y)
self._metafiles[i].CalculateSize(self)
# theta is absolute rotation from the zero position
def Rotate(self, x, y, theta):
"""Rotate about the given axis by the given amount in radians."""
self._currentAngle = self.DetermineMetaFile(theta)
if self._currentAngle == 0:
# Rotate metafile
if not self._metafiles[0].GetRotateable():
return
self._metafiles[0].Rotate(x, y, theta)
actualTheta = theta - self._rotation
# Rotate attachment points
sinTheta = math.sin(actualTheta)
cosTheta = math.cos(actualTheta)
for point in self._attachmentPoints:
x1 = point._x
y1 = point._y
point._x = x1 * cosTheta - y1 * sinTheta + x * (1.0 - cosTheta) + y * sinTheta
point._y = x1 * sinTheta + y1 * cosTheta + y * (1.0 - cosTheta) + x * sinTheta
self._rotation = theta
self._metafiles[self._currentAngle].CalculateSize(self)
# Which metafile do we use now? Based on current rotation and validity
# of metafiles.
def DetermineMetaFile(self, rotation):
tolerance = 0.0001
angles = [0.0, math.pi / 2, math.pi, 3 * math.pi / 2]
whichMetaFile = 0
for i in range(4):
if RoughlyEqual(rotation, angles[i], tolerance):
whichMetaFile = i
break
if whichMetaFile > 0 and not self._metafiles[whichMetaFile].IsValid():
whichMetaFile = 0
return whichMetaFile
def OnDrawOutline(self, dc, x, y, w, h):
if self._metafiles[self._currentAngle].GetOutlineOp() != -1:
op = self._metafiles[self._currentAngle].GetOps()[self._metafiles[self._currentAngle].GetOutlineOp()]
if op.OnDrawOutline(dc, x, y, w, h, self._width, self._height):
return
# Default... just use a rectangle
RectangleShape.OnDrawOutline(self, dc, x, y, w, h)
# Get the perimeter point using the special outline op, if there is one,
# otherwise use default wxRectangleShape scheme
def GetPerimeterPoint(self, x1, y1, x2, y2):
if self._metafiles[self._currentAngle].GetOutlineOp() != -1:
op = self._metafiles[self._currentAngle].GetOps()[self._metafiles[self._currentAngle].GetOutlineOp()]
p = op.GetPerimeterPoint(x1, y1, x2, y2, self.GetX(), self.GetY(), self.GetAttachmentMode())
if p:
return p
return RectangleShape.GetPerimeterPoint(self, x1, y1, x2, y2)
def LoadFromMetaFile(self, filename):
"""Load a (very simple) Windows metafile, created for example by
Top Draw, the Windows shareware graphics package."""
return self._metafiles[0].LoadFromMetaFile(filename)
# Set of functions for drawing into a pseudo metafile.
# They use integers, but doubles are used internally for accuracy
# when scaling.
def DrawLine(self, pt1, pt2):
self._metafiles[self._currentAngle].DrawLine(pt1, pt2)
def DrawRectangle(self, rect):
self._metafiles[self._currentAngle].DrawRectangle(rect)
def DrawRoundedRectangle(self, rect, radius):
"""Draw a rounded rectangle.
radius is the corner radius. If radius is negative, it expresses
the radius as a proportion of the smallest dimension of the rectangle.
"""
self._metafiles[self._currentAngle].DrawRoundedRectangle(rect, radius)
def DrawEllipse(self, rect):
self._metafiles[self._currentAngle].DrawEllipse(rect)
def DrawArc(self, centrePt, startPt, endPt):
"""Draw an arc."""
self._metafiles[self._currentAngle].DrawArc(centrePt, startPt, endPt)
def DrawEllipticArc(self, rect, startAngle, endAngle):
"""Draw an elliptic arc."""
self._metafiles[self._currentAngle].DrawEllipticArc(rect, startAngle, endAngle)
def DrawPoint(self, pt):
self._metafiles[self._currentAngle].DrawPoint(pt)
def DrawText(self, text, pt):
self._metafiles[self._currentAngle].DrawText(text, pt)
def DrawLines(self, pts):
self._metafiles[self._currentAngle].DrawLines(pts)
def DrawPolygon(self, pts, flags = 0):
"""Draw a polygon.
flags can be one or more of:
METAFLAGS_OUTLINE (use this polygon for the drag outline) and
METAFLAGS_ATTACHMENTS (use the vertices of this polygon for attachments).
"""
        if flags & METAFLAGS_ATTACHMENTS:
            self.ClearAttachments()
            for i in range(len(pts)):
                # AttachmentPoint is imported from .basic at the top of the file.
                self._attachmentPoints.append(
                    AttachmentPoint(i, pts[i][0], pts[i][1]))
self._metafiles[self._currentAngle].DrawPolygon(pts, flags)
def DrawSpline(self, pts):
self._metafiles[self._currentAngle].DrawSpline(pts)
def SetClippingRect(self, rect):
"""Set the clipping rectangle."""
self._metafiles[self._currentAngle].SetClippingRect(rect)
def DestroyClippingRect(self):
"""Destroy the clipping rectangle."""
self._metafiles[self._currentAngle].DestroyClippingRect()
def SetDrawnPen(self, pen, isOutline = False):
"""Set the pen for this metafile.
If isOutline is True, this pen is taken to indicate the outline
(and if the outline pen is changed for the whole shape, the pen
will be replaced with the outline pen).
"""
self._metafiles[self._currentAngle].SetPen(pen, isOutline)
def SetDrawnBrush(self, brush, isFill = False):
"""Set the brush for this metafile.
If isFill is True, the brush is used as the fill brush.
"""
self._metafiles[self._currentAngle].SetBrush(brush, isFill)
def SetDrawnFont(self, font):
self._metafiles[self._currentAngle].SetFont(font)
def SetDrawnTextColour(self, colour):
"""Set the current text colour for the current metafile."""
self._metafiles[self._currentAngle].SetTextColour(colour)
def SetDrawnBackgroundColour(self, colour):
"""Set the current background colour for the current metafile."""
self._metafiles[self._currentAngle].SetBackgroundColour(colour)
def SetDrawnBackgroundMode(self, mode):
"""Set the current background mode for the current metafile."""
self._metafiles[self._currentAngle].SetBackgroundMode(mode)
def CalculateSize(self):
"""Calculate the wxDrawnShape size from the current metafile.
Call this after you have drawn into the shape.
"""
self._metafiles[self._currentAngle].CalculateSize(self)
def DrawAtAngle(self, angle):
"""Set the metafile for the given orientation, which can be one of:
* DRAWN_ANGLE_0
* DRAWN_ANGLE_90
* DRAWN_ANGLE_180
* DRAWN_ANGLE_270
"""
self._currentAngle = angle
def GetAngle(self):
"""Return the current orientation, which can be one of:
* DRAWN_ANGLE_0
* DRAWN_ANGLE_90
* DRAWN_ANGLE_180
* DRAWN_ANGLE_270
"""
return self._currentAngle
def GetRotation(self):
"""Return the current rotation of the shape in radians."""
return self._rotation
def SetSaveToFile(self, save):
"""If save is True, the image will be saved along with the shape's
other attributes. The reason why this might not be desirable is that
if there are many shapes with the same image, it would be more
efficient for the application to save one copy, and not duplicate
the information for every shape. The default is True.
"""
self._saveToFile = save
def GetMetaFile(self, which = 0):
"""Return a reference to the internal 'pseudo-metafile'."""
        return self._metafiles[which]
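
# Usage sketch (illustrative, not part of the original module; the class name
# follows the "wxDrawnShape" wording in the CalculateSize docstring above, and
# the pen/rect/point objects are standard wx ones):
#   shape = DrawnShape()
#   shape.SetDrawnPen(wx.BLACK_PEN, isOutline=True)
#   shape.DrawRectangle(wx.Rect(0, 0, 100, 60))
#   shape.DrawText("label", wx.Point(10, 10))
#   shape.CalculateSize()  # per its docstring, call after drawing into the shape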
# File: MatchZoo-test-1.0/matchzoo/modules/stacked_brnn.py
import torch
import torch.nn as nn
from torch.nn import functional as F
class StackedBRNN(nn.Module):
"""
Stacked Bi-directional RNNs.
Differs from standard PyTorch library in that it has the option to save
and concat the hidden states between layers. (i.e. the output hidden size
for each sequence input is num_layers * hidden_size).
Examples:
>>> import torch
>>> rnn = StackedBRNN(
... input_size=10,
... hidden_size=10,
... num_layers=2,
... dropout_rate=0.2,
... dropout_output=True,
... concat_layers=False
... )
>>> x = torch.randn(2, 5, 10)
>>> x.size()
torch.Size([2, 5, 10])
>>> x_mask = (torch.ones(2, 5) == 1)
>>> rnn(x, x_mask).shape
torch.Size([2, 5, 20])
"""
def __init__(self, input_size, hidden_size, num_layers,
dropout_rate=0, dropout_output=False, rnn_type=nn.LSTM,
concat_layers=False):
"""Stacked Bidirectional LSTM."""
super().__init__()
self.dropout_output = dropout_output
self.dropout_rate = dropout_rate
self.num_layers = num_layers
self.concat_layers = concat_layers
self.rnns = nn.ModuleList()
for i in range(num_layers):
input_size = input_size if i == 0 else 2 * hidden_size
self.rnns.append(rnn_type(input_size, hidden_size,
num_layers=1,
bidirectional=True))
def forward(self, x, x_mask):
"""Encode either padded or non-padded sequences."""
        # Padding is ignored here: every batch is encoded with the faster
        # unpadded path, whether or not x_mask marks padded positions.
        output = self._forward_unpadded(x, x_mask)
        return output.contiguous()
def _forward_unpadded(self, x, x_mask):
"""Faster encoding that ignores any padding."""
# Transpose batch and sequence dims
x = x.transpose(0, 1)
# Encode all layers
outputs = [x]
for i in range(self.num_layers):
rnn_input = outputs[-1]
# Apply dropout to hidden input
if self.dropout_rate > 0:
rnn_input = F.dropout(rnn_input,
p=self.dropout_rate,
training=self.training)
# Forward
rnn_output = self.rnns[i](rnn_input)[0]
outputs.append(rnn_output)
# Concat hidden layers
if self.concat_layers:
output = torch.cat(outputs[1:], 2)
else:
output = outputs[-1]
# Transpose back
output = output.transpose(0, 1)
# Dropout on output layer
if self.dropout_output and self.dropout_rate > 0:
output = F.dropout(output,
p=self.dropout_rate,
training=self.training)
        return output
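
# Minimal runnable sketch (values are made up; shapes follow the doctest above).
if __name__ == "__main__":
    rnn = StackedBRNN(input_size=8, hidden_size=16, num_layers=3,
                      dropout_rate=0.1, dropout_output=True,
                      concat_layers=True)
    x = torch.randn(4, 7, 8)            # (batch, seq_len, input_size)
    x_mask = torch.zeros(4, 7).bool()   # all False -> no padding
    out = rnn(x, x_mask)
    # concat_layers=True concatenates every layer's bidirectional output:
    # 3 layers * 2 directions * 16 hidden units = 96 features per position.
    assert out.shape == (4, 7, 96)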
# File: Django-4.2.4/django/db/backends/mysql/features.py
import operator
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
allows_group_by_selected_pks = True
related_fields_match_type = True
# MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME.
allow_sliced_subqueries_with_in = False
has_select_for_update = True
supports_forward_references = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
atomic_transactions = False
can_clone_databases = True
supports_comments = True
supports_comments_inline = True
supports_temporal_subtraction = True
supports_slicing_ordering_in_compound = True
supports_index_on_text_field = False
supports_update_conflicts = True
create_test_procedure_without_params_sql = """
CREATE PROCEDURE test_procedure ()
BEGIN
DECLARE V_I INTEGER;
SET V_I = 1;
END;
"""
create_test_procedure_with_int_param_sql = """
CREATE PROCEDURE test_procedure (P_I INTEGER)
BEGIN
DECLARE V_I INTEGER;
SET V_I = P_I;
END;
"""
create_test_table_with_composite_primary_key = """
CREATE TABLE test_table_composite_pk (
column_1 INTEGER NOT NULL,
column_2 INTEGER NOT NULL,
PRIMARY KEY(column_1, column_2)
)
"""
# Neither MySQL nor MariaDB support partial indexes.
supports_partial_indexes = False
# COLLATE must be wrapped in parentheses because MySQL treats COLLATE as an
# indexed expression.
collate_as_index_expression = True
supports_order_by_nulls_modifier = False
order_by_nulls_first = True
supports_logical_xor = True
@cached_property
def minimum_database_version(self):
if self.connection.mysql_is_mariadb:
return (10, 4)
else:
return (8,)
@cached_property
def test_collations(self):
charset = "utf8"
if (
self.connection.mysql_is_mariadb
and self.connection.mysql_version >= (10, 6)
) or (
not self.connection.mysql_is_mariadb
and self.connection.mysql_version >= (8, 0, 30)
):
# utf8 is an alias for utf8mb3 in MariaDB 10.6+ and MySQL 8.0.30+.
charset = "utf8mb3"
return {
"ci": f"{charset}_general_ci",
"non_default": f"{charset}_esperanto_ci",
"swedish_ci": f"{charset}_swedish_ci",
}
test_now_utc_template = "UTC_TIMESTAMP(6)"
@cached_property
def django_test_skips(self):
skips = {
"This doesn't work on MySQL.": {
"db_functions.comparison.test_greatest.GreatestTests."
"test_coalesce_workaround",
"db_functions.comparison.test_least.LeastTests."
"test_coalesce_workaround",
},
"Running on MySQL requires utf8mb4 encoding (#18392).": {
"model_fields.test_textfield.TextFieldTests.test_emoji",
"model_fields.test_charfield.TestCharField.test_emoji",
},
"MySQL doesn't support functional indexes on a function that "
"returns JSON": {
"schema.tests.SchemaTests.test_func_index_json_key_transform",
},
"MySQL supports multiplying and dividing DurationFields by a "
"scalar value but it's not implemented (#25287).": {
"expressions.tests.FTimeDeltaTests.test_durationfield_multiply_divide",
},
"UPDATE ... ORDER BY syntax on MySQL/MariaDB does not support ordering by"
"related fields.": {
"update.tests.AdvancedTests."
"test_update_ordered_by_inline_m2m_annotation",
"update.tests.AdvancedTests.test_update_ordered_by_m2m_annotation",
},
}
if self.connection.mysql_is_mariadb and (
10,
4,
3,
) < self.connection.mysql_version < (10, 5, 2):
skips.update(
{
"https://jira.mariadb.org/browse/MDEV-19598": {
"schema.tests.SchemaTests."
"test_alter_not_unique_field_to_primary_key",
},
}
)
if self.connection.mysql_is_mariadb and (
10,
4,
12,
) < self.connection.mysql_version < (10, 5):
skips.update(
{
"https://jira.mariadb.org/browse/MDEV-22775": {
"schema.tests.SchemaTests."
"test_alter_pk_with_self_referential_field",
},
}
)
if not self.supports_explain_analyze:
skips.update(
{
"MariaDB and MySQL >= 8.0.18 specific.": {
"queries.test_explain.ExplainTests.test_mysql_analyze",
},
}
)
if "ONLY_FULL_GROUP_BY" in self.connection.sql_mode:
skips.update(
{
"GROUP BY cannot contain nonaggregated column when "
"ONLY_FULL_GROUP_BY mode is enabled on MySQL, see #34262.": {
"aggregation.tests.AggregateTestCase."
"test_group_by_nested_expression_with_params",
},
}
)
return skips
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
return self.connection.mysql_server_data["default_storage_engine"]
@cached_property
def allows_auto_pk_0(self):
"""
Autoincrement primary key can be set to 0 if it doesn't generate new
autoincrement values.
"""
return "NO_AUTO_VALUE_ON_ZERO" in self.connection.sql_mode
@cached_property
def update_can_self_select(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
10,
3,
2,
)
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != "MyISAM"
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
"BinaryField": "TextField",
"BooleanField": "IntegerField",
"DurationField": "BigIntegerField",
"GenericIPAddressField": "CharField",
}
@cached_property
def can_return_columns_from_insert(self):
return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
10,
5,
0,
)
can_return_rows_from_bulk_insert = property(
operator.attrgetter("can_return_columns_from_insert")
)
@cached_property
def has_zoneinfo_database(self):
return self.connection.mysql_server_data["has_zoneinfo_database"]
@cached_property
def is_sql_auto_is_null_enabled(self):
return self.connection.mysql_server_data["sql_auto_is_null"]
@cached_property
def supports_over_clause(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 2)
supports_frame_range_fixed_distance = property(
operator.attrgetter("supports_over_clause")
)
@cached_property
def supports_column_check_constraints(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 16)
supports_table_check_constraints = property(
operator.attrgetter("supports_column_check_constraints")
)
@cached_property
def can_introspect_check_constraints(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 16)
@cached_property
def has_select_for_update_skip_locked(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 6)
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def has_select_for_update_nowait(self):
if self.connection.mysql_is_mariadb:
return True
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def has_select_for_update_of(self):
return (
not self.connection.mysql_is_mariadb
and self.connection.mysql_version >= (8, 0, 1)
)
@cached_property
def supports_explain_analyze(self):
return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (
8,
0,
18,
)
@cached_property
def supported_explain_formats(self):
# Alias MySQL's TRADITIONAL to TEXT for consistency with other
# backends.
formats = {"JSON", "TEXT", "TRADITIONAL"}
if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (
8,
0,
16,
):
formats.add("TREE")
return formats
@cached_property
def supports_transactions(self):
"""
All storage engines except MyISAM support transactions.
"""
return self._mysql_storage_engine != "MyISAM"
uses_savepoints = property(operator.attrgetter("supports_transactions"))
can_release_savepoints = property(operator.attrgetter("supports_transactions"))
@cached_property
def ignores_table_name_case(self):
return self.connection.mysql_server_data["lower_case_table_names"]
@cached_property
def supports_default_in_lead_lag(self):
# To be added in https://jira.mariadb.org/browse/MDEV-12981.
return not self.connection.mysql_is_mariadb
@cached_property
def can_introspect_json_field(self):
if self.connection.mysql_is_mariadb:
return self.can_introspect_check_constraints
return True
@cached_property
def supports_index_column_ordering(self):
if self._mysql_storage_engine != "InnoDB":
return False
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 8)
return self.connection.mysql_version >= (8, 0, 1)
@cached_property
def supports_expression_indexes(self):
return (
not self.connection.mysql_is_mariadb
and self._mysql_storage_engine != "MyISAM"
and self.connection.mysql_version >= (8, 0, 13)
)
@cached_property
def supports_select_intersection(self):
is_mariadb = self.connection.mysql_is_mariadb
return is_mariadb or self.connection.mysql_version >= (8, 0, 31)
supports_select_difference = property(
operator.attrgetter("supports_select_intersection")
)
@cached_property
def can_rename_index(self):
if self.connection.mysql_is_mariadb:
return self.connection.mysql_version >= (10, 5, 2)
        return True
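
# Illustration only (not part of Django): the feature flags above gate on
# lexicographic comparison of version tuples, so a short tuple such as (8,)
# accepts any 8.x server.
def _version_gate_demo():
    assert (8, 0, 30) >= (8,)            # minimum_database_version on MySQL
    assert (8, 0, 30) >= (8, 0, 16)      # check-constraint support threshold
    assert not ((10, 4, 12) >= (10, 5))  # MariaDB 10.4.x is below 10.5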
# File: CluSex-2.3.6/clusex/lib/join.py
import numpy as np
import argparse
import os
import sys
from clusex.lib.check import CheckKron
from clusex.lib.check import CheckFlag
from clusex.lib.check import CheckSatReg2
def joinsexcat (maincat,secondcat,output,JoinScale,incFlag=False, red=0.1,minrad = 5):
"merges two Sextractor catalogs"
f_out = open(output, "w")
KronScale2 = 1
try:
#maincat
N,Alpha,Delta,X,Y,Mg,Kr,Fluxr,Isoa,Ai,E,Theta,Bkgd,Idx,Flg=np.genfromtxt(maincat,delimiter="",unpack=True)
except IOError:
print("bad sex file or wrong number of columns ")
sys.exit(1)
AR = 1 - E
RKron = JoinScale * Ai * Kr
maskron = RKron <= 0
RKron[maskron]=1
maskar = AR <= 0.005
AR[maskar]=0.005
for idx, item in enumerate(N):
line="{0:.0f} {1} {2} {3} {4} {5} {6} {7} {8:.0f} {9} {10} {11} {12} {13} {14:.0f} \n".format(N[idx], Alpha[idx], Delta[idx], X[idx], Y[idx], Mg[idx], Kr[idx], Fluxr[idx], Isoa[idx], Ai[idx], E[idx], Theta[idx], Bkgd[idx], Idx[idx], Flg[idx])
f_out.write(line)
total =len(N)
NewN=total + 1
try:
#second cat
N2,Alpha2,Delta2,X2,Y2,Mg2,Kr2,Fluxr2,Isoa2,Ai2,E2,Theta2,Bkgd2,Idx2,Flg2=np.genfromtxt(secondcat,delimiter="",unpack=True)
except:
print("bad sextractor file or wrong number of columns ")
sys.exit(1)
AR2 = 1 - E2
RKron2 = KronScale2 * Ai2 * Kr2
maskar2 = AR2 <= 0.005
AR2[maskar2]=0.005
count=0
count2=0
flag1 = False
flag2 = False
distmax = 5 # dist max to compare with incFlag=True
dist = 0
    if incFlag:
        print("Including all galaxies from the second catalog that are not in the first catalog")
    else:
        print("Including all galaxies from the second catalog that fall outside the ellipses of first-catalog objects")
for idx2, item2 in enumerate(N2):
if incFlag:
flagf = True
for idx, item in enumerate(N):
dx = X[idx]-X2[idx2]
dy = Y[idx]-Y2[idx2]
dist=np.sqrt( dx**2 + dy**2 )
if dist < distmax:
flagf = False
break
if flagf:
Kr2[idx2] = red * Kr2[idx2]
if Kr2[idx2] * Ai2[idx2] < minrad:
Kr2[idx2] = minrad/Ai2[idx2]
line="{0:.0f} {1} {2} {3} {4} {5} {6} {7} {8:.0f} {9} {10} {11} {12} {13} {14:.0f} \n".format(NewN, Alpha2[idx2], Delta2[idx], X2[idx2], Y2[idx2], Mg2[idx2], Kr2[idx2], Fluxr2[idx2], Isoa2[idx2], Ai2[idx2], E2[idx2], Theta2[idx2], Bkgd2[idx2], Idx2[idx2], Flg2[idx2])
f_out.write(line)
NewN+=1
count2+=1
else:
count+=1
else:
            flagf = False
for idx, item in enumerate(N):
flag1=CheckKron(X2[idx2],Y2[idx2],X[idx],Y[idx],RKron[idx],Theta[idx],AR[idx])
#flag2=CheckKron(X[idx],Y[idx],X2[idx2],Y2[idx2],RKron2[idx2],Theta2[idx2],AR2[idx2])
flagf=flag1 or flag2
if flagf: # boolean value
break
if not flagf:
line="{0:.0f} {1} {2} {3} {4} {5} {6} {7} {8:.0f} {9} {10} {11} {12} {13} {14:.0f} \n".format(NewN, Alpha2[idx2], Delta2[idx], X2[idx2], Y2[idx2], Mg2[idx2], Kr2[idx2], Fluxr2[idx2], Isoa2[idx2], Ai2[idx2], E2[idx2], Theta2[idx2], Bkgd2[idx2], Idx2[idx2], Flg2[idx2])
f_out.write(line)
NewN+=1
count2+=1
else:
count+=1
f_out.close()
linout="{} objects from second run rejected ".format(count)
print(linout)
linout="{} objects were added from second run ".format(count2)
print(linout)
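
# Usage sketch (file names are hypothetical). JoinScale enlarges the first
# catalog's Kron ellipses when deciding whether a second-run detection falls
# inside an already-detected object; with incFlag=True a simple 5-pixel
# distance match is used instead and the second-run Kron factor is reduced
# by `red` (subject to `minrad`):
#   joinsexcat("hot.cat", "cold.cat", "merged.cat", JoinScale=1.5)
#   joinsexcat("hot.cat", "cold.cat", "merged.cat", JoinScale=1.5,
#              incFlag=True, red=0.1, minrad=5)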
def putFlagSat(sexfile,sexfile2,regfile):
"""Put flags on objects which are inside saturated regions"""
f_out= open(sexfile2, "w")
scale = 1
offset=0
flagsat=4 ## flag value when object is saturated (or close to)
N,Alpha,Delta,X,Y,Mg,Kr,Fluxr,Isoa,Ai,E,Theta,Bkgd,Idx,Flg=np.genfromtxt(sexfile,delimiter="",unpack=True)
    for idx, item in enumerate(N):
        Rkron = scale * Ai[idx] * Kr[idx] + offset
        if Rkron == 0:
            Rkron = 1
        check = CheckFlag(Flg[idx], flagsat)  # is the object already flagged as saturated?
        # regflag = CheckSatReg(X[idx], Y[idx], Rkron, Theta[idx], E[idx], regfile)
        regflag = CheckSatReg2(X[idx], Y[idx], regfile)  # is the object inside a saturated box region marked by the user in ds9?
        if not check and regflag:
            Flg[idx] = Flg[idx] + flagsat
        line = "{0:.0f} {1} {2} {3} {4} {5} {6} {7} {8:.0f} {9} {10} {11} {12} {13} {14:.0f} \n".format(N[idx], Alpha[idx], Delta[idx], X[idx], Y[idx], Mg[idx], Kr[idx], Fluxr[idx], Isoa[idx], Ai[idx], E[idx], Theta[idx], Bkgd[idx], Idx[idx], Flg[idx])
        f_out.write(line)
    f_out.close()
# File: Aesthete-0.4.2/aesthete/glancer/Pie.py
import os, math, sys, getopt, string
import pango
import random
from gtk import gdk
from ..tablemaker import PreferencesTableMaker
import threading
import cairo, gtk, gobject
import matplotlib
import numpy, numpy.fft
import scipy, scipy.interpolate, scipy.optimize
from matplotlib.backends.backend_cairo import RendererCairo
from matplotlib.backends.backend_gtkcairo import FigureCanvasGTKCairo as mpl_Canvas
from matplotlib.backends.backend_gtkcairo import NavigationToolbar2Cairo as mpl_Navbar
import pylab
from PIL import Image
from Canvas import *
from aobject.aobject import *
legend_locs = matplotlib.legend.Legend.codes
legend_locs_rev = dict((legend_locs[k],k) for k in legend_locs)
class GlancerPie(AObject) :
fig = None
axes = None
canvas = None
pie = None
source = None
legend_object = None
legend = True
title = ''
read_series = True
scroll_speed = 0.1
def replot(self) :
self._pie_from_source()
def _pie_from_source(self) :
if self.source is None :
return
x_range = self.axes.get_xlim() if self.source.needs_x_range else None
values = self.source.source_get_values(multi_array=True, x_range=x_range)[0]
points = values['values']
series = range(0, len(points[0]))
dim = self.source.source_get_max_dim()
if dim > 1 :
trans = zip(*points)
if not self.read_series :
trans[0] = series
else :
trans = [series, points]
        if self.pie is not None:
            # Wedge patches have no set_xdata/set_ydata, so drop the old
            # wedges and draw a fresh pie in their place.
            for wedge in self.pie:
                wedge.remove()
        self.pie = self.axes.pie(trans[1])[0]
self.axes.figure.canvas.draw()
def do_mpl_scroll_event(self, event) :
'''Handle scrolling ourselves.'''
if event.inaxes != self.axes :
return False
self.axes.set_autoscale_on(False)
xl = self.axes.get_xlim()
yl = self.axes.get_ylim()
ec = (event.xdata, event.ydata)
# event.step tells direction
spd = (1+self.scroll_speed) ** (-event.step)
# unfortunately, this seems to be the only sensible way to
# get to the modifiers. Phrased oddly, but says do_x if we're
# not told to only do y, and v.v.
do_specific = event.guiEvent.state & gtk.gdk.CONTROL_MASK
do_x = not (do_specific and (event.guiEvent.state & gtk.gdk.SHIFT_MASK))
do_y = not (do_specific and do_x)
if do_x :
self.axes.set_xlim(ec[0] - (ec[0]-xl[0])*spd,
ec[0] - (ec[0]-xl[1])*spd)
if do_y :
self.axes.set_ylim(ec[1] - (ec[1]-yl[0])*spd,
ec[1] - (ec[1]-yl[1])*spd)
self.queue_draw()
return True
_move_from = None
_move_from_xl = None
_move_from_yl = None
def do_mpl_button_press_event(self, event) :
'''Check button presses.'''
if event.inaxes != self.axes :
return False
m_control = event.guiEvent.state & gtk.gdk.CONTROL_MASK
if event.button == 2 :
if m_control :
self.axes.autoscale_view()
self.axes.set_autoscale_on(True)
self.queue_draw()
else :
self.axes.set_autoscale_on(False)
self._move_from = (event.x, event.y)
self._move_from_xl = self.axes.get_xlim()
self._move_from_yl = self.axes.get_ylim()
self.queue_draw()
return True
return False
def do_mpl_button_release_event(self, event) :
'''Check button releases.'''
if event.button == 2 :
self._move_from = None
self._move_from_xl = None
self._move_from_yl = None
self.queue_draw()
return True
return False
def do_mpl_motion_notify_event(self, event) :
'''Check motion notifications.'''
if event.inaxes != self.axes :
return False
do_specific = event.guiEvent.state & gtk.gdk.CONTROL_MASK
do_x = not (do_specific and (event.guiEvent.state & gtk.gdk.SHIFT_MASK))
do_y = not (do_specific and do_x)
if self._move_from is not None :
dx = (event.x-self._move_from[0])
dy = (event.y-self._move_from[1])
l,b,r,t = self.axes.bbox.extents
el,er = self.axes.get_xlim()
eb,et = self.axes.get_ylim()
dx = dx*(er-el)/(r-l)
dy = dy*(et-eb)/(t-b)
if do_x :
self.axes.set_xlim(self._move_from_xl[0]-dx,
self._move_from_xl[1]-dx)
if do_y :
self.axes.set_ylim(self._move_from_yl[0]-dy,
self._move_from_yl[1]-dy)
self.queue_draw()
return True
def __init__(self, fig, queue_draw, env=None):
self.queue_draw = queue_draw
self.fig = fig
self.canvas = GlancerCanvas(self.fig)
self.axes = self.fig.add_subplot(1,1,1)
self.canvas.mpl_connect('scroll_event', self.do_mpl_scroll_event)
self.canvas.mpl_connect('button_press_event', self.do_mpl_button_press_event)
self.canvas.mpl_connect('button_release_event',
self.do_mpl_button_release_event)
self.canvas.mpl_connect('motion_notify_event',
self.do_mpl_motion_notify_event)
AObject.__init__(self, "GlancerPie", env, view_object=False)
def load_series(self, source, series=None, vals=None):
if series is not None :
            raise RuntimeError(
                'Sorry, GlancerPie can only plot single-series Sources')
self.source = source
self._pie_from_source()
def redraw(self) :
self.do_legend()
        self.fig.canvas.draw()
    def __del__(self) :
        # sd_chg_conn is only present when a change signal was connected.
        if hasattr(self, 'sd_chg_conn'):
            get_object_dictionary().disconnect(self.sd_chg_conn)
        AObject.__del__(self)
    def do_legend(self, loc = None) :
        # This chart draws a single pie (there is no self.lines here), so the
        # legend is shown whenever a pie has been drawn.
        if self.pie is not None and self.legend:
            self.axes.legend(loc=loc)
            if self.legend_object is not None:
                self.legend_object.aes_remove()
                self.legend_object = None
            self.legend_object = GlancerLegend(self.axes.legend_, env = self.get_aenv())
            self.absorb_properties(self.legend_object, as_self = False)
            if loc is None:
                self.emit_property_change("legend_loc")
        else:
            self.axes.legend_ = None
        self.canvas.draw()
def check_legend(self) :
if self.legend_object :
self.legend_object.aes_remove()
self.legend_object = None
    def check_clear(self, force = False) :
        self.axes.clear()
        self.pie = None  # the wedge artists were destroyed with the axes
        self.check_legend()
        self.queue_draw()
#PROPERTIES
def get_aesthete_properties(self):
return {
'source' : [None, self.get_source, True],
'legend' : [self.change_legend, self.get_legend, True],
'figure_facecolor' : [self.change_figure_facecolor, self.get_figure_facecolor, True],
'axes_axis_bgcolor' : [self.change_axes_axis_bgcolor, self.get_axes_axis_bgcolor, True],
'axes_xlabel' : [self.change_axes_xlabel, self.get_axes_xlabel, True],
'axes_ylabel' : [self.change_axes_ylabel, self.get_axes_ylabel, True],
'title_font' : [self.change_title_font, self.get_title_font, True],
'xlabel_font' : [self.change_xlabel_font, self.get_xlabel_font, True],
'ylabel_font' : [self.change_ylabel_font, self.get_ylabel_font, True],
'xhide_oom' : [self.change_xhide_oom, self.get_xhide_oom, True],
'yhide_oom' : [self.change_yhide_oom, self.get_yhide_oom, True],
'xtick_font' : [self.change_xtick_font, self.get_xtick_font, True],
'ytick_font' : [self.change_ytick_font, self.get_ytick_font, True],
'xmultiplier' : [self.change_xmultiplier, self.get_xmultiplier, True],
'ymultiplier' : [self.change_ymultiplier, self.get_ymultiplier, True],
'read_series' : [self.change_read_series, self.get_read_series, True],
'legend_loc' : [self.change_legend_loc, self.get_legend_loc, True],
'title' : [self.change_title, self.get_title, True] }
#BEGIN PROPERTIES FUNCTIONS
def get_source(self, val=None) : return self.source if val==None else val
def get_xmultiplier(self, val=None) : return 1. if val==None else float(val)
def get_ymultiplier(self, val=None) : return 1. if val==None else float(val)
def get_legend(self, val=None): return self.legend if val==None else (val=='True')
def get_read_series(self, val=None): return self.read_series if val==None else (val=='True')
def get_title(self, val=None): return self.title if val==None else val
def get_legend_loc(self, val=None) :
return (legend_locs_rev[self.legend_object.get_loc()] if self.legend_object else '')\
if val==None else val
def get_axes_axis_bgcolor(self, val=None):
return mpl_to_tuple(self.axes.get_axis_bgcolor()) \
if val==None else string_to_float_tup(val)
def get_figure_facecolor(self, val=None):
return mpl_to_tuple(self.fig.get_facecolor()) \
if val==None else string_to_float_tup(val)
def get_axes_xlabel(self, val=None) : return self.axes.get_xlabel() if val==None else val
def get_axes_ylabel(self, val=None) : return self.axes.get_ylabel() if val==None else val
def get_xhide_oom(self, val=None) :
return False \
if val==None else (val=='True')
def get_yhide_oom(self, val=None) :
return False \
if val==None else (val=='True')
def get_xtick_font(self, val=None) :
tick_props = self.axes.get_xaxis().get_major_ticks()[0].label1.get_fontproperties()
return mpl_to_font(tick_props) \
if val==None else val
def get_ytick_font(self, val=None) :
tick_props = self.axes.get_yaxis().get_major_ticks()[0].label1.get_fontproperties()
return mpl_to_font(tick_props) \
if val==None else val
def get_xlabel_font(self, val=None) :
label_props = self.axes.get_xaxis().get_label().get_fontproperties()
return mpl_to_font(label_props) \
if val==None else val
def get_ylabel_font(self, val=None) :
label_props = self.axes.get_yaxis().get_label().get_fontproperties()
return mpl_to_font(label_props) \
if val==None else val
def get_title_font(self, val=None) :
label_props = self.axes.title.get_fontproperties()
return mpl_to_font(label_props) \
if val==None else val
def change_legend_loc(self, val) : self.do_legend(loc = val)
def change_title(self, val) :
self.title = val
self.axes.set_title(self.title, visible = (self.title!=''))
self.queue_draw()
def change_xhide_oom(self, val) :
self.axes.get_xaxis().major.formatter.hide_oom = val
self.queue_draw()
def change_yhide_oom(self, val) :
self.axes.get_yaxis().major.formatter.hide_oom = val
self.queue_draw()
def change_ytick_font(self, val) :
ticks = self.axes.get_yaxis().get_major_ticks()
for tick in ticks :
font_to_mpl(tick.label1.get_fontproperties(), val)
def change_xtick_font(self, val) :
ticks = self.axes.get_xaxis().get_major_ticks()
for tick in ticks :
font_to_mpl(tick.label1.get_fontproperties(), val)
def change_xlabel_font(self, val) :
label_props = self.axes.get_xaxis().get_label().get_fontproperties()
font_to_mpl(label_props, val)
def change_ylabel_font(self, val) :
label_props = self.axes.get_yaxis().get_label().get_fontproperties()
font_to_mpl(label_props, val)
def change_title_font(self, val) :
label_props = self.axes.title.get_fontproperties()
font_to_mpl(label_props, val)
def change_read_series(self, val) : self.read_series = val
def change_xmultiplier(self, val=None) : self.axes.xaxis.get_major_formatter().multiplier = val; self.queue_draw()
def change_ymultiplier(self, val=None) : self.axes.yaxis.get_major_formatter().multiplier = val; self.queue_draw()
def change_legend(self, val) : self.legend = val; self.do_legend()
def change_axes_axis_bgcolor(self, val) : self.axes.set_axis_bgcolor(val); self.queue_draw()
def change_axes_xlabel(self, val) : self.axes.set_xlabel(val); self.queue_draw()
def change_axes_ylabel(self, val) : self.axes.set_ylabel(val); self.queue_draw()
def change_figure_facecolor(self, val) : self.fig.set_facecolor(val); self.queue_draw()
#END PROPERTIES FUNCTIONS
    def replot_all(self) :
        # A pie chart has no per-line artists; replot the single pie source.
        self.replot()
def get_method_window(self) :
#fram = gtk.Frame()
#fram.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(1, 1, 1))
win = gtk.VBox()
who_algn = gtk.Alignment(0.5, 0.5)
who_algn.set_property("top_padding", 10)
who_hbox = gtk.HBox(spacing=5)
who_hbox.pack_start(gtk.image_new_from_stock('aes-glancer-pie',
gtk.ICON_SIZE_BUTTON),
False)
who_hbox.pack_start(gtk.Label("Pie chart"), False)
who_algn.add(who_hbox)
win.pack_start(who_algn)
icon_table = gtk.Table(1, 5)
win.pack_start(icon_table)
# Visual Config
config_butt = gtk.Button()
config_butt.set_image(gtk.image_new_from_stock(gtk.STOCK_PAGE_SETUP,
gtk.ICON_SIZE_BUTTON))
config_butt.set_tooltip_text("Appearance preferences...")
icon_table.attach(config_butt, 0, 1, 0, 1)
config_win = gtk.Window(); config_win.set_size_request(400, -1)
config_win.set_title("Configure plot appearance")
config_vbox = self.methods_make_visual_config()
config_win.add(config_vbox); config_win.set_transient_for(self.get_aenv().toplevel)
config_butt.connect("clicked", lambda o : config_win.show())
config_remove_butt = gtk.Button("Close")
config_remove_butt.connect("clicked", lambda o : config_win.hide())
config_remove_butt.show_all()
config_hbox = gtk.HBox(); config_hbox.show()
config_hbox.pack_start(config_remove_butt, False, False, 5)
config_vbox.pack_end(config_hbox, False, False, 5)
# Import Config
legend_amtb = self.aes_method_toggle_button("legend", None,
preferencable=False)
legend_amtb.set_image(gtk.image_new_from_stock(gtk.STOCK_JUSTIFY_RIGHT,
gtk.ICON_SIZE_BUTTON))
legend_amtb.set_tooltip_text("Toggle legend")
icon_table.attach(legend_amtb, 1, 2, 0, 1)
# From Sim
sim_hbox = gtk.HBox()
#sim_cmbo = gtk.ComboBox( get_object_dictionary().get_liststore_by_am('Source') )
#sim_cllr = gtk.CellRendererText(); sim_cmbo.pack_start(sim_cllr); sim_cllr.props.ellipsize = pango.ELLIPSIZE_END;
#sim_cmbo.add_attribute(sim_cllr, 'text', 1)
#self.sim_cmbo = sim_cmbo
#sim_hbox.pack_start(sim_cmbo)
clear_butt = gtk.Button()
clear_butt.set_image(gtk.image_new_from_stock(gtk.STOCK_CLEAR,
gtk.ICON_SIZE_BUTTON))
clear_butt.set_tooltip_text("Clear all lines")
icon_table.attach(clear_butt, 0, 1, 1, 2)
clear_butt.connect("clicked", lambda o : self.check_clear(force=True))
replot_butt = gtk.Button()
replot_butt.set_image(gtk.image_new_from_stock(gtk.STOCK_REFRESH,
gtk.ICON_SIZE_BUTTON))
replot_butt.set_tooltip_text("Replot all lines")
replot_butt.connect("clicked", lambda o : self.replot_all())
icon_table.attach(replot_butt, 1, 2, 1, 2)
#fram.add(win)
win.show_all()
return win
def methods_make_visual_config(self) :
config_vbox = gtk.VBox()
config_ntbk = gtk.Notebook()
general_table_maker = PreferencesTableMaker()
general_table_maker.append_heading("Title")
general_table_maker.append_row("Title", self.aes_method_entry("title"))
general_table_maker.append_row("Title Font", self.aes_method_font_button("title_font", "Set title font"))
general_table_maker.append_heading("Colours")
general_table_maker.append_row("Face Colour", self.aes_method_colour_button("figure_facecolor", "Set figure colour"))
general_table_maker.append_row("Axes Background",self.aes_method_colour_button("axes_axis_bgcolor", "Axes Background Colour"))
config_tabl = general_table_maker.make_table()
config_tabl_vbox = gtk.VBox(); config_tabl_vbox.pack_start(config_tabl, False)
config_ntbk.append_page(config_tabl_vbox, gtk.Label("General"))
legend_table_maker = PreferencesTableMaker()
legend_table_maker.append_heading("Geometry")
legend_position_cmbo = gtk.combo_box_new_text()
for loc in legend_locs : legend_position_cmbo.append_text(loc)
self.aes_method_automate_combo_text(legend_position_cmbo, "legend_loc")
legend_table_maker.append_row("Position", legend_position_cmbo)
config_tabl = legend_table_maker.make_table()
config_tabl_vbox = gtk.VBox(); config_tabl_vbox.pack_start(config_tabl, False)
config_ntbk.append_page(config_tabl_vbox, gtk.Label("Legend"))
axes = { 'x' : "X" , 'y' : "Y" }
for axis in axes :
axes_table_maker = PreferencesTableMaker()
axes_table_maker.append_heading("Labeling")
axes_table_maker.append_row(axes[axis]+" Axes Label", self.aes_method_entry("axes_"+axis+"label"))
axes_table_maker.append_row(axes[axis]+" Axis Font", self.aes_method_font_button(axis+"label_font", "Set "+axes[axis]+" axis font"))
axes_table_maker.append_row(axes[axis]+" Tick Font", self.aes_method_font_button(axis+"tick_font", "Set "+axes[axis]+" axis font"))
axes_table_maker.append_row(axes[axis]+" Multiplier", self.aes_method_entry(axis+"multiplier", wait_until_parsable_float = True))
config_tabl = axes_table_maker.make_table()
config_tabl_vbox = gtk.VBox(); config_tabl_vbox.pack_start(config_tabl, False);
config_ntbk.append_page(config_tabl_vbox, gtk.Label(axes[axis]+" Axis"))
config_vbox.pack_start(config_ntbk)
config_vbox.show_all()
        return config_vbox
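
# Standalone sketch of the anchored-zoom arithmetic in do_mpl_scroll_event
# above (numbers are made up):
def _zoom_interval(lo, hi, anchor, speed=0.1, step=1):
    """Scale the interval (lo, hi) about `anchor` by (1 + speed) ** -step."""
    spd = (1 + speed) ** (-step)
    return anchor - (anchor - lo) * spd, anchor - (anchor - hi) * spd

# One scroll step towards the cursor at x=5 shrinks (0, 10) to ~(0.45, 9.55),
# i.e. a ~9% zoom-in anchored on the cursor position.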
# File: CT3-3.3.2/Cheetah/Tests/Test.py
import sys
args_l = len(sys.argv)
if args_l == 1:
pass
elif args_l == 2 and sys.argv[1] == 'test':
pass
elif args_l == 2 and sys.argv[1] == '--namemapper-pure':
try:
from Cheetah import _namemapper # noqa
except ImportError:
# _namemapper hasn't been compiled so Tests/NameMapper.py
# tests pure-python NameMapper.py; no need to duplicate these tests.
print('Ok')
sys.exit(0)
sys.modules['Cheetah._namemapper'] = None
sys._cheetah_namemapper_pure = True
else:
sys.exit('Wrong argument or wrong number of arguments')
import unittest # noqa: E402 module level import not at top of file
from Cheetah.Tests import Analyzer # noqa: E402
from Cheetah.Tests import CheetahWrapper # noqa: E402
from Cheetah.Tests import Filters # noqa: E402
from Cheetah.Tests import ImportHooks # noqa: E402
from Cheetah.Tests import LoadTemplate # noqa: E402
from Cheetah.Tests import Misc # noqa: E402
from Cheetah.Tests import NameMapper # noqa: E402
from Cheetah.Tests import Parser # noqa: E402
from Cheetah.Tests import Regressions # noqa: E402
from Cheetah.Tests import SyntaxAndOutput # noqa: E402
from Cheetah.Tests import Template # noqa: E402
from Cheetah.Tests import TemplateCmdLineIface # noqa: E402
from Cheetah.Tests import Unicode # noqa: E402
SyntaxAndOutput.install_eols()
suites = [
unittest.defaultTestLoader.loadTestsFromModule(Analyzer),
unittest.defaultTestLoader.loadTestsFromModule(Filters),
unittest.defaultTestLoader.loadTestsFromModule(ImportHooks),
unittest.defaultTestLoader.loadTestsFromModule(LoadTemplate),
unittest.defaultTestLoader.loadTestsFromModule(Misc),
unittest.defaultTestLoader.loadTestsFromModule(NameMapper),
unittest.defaultTestLoader.loadTestsFromModule(Parser),
unittest.defaultTestLoader.loadTestsFromModule(Regressions),
unittest.defaultTestLoader.loadTestsFromModule(SyntaxAndOutput),
unittest.defaultTestLoader.loadTestsFromModule(Template),
unittest.defaultTestLoader.loadTestsFromModule(TemplateCmdLineIface),
unittest.defaultTestLoader.loadTestsFromModule(Unicode),
]
if not sys.platform.startswith('java'):
suites.append(
unittest.defaultTestLoader.loadTestsFromModule(CheetahWrapper))
if __name__ == '__main__':
if 'xml' in sys.argv:
from Cheetah.Tests import xmlrunner
runner = xmlrunner.XMLTestRunner(filename='Cheetah-Tests.xml')
else:
runner = unittest.TextTestRunner()
results = runner.run(unittest.TestSuite(suites))
if results.wasSuccessful():
sys.exit(0)
else:
        sys.exit(1)
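
# Invocation sketch (flags taken from the argv handling above):
#   python -m Cheetah.Tests.Test                    # text runner
#   python -m Cheetah.Tests.Test xml                # writes Cheetah-Tests.xml
#   python -m Cheetah.Tests.Test --namemapper-pure  # force pure-Python NameMapper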
# File: Flask-AppBuilder-jwi078-2.1.13/flask_appbuilder/fieldwidgets.py
from flask_babel import lazy_gettext as _
from wtforms import widgets
from wtforms.widgets import html_params, HTMLString
class DatePickerWidget(object):
"""
    Date picker based on Eonasdan's bootstrap-datetimepicker (GitHub)
"""
data_template = (
'<div class="input-group date appbuilder_date" id="datepicker">'
'<span class="input-group-addon"><i class="fa fa-calendar cursor-hand"></i>'
"</span>"
'<input class="form-control" data-format="yyyy-MM-dd" %(text)s />'
"</div>"
)
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
if not field.data:
field.data = ""
template = self.data_template
return HTMLString(
template % {"text": html_params(type="text", value=field.data, **kwargs)}
)
class DateTimePickerWidget(object):
"""
    Date/time picker based on Eonasdan's bootstrap-datetimepicker (GitHub)
"""
data_template = (
'<div class="input-group date appbuilder_datetime" id="datetimepicker">'
'<span class="input-group-addon"><i class="fa fa-calendar cursor-hand"></i>'
"</span>"
'<input class="form-control" data-format="yyyy-MM-dd hh:mm:ss" %(text)s />'
"</div>"
)
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
if not field.data:
field.data = ""
template = self.data_template
return HTMLString(
template % {"text": html_params(type="text", value=field.data, **kwargs)}
)
class BS3TextFieldWidget(widgets.TextInput):
def __call__(self, field, **kwargs):
kwargs["class"] = u"form-control"
if field.label:
kwargs["placeholder"] = field.label.text
if "name_" in kwargs:
field.name = kwargs["name_"]
return super(BS3TextFieldWidget, self).__call__(field, **kwargs)
class BS3TextAreaFieldWidget(widgets.TextArea):
def __call__(self, field, **kwargs):
kwargs["class"] = u"form-control"
kwargs["rows"] = 3
if field.label:
kwargs["placeholder"] = field.label.text
return super(BS3TextAreaFieldWidget, self).__call__(field, **kwargs)
class BS3PasswordFieldWidget(widgets.PasswordInput):
def __call__(self, field, **kwargs):
kwargs["class"] = u"form-control"
if field.label:
kwargs["placeholder"] = field.label.text
return super(BS3PasswordFieldWidget, self).__call__(field, **kwargs)
class Select2AJAXWidget(object):
data_template = "<input %(text)s />"
def __init__(self, endpoint, extra_classes=None, style=None):
self.endpoint = endpoint
self.extra_classes = extra_classes
self.style = style or u"width:250px"
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
kwargs.setdefault("endpoint", self.endpoint)
kwargs.setdefault("style", self.style)
input_classes = "input-group my_select2_ajax"
if self.extra_classes:
input_classes = input_classes + " " + self.extra_classes
kwargs.setdefault("class", input_classes)
if not field.data:
field.data = ""
template = self.data_template
return HTMLString(
template % {"text": html_params(type="text", value=field.data, **kwargs)}
)
class Select2SlaveAJAXWidget(object):
data_template = '<input class="input-group my_select2_ajax_slave" %(text)s />'
def __init__(self, master_id, endpoint, extra_classes=None, style=None):
self.endpoint = endpoint
self.master_id = master_id
self.extra_classes = extra_classes
self.style = style or u"width:250px"
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
kwargs.setdefault("endpoint", self.endpoint)
kwargs.setdefault("master_id", self.master_id)
kwargs.setdefault("style", self.style)
input_classes = "input-group my_select2_ajax"
if self.extra_classes:
input_classes = input_classes + " " + self.extra_classes
kwargs.setdefault("class", input_classes)
if not field.data:
field.data = ""
template = self.data_template
return HTMLString(
template % {"text": html_params(type="text", value=field.data, **kwargs)}
)
class Select2Widget(widgets.Select):
extra_classes = None
def __init__(self, extra_classes=None, style=None):
self.extra_classes = extra_classes
self.style = style or u"width:250px"
return super(Select2Widget, self).__init__()
def __call__(self, field, **kwargs):
kwargs["class"] = u"my_select2 form-control"
if self.extra_classes:
kwargs["class"] = kwargs["class"] + " " + self.extra_classes
kwargs["style"] = self.style
kwargs["data-placeholder"] = _("Select Value")
if "name_" in kwargs:
field.name = kwargs["name_"]
return super(Select2Widget, self).__call__(field, **kwargs)
class Select2ManyWidget(widgets.Select):
extra_classes = None
def __init__(self, extra_classes=None, style=None):
self.extra_classes = extra_classes
self.style = style or u"width:250px"
return super(Select2ManyWidget, self).__init__()
def __call__(self, field, **kwargs):
kwargs["class"] = u"my_select2 form-control"
if self.extra_classes:
kwargs["class"] = kwargs["class"] + " " + self.extra_classes
kwargs["style"] = self.style
kwargs["data-placeholder"] = _("Select Value")
kwargs["multiple"] = u"true"
if "name_" in kwargs:
field.name = kwargs["name_"]
        return super(Select2ManyWidget, self).__call__(field, **kwargs)
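
# Minimal usage sketch (the form and endpoint below are hypothetical, not part
# of Flask-AppBuilder itself):
from wtforms import Form, StringField

class _ExampleForm(Form):
    # Select2AJAXWidget renders an <input class="... my_select2_ajax"> whose
    # `endpoint` attribute the bundled select2 JavaScript queries for options.
    customer = StringField(
        "Customer",
        widget=Select2AJAXWidget(endpoint="/api/v1/customer/select2"),
    )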
# File: Adeepspeed-0.9.2/deepspeed/autotuning/constants.py
# DeepSpeed Team
#########################################
# autotunner implementation constants
#########################################
import os
DEFAULT_TEMPLATE_PATH_ZERO_0 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero0.json")
DEFAULT_TEMPLATE_PATH_ZERO_1 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero1.json")
DEFAULT_TEMPLATE_PATH_ZERO_2 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero2.json")
DEFAULT_TEMPLATE_PATH_ZERO_3 = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config_templates",
"template_zero3.json")
METRIC_PERCENT_DIFF_CONST = 0.05
DS_CONFIG = "ds_config"
BUFSIZE = 1 # line buffer size for writing files
#########################################
# autotuner configuration constants
#########################################
# Autotuner. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
AUTOTUNING_FORMAT = """
autotuner should be enabled as:
"session_params": {
"autotuning": {
"enabled": true,
"start_step": 5,
"end_step": 15
}
}
"""
AUTOTUNING = "autotuning"
AUTOTUNING_ENABLED = "enabled"
AUTOTUNING_ENABLED_DEFAULT = False
AUTOTUNING_FAST = "fast"
AUTOTUNING_FAST_DEFAULT = True
AUTOTUNING_RESULTS_DIR = "results_dir"
AUTOTUNING_RESULTS_DIR_DEFAULT = "autotuning_results"
AUTOTUNING_EXPS_DIR = "exps_dir"
AUTOTUNING_EXPS_DIR_DEFAULT = "autotuning_exps"
AUTOTUNING_OVERWRITE = "overwrite"
AUTOTUNING_OVERWRITE_DEFAULT = True
AUTOTUNING_START_PROFILE_STEP = "start_profile_step"
AUTOTUNING_START_PROFILE_STEP_DEFAULT = 3
AUTOTUNING_END_PROFILE_STEP = "end_profile_step"
AUTOTUNING_END_PROFILE_STEP_DEFAULT = 5
AUTOTUNING_METRIC_PATH = "metric_path"
AUTOTUNING_METRIC_PATH_DEFAULT = None
AUTOTUNING_TUNER_TYPE = "tuner_type"
AUTOTUNING_TUNER_GRIDSEARCH = "gridsearch"
AUTOTUNING_TUNER_RANDOM = "random"
AUTOTUNING_TUNER_MODELBASED = "model_based"
AUTOTUNING_TUNER_TYPE_DEFAULT = AUTOTUNING_TUNER_GRIDSEARCH
AUTOTUNING_TUNER_EARLY_STOPPING = "tuner_early_stopping"
AUTOTUNING_TUNER_EARLY_STOPPING_DEFAULT = 5
AUTOTUNING_TUNER_NUM_TRIALS = "tuner_num_trials"
AUTOTUNING_TUNER_NUM_TRIALS_DEFAULT = 50
AUTOTUNING_ARG_MAPPINGS = "arg_mappings"
AUTOTUNING_ARG_MAPPINGS_DEFAULT = None
AUTOTUNING_MAX_TRAIN_BATCH_SIZE = "max_train_batch_size"
AUTOTUNING_MAX_TRAIN_BATCH_SIZE_DEFAULT = None
AUTOTUNING_MIN_TRAIN_BATCH_SIZE = "min_train_batch_size"
AUTOTUNING_MIN_TRAIN_BATCH_SIZE_DEFAULT = 1
AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "max_train_micro_batch_size_per_gpu"
AUTOTUNING_MAX_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1024
AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU = "min_train_micro_batch_size_per_gpu"
AUTOTUNING_MIN_TRAIN_MICRO_BATCH_SIZE_PER_GPU_DEFAULT = 1
AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES = "num_tuning_micro_batch_sizes"
AUTOTUNING_NUM_TUNING_MICRO_BATCH_SIZES_DEFAULT = 3
AUTOTUNING_MP_SIZE = "mp_size"
AUTOTUNING_MP_SIZE_DEFAULT = 1
AUTOTUNING_METRIC = "metric"
AUTOTUNING_METRIC_LATENCY = "latency"
AUTOTUNING_METRIC_THROUGHPUT = "throughput"
AUTOTUNING_METRIC_FLOPS = "flops"
AUTOTUNING_METRIC_FORWARD = "forward"
AUTOTUNING_METRIC_BACKWARD = "backward"
AUTOTUNING_METRIC_STEPS = "step"
AUTOTUNING_METRIC_DEFAULT = AUTOTUNING_METRIC_THROUGHPUT
#########################################
# MODEL INFO
#########################################
AUTOTUNING_MODEL_INFO_PATH = "model_info_path"
AUTOTUNING_MODEL_INFO_PATH_DEFAULT = None
MODEL_INFO_FORMAT = '''
"model_info": {
"num_params": 1000000000,
"hidden_size": 10,
"num_layers": 12,
}
'''
MODEL_INFO = "model_info"
MODEL_INFO_PROFILE = "profile"
MODEL_INFO_PROFILE_DEFAULT = False
MODEL_INFO_NUM_PARAMS = "num_params"
MODEL_INFO_NUM_PARAMS_DEFAULT = None
MODEL_INFO_HIDDEN_SIZE = "hidden_size"
MODEL_INFO_HIDDEN_SIZE_DEFAULT = None
MODEL_INFO_NUM_LAYERS = "num_layers"
MODEL_INFO_NUM_LAYERS_DEFAULT = None
MODEL_INFO_KEY_DEFAULT_DICT = {
MODEL_INFO_PROFILE: MODEL_INFO_PROFILE_DEFAULT,
MODEL_INFO_NUM_PARAMS: MODEL_INFO_NUM_PARAMS_DEFAULT,
MODEL_INFO_HIDDEN_SIZE: MODEL_INFO_HIDDEN_SIZE_DEFAULT,
MODEL_INFO_NUM_LAYERS: MODEL_INFO_NUM_LAYERS_DEFAULT
}
#########################################
# autotunner search space constants
#########################################
DEFAULT_HF_CONFIG = {
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"gradient_accumulation_steps": "auto",
}
DEFAULT_MIN_MEM_CONFIG = {
"train_micro_batch_size_per_gpu": 1,
"zero_optimization": {
"stage": 3
},
"memory_break_down": False
}
DEFAULT_TUNING_SPACE_ZERO_0 = {"zero_optimization": {"stage": 0}}
DEFAULT_TUNING_SPACE_ZERO_1 = {
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_bucket_size": [5e7, 5e8, 1e9],
}
}
DEFAULT_TUNING_SPACE_ZERO_2 = {
"zero_optimization": {
"stage": 2,
"overlap_comm": [True, False],
"reduce_scatter": [False, True],
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_bucket_size": [5e7, 5e8, 1e9],
"contiguous_gradients": [False, True]
},
}
DEFAULT_TUNING_SPACE_ZERO_3 = {
"zero_optimization": {
"stage": 3,
"overlap_comm": [True, False],
"reduce_scatter": [False, True],
"reduce_bucket_size": [5e7, 5e8, 1e9],
"allgather_partitions": [True, False],
"allgather_bucket_size": [5e7, 5e8, 1e9],
"contiguous_gradients": [False, True]
},
}
GLOBAL_TUNING_SPACE = 'global'
# TUNING_MICRO_BATCH_SIZE_PREFIX="tune_micro_batch_size_z"
TUNING_MICRO_BATCH_SIZE_PREFIX = "z"
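
# Illustration only (config shape assumed): a consumer would read these knobs
# from the "autotuning" section of a ds_config dict, falling back to the
# defaults defined above.
def _get_autotuning_option(ds_config, key, default):
    return ds_config.get(AUTOTUNING, {}).get(key, default)

# e.g. _get_autotuning_option(cfg, AUTOTUNING_METRIC, AUTOTUNING_METRIC_DEFAULT)
# returns "throughput" when cfg has no explicit "metric" entry.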
# File: NREL_shift-0.1.0a0/shift/cli/shift.py
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module handles command line interface for this package. """
import json
import click
import yaml
from shift.config_template import ConfigTemplate
from shift.facade import generate_feeder_from_yaml
@click.command()
@click.option("-c", "--config-yaml", help="Path to config yaml file.")
def create_config_file(config_yaml: str) -> None:
"""Creates a default config yaml file. Update this yaml file and use it to
create the synthetic distribution feeder.
"""
config = ConfigTemplate().dict()
print(json.loads(json.dumps(config)))
with open(config_yaml, "w", encoding="utf-8") as fpointer:
yaml.dump(json.loads(json.dumps(config)), fpointer)
@click.command()
@click.option("-c", "--config-yaml", help="Path to config yaml file.")
def create_feeder(config_yaml: str) -> None:
"""Creates a synthetic distribution feeder
by taking the config yaml file as an input.
"""
generate_feeder_from_yaml(config_yaml)
@click.group()
def cli():
"""Entry point"""
pass
cli.add_command(create_config_file)
cli.add_command(create_feeder)
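
# Shell usage sketch (assumes the package installs this group as a `shift`
# entry point; click exposes the functions under dashed command names):
#   shift create-config-file -c config.yaml   # write a template config
#   shift create-feeder -c config.yaml        # build the feeder from it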
// File: MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/shadycss/scoping-shim.min.js
(function(){/*
Copyright (c) 2017 The Polymer Project Authors. All rights reserved.
This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
Code distributed by Google as part of the polymer project is also
subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
*/
'use strict';var l,aa="undefined"!=typeof window&&window===this?this:"undefined"!=typeof global&&null!=global?global:this,m={};function n(){this.end=this.start=0;this.rules=this.parent=this.previous=null;this.cssText=this.parsedCssText="";this.atRule=!1;this.type=0;this.parsedSelector=this.selector=this.keyframesName=""}
function p(a){a=a.replace(ba,"").replace(ca,"");var b=da,c=a,e=new n;e.start=0;e.end=c.length;for(var d=e,f=0,h=c.length;f<h;f++)if("{"===c[f]){d.rules||(d.rules=[]);var g=d,k=g.rules[g.rules.length-1]||null;d=new n;d.start=f+1;d.parent=g;d.previous=k;g.rules.push(d)}else"}"===c[f]&&(d.end=f+1,d=d.parent||e);return b(e,a)}
function da(a,b){var c=b.substring(a.start,a.end-1);a.parsedCssText=a.cssText=c.trim();a.parent&&(c=b.substring(a.previous?a.previous.end:a.parent.start,a.start-1),c=ea(c),c=c.replace(fa," "),c=c.substring(c.lastIndexOf(";")+1),c=a.parsedSelector=a.selector=c.trim(),a.atRule=0===c.indexOf("@"),a.atRule?0===c.indexOf("@media")?a.type=ha:c.match(ia)&&(a.type=r,a.keyframesName=a.selector.split(fa).pop()):a.type=0===c.indexOf("--")?ja:ka);if(c=a.rules)for(var e=0,d=c.length,f;e<d&&(f=c[e]);e++)da(f,b);
return a}function ea(a){return a.replace(/\\([0-9a-f]{1,6})\s/gi,function(a,c){a=c;for(c=6-a.length;c--;)a="0"+a;return"\\"+a})}
function la(a,b,c){c=void 0===c?"":c;var e="";if(a.cssText||a.rules){var d=a.rules,f;if(f=d)f=d[0],f=!(f&&f.selector&&0===f.selector.indexOf("--"));if(f){f=0;for(var h=d.length,g;f<h&&(g=d[f]);f++)e=la(g,b,e)}else b?b=a.cssText:(b=a.cssText,b=b.replace(ma,"").replace(na,""),b=b.replace(oa,"").replace(pa,"")),(e=b.trim())&&(e=" "+e+"\n")}e&&(a.selector&&(c+=a.selector+" {\n"),c+=e,a.selector&&(c+="}\n\n"));return c}
var ka=1,r=7,ha=4,ja=1E3,ba=/\/\*[^*]*\*+([^/*][^*]*\*+)*\//gim,ca=/@import[^;]*;/gim,ma=/(?:^[^;\-\s}]+)?--[^;{}]*?:[^{};]*?(?:[;\n]|$)/gim,na=/(?:^[^;\-\s}]+)?--[^;{}]*?:[^{};]*?{[^}]*?}(?:[;\n]|$)?/gim,oa=/@apply\s*\(?[^);]*\)?\s*(?:[;\n]|$)?/gim,pa=/[^;:]*?:[^;]*?var\([^;]*\)(?:[;\n]|$)?/gim,ia=/^@[^\s]*keyframes/,fa=/\s+/g;var qa=Promise.resolve();function ra(a){if(a=m[a])a._applyShimCurrentVersion=a._applyShimCurrentVersion||0,a._applyShimValidatingVersion=a._applyShimValidatingVersion||0,a._applyShimNextVersion=(a._applyShimNextVersion||0)+1}function sa(a){return a._applyShimCurrentVersion===a._applyShimNextVersion}function ta(a){a._applyShimValidatingVersion=a._applyShimNextVersion;a.b||(a.b=!0,qa.then(function(){a._applyShimCurrentVersion=a._applyShimNextVersion;a.b=!1}))};var t=!(window.ShadyDOM&&window.ShadyDOM.inUse),u;function ua(a){u=a&&a.shimcssproperties?!1:t||!(navigator.userAgent.match(/AppleWebKit\/601|Edge\/15/)||!window.CSS||!CSS.supports||!CSS.supports("box-shadow","0 0 0 var(--foo)"))}window.ShadyCSS&&void 0!==window.ShadyCSS.nativeCss?u=window.ShadyCSS.nativeCss:window.ShadyCSS?(ua(window.ShadyCSS),window.ShadyCSS=void 0):ua(window.WebComponents&&window.WebComponents.flags);var v=u;var w=/(?:^|[;\s{]\s*)(--[\w-]*?)\s*:\s*(?:((?:'(?:\\'|.)*?'|"(?:\\"|.)*?"|\([^)]*?\)|[^};{])+)|\{([^}]*)\}(?:(?=[;\s}])|$))/gi,y=/(?:^|\W+)@apply\s*\(?([^);\n]*)\)?/gi,va=/(--[\w-]+)\s*([:,;)]|$)/gi,wa=/(animation\s*:)|(animation-name\s*:)/,xa=/@media\s(.*)/,ya=/\{[^}]*\}/g;var za=new Set;function z(a,b){if(!a)return"";"string"===typeof a&&(a=p(a));b&&A(a,b);return la(a,v)}function B(a){!a.__cssRules&&a.textContent&&(a.__cssRules=p(a.textContent));return a.__cssRules||null}function Aa(a){return!!a.parent&&a.parent.type===r}function A(a,b,c,e){if(a){var d=!1,f=a.type;if(e&&f===ha){var h=a.selector.match(xa);h&&(window.matchMedia(h[1]).matches||(d=!0))}f===ka?b(a):c&&f===r?c(a):f===ja&&(d=!0);if((a=a.rules)&&!d){d=0;f=a.length;for(var g;d<f&&(g=a[d]);d++)A(g,b,c,e)}}}
function C(a,b,c,e){var d=document.createElement("style");b&&d.setAttribute("scope",b);d.textContent=a;Ba(d,c,e);return d}var D=null;function Ba(a,b,c){b=b||document.head;b.insertBefore(a,c&&c.nextSibling||b.firstChild);D?a.compareDocumentPosition(D)===Node.DOCUMENT_POSITION_PRECEDING&&(D=a):D=a}
function Ca(a,b){var c=a.indexOf("var(");if(-1===c)return b(a,"","","");a:{var e=0;var d=c+3;for(var f=a.length;d<f;d++)if("("===a[d])e++;else if(")"===a[d]&&0===--e)break a;d=-1}e=a.substring(c+4,d);c=a.substring(0,c);a=Ca(a.substring(d+1),b);d=e.indexOf(",");return-1===d?b(c,e.trim(),"",a):b(c,e.substring(0,d).trim(),e.substring(d+1).trim(),a)}function E(a,b){t?a.setAttribute("class",b):window.ShadyDOM.nativeMethods.setAttribute.call(a,"class",b)}
function F(a){var b=a.localName,c="";b?-1<b.indexOf("-")||(c=b,b=a.getAttribute&&a.getAttribute("is")||""):(b=a.is,c=a.extends);return{is:b,u:c}};var G=null,Da=window.HTMLImports&&window.HTMLImports.whenReady||null,H;function Ea(a){requestAnimationFrame(function(){Da?Da(a):(G||(G=new Promise(function(a){H=a}),"complete"===document.readyState?H():document.addEventListener("readystatechange",function(){"complete"===document.readyState&&H()})),G.then(function(){a&&a()}))})};function I(){}function J(a,b,c){var e=K;a.__styleScoped?a.__styleScoped=null:Fa(e,a,b||"",c)}function Fa(a,b,c,e){b.nodeType===Node.ELEMENT_NODE&&Ga(b,c,e);if(b="template"===b.localName?(b.content||b.R).childNodes:b.children||b.childNodes)for(var d=0;d<b.length;d++)Fa(a,b[d],c,e)}
function Ga(a,b,c){if(b)if(a.classList)c?(a.classList.remove("style-scope"),a.classList.remove(b)):(a.classList.add("style-scope"),a.classList.add(b));else if(a.getAttribute){var e=a.getAttribute(Ha);c?e&&(b=e.replace("style-scope","").replace(b,""),E(a,b)):E(a,(e?e+" ":"")+"style-scope "+b)}}function L(a,b,c){var e=K,d=a.__cssBuild;t||"shady"===d?b=z(b,c):(a=F(a),b=Ia(e,b,a.is,a.u,c)+"\n\n");return b.trim()}
function Ia(a,b,c,e,d){var f=M(c,e);c=c?Ja+c:"";return z(b,function(b){b.c||(b.selector=b.g=Ka(a,b,a.b,c,f),b.c=!0);d&&d(b,c,f)})}function M(a,b){return b?"[is="+a+"]":a}function Ka(a,b,c,e,d){var f=b.selector.split(La);if(!Aa(b)){b=0;for(var h=f.length,g;b<h&&(g=f[b]);b++)f[b]=c.call(a,g,e,d)}return f.join(La)}function Ma(a){return a.replace(Na,function(a,c,e){-1<e.indexOf("+")?e=e.replace(/\+/g,"___"):-1<e.indexOf("___")&&(e=e.replace(/___/g,"+"));return":"+c+"("+e+")"})}
I.prototype.b=function(a,b,c){var e=!1;a=a.trim();var d=Na.test(a);d&&(a=a.replace(Na,function(a,b,c){return":"+b+"("+c.replace(/\s/g,"")+")"}),a=Ma(a));a=a.replace(Oa,Pa+" $1");a=a.replace(Qa,function(a,d,g){e||(a=Ra(g,d,b,c),e=e||a.stop,d=a.H,g=a.value);return d+g});d&&(a=Ma(a));return a};
function Ra(a,b,c,e){var d=a.indexOf(Sa);0<=a.indexOf(Pa)?a=Ta(a,e):0!==d&&(a=c?Ua(a,c):a);c=!1;0<=d&&(b="",c=!0);if(c){var f=!0;c&&(a=a.replace(Va,function(a,b){return" > "+b}))}a=a.replace(Wa,function(a,b,c){return'[dir="'+c+'"] '+b+", "+b+'[dir="'+c+'"]'});return{value:a,H:b,stop:f}}function Ua(a,b){a=a.split(Xa);a[0]+=b;return a.join(Xa)}
function Ta(a,b){var c=a.match(Ya);return(c=c&&c[2].trim()||"")?c[0].match(Za)?a.replace(Ya,function(a,c,f){return b+f}):c.split(Za)[0]===b?c:$a:a.replace(Pa,b)}function ab(a){a.selector===bb&&(a.selector="html")}I.prototype.c=function(a){return a.match(Sa)?this.b(a,cb):Ua(a.trim(),cb)};aa.Object.defineProperties(I.prototype,{a:{configurable:!0,enumerable:!0,get:function(){return"style-scope"}}});
var Na=/:(nth[-\w]+)\(([^)]+)\)/,cb=":not(.style-scope)",La=",",Qa=/(^|[\s>+~]+)((?:\[.+?\]|[^\s>+~=[])+)/g,Za=/[[.:#*]/,Pa=":host",bb=":root",Sa="::slotted",Oa=new RegExp("^("+Sa+")"),Ya=/(:host)(?:\(((?:\([^)(]*\)|[^)(]*)+?)\))/,Va=/(?:::slotted)(?:\(((?:\([^)(]*\)|[^)(]*)+?)\))/,Wa=/(.*):dir\((?:(ltr|rtl))\)/,Ja=".",Xa=":",Ha="class",$a="should_not_match",K=new I;function db(){}
function eb(a){for(var b=0;b<a.length;b++){var c=a[b];if(c.target!==document.documentElement&&c.target!==document.head)for(var e=0;e<c.addedNodes.length;e++){var d=c.addedNodes[e];if(d.nodeType===Node.ELEMENT_NODE){var f=d.getRootNode();var h=d;var g=[];h.classList?g=Array.from(h.classList):h instanceof window.SVGElement&&h.hasAttribute("class")&&(g=h.getAttribute("class").split(/\s+/));h=g;g=h.indexOf(K.a);if((h=-1<g?h[g+1]:"")&&f===d.ownerDocument)J(d,h,!0);else if(f.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&(f=
f.host))if(f=F(f).is,h===f)for(d=window.ShadyDOM.nativeMethods.querySelectorAll.call(d,":not(."+K.a+")"),f=0;f<d.length;f++)Ga(d[f],h);else h&&J(d,h,!0),J(d,f)}}}}
if(!t){var fb=new MutationObserver(eb),gb=function(a){fb.observe(a,{childList:!0,subtree:!0})};if(window.customElements&&!window.customElements.polyfillWrapFlushCallback)gb(document);else{var hb=function(){gb(document.body)};window.HTMLImports?window.HTMLImports.whenReady(hb):requestAnimationFrame(function(){if("loading"===document.readyState){var a=function(){hb();document.removeEventListener("readystatechange",a)};document.addEventListener("readystatechange",a)}else hb()})}db=function(){eb(fb.takeRecords())}}
var ib=db;function N(a,b,c,e,d){this.j=a||null;this.b=b||null;this.B=c||[];this.s=null;this.u=d||"";this.a=this.h=this.m=null}function O(a){return a?a.__styleInfo:null}function jb(a,b){return a.__styleInfo=b}N.prototype.c=function(){return this.j};N.prototype._getStyleRules=N.prototype.c;var Q=window.Element.prototype,kb=Q.matches||Q.matchesSelector||Q.mozMatchesSelector||Q.msMatchesSelector||Q.oMatchesSelector||Q.webkitMatchesSelector,lb=navigator.userAgent.match("Trident");function mb(){}function nb(a){var b={},c=[],e=0;A(a,function(a){R(a);a.index=e++;a=a.f.cssText;for(var c;c=va.exec(a);){var d=c[1];":"!==c[2]&&(b[d]=!0)}},function(a){c.push(a)});a.b=c;a=[];for(var d in b)a.push(d);return a}
function R(a){if(!a.f){var b={},c={};S(a,c)&&(b.i=c,a.rules=null);b.cssText=a.parsedCssText.replace(ya,"").replace(w,"");a.f=b}}function S(a,b){var c=a.f;if(c){if(c.i)return Object.assign(b,c.i),!0}else{c=a.parsedCssText;for(var e;a=w.exec(c);){e=(a[2]||a[3]).trim();if("inherit"!==e||"unset"!==e)b[a[1].trim()]=e;e=!0}return e}}
function T(a,b,c){b&&(b=0<=b.indexOf(";")?ob(a,b,c):Ca(b,function(b,d,f,h){if(!d)return b+h;(d=T(a,c[d],c))&&"initial"!==d?"apply-shim-inherit"===d&&(d="inherit"):d=T(a,c[f]||f,c)||f;return b+(d||"")+h}));return b&&b.trim()||""}
function ob(a,b,c){b=b.split(";");for(var e=0,d,f;e<b.length;e++)if(d=b[e]){y.lastIndex=0;if(f=y.exec(d))d=T(a,c[f[1]],c);else if(f=d.indexOf(":"),-1!==f){var h=d.substring(f);h=h.trim();h=T(a,h,c)||h;d=d.substring(0,f)+h}b[e]=d&&d.lastIndexOf(";")===d.length-1?d.slice(0,-1):d||""}return b.join(";")}
function pb(a,b){var c={},e=[];A(a,function(a){a.f||R(a);var d=a.g||a.parsedSelector;b&&a.f.i&&d&&kb.call(b,d)&&(S(a,c),a=a.index,d=parseInt(a/32,10),e[d]=(e[d]||0)|1<<a%32)},null,!0);return{i:c,key:e}}
function qb(a,b,c,e,d){c.f||R(c);if(c.f.i){b=F(b);a=b.is;b=b.u;b=a?M(a,b):"html";var f=c.parsedSelector,h=":host > *"===f||"html"===f,g=0===f.indexOf(":host")&&!h;"shady"===e&&(h=f===b+" > *."+b||-1!==f.indexOf("html"),g=!h&&0===f.indexOf(b));"shadow"===e&&(h=":host > *"===f||"html"===f,g=g&&!h);if(h||g)e=b,g&&(t&&!c.g&&(c.g=Ka(K,c,K.b,a?Ja+a:"",b)),e=c.g||b),d({M:e,K:g,S:h})}}
function rb(a,b){var c={},e={},d=U,f=b&&b.__cssBuild;A(b,function(b){qb(d,a,b,f,function(d){kb.call(a.A||a,d.M)&&(d.K?S(b,c):S(b,e))})},null,!0);return{L:e,J:c}}
function sb(a,b,c,e){var d=F(b),f=M(d.is,d.u),h=new RegExp("(?:^|[^.#[:])"+(b.extends?"\\"+f.slice(0,-1)+"\\]":f)+"($|[.:[\\s>+~])");d=O(b).j;var g=tb(d,e);return L(b,d,function(b){var d="";b.f||R(b);b.f.cssText&&(d=ob(a,b.f.cssText,c));b.cssText=d;if(!t&&!Aa(b)&&b.cssText){var k=d=b.cssText;null==b.C&&(b.C=wa.test(d));if(b.C)if(null==b.w){b.w=[];for(var q in g)k=g[q],k=k(d),d!==k&&(d=k,b.w.push(q))}else{for(q=0;q<b.w.length;++q)k=g[b.w[q]],d=k(d);k=d}b.cssText=k;b.g=b.g||b.selector;d="."+e;q=b.g.split(",");
k=0;for(var zb=q.length,P;k<zb&&(P=q[k]);k++)q[k]=P.match(h)?P.replace(f,d):d+" "+P;b.selector=q.join(",")}})}function tb(a,b){a=a.b;var c={};if(!t&&a)for(var e=0,d=a[e];e<a.length;d=a[++e]){var f=d,h=b;f.l=new RegExp(f.keyframesName,"g");f.a=f.keyframesName+"-"+h;f.g=f.g||f.selector;f.selector=f.g.replace(f.keyframesName,f.a);c[d.keyframesName]=ub(d)}return c}function ub(a){return function(b){return b.replace(a.l,a.a)}}
function vb(a,b){var c=U,e=B(a);a.textContent=z(e,function(a){var d=a.cssText=a.parsedCssText;a.f&&a.f.cssText&&(d=d.replace(ma,"").replace(na,""),a.cssText=ob(c,d,b))})}aa.Object.defineProperties(mb.prototype,{a:{configurable:!0,enumerable:!0,get:function(){return"x-scope"}}});var U=new mb;var wb={},V=window.customElements;if(V&&!t){var xb=V.define;V.define=function(a,b,c){var e=document.createComment(" Shady DOM styles for "+a+" "),d=document.head;d.insertBefore(e,(D?D.nextSibling:null)||d.firstChild);D=e;wb[a]=e;return xb.call(V,a,b,c)}};var W=new function(){this.cache={};this.a=100};function X(){var a=this;this.A={};this.c=document.documentElement;var b=new n;b.rules=[];this.l=jb(this.c,new N(b));this.v=!1;this.b=this.a=null;Ea(function(){Y(a)})}l=X.prototype;l.F=function(){ib()};l.I=function(a){return B(a)};l.O=function(a){return z(a)};
l.prepareTemplate=function(a,b,c){if(!a.l){a.l=!0;a.name=b;a.extends=c;m[b]=a;var e=(e=a.content.querySelector("style"))?e.getAttribute("css-build")||"":"";var d=[];for(var f=a.content.querySelectorAll("style"),h=0;h<f.length;h++){var g=f[h];if(g.hasAttribute("shady-unscoped")){if(!t){var k=g.textContent;za.has(k)||(za.add(k),k=g.cloneNode(!0),document.head.appendChild(k));g.parentNode.removeChild(g)}}else d.push(g.textContent),g.parentNode.removeChild(g)}d=d.join("").trim();c={is:b,extends:c,P:e};
t||J(a.content,b);Y(this);f=y.test(d)||w.test(d);y.lastIndex=0;w.lastIndex=0;d=p(d);f&&v&&this.a&&this.a.transformRules(d,b);a._styleAst=d;a.v=e;e=[];v||(e=nb(a._styleAst));if(!e.length||v)d=t?a.content:null,b=wb[b],f=L(c,a._styleAst),b=f.length?C(f,c.is,d,b):void 0,a.a=b;a.c=e}};
function yb(a){!a.b&&window.ShadyCSS&&window.ShadyCSS.CustomStyleInterface&&(a.b=window.ShadyCSS.CustomStyleInterface,a.b.transformCallback=function(b){a.D(b)},a.b.validateCallback=function(){requestAnimationFrame(function(){(a.b.enqueued||a.v)&&a.o()})})}function Y(a){!a.a&&window.ShadyCSS&&window.ShadyCSS.ApplyShim&&(a.a=window.ShadyCSS.ApplyShim,a.a.invalidCallback=ra);yb(a)}
l.o=function(){Y(this);if(this.b){var a=this.b.processStyles();if(this.b.enqueued){if(v)for(var b=0;b<a.length;b++){var c=this.b.getStyleForCustomStyle(a[b]);if(c&&v&&this.a){var e=B(c);Y(this);this.a.transformRules(e);c.textContent=z(e)}}else for(Ab(this,this.c,this.l),b=0;b<a.length;b++)(c=this.b.getStyleForCustomStyle(a[b]))&&vb(c,this.l.m);this.b.enqueued=!1;this.v&&!v&&this.styleDocument()}}};
l.styleElement=function(a,b){var c=F(a).is,e=O(a);if(!e){var d=F(a);e=d.is;d=d.u;var f=wb[e];e=m[e];if(e){var h=e._styleAst;var g=e.c}e=jb(a,new N(h,f,g,0,d))}a!==this.c&&(this.v=!0);b&&(e.s=e.s||{},Object.assign(e.s,b));if(v){if(e.s){b=e.s;for(var k in b)null===k?a.style.removeProperty(k):a.style.setProperty(k,b[k])}if(((k=m[c])||a===this.c)&&k&&k.a&&!sa(k)){if(sa(k)||k._applyShimValidatingVersion!==k._applyShimNextVersion)Y(this),this.a&&this.a.transformRules(k._styleAst,c),k.a.textContent=L(a,
e.j),ta(k);t&&(c=a.shadowRoot)&&(c.querySelector("style").textContent=L(a,e.j));e.j=k._styleAst}}else if(Ab(this,a,e),e.B&&e.B.length){c=e;k=F(a).is;a:{if(b=W.cache[k])for(h=b.length-1;0<=h;h--){g=b[h];b:{e=c.B;for(d=0;d<e.length;d++)if(f=e[d],g.i[f]!==c.m[f]){e=!1;break b}e=!0}if(e){b=g;break a}}b=void 0}e=b?b.styleElement:null;h=c.h;(g=b&&b.h)||(g=this.A[k]=(this.A[k]||0)+1,g=k+"-"+g);c.h=g;g=c.h;d=U;d=e?e.textContent||"":sb(d,a,c.m,g);f=O(a);var x=f.a;x&&!t&&x!==e&&(x._useCount--,0>=x._useCount&&
x.parentNode&&x.parentNode.removeChild(x));t?f.a?(f.a.textContent=d,e=f.a):d&&(e=C(d,g,a.shadowRoot,f.b)):e?e.parentNode||(lb&&-1<d.indexOf("@media")&&(e.textContent=d),Ba(e,null,f.b)):d&&(e=C(d,g,null,f.b));e&&(e._useCount=e._useCount||0,f.a!=e&&e._useCount++,f.a=e);g=e;t||(e=c.h,f=d=a.getAttribute("class")||"",h&&(f=d.replace(new RegExp("\\s*x-scope\\s*"+h+"\\s*","g")," ")),f+=(f?" ":"")+"x-scope "+e,d!==f&&E(a,f));b||(a=W.cache[k]||[],a.push({i:c.m,styleElement:g,h:c.h}),a.length>W.a&&a.shift(),
W.cache[k]=a)}};function Bb(a,b){return(b=b.getRootNode().host)?O(b)?b:Bb(a,b):a.c}function Ab(a,b,c){a=Bb(a,b);var e=O(a);a=Object.create(e.m||null);var d=rb(b,c.j);b=pb(e.j,b).i;Object.assign(a,d.J,b,d.L);b=c.s;for(var f in b)if((d=b[f])||0===d)a[f]=d;f=U;b=Object.getOwnPropertyNames(a);for(d=0;d<b.length;d++)e=b[d],a[e]=T(f,a[e],a);c.m=a}l.styleDocument=function(a){this.styleSubtree(this.c,a)};
l.styleSubtree=function(a,b){var c=a.shadowRoot;(c||a===this.c)&&this.styleElement(a,b);if(b=c&&(c.children||c.childNodes))for(a=0;a<b.length;a++)this.styleSubtree(b[a]);else if(a=a.children||a.childNodes)for(b=0;b<a.length;b++)this.styleSubtree(a[b])};l.D=function(a){var b=this,c=B(a);A(c,function(a){if(t)ab(a);else{var c=K;a.selector=a.parsedSelector;ab(a);a.selector=a.g=Ka(c,a,c.c,void 0,void 0)}v&&(Y(b),b.a&&b.a.transformRule(a))});v?a.textContent=z(c):this.l.j.rules.push(c)};
l.getComputedStyleValue=function(a,b){var c;v||(c=(O(a)||O(Bb(this,a))).m[b]);return(c=c||window.getComputedStyle(a).getPropertyValue(b))?c.trim():""};l.N=function(a,b){var c=a.getRootNode();b=b?b.split(/\s/):[];c=c.host&&c.host.localName;if(!c){var e=a.getAttribute("class");if(e){e=e.split(/\s/);for(var d=0;d<e.length;d++)if(e[d]===K.a){c=e[d+1];break}}}c&&b.push(K.a,c);v||(c=O(a))&&c.h&&b.push(U.a,c.h);E(a,b.join(" "))};l.G=function(a){return O(a)};X.prototype.flush=X.prototype.F;
X.prototype.prepareTemplate=X.prototype.prepareTemplate;X.prototype.styleElement=X.prototype.styleElement;X.prototype.styleDocument=X.prototype.styleDocument;X.prototype.styleSubtree=X.prototype.styleSubtree;X.prototype.getComputedStyleValue=X.prototype.getComputedStyleValue;X.prototype.setElementClass=X.prototype.N;X.prototype._styleInfoForNode=X.prototype.G;X.prototype.transformCustomStyleForDocument=X.prototype.D;X.prototype.getStyleAst=X.prototype.I;X.prototype.styleAstToString=X.prototype.O;
X.prototype.flushCustomStyles=X.prototype.o;Object.defineProperties(X.prototype,{nativeShadow:{get:function(){return t}},nativeCss:{get:function(){return v}}});var Z=new X,Cb,Db;window.ShadyCSS&&(Cb=window.ShadyCSS.ApplyShim,Db=window.ShadyCSS.CustomStyleInterface);window.ShadyCSS={ScopingShim:Z,prepareTemplate:function(a,b,c){Z.o();Z.prepareTemplate(a,b,c)},styleSubtree:function(a,b){Z.o();Z.styleSubtree(a,b)},styleElement:function(a){Z.o();Z.styleElement(a)},styleDocument:function(a){Z.o();Z.styleDocument(a)},getComputedStyleValue:function(a,b){return Z.getComputedStyleValue(a,b)},nativeCss:v,nativeShadow:t};Cb&&(window.ShadyCSS.ApplyShim=Cb);
Db&&(window.ShadyCSS.CustomStyleInterface=Db);}).call(this);
//# sourceMappingURL=scoping-shim.min.js.map | PypiClean |
/Hummingbird-XFEL-1.3b0.tar.gz/Hummingbird-XFEL-1.3b0/hummingbird/simulation/simulated_tof.py | import os
import numpy as np
# Loading a test object (binary hummingbird logo); get_pattern() below relies
# on test_diffraction, so the load must be active (test_object.npy is expected
# to ship next to this module)
test_object = np.load(os.path.dirname(os.path.realpath(__file__)) + '/test_object.npy')*1e-2
test_diffraction = np.abs(np.fft.fftshift(np.fft.fft2(test_object)))**2
class Simulation:
"""
Base class for simulation of typical single particle imaging data.
Kwargs:
hitrate (float): Ratio of hits to be simulated, default is 0.1
sigma (int): Sigma used for simulation of detector noise (normal distribution), default is 1
"""
def __init__(self, hitrate=0.1, sigma=1):
self.hitrate = hitrate
self.sigma = sigma
        self.shape = (256, 256)
self._is_hit = None
def next_event(self):
"""Based on a given hitrate, the event is defined to be either a hit or a miss."""
if np.random.rand() < self.hitrate:
self._is_hit = True
else:
self._is_hit = False
    def get_tof_trace(self):
        """Returns a simulated TOF trace: three ion peaks on a noisy baseline."""
        trace = np.random.normal(loc=0, scale=10, size=5000)
        # clip() returns a copy, so the result must be assigned back
        trace = trace.clip(0)
        # three ion peaks at different flight times
        trace[500:510] += np.random.normal(loc=100, scale=5, size=10)
        trace[1000:1010] += np.random.normal(loc=500, scale=5, size=10)
        trace[1300:1310] += np.random.normal(loc=300, scale=5, size=10)
        # constant baseline offset; invert to mimic negative detector polarity
        trace += 50
        return trace * -1
def get_pattern(self):
"""Returns a diffraction pattern (hit or miss)"""
noise = np.random.normal(0, self.sigma, self.shape)
if self._is_hit:
return test_diffraction + noise
else:
return noise
def get_pulse_energy(self):
"""Returns a randomized pulse energy [J]"""
return np.random.random()*1e-3
def get_injector_x(self):
"""Returns a randomized injector position in x [m]"""
return np.random.random()*1e-6
def get_injector_y(self):
"""Returns a randomized injector position in y [m]"""
return np.random.random()*1e-6
def get_injector_z(self):
"""Returns a randomized injector position in z [m]"""
return np.random.random()*1e-6 | PypiClean |
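# A minimal usage sketch (added for illustration, not part of the original
# module): drive the simulator the way a Hummingbird backend would, using only
# the public methods defined above.
if __name__ == "__main__":
    sim = Simulation(hitrate=0.5, sigma=2)
    for _ in range(5):
        sim.next_event()
        trace = sim.get_tof_trace()
        print("pulse energy [J]:", sim.get_pulse_energy(),
              "| TOF trace min:", trace.min())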
/ESautomation-0.0.1-py3-none-any.whl/es-automation/es_setup_efk.py | from main_set_conns import es_user_mixin, es_role_mixin
class es_get_input_data_mixin(es_user_mixin):
"""
Get efk team's configuration and variables.
"""
def get_efkteam_file(self, path: str)->dict:
"""
        Get EFK team's config file and validate it.
        Params:
            - path: (str) path to the EFK team's JSON config file
        Return:
            - maintain_dict: (dict) EFK team's configuration
"""
maintain_dict = self.read_config(path)
self.validate_config(maintain_dict)
return maintain_dict
def validate_config(self, maintain_dict: dict):
"""
Validate EFK team's config's format
Params:
- maintain_dict: (dict) input json file that needs to be validated.
"""
if maintain_dict.get('platform') != "EFK":
self.logger.error("Check maintain_dict, it might be wrong path or wrong user_tag ")
self.logger.info(f"maintain_dict: {maintain_dict}")
raise ValueError("plz check ur mapping of user_tag and config files")
class es_delete_role_mixin(es_role_mixin):
"""
ES delete role command class.
"""
def delrole_run(self):
"""
ES delete role main run
"""
del_roles = self.maintain_dict['del_role']
if len(del_roles) > 0:
for rolename in del_roles:
try:
self.delete_role(rolename)
except Exception:
self.logger.error("The role was not existed", exc_info=True)
else:
self.logger.info("There is no role to ba deleted.")
def delete_role(self, rolename):
"""
Delete role
"""
self.es.security.delete_role(rolename)
self.logger.info(f"Role {rolename} was deleted.")
class es_delete_user_mixin(es_user_mixin):
"""
ES delete user command class.
"""
def deluser_run(self, deltag):
"""
        Delete users. Two modes are supported: delete all ESB users
        (initialisation) or delete only specific users on the EFK team's behalf.
        Params:
            - deltag: 'all' deletes every ESB user; 'certain' deletes only the
              users listed in the EFK team's config
"""
self.validate_inputparams(deltag)
if deltag == "all":
self.delete_all_ESB_users()
elif deltag == "certain":
self.delete_certain_ESB_users()
def validate_inputparams(self, deltag):
"""
Validate input params
params:
- deltag: tag that decides delete all users or delete certain user
"""
if deltag not in ["all", "certain"]:
self.logger.error("InputError: deltag is invalid")
raise ValueError("InputError: deltag is invalid")
def delete_all_ESB_users(self):
"""
Delete all ESB users
"""
delete_namelst = self.get_all_delete_namelst()
if len(delete_namelst) == 0:
self.logger.info("No one to be deleted")
elif len(delete_namelst) > 0:
self.logger.info(f"Someone was deleted, deleted list: "+''.join(x for x in delete_namelst))
for item in delete_namelst:
self.es.security.delete_user(item)
def delete_certain_ESB_users(self):
"""
Delete specific ESB users
"""
efk_del_namelist = self.maintain_dict['del_user']
for item in efk_del_namelist:
try:
self.logger.info(f"User {item} was deleted.")
self.es.security.delete_user(item)
except Exception as e:
self.logger.error("User is not found in ES Users.", exc_info=True)
raise e
def get_all_delete_namelst(self):
"""
Get the namelist that needs to delete
"""
delete_namelst = []
for currentuser in self.search_users():
if "ESB" in currentuser or "esb" in currentuser:
delete_namelst.append(currentuser)
self.logger.info(f"Here is list that is going to be deleted, {delete_namelst}")
return delete_namelst
class main_mixin(es_get_input_data_mixin, es_delete_user_mixin, es_delete_role_mixin):
def read_config_run(self)->dict:
"""
Read efk_team.json
"""
return self.get_efkteam_file("efk_team.json")
def main_run(self, maintain_dict, deltag):
"""
EFK team's main run.
        Only delete operations are supported here; system accounts must be set up manually.
"""
self.maintain_dict = maintain_dict
if deltag is not None:
self.deluser_run(deltag)
else:
self.logger.info("We don't do any deleting things.")
return True
self.delrole_run()
return True | PypiClean |
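# A minimal usage sketch (assumed entry point, not part of the original
# module): main_mixin inherits its Elasticsearch client and logger from the
# main_set_conns mixins, so this only illustrates the intended call order.
if __name__ == "__main__":
    runner = main_mixin()
    maintain_dict = runner.read_config_run()  # reads efk_team.json
    runner.main_run(maintain_dict, deltag="certain")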
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/diag_scripts/climate_metrics/tcr.py | import logging
import os
from copy import deepcopy
from pprint import pformat
import cf_units
import iris
import iris.coord_categorisation
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import yaml
from scipy import stats
from esmvaltool.diag_scripts.shared import (
ProvenanceLogger,
get_diagnostic_filename,
get_plot_filename,
group_metadata,
io,
run_diagnostic,
select_metadata,
sorted_metadata,
variables_available,
)
logger = logging.getLogger(os.path.basename(__file__))
START_YEAR_IDX = 60
END_YEAR_IDX = 80
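# Under the 1pctCO2 experiment, CO2 doubles after roughly 70 years
# (1.01**70 ≈ 2.0), so TCR is computed below as the 20-year mean temperature
# anomaly centred on the time of doubling, i.e. the average over years 60-80.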
def _get_anomaly_cube(onepct_cube, pi_cube):
"""Get anomaly cube."""
iris.coord_categorisation.add_year(onepct_cube, 'time')
onepct_cube = onepct_cube.aggregated_by('year', iris.analysis.MEAN)
iris.coord_categorisation.add_year(pi_cube, 'time')
pi_cube = pi_cube.aggregated_by('year', iris.analysis.MEAN)
# Check cube
if onepct_cube.ndim != 1:
raise ValueError(
f"This diagnostics needs 1D cubes, got {onepct_cube.ndim:d}D cube "
f"for '1pctCO2' experiment")
if pi_cube.ndim != 1:
raise ValueError(
f"This diagnostics needs 1D cubes, got {pi_cube.ndim:d}D cube for "
f"'piControl' experiment")
if onepct_cube.shape != pi_cube.shape:
raise ValueError(
f"Cube shapes of '1pctCO2' and 'piControl' are not identical, got "
f"{onepct_cube.shape} and {pi_cube.shape}")
if onepct_cube.shape[0] < END_YEAR_IDX:
raise ValueError(
f"Cubes need at least {END_YEAR_IDX:d} points for TCR "
f"calculation, got only {onepct_cube.shape[0]:d}")
# Calculate anomaly
reg = stats.linregress(pi_cube.coord('year').points, pi_cube.data)
onepct_cube.data -= (reg.slope * pi_cube.coord('year').points +
reg.intercept)
# Adapt metadata
onepct_cube.standard_name = None
onepct_cube.var_name += '_anomaly'
onepct_cube.long_name += ' (Anomaly)'
onepct_cube.attributes['anomaly'] = ('relative to linear fit of piControl '
'run')
onepct_cube.convert_units('K')
return onepct_cube
def _get_anomaly_cubes(cfg):
"""Get all anomaly cubes."""
logger.info("Calculating anomalies")
cubes = {}
ancestors = {}
input_data = cfg['input_data'].values()
input_data = sorted_metadata(input_data, ['short_name', 'exp', 'dataset'])
onepct_data = select_metadata(input_data, short_name='tas', exp='1pctCO2')
# Process data
for dataset in onepct_data:
dataset_name = dataset['dataset']
pi_data = select_metadata(input_data,
short_name='tas',
exp='piControl',
dataset=dataset_name)
if not pi_data:
raise ValueError("No 'piControl' data available for dataset "
"'dataset_name'")
onepct_cube = iris.load_cube(dataset['filename'])
pi_cube = iris.load_cube(pi_data[0]['filename'])
anomaly_cube = _get_anomaly_cube(onepct_cube, pi_cube)
cubes[dataset_name] = anomaly_cube
ancestors[dataset_name] = [dataset['filename'], pi_data[0]['filename']]
# Calculate multi-model mean if desired
if cfg.get('calculate_mmm', True):
(mmm_cube, mmm_ancestors) = _get_mmm_anomaly(cubes, ancestors, cfg)
cubes['MultiModelMean'] = mmm_cube
ancestors['MultiModelMean'] = mmm_ancestors
return (cubes, ancestors)
def _get_mmm_anomaly(cubes, ancestors, cfg):
"""Get multi-model mean anomaly."""
logger.info("Calculating multi-model mean anomaly")
mmm_ancestors = [f for sublist in ancestors.values() for f in sublist]
project = list(cfg['input_data'].values())[0]['project']
datasets = []
mmm_anomaly = []
for (dataset_name, cube) in cubes.items():
datasets.append(dataset_name)
mmm_anomaly.append(cube.data)
mmm_anomaly = np.ma.array(mmm_anomaly)
dataset_0 = list(cubes.keys())[0]
mmm_cube = cubes[dataset_0].copy(data=np.ma.mean(mmm_anomaly, axis=0))
mmm_cube.attributes = {
'ancestors': mmm_ancestors,
'dataset': 'MultiModelMean',
'datasets': '|'.join(datasets),
'project': project,
'short_name': mmm_cube.var_name,
}
time_coord = iris.coords.DimCoord(
np.arange(mmm_cube.coord('time').shape[0]),
var_name='time',
standard_name='time',
long_name='time',
units='years',
)
mmm_cube.remove_coord('time')
mmm_cube.add_dim_coord(time_coord, 0)
return (mmm_cube, mmm_ancestors)
def _plot(cfg, cube, dataset_name, tcr):
"""Create scatterplot of temperature anomaly vs. time."""
if not cfg.get('plot', True):
return (None, None, None)
logger.debug("Plotting temperature anomaly vs. time for '%s'",
dataset_name)
(_, axes) = plt.subplots()
# Plot data
x_data = np.arange(cube.shape[0])
y_data = cube.data
axes.scatter(x_data, y_data, color='b', marker='o')
# Plot lines
line_kwargs = {'color': 'k', 'linewidth': 1.0, 'linestyle': '--'}
axes.axhline(tcr, **line_kwargs)
axes.axvline(START_YEAR_IDX, **line_kwargs)
axes.axvline(END_YEAR_IDX, **line_kwargs)
# Appearance
units_str = (cube.units.symbol
if cube.units.origin is None else cube.units.origin)
axes.set_title(dataset_name)
axes.set_xlabel('Years after experiment start')
axes.set_ylabel(f'Temperature anomaly / {units_str}')
    axes.set_xlim([x_data[0] - 1, x_data[-1] + 1])
axes.set_ylim([-1.0, 7.0])
axes.text(0.0, tcr + 0.1, 'TCR = {:.1f} {}'.format(tcr, units_str))
# Save cube
netcdf_path = get_diagnostic_filename(dataset_name, cfg)
io.iris_save(cube, netcdf_path)
# Save plot
plot_path = get_plot_filename(dataset_name, cfg)
plt.savefig(plot_path, **cfg['savefig_kwargs'])
logger.info("Wrote %s", plot_path)
plt.close()
# Provenance
provenance_record = get_provenance_record(
f"Time series of the global mean surface air temperature anomaly "
f"(relative to the linear fit of the pre-industrial control run) of "
f"{dataset_name} for the 1% CO2 increase per year experiment. The "
f"horizontal dashed line indicates the transient climate response "
f"(TCR) defined as the 20 year average temperature anomaly centered "
f"at the time of CO2 doubling (vertical dashed lines).")
provenance_record.update({
'plot_types': ['times'],
})
return (netcdf_path, plot_path, provenance_record)
def calculate_tcr(cfg):
"""Calculate transient climate response (TCR)."""
tcr = {}
# Get anomaly cubes
(anomaly_cubes, ancestors) = _get_anomaly_cubes(cfg)
# Iterate over cubes and calculate TCR
for (dataset_name, anomaly_cube) in anomaly_cubes.items():
tas_2x = anomaly_cube[START_YEAR_IDX:END_YEAR_IDX].collapsed(
'time', iris.analysis.MEAN).data
new_tcr = tas_2x
tcr[dataset_name] = new_tcr
logger.info("TCR (%s) = %.2f %s", dataset_name, new_tcr,
anomaly_cube.units)
# Plot
(path, plot_path, provenance_record) = _plot(cfg, anomaly_cube,
dataset_name, new_tcr)
if path is not None:
provenance_record['ancestors'] = ancestors[dataset_name]
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(path, provenance_record)
provenance_logger.log(plot_path, provenance_record)
return tcr
def check_input_data(cfg):
"""Check input data."""
if not variables_available(cfg, ['tas']):
raise ValueError(
"This diagnostic needs variable 'tas' if 'read_external_file' is "
"not given")
input_data = cfg['input_data'].values()
project_group = group_metadata(input_data, 'project')
projects = list(project_group.keys())
if len(projects) > 1:
raise ValueError(
f"This diagnostic supports only unique 'project' attributes, got "
f"{projects}")
exp_group = group_metadata(input_data, 'exp')
exps = set(exp_group.keys())
if exps != {'piControl', '1pctCO2'}:
raise ValueError(
f"This diagnostic needs '1pctCO2' and 'piControl' experiment, got "
f"{exps}")
def get_provenance_record(caption):
"""Create a provenance record describing the diagnostic data and plot."""
record = {
'caption': caption,
'statistics': ['mean', 'diff'],
'domains': ['global'],
'authors': ['schlund_manuel'],
'references': ['gregory08jgr'],
'realms': ['atmos'],
'themes': ['phys'],
}
return record
def read_external_file(cfg):
"""Read external file to get TCR."""
filepath = os.path.expanduser(os.path.expandvars(
cfg['read_external_file']))
if not os.path.isabs(filepath):
filepath = os.path.join(os.path.dirname(__file__), filepath)
if not os.path.isfile(filepath):
raise FileNotFoundError(
f"Desired external file '{filepath}' does not exist")
with open(filepath, 'r') as infile:
external_data = yaml.safe_load(infile)
tcr = external_data.get('tcr', {})
logger.info("Reading external file '%s'", filepath)
logger.info("Found TCR (K):")
logger.info("%s", pformat(tcr))
return (tcr, filepath)
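# A minimal sketch (example values, not from the source) of the layout the
# external file read above is expected to have: a YAML mapping with a
# top-level 'tcr' key of dataset name -> TCR in K, e.g.
#
#     tcr:
#       SOME-MODEL: 2.0
#       MultiModelMean: 1.8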
def set_default_cfg(cfg):
"""Set default values for cfg."""
cfg = deepcopy(cfg)
cfg.setdefault('savefig_kwargs', {
'dpi': 300,
'orientation': 'landscape',
'bbox_inches': 'tight',
})
return cfg
def write_data(cfg, tcr, external_file=None):
"""Write netcdf files."""
var_attr = {
'short_name': 'tcr',
'long_name': 'Transient Climate Response (TCR)',
'units': cf_units.Unit('K'),
}
path = get_diagnostic_filename(var_attr['short_name'], cfg)
project = list(cfg['input_data'].values())[0]['project']
io.save_scalar_data(tcr, path, var_attr, attributes={'project': project})
caption = "{long_name} for multiple climate models.".format(**var_attr)
provenance_record = get_provenance_record(caption)
ancestor_files = []
for dataset_name in tcr.keys():
datasets = select_metadata(cfg['input_data'].values(),
dataset=dataset_name)
ancestor_files.extend(sorted([d['filename'] for d in datasets]))
if external_file is not None:
ancestor_files.append(external_file)
provenance_record['ancestors'] = ancestor_files
with ProvenanceLogger(cfg) as provenance_logger:
provenance_logger.log(path, provenance_record)
def main(cfg):
"""Run the diagnostic."""
cfg = set_default_cfg(cfg)
sns.set_theme(**cfg.get('seaborn_settings', {}))
# Read external file if desired
if cfg.get('read_external_file'):
(tcr, external_file) = read_external_file(cfg)
else:
check_input_data(cfg)
tcr = {}
external_file = None
# Calculate TCR directly
new_tcr = calculate_tcr(cfg)
for dataset_name in new_tcr:
if dataset_name in tcr:
logger.warning(
"Overwriting externally given TCR from file '%s' for '%s'",
external_file, dataset_name)
tcr.update(new_tcr)
# Write TCR
write_data(cfg, tcr)
if __name__ == '__main__':
with run_diagnostic() as config:
main(config) | PypiClean |
/InvokeAI-3.1.0-py3-none-any.whl/invokeai/app/services/image_file_storage.py | import json
from abc import ABC, abstractmethod
from pathlib import Path
from queue import Queue
from typing import Dict, Optional, Union
from PIL import Image, PngImagePlugin
from PIL.Image import Image as PILImageType
from send2trash import send2trash
from invokeai.app.util.thumbnails import get_thumbnail_name, make_thumbnail
# TODO: Should these exceptions subclass existing python exceptions?
class ImageFileNotFoundException(Exception):
"""Raised when an image file is not found in storage."""
def __init__(self, message="Image file not found"):
super().__init__(message)
class ImageFileSaveException(Exception):
"""Raised when an image cannot be saved."""
def __init__(self, message="Image file not saved"):
super().__init__(message)
class ImageFileDeleteException(Exception):
"""Raised when an image cannot be deleted."""
def __init__(self, message="Image file not deleted"):
super().__init__(message)
class ImageFileStorageBase(ABC):
"""Low-level service responsible for storing and retrieving image files."""
@abstractmethod
def get(self, image_name: str) -> PILImageType:
"""Retrieves an image as PIL Image."""
pass
@abstractmethod
def get_path(self, image_name: str, thumbnail: bool = False) -> str:
"""Gets the internal path to an image or thumbnail."""
pass
# TODO: We need to validate paths before starlette makes the FileResponse, else we get a
# 500 internal server error. I don't like having this method on the service.
@abstractmethod
def validate_path(self, path: str) -> bool:
"""Validates the path given for an image or thumbnail."""
pass
@abstractmethod
def save(
self,
image: PILImageType,
image_name: str,
metadata: Optional[dict] = None,
workflow: Optional[str] = None,
thumbnail_size: int = 256,
) -> None:
"""Saves an image and a 256x256 WEBP thumbnail. Returns a tuple of the image name, thumbnail name, and created timestamp."""
pass
@abstractmethod
def delete(self, image_name: str) -> None:
"""Deletes an image and its thumbnail (if one exists)."""
pass
class DiskImageFileStorage(ImageFileStorageBase):
"""Stores images on disk"""
__output_folder: Path
__cache_ids: Queue # TODO: this is an incredibly naive cache
__cache: Dict[Path, PILImageType]
__max_cache_size: int
def __init__(self, output_folder: Union[str, Path]):
self.__cache = dict()
self.__cache_ids = Queue()
self.__max_cache_size = 10 # TODO: get this from config
self.__output_folder: Path = output_folder if isinstance(output_folder, Path) else Path(output_folder)
self.__thumbnails_folder = self.__output_folder / "thumbnails"
# Validate required output folders at launch
self.__validate_storage_folders()
def get(self, image_name: str) -> PILImageType:
try:
image_path = self.get_path(image_name)
cache_item = self.__get_cache(image_path)
if cache_item:
return cache_item
image = Image.open(image_path)
self.__set_cache(image_path, image)
return image
except FileNotFoundError as e:
raise ImageFileNotFoundException from e
def save(
self,
image: PILImageType,
image_name: str,
metadata: Optional[dict] = None,
workflow: Optional[str] = None,
thumbnail_size: int = 256,
) -> None:
try:
self.__validate_storage_folders()
image_path = self.get_path(image_name)
pnginfo = PngImagePlugin.PngInfo()
if metadata is not None or workflow is not None:
if metadata is not None:
pnginfo.add_text("invokeai_metadata", json.dumps(metadata))
if workflow is not None:
pnginfo.add_text("invokeai_workflow", workflow)
else:
# For uploaded images, we want to retain metadata. PIL strips it on save; manually add it back
# TODO: retain non-invokeai metadata on save...
original_metadata = image.info.get("invokeai_metadata", None)
if original_metadata is not None:
pnginfo.add_text("invokeai_metadata", original_metadata)
original_workflow = image.info.get("invokeai_workflow", None)
if original_workflow is not None:
pnginfo.add_text("invokeai_workflow", original_workflow)
image.save(image_path, "PNG", pnginfo=pnginfo)
thumbnail_name = get_thumbnail_name(image_name)
thumbnail_path = self.get_path(thumbnail_name, thumbnail=True)
thumbnail_image = make_thumbnail(image, thumbnail_size)
thumbnail_image.save(thumbnail_path)
self.__set_cache(image_path, image)
self.__set_cache(thumbnail_path, thumbnail_image)
except Exception as e:
raise ImageFileSaveException from e
def delete(self, image_name: str) -> None:
try:
image_path = self.get_path(image_name)
if image_path.exists():
send2trash(image_path)
if image_path in self.__cache:
del self.__cache[image_path]
thumbnail_name = get_thumbnail_name(image_name)
thumbnail_path = self.get_path(thumbnail_name, True)
if thumbnail_path.exists():
send2trash(thumbnail_path)
if thumbnail_path in self.__cache:
del self.__cache[thumbnail_path]
except Exception as e:
raise ImageFileDeleteException from e
# TODO: make this a bit more flexible for e.g. cloud storage
def get_path(self, image_name: str, thumbnail: bool = False) -> Path:
path = self.__output_folder / image_name
if thumbnail:
thumbnail_name = get_thumbnail_name(image_name)
path = self.__thumbnails_folder / thumbnail_name
return path
def validate_path(self, path: Union[str, Path]) -> bool:
"""Validates the path given for an image or thumbnail."""
path = path if isinstance(path, Path) else Path(path)
return path.exists()
def __validate_storage_folders(self) -> None:
"""Checks if the required output folders exist and create them if they don't"""
folders: list[Path] = [self.__output_folder, self.__thumbnails_folder]
for folder in folders:
folder.mkdir(parents=True, exist_ok=True)
def __get_cache(self, image_name: Path) -> Optional[PILImageType]:
return None if image_name not in self.__cache else self.__cache[image_name]
def __set_cache(self, image_name: Path, image: PILImageType):
if image_name not in self.__cache:
self.__cache[image_name] = image
self.__cache_ids.put(image_name) # TODO: this should refresh position for LRU cache
if len(self.__cache) > self.__max_cache_size:
cache_id = self.__cache_ids.get()
if cache_id in self.__cache:
del self.__cache[cache_id] | PypiClean |
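# A minimal usage sketch (added for illustration; the folder and image name
# are made up): store a PIL image plus its thumbnail and look up their paths.
if __name__ == "__main__":
    storage = DiskImageFileStorage("outputs/images")
    demo = Image.new("RGB", (512, 512), color="gray")
    storage.save(demo, "example.png", metadata={"source": "demo"})
    print(storage.get_path("example.png"))
    print(storage.get_path("example.png", thumbnail=True))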
/Dangee-0.0.3.tar.gz/Dangee-0.0.3/dangee/util/__init__.py | import copy
import operator
from quark.Objects.bytecodeobject import BytecodeObject
from quark.Evaluator.pyeval import PyEval
MAX_SEARCH_LAYER = 3
def get_method_bytecode(method_analysis):
"""
    Return the bytecode instructions of the given method analysis object.
:param method_analysis: the method analysis in androguard
:return: a generator of all bytecode instructions
"""
try:
for _, ins in method_analysis.get_method().get_instructions_idx():
bytecode_obj = None
reg_list = []
# count the number of the registers.
length_operands = len(ins.get_operands())
if length_operands == 0:
# No register, no parameter
bytecode_obj = BytecodeObject(
ins.get_name(),
None,
None,
)
elif length_operands == 1:
# Only one register
reg_list.append(
f"v{ins.get_operands()[length_operands - 1][1]}",
)
bytecode_obj = BytecodeObject(
ins.get_name(),
reg_list,
None,
)
elif length_operands >= 2:
# the last one is parameter, the other are registers.
parameter = ins.get_operands()[length_operands - 1]
for i in range(0, length_operands - 1):
reg_list.append(
"v" + str(ins.get_operands()[i][1]),
)
if len(parameter) == 3:
# method or value
parameter = parameter[2]
else:
# Operand.OFFSET
parameter = parameter[1]
bytecode_obj = BytecodeObject(
ins.get_name(),
reg_list,
parameter,
)
yield bytecode_obj
except AttributeError as error:
# TODO Log the rule here
pass
def contains(subset_to_check, target_list):
"""
Check the sequence pattern within two list.
-----------------------------------------------------------------
subset_to_check = ["getCellLocation", "sendTextMessage"]
target_list = ["put", "getCellLocation", "query", "sendTextMessage"]
then it will return true.
-----------------------------------------------------------------
subset_to_check = ["getCellLocation", "sendTextMessage"]
target_list = ["sendTextMessage", "put", "getCellLocation", "query"]
then it will return False.
"""
    target_copy = copy.copy(target_list)
    # Delete elements that do not exist in the subset_to_check list;
    # iterate over a snapshot so removals do not skip elements.
    for item in copy.copy(target_copy):
        if item not in subset_to_check:
            target_copy.remove(item)
for i in range(len(target_copy) - len(subset_to_check) + 1):
for j in range(len(subset_to_check)):
if target_copy[i + j] != subset_to_check[j]:
break
else:
return True
return False
def get_xref_from(method_analysis):
xref_from_result = set()
for _, call, _ in method_analysis.get_xref_from():
# Call is the MethodAnalysis in the androguard
# call.class_name, call.name, call.descriptor
xref_from_result.add(call)
return xref_from_result
def get_xref_to(method_analysis):
xref_to_result = set()
for _, call, _ in method_analysis.get_xref_to():
# Call is the MethodAnalysis in the androguard
# call.class_name, call.name, call.descriptor
xref_to_result.add(call)
return xref_to_result
def find_previous_method(base_method, parent_function, wrapper, visited_methods=None):
"""
Find the method under the parent function, based on base_method before to parent_function.
This will append the method into wrapper.
:param base_method: the base function which needs to be searched.
:param parent_function: the top-level function which calls the basic function.
:param wrapper: list is used to track each function.
:param visited_methods: set with tested method.
:return: None
"""
if visited_methods is None:
visited_methods = set()
method_set = get_xref_from(base_method)
visited_methods.add(base_method)
if method_set is not None:
if parent_function in method_set:
wrapper.append(base_method)
else:
for item in method_set:
# prevent to test the tested methods.
if item in visited_methods:
continue
find_previous_method(item, parent_function, wrapper, visited_methods)
def hasMutualParentFunction(first_method_set, second_method_set, depth=1):
"""
    Find the mutual parent functions: first_method_set ∩ second_method_set.
    Each argument may be a single MethodAnalysis or a set of MethodAnalysis.
    :param first_method_set: first set that contains each MethodAnalysis.
    :param second_method_set: second set that contains each MethodAnalysis.
    :param depth: maximum number of recursive search layers.
:return: a set of first_method_set ∩ second_method_set or None.
"""
# Find the `cross reference from` function from given function
if not isinstance(first_method_set, set):
first_method_set = get_xref_from(first_method_set)
if not isinstance(second_method_set, set):
second_method_set = get_xref_from(second_method_set)
# Check both lists are not null
if first_method_set and second_method_set:
# find ∩
result = first_method_set & second_method_set
if result:
return result
else:
# Not found same mutual parent function, try to find the next layer.
depth += 1
if depth > MAX_SEARCH_LAYER:
return None
# Append first layer into next layer.
next_level_set_1 = first_method_set.copy()
next_level_set_2 = second_method_set.copy()
# Extend the xref from function into next layer.
for method in first_method_set:
if get_xref_from(method):
next_level_set_1 = get_xref_from(method) | next_level_set_1
for method in second_method_set:
if get_xref_from(method):
next_level_set_2 = get_xref_from(method) | next_level_set_2
return hasMutualParentFunction(next_level_set_1, next_level_set_2, depth)
else:
raise ValueError("Set is Null")
def hasOrder(first_method, second_method):
"""
    Check if the first method is called before the second method under a
    mutual parent function.
    :param first_method: the MethodAnalysis expected to appear first.
    :param second_method: the MethodAnalysis expected to appear second.
    :return: a set of mutual parent functions with that call order, or None.
"""
result = set()
if hasMutualParentFunction(first_method, second_method):
for mutual_parent in hasMutualParentFunction(first_method, second_method):
first_wrapper = []
second_wrapper = []
find_previous_method(first_method, mutual_parent, first_wrapper)
find_previous_method(second_method, mutual_parent, second_wrapper)
for first_call_method in first_wrapper:
for second_call_method in second_wrapper:
seq_table = []
for _, call, number in mutual_parent.get_xref_to():
if call in (first_call_method, second_call_method):
seq_table.append((call, number))
# sorting based on the value of the number
if len(seq_table) < 2:
# Not Found sequence in same_method
continue
seq_table.sort(key=operator.itemgetter(1))
# seq_table would look like: [(getLocation, 1256), (sendSms, 1566), (sendSms, 2398)]
method_list_need_check = [x[0] for x in seq_table]
sequence_pattern_method = [first_call_method, second_call_method]
if contains(sequence_pattern_method, method_list_need_check):
result.add(mutual_parent)
if result:
return result
return None
def hasHandleRegister(first_method, second_method):
"""
    Check whether the two methods operate on the same register (i.e. handle
    the same parameter).
    :param first_method: function which is called before the second method.
    :param second_method: function which is called after the first method.
    :return: a set of mutual parent functions sharing the register, or None.
"""
state = False
result = set()
if hasOrder(first_method, second_method):
for mutual_parent in hasOrder(first_method, second_method):
first_wrapper = []
second_wrapper = []
find_previous_method(first_method, mutual_parent, first_wrapper)
find_previous_method(second_method, mutual_parent, second_wrapper)
for first_call_method in first_wrapper:
for second_call_method in second_wrapper:
pyeval = PyEval()
# Check if there is an operation of the same register
for bytecode_obj in get_method_bytecode(mutual_parent):
# ['new-instance', 'v4', Lcom/google/progress/SMSHelper;]
instruction = [bytecode_obj.mnemonic]
if bytecode_obj.registers is not None:
instruction.extend(bytecode_obj.registers)
if bytecode_obj.parameter is not None:
instruction.append(bytecode_obj.parameter)
# for the case of MUTF8String
instruction = [str(x) for x in instruction]
if instruction[0] in pyeval.eval.keys():
pyeval.eval[instruction[0]](instruction)
for table in pyeval.show_table():
for val_obj in table:
for c_func in val_obj.called_by_func:
first_method_pattern = f"{first_call_method.class_name}->{first_call_method.name}{first_call_method.descriptor}"
second_method_pattern = f"{second_call_method.class_name}->{second_call_method.name}{second_call_method.descriptor}"
if (
first_method_pattern in c_func
and second_method_pattern in c_func
):
state = True
result.add(mutual_parent)
if state:
return result
return None | PypiClean |
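# A minimal usage sketch (not part of the original module): the three checks
# above form an escalating chain over two androguard MethodAnalysis objects,
# e.g. obtained from an androguard Analysis session (names are illustrative):
#
#     parents = hasMutualParentFunction(first_api, second_api)  # same caller?
#     ordered = hasOrder(first_api, second_api)                 # ...in this order?
#     shared = hasHandleRegister(first_api, second_api)         # ...sharing a register?
#
# Each stage returns the set of mutual parent methods satisfying the stronger
# condition (or None), so a rule can grade severity by how far a pair gets.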
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/mmdet/models/dense_heads/gfl_head.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, Scale
from mmcv.runner import force_fp32
from mmdet.core import (anchor_inside_flags, bbox_overlaps, build_assigner,
build_sampler, images_to_levels, multi_apply,
reduce_mean, unmap)
from mmdet.core.utils import filter_scores_and_topk
from ..builder import HEADS, build_loss
from .anchor_head import AnchorHead
class Integral(nn.Module):
"""A fixed layer for calculating integral result from distribution.
    This layer calculates the target location by :math:`sum{P(y_i) * y_i}`,
    where P(y_i) denotes the softmax vector that represents the discrete
    distribution, and y_i denotes the discrete set,
    usually {0, 1, 2, ..., reg_max}.
Args:
reg_max (int): The maximal value of the discrete set. Default: 16. You
may want to reset it according to your new dataset or related
settings.
"""
def __init__(self, reg_max=16):
super(Integral, self).__init__()
self.reg_max = reg_max
self.register_buffer('project',
torch.linspace(0, self.reg_max, self.reg_max + 1))
def forward(self, x):
"""Forward feature from the regression head to get integral result of
bounding box location.
Args:
x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
n is self.reg_max.
Returns:
x (Tensor): Integral result of box locations, i.e., distance
offsets from the box center in four directions, shape (N, 4).
"""
x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
return x
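# A minimal sketch (assumed shapes, not part of the original file) of what
# Integral computes: for each box side, the expectation sum_i P(y_i) * y_i
# over the discrete set {0, ..., reg_max}. For example:
#
#     integral = Integral(reg_max=16)
#     x = torch.zeros(1, 4 * 17)  # uniform logits over the 17 bins per side
#     integral(x)                 # -> tensor([[8., 8., 8., 8.]]), the mean of 0..16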
@HEADS.register_module()
class GFLHead(AnchorHead):
"""Generalized Focal Loss: Learning Qualified and Distributed Bounding
Boxes for Dense Object Detection.
GFL head structure is similar with ATSS, however GFL uses
1) joint representation for classification and localization quality, and
2) flexible General distribution for bounding box locations,
which are supervised by
Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively
https://arxiv.org/abs/2006.04388
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
stacked_convs (int): Number of conv layers in cls and reg tower.
Default: 4.
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='GN', num_groups=32, requires_grad=True).
        loss_qfl (dict): Config of Quality Focal Loss (QFL).
        loss_dfl (dict): Config of Distribution Focal Loss (DFL).
        bbox_coder (dict): Config of bbox coder. Defaults to
            'DistancePointBBoxCoder'.
reg_max (int): Max value of integral set :math: `{0, ..., reg_max}`
in QFL setting. Default: 16.
init_cfg (dict or list[dict], optional): Initialization config dict.
Example:
>>> self = GFLHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_quality_score, bbox_pred = self.forward(feats)
>>> assert len(cls_quality_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
bbox_coder=dict(type='DistancePointBBoxCoder'),
reg_max=16,
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='gfl_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reg_max = reg_max
super(GFLHead, self).__init__(
num_classes,
in_channels,
bbox_coder=bbox_coder,
init_cfg=init_cfg,
**kwargs)
self.sampling = False
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# SSD sampling=False so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.integral = Integral(self.reg_max)
self.loss_dfl = build_loss(loss_dfl)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
assert self.num_anchors == 1, 'anchor free version'
self.gfl_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.gfl_reg = nn.Conv2d(
self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
self.scales = nn.ModuleList(
[Scale(1.0) for _ in self.prior_generator.strides])
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification and quality (IoU)
joint scores for all scale levels, each is a 4D-tensor,
the channel number is num_classes.
bbox_preds (list[Tensor]): Box distribution logits for all
scale levels, each is a 4D-tensor, the channel number is
4*(n+1), n is max value of integral set.
"""
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
Returns:
tuple:
cls_score (Tensor): Cls and quality joint scores for a single
scale level the channel number is num_classes.
bbox_pred (Tensor): Box distribution logits for a single scale
level, the channel number is 4*(n+1), n is max value of
integral set.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.gfl_cls(cls_feat)
bbox_pred = scale(self.gfl_reg(reg_feat)).float()
return cls_score, bbox_pred
def anchor_center(self, anchors):
"""Get anchor centers from anchors.
Args:
anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.
Returns:
Tensor: Anchor centers with shape (N, 2), "xy" format.
"""
anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2
anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2
return torch.stack([anchors_cx, anchors_cy], dim=-1)
def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
bbox_targets, stride, num_total_samples):
"""Compute loss of a single scale level.
Args:
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
cls_score (Tensor): Cls and quality joint scores for each scale
level has shape (N, num_classes, H, W).
bbox_pred (Tensor): Box distribution logits for each scale
level with shape (N, 4*(n+1), H, W), n is max value of integral
set.
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors)
bbox_targets (Tensor): BBox regression targets of each anchor
                with shape (N, num_total_anchors, 4).
stride (tuple): Stride in this scale level.
num_total_samples (int): Number of positive samples that is
reduced over all GPUs.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert stride[0] == stride[1], 'h stride is not equal to w stride!'
anchors = anchors.reshape(-1, 4)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(-1, 4 * (self.reg_max + 1))
bbox_targets = bbox_targets.reshape(-1, 4)
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
score = label_weights.new_zeros(labels.shape)
if len(pos_inds) > 0:
pos_bbox_targets = bbox_targets[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_anchors = anchors[pos_inds]
pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
weight_targets = cls_score.detach().sigmoid()
weight_targets = weight_targets.max(dim=1)[0][pos_inds]
pos_bbox_pred_corners = self.integral(pos_bbox_pred)
pos_decode_bbox_pred = self.bbox_coder.decode(
pos_anchor_centers, pos_bbox_pred_corners)
pos_decode_bbox_targets = pos_bbox_targets / stride[0]
score[pos_inds] = bbox_overlaps(
pos_decode_bbox_pred.detach(),
pos_decode_bbox_targets,
is_aligned=True)
pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
target_corners = self.bbox_coder.encode(pos_anchor_centers,
pos_decode_bbox_targets,
self.reg_max).reshape(-1)
# regression loss
loss_bbox = self.loss_bbox(
pos_decode_bbox_pred,
pos_decode_bbox_targets,
weight=weight_targets,
avg_factor=1.0)
# dfl loss
loss_dfl = self.loss_dfl(
pred_corners,
target_corners,
weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
avg_factor=4.0)
else:
loss_bbox = bbox_pred.sum() * 0
loss_dfl = bbox_pred.sum() * 0
weight_targets = bbox_pred.new_tensor(0)
# cls (qfl) loss
loss_cls = self.loss_cls(
cls_score, (labels, score),
weight=label_weights,
avg_factor=num_total_samples)
return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Cls and quality scores for each scale
level has shape (N, num_classes, H, W).
bbox_preds (list[Tensor]): Box distribution logits for each scale
level with shape (N, 4*(n+1), H, W), n is max value of integral
set.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.prior_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = reduce_mean(
torch.tensor(num_total_pos, dtype=torch.float,
device=device)).item()
num_total_samples = max(num_total_samples, 1.0)
losses_cls, losses_bbox, losses_dfl,\
avg_factor = multi_apply(
self.loss_single,
anchor_list,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_targets_list,
self.prior_generator.strides,
num_total_samples=num_total_samples)
avg_factor = sum(avg_factor)
avg_factor = reduce_mean(avg_factor).clamp_(min=1).item()
losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)
def _get_bboxes_single(self,
cls_score_list,
bbox_pred_list,
score_factor_list,
mlvl_priors,
img_meta,
cfg,
rescale=False,
with_nms=True,
**kwargs):
"""Transform outputs of a single image into bbox predictions.
Args:
cls_score_list (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_priors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has shape
(num_priors * 4, H, W).
score_factor_list (list[Tensor]): Score factor from all scale
levels of a single image. GFL head does not need this value.
mlvl_priors (list[Tensor]): Each element in the list is
the priors of a single level in feature pyramid, has shape
(num_priors, 4).
img_meta (dict): Image meta info.
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
tuple[Tensor]: Results of detected bboxes and labels. If with_nms
is False and mlvl_score_factor is None, return mlvl_bboxes and
mlvl_scores, else return mlvl_bboxes, mlvl_scores and
mlvl_score_factor. Usually with_nms is False is used for aug
test. If with_nms is True, then return the following format
- det_bboxes (Tensor): Predicted bboxes with shape \
[num_bboxes, 5], where the first 4 columns are bounding \
box positions (tl_x, tl_y, br_x, br_y) and the 5-th \
column are scores between 0 and 1.
- det_labels (Tensor): Predicted labels of the corresponding \
box with shape [num_bboxes].
"""
cfg = self.test_cfg if cfg is None else cfg
img_shape = img_meta['img_shape']
nms_pre = cfg.get('nms_pre', -1)
mlvl_bboxes = []
mlvl_scores = []
mlvl_labels = []
for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate(
zip(cls_score_list, bbox_pred_list,
self.prior_generator.strides, mlvl_priors)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
assert stride[0] == stride[1]
bbox_pred = bbox_pred.permute(1, 2, 0)
bbox_pred = self.integral(bbox_pred) * stride[0]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
# After https://github.com/open-mmlab/mmdetection/pull/6268/,
# this operation keeps fewer bboxes under the same `nms_pre`.
# There is no difference in performance for most models. If you
# find a slight drop in performance, you can set a larger
# `nms_pre` than before.
results = filter_scores_and_topk(
scores, cfg.score_thr, nms_pre,
dict(bbox_pred=bbox_pred, priors=priors))
scores, labels, _, filtered_results = results
bbox_pred = filtered_results['bbox_pred']
priors = filtered_results['priors']
bboxes = self.bbox_coder.decode(
self.anchor_center(priors), bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_labels.append(labels)
return self._bbox_post_process(
mlvl_scores,
mlvl_labels,
mlvl_bboxes,
img_meta['scale_factor'],
cfg,
rescale=rescale,
with_nms=with_nms)
def get_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True):
"""Get targets for GFL head.
This method is almost the same as `AnchorHead.get_targets()`. Besides
returning the targets as the parent method does, it also returns the
anchors as the first element of the returned tuple.
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
num_level_anchors_list = [num_level_anchors] * num_imgs
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
self._get_target_single,
anchor_list,
valid_flag_list,
num_level_anchors_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
anchors_list = images_to_levels(all_anchors, num_level_anchors)
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
return (anchors_list, labels_list, label_weights_list,
bbox_targets_list, bbox_weights_list, num_total_pos,
num_total_neg)
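
    # --- Editor's sketch (not part of the original file) ---------------------
    # `images_to_levels` above regroups the per-image target tensors returned
    # by multi_apply into per-level tensors. Conceptually:
    #
    #     stacked = torch.stack(per_image_targets, 0)   # (num_imgs, N, ...)
    #     start, per_level = 0, []
    #     for n in num_level_anchors:
    #         per_level.append(stacked[:, start:start + n])
    #         start += n
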
def _get_target_single(self,
flat_anchors,
valid_flags,
num_level_anchors,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression, classification targets for anchors in a single
image.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
            num_level_anchors (Tensor): Number of anchors of each scale level.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
img_meta (dict): Meta info of the image.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: N is the number of total anchors in the image.
anchors (Tensor): All anchors in the image with shape (N, 4).
labels (Tensor): Labels of all anchors in the image with shape
(N,).
                label_weights (Tensor): Label weights of all anchors in the
image with shape (N,).
bbox_targets (Tensor): BBox targets of all anchors in the
image with shape (N, 4).
bbox_weights (Tensor): BBox weights of all anchors in the
image with shape (N, 4).
                pos_inds (Tensor): Indices of positive anchors with shape
(num_pos,).
                neg_inds (Tensor): Indices of negative anchors with shape
(num_neg,).
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg.allowed_border)
if not inside_flags.any():
return (None, ) * 7
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
num_level_anchors_inside = self.get_num_level_anchors_inside(
num_level_anchors, inside_flags)
assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
gt_bboxes, gt_bboxes_ignore,
gt_labels)
sampling_result = self.sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
# Only rpn gives gt_labels as None
# Foreground is the first class
labels[pos_inds] = 0
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
anchors = unmap(anchors, num_total_anchors, inside_flags)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (anchors, labels, label_weights, bbox_targets, bbox_weights,
pos_inds, neg_inds)
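
    # --- Editor's sketch (not part of the original file) ---------------------
    # `unmap` above scatters the results computed for the inside anchors back
    # onto the full anchor set, padding the rest with `fill`. Roughly:
    #
    #     ret = data.new_full((num_total_anchors, ) + data.shape[1:], fill)
    #     ret[inside_flags] = data
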
def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
split_inside_flags = torch.split(inside_flags, num_level_anchors)
num_level_anchors_inside = [
int(flags.sum()) for flags in split_inside_flags
]
        return num_level_anchors_inside
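

# --- Usage sketch (editor's addition, not part of the original file) ---------
# Illustrates what get_num_level_anchors_inside computes: per-level anchor
# counts after masking out anchors that fall outside the image border. The
# numbers below are invented.
if __name__ == '__main__':
    import torch

    num_level_anchors = [4, 2]
    inside_flags = torch.tensor([True, True, False, True, False, True])
    split_flags = torch.split(inside_flags, num_level_anchors)
    print([int(flags.sum()) for flags in split_flags])  # -> [3, 1]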
/Djblets-3.3.tar.gz/Djblets-3.3/djblets/htdocs/static/djblets/js/jquery.gravy.backboneUtils.js

/*
 * Binds the presence of a CSS class on an element to a model's property.
 *
 * The class will be added to the element whenever the boolean property on
 * the model is true, and removed when it is false.
 *
 * If options.inverse is true, the relationship is inverted: the class is
 * added when the property is false.
 */
$.fn.bindClass = function(model, modelPropName, className, options) {
function updateClassName() {
var value = model.get(modelPropName);
if (options && options.inverse) {
value = !value;
}
if (value) {
this.addClass(className);
} else {
this.removeClass(className);
}
}
model.on('change:' + modelPropName, updateClassName, this);
updateClassName.call(this);
return this;
};
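
/*
 * Usage sketch (editor's addition; the model and element are hypothetical):
 *
 *     var model = new Backbone.Model({ loading: false });
 *     $('#spinner').bindClass(model, 'loading', 'visible');
 *     model.set('loading', true);   // #spinner now has the "visible" class
 */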
/*
* Binds properties on an element and a model together.
*
* This can be used to ensure that a model and an element have properties in
* sync. For example, a checkbox's "checked" property, or a "disabled" property,
* backed by state in a model.
*
* By default, the element's property will be set to the model's current
* property, and future changes to either will update the other.
*
* There are special property names that bindProperty understands, which will
* update the state of an element but not through a $.prop call. These are
* 'text' (using $el.text()) and 'html' ($el.html()).
*
* If options.modelToElement is false, then the element will not be updated
* when the model's state changes, or updated to the initial model's state.
*
* If options.elementToModel is false, then the model will not be updated
* when the element's state changes.
*
 * If options.inverse is true, then the value will be inverted between both
* properties. This is useful when tying a "disabled" element property to
* an "enabled" or "can*" model property. It only makes sense for boolean
* properties.
*
* If options.radioValue is set, then the assumption is that a boolean
* property on the element (such as 'checked') maps to a non-boolean value
* in a model, of which many inputs will be bound. In this case, the element's
* property will be set to a boolean based on whether the model property's
 * value matches options.radioValue. Likewise, the model property's value will
* be set to options.radioValue if the element's property value is true.
*/
$.fn.bindProperty = function(elPropName, model, modelPropName, options) {
function updateElementProp() {
var value = model.get(modelPropName);
if (options.radioValue !== undefined) {
value = (options.radioValue === value);
}
if (options.inverse) {
value = !value;
}
if (elPropName === 'text' || elPropName === 'html') {
if ($this[elPropName]() !== value) {
$this[elPropName]((value === undefined ||
value === null)
? '' : value);
}
} else if ($this.prop(elPropName) !== value) {
$this.prop(elPropName, value);
}
}
var $this = this;
options = _.defaults(options || {}, {
modelToElement: true,
elementToModel: true,
inverse: false,
radioValue: undefined
});
if (options.modelToElement) {
model.on('change:' + modelPropName, updateElementProp);
updateElementProp();
}
if (options.elementToModel) {
$this.on('change', function() {
var value = (elPropName === 'text' || elPropName === 'html')
? $this[elPropName]()
: $this.prop(elPropName);
if (options.inverse) {
value = !value;
}
if (options.radioValue !== undefined) {
if (value) {
value = options.radioValue;
} else {
return;
}
}
model.set(modelPropName, value);
});
}
return $this;
};
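
/*
 * Usage sketch (editor's addition; the model, elements, and property values
 * are hypothetical):
 *
 *     var model = new Backbone.Model({ enabled: true, mode: 'simple' });
 *
 *     // Two-way binding between a checkbox and a boolean property.
 *     $('#enable-box').bindProperty('checked', model, 'enabled');
 *
 *     // Disable a button whenever "enabled" is false. One-way only, since
 *     // a disabled button never fires change events itself.
 *     $('#save-btn').bindProperty('disabled', model, 'enabled',
 *                                 { inverse: true, elementToModel: false });
 *
 *     // Radio buttons mapped onto a string-valued property.
 *     $('#mode-simple').bindProperty('checked', model, 'mode',
 *                                    { radioValue: 'simple' });
 */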
/*
* Binds the visibility of an element to a model's property.
*
* The element's initial visibility will be set to the boolean property
* value on the model. When the property on the model changes, the
* visibility will update to reflect that.
*
 * If options.inverse is true, then the value will be inverted between both
 * properties. This is useful for hiding an element when a model property is
 * "true", or showing an element when the value is "false".
*/
$.fn.bindVisibility = function(model, modelPropName, options) {
function updateVisibility() {
var value = model.get(modelPropName);
if (options && options.inverse) {
value = !value;
}
this.setVisible(value);
}
model.on('change:' + modelPropName, updateVisibility, this);
updateVisibility.call(this);
return this;
}; | PypiClean |
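
/*
 * Usage sketch (editor's addition; assumes $.fn.setVisible from this
 * library, plus a hypothetical model and elements):
 *
 *     var model = new Backbone.Model({ canEdit: false });
 *     $('.edit-controls').bindVisibility(model, 'canEdit');
 *     $('.read-only-note').bindVisibility(model, 'canEdit',
 *                                         { inverse: true });
 */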